Example #1
    def __init__(self,
                 nf=64,
                 nframes=5,
                 groups=8,
                 front_RBs=5,
                 back_RBs=10,
                 center=None,
                 predeblur=False,
                 HR_in=False,
                 w_TSA=False):
        super(EDVR_pyramid, self).__init__()
        self.nf = nf
        self.center = nframes // 2 if center is None else center
        self.is_predeblur = True if predeblur else False
        self.HR_in = True if HR_in else False
        self.w_TSA = w_TSA
        ResidualBlock_noBN_f = functools.partial(arch_util.ResidualBlock_noBN,
                                                 nf=nf)

        #### extract features (for each frame)
        if self.is_predeblur:
            self.pre_deblur = Predeblur_ResNet_Pyramid(nf=nf, HR_in=self.HR_in)
            self.conv_1x1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
        else:
            if self.HR_in:
                self.conv_first_1 = nn.Conv2d(3, nf, 3, 1, 1, bias=True)
                self.conv_first_2 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
                self.conv_first_3 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
            else:
                self.conv_first = nn.Conv2d(3, nf, 3, 1, 1, bias=True)
        self.feature_extraction = arch_util.make_layer(ResidualBlock_noBN_f,
                                                       front_RBs)
        self.fea_L2_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L2_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.fea_L3_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L3_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)

        self.pcd_align = PCD_Align(nf=nf, groups=groups)

        ## x2 SR
        self.x2_residual = arch_util.make_layer(ResidualBlock_noBN_f,
                                                front_RBs)
        self.x2_output = nn.Conv2d(64, 3, 3, 1, 1, bias=True)

        if self.w_TSA:
            self.tsa_fusion = TSA_Fusion(nf=nf,
                                         nframes=nframes,
                                         center=self.center)
        else:
            self.tsa_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)

        #### reconstruction
        self.recon_trunk1 = arch_util.make_layer(ResidualBlock_noBN_f,
                                                 back_RBs)
        self.recon_trunk2 = arch_util.make_layer(ResidualBlock_noBN_f,
                                                 back_RBs)

        #### upsampling
        self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(nf, 64 * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.HRconv1 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
        self.HRconv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1, bias=True)

        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
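
For reference, a minimal standalone sketch of how an upsampling tail like the one above is typically chained in a forward pass (an illustration only, not the module's actual forward; shapes assume nf=64 and a 32x32 feature map):

import torch
import torch.nn as nn

nf = 64
upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
upconv2 = nn.Conv2d(nf, 64 * 4, 3, 1, 1, bias=True)
pixel_shuffle = nn.PixelShuffle(2)
lrelu = nn.LeakyReLU(negative_slope=0.1)

fea = torch.randn(1, nf, 32, 32)              # fused feature map (N, nf, H, W)
out = lrelu(pixel_shuffle(upconv1(fea)))      # (N, nf, 2H, 2W)
out = lrelu(pixel_shuffle(upconv2(out)))      # (N, 64, 4H, 4W)
print(out.shape)                              # torch.Size([1, 64, 128, 128])
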
Example #2
	def __init__(self, scale, act=False):
		super(sub_pixel, self).__init__()
		modules = []
		modules.append(nn.PixelShuffle(scale))
		self.body = nn.Sequential(*modules)
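
For reference, the shape contract nn.PixelShuffle enforces, which every wrapper in these examples relies on: an upscale factor r maps (N, C*r*r, H, W) to (N, C, H*r, W*r). A minimal standalone check (not part of the wrapper above; note the act argument of the wrapper is accepted but unused in the code shown):

import torch
import torch.nn as nn

x = torch.randn(2, 9, 16, 16)       # C*r*r = 9, i.e. C = 1, r = 3
y = nn.PixelShuffle(3)(x)
print(y.shape)                       # torch.Size([2, 1, 48, 48])
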
Example #3
    def __init__(self, ngf=64):
        super(NetG, self).__init__()

        self.toH = nn.Sequential(
            nn.Conv2d(4, ngf, kernel_size=7, stride=1, padding=3),
            nn.LeakyReLU(0.2, True))

        self.to0 = nn.Sequential(
            nn.Conv2d(1, ngf // 2, kernel_size=3, stride=1, padding=1),  # 512
            nn.LeakyReLU(0.2, True))
        self.to1 = nn.Sequential(
            nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2,
                      padding=1),  # 256
            nn.LeakyReLU(0.2, True))
        self.to2 = nn.Sequential(
            nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1),  # 128
            nn.LeakyReLU(0.2, True))
        self.to3 = nn.Sequential(
            nn.Conv2d(ngf * 3, ngf * 4, kernel_size=4, stride=2,
                      padding=1),  # 64
            nn.LeakyReLU(0.2, True))
        self.to4 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2,
                      padding=1),  # 32
            nn.LeakyReLU(0.2, True))

        tunnel4 = nn.Sequential(*[
            ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1)
            for _ in range(20)
        ])

        self.tunnel4 = nn.Sequential(
            nn.Conv2d(ngf * 8 + 512,
                      ngf * 8,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.LeakyReLU(0.2, True), tunnel4,
            nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3,
                      stride=1, padding=1), nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, True))  # 64

        depth = 2
        tunnel = [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=4)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2),
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)
        ]
        tunnel3 = nn.Sequential(*tunnel)

        self.tunnel3 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel3,
            nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3,
                      stride=1, padding=1), nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, True))  # 128

        tunnel = [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=4)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2),
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)
        ]
        tunnel2 = nn.Sequential(*tunnel)

        self.tunnel2 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel2,
            nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),
            nn.PixelShuffle(2), nn.LeakyReLU(0.2, True))

        tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=4)]
        tunnel += [
            ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2),
            ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)
        ]
        tunnel1 = nn.Sequential(*tunnel)

        self.tunnel1 = nn.Sequential(
            nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel1,
            nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
            nn.PixelShuffle(2), nn.LeakyReLU(0.2, True))

        self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)
Example #4
    def __init__(self):
        super(Net, self).__init__()

        self.conv_input = nn.Conv2d(in_channels=3,
                                    out_channels=64,
                                    kernel_size=9,
                                    stride=1,
                                    padding=4,
                                    bias=True)
        self.relu = nn.LeakyReLU(0.2, inplace=True)

        self.residual1 = self.make_layer(_Residual_Block, 8)
        self.residual2 = self.make_layer(_Residual_Block, 8)

        self.conv_mid = nn.Conv2d(in_channels=64,
                                  out_channels=64,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias=True)
        # self.bn_mid = nn.BatchNorm2d(64)

        self.upscale4x = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True),
            nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=64,
                      out_channels=256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True),
            nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, inplace=True),
        )

        self.conv_output = nn.Conv2d(in_channels=64,
                                     out_channels=3,
                                     kernel_size=9,
                                     stride=1,
                                     padding=4,
                                     bias=True)
        self.conv_clswise_mid = nn.Conv2d(in_channels=20 * 64,
                                          out_channels=20 * 64,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1,
                                          bias=True,
                                          groups=20)
        self.clswise_residual = self.make_layer(_Clswise_Residual_Block, 2)
        self.conv_clswise = nn.Conv2d(in_channels=20 * 3,
                                      out_channels=20 * 64,
                                      kernel_size=5,
                                      stride=1,
                                      padding=2,
                                      bias=True,
                                      groups=20)
        self.conv_pointwise = nn.Conv2d(in_channels=20 * 64,
                                        out_channels=64,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0,
                                        bias=True)
        self.conv_merge = nn.Conv2d(in_channels=128,
                                    out_channels=64,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    bias=True)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # init.orthogonal(m.weight, math.sqrt(2))
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
Example #5
    def __init__(self, scale=4, num_of_res_blocks=16, num_of_channels=32):
        super(SRResNetLight, self).__init__()

        self.scale = scale

        self.conv_input = nn.Conv2d(in_channels=3,
                                    out_channels=num_of_channels,
                                    kernel_size=9,
                                    stride=1,
                                    padding=4,
                                    bias=False)
        self.relu = nn.ReLU(inplace=True)

        self.residual = make_layer(ResBlock(num_of_channels),
                                   num_of_res_blocks)

        self.conv_mid = nn.Conv2d(in_channels=num_of_channels,
                                  out_channels=num_of_channels,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias=False)
        self.bn_mid = nn.InstanceNorm2d(num_of_channels, affine=True)

        if scale == 2:
            factor = 2
            self.upscale = nn.Sequential(
                nn.Conv2d(in_channels=num_of_channels,
                          out_channels=num_of_channels * factor * factor,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False),
                nn.PixelShuffle(factor),
                nn.ReLU(inplace=True),
            )
        elif scale == 4:
            factor = 2
            self.upscale = nn.Sequential(
                nn.Conv2d(in_channels=num_of_channels,
                          out_channels=num_of_channels * factor * factor,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False),
                nn.PixelShuffle(factor),
                nn.ReLU(inplace=True),
                nn.Conv2d(num_of_channels,
                          num_of_channels,
                          groups=num_of_channels,
                          kernel_size=3,
                          padding=1,
                          stride=1,
                          bias=False),
                nn.Conv2d(num_of_channels,
                          num_of_channels * factor * factor,
                          kernel_size=1,
                          bias=True),
                nn.PixelShuffle(factor),
                nn.ReLU(inplace=True),
            )
        elif scale == 3:
            factor = 3
            self.upscale = nn.Sequential(
                nn.Conv2d(in_channels=num_of_channels,
                          out_channels=num_of_channels * factor * factor,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=True),
                nn.PixelShuffle(factor),
                nn.ReLU(inplace=True),
            )
        else:
            raise NotImplementedError

        self.conv_output = nn.Conv2d(in_channels=num_of_channels,
                                     out_channels=3,
                                     kernel_size=9,
                                     stride=1,
                                     padding=4,
                                     bias=False)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
Example #6
    def __init__(self):
        super(MotionCompensateSubnet, self).__init__()
        self.downsample_4x = nn.Sequential(
            nn.Conv2d(2, 24, kernel_size=5, stride=2, padding=2, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=5, stride=2, padding=2, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 32, kernel_size=3, stride=1, padding=1, bias=False),
        )
        self.ps_4x = nn.PixelShuffle(4)

        self.downsample_2x = nn.Sequential(
            nn.Conv2d(5, 24, kernel_size=5, stride=2, padding=2, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=5, stride=1, padding=2, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 8, kernel_size=3, stride=1, padding=1, bias=False),
        )
        self.ps_2x = nn.PixelShuffle(2)

        self.pixelwise_mc = nn.Sequential(
            nn.Conv2d(5, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=5, stride=1, padding=2, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 24, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),
            nn.Conv2d(24, 2, kernel_size=3, stride=1, padding=1, bias=False),
        )

        self.ps_1x = nn.PixelShuffle(1)
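        # note: PixelShuffle(1) above is effectively an identity op,
        # presumably kept for structural symmetry with ps_2x and ps_4x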

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight.data,
                                     a=0.1,
                                     mode='fan_in',
                                     nonlinearity='relu')
                if m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight.data,
                                     a=0.1,
                                     mode='fan_in',
                                     nonlinearity='relu')
                init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0)
                init.constant_(m.bias.data, 0.0)
Example #7
	def __init__(self,h,nx,ny,nVarIn=1,nVarOut=1,initWay=None,k=5,s=1,p=2):
		super(USCNNSep, self).__init__()
		"""
		Extract basic information
		"""
		self.initWay=initWay
		self.nVarIn=nVarIn
		self.nVarOut=nVarOut
		self.k=k
		self.s=s
		self.p=p
		self.deltaX=h
		self.nx=nx
		self.ny=ny
		"""
		Define net
		"""
		W1=16
		W2=32
		self.relu=nn.ReLU()
		self.US=nn.Upsample(size=[self.ny-2,self.nx-2],mode='bicubic')
		self.conv1=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
		self.conv2=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
		self.conv3=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
		self.conv4=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
		self.pixel_shuffle1 = nn.PixelShuffle(1)
		self.conv11=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
		self.conv22=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
		self.conv33=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
		self.conv44=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
		self.pixel_shuffle11 = nn.PixelShuffle(1)
		self.conv111=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
		self.conv222=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
		self.conv333=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
		self.conv444=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
		self.pixel_shuffle111 = nn.PixelShuffle(1)
		if self.initWay is not None:
			self._initialize_weights()
		#Specify filter
		dxiFilter=torch.Tensor([[[[0.,  0.,  0.,  0.,  0.],
								 [0.,  0.,  0.,  0.,  0.],
								 [1., -8.,  0.,  8.,  -1.],
								 [0.,  0.,  0.,  0.,  0.],
								 [0.,  0.,  0.,  0.,  0.]]]]).to("cuda")/12./self.deltaX 
		self.convdxi=nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
		self.convdxi.weight=nn.Parameter(dxiFilter, requires_grad=False)

		detaFilter=torch.Tensor([[[[0.,  0.,  1., 0.,  0.],
								 [0.,  0.,  -8.,  0.,  0.],
								 [0.,  0.,  0.,  0.,  0.],
								 [0.,  0.,  8., 0.,  0.],
								 [0.,  0.,  -1.,  0.,  0.]]]]).to("cuda")/12./self.deltaX
		self.convdeta=nn.Conv2d(1,1,(5,5),stride=1,padding=0,bias=None)
		self.convdeta.weight=nn.Parameter(detaFilter,requires_grad=False)

		lapFilter=torch.Tensor([[[[0.,  0.,  -1.,  0.,   0.],
								  [0.,  0.,  16.,  0.,   0.],
								  [-1., 16., -60., 16., -1.],
								  [0.,  0.,  16.,  0.,   0.],
								  [0.,  0.,  -1.,  0.,   0.]]]]).to("cuda")/12./self.deltaX/self.deltaX
		self.convlap = nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
		self.convlap.weight=nn.Parameter(lapFilter, requires_grad=False)
Example #8
    def __init__(self, num_res_blocks, n_feats, res_scale):
        super(MainNet, self).__init__()
        self.num_res_blocks = num_res_blocks  ### a list with the number of resblocks for each stage
        self.n_feats = n_feats

        self.SFE = SFE(self.num_res_blocks[0], n_feats, res_scale)

        ### stage11
        self.conv11_head = conv3x3(256 + n_feats, n_feats)
        self.RB11 = nn.ModuleList()
        for i in range(self.num_res_blocks[1]):
            self.RB11.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))
        self.conv11_tail = conv3x3(n_feats, n_feats)

        ### subpixel 1 -> 2
        self.conv12 = conv3x3(n_feats, n_feats * 4)
        self.ps12 = nn.PixelShuffle(2)

        ### stage21, 22
        #self.conv21_head = conv3x3(n_feats, n_feats)
        self.conv22_head = conv3x3(128 + n_feats, n_feats)

        self.ex12 = CSFI2(n_feats)

        self.RB21 = nn.ModuleList()
        self.RB22 = nn.ModuleList()
        for i in range(self.num_res_blocks[2]):
            self.RB21.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))
            self.RB22.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))

        self.conv21_tail = conv3x3(n_feats, n_feats)
        self.conv22_tail = conv3x3(n_feats, n_feats)

        ### subpixel 2 -> 3
        self.conv23 = conv3x3(n_feats, n_feats * 4)
        self.ps23 = nn.PixelShuffle(2)

        ### stage31, 32, 33
        #self.conv31_head = conv3x3(n_feats, n_feats)
        #self.conv32_head = conv3x3(n_feats, n_feats)
        self.conv33_head = conv3x3(64 + n_feats, n_feats)

        self.ex123 = CSFI3(n_feats)

        self.RB31 = nn.ModuleList()
        self.RB32 = nn.ModuleList()
        self.RB33 = nn.ModuleList()
        for i in range(self.num_res_blocks[3]):
            self.RB31.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))
            self.RB32.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))
            self.RB33.append(
                ResBlock(in_channels=n_feats,
                         out_channels=n_feats,
                         res_scale=res_scale))

        self.conv31_tail = conv3x3(n_feats, n_feats)
        self.conv32_tail = conv3x3(n_feats, n_feats)
        self.conv33_tail = conv3x3(n_feats, n_feats)

        self.merge_tail = MergeTail(n_feats)
Example #9
    def __init__(self, opt, device):
        super().__init__()
        in_channels = opt['in_channels']
        out_channels = opt['out_channels']
        num_groups = opt['num_groups']
        hg_num_feature = opt['hg_num_feature']
        hg_num_keypoints = opt['hg_num_keypoints']
        act_type = 'prelu'
        norm_type = None

        self.num_steps = opt['num_steps']
        num_features = opt['num_features']
        self.upscale_factor = opt['scale']
        self.detach_attention = opt['detach_attention']
        if self.detach_attention:
            print('Detach attention!')
        else:
            print('Not detach attention!')

        if self.upscale_factor == 8:
            # with PixelShuffle at start, need to upscale 4x only
            stride = 4
            padding = 2
            kernel_size = 8
        else:
            raise NotImplementedError("Upscale factor %d not implemented!" %
                                      self.upscale_factor)

        # LR feature extraction block
        self.conv_in = ConvBlock(in_channels,
                                 4 * num_features,
                                 kernel_size=3,
                                 act_type=act_type,
                                 norm_type=norm_type)
        self.feat_in = nn.PixelShuffle(2)

        # basic block
        self.first_block = FeedbackBlockCustom(num_features, num_groups,
                                               self.upscale_factor, act_type,
                                               norm_type, num_features)
        self.block = FeedbackBlockHeatmapAttention(num_features,
                                                   num_groups,
                                                   self.upscale_factor,
                                                   act_type,
                                                   norm_type,
                                                   5,
                                                   opt['num_fusion_block'],
                                                   device=device)
        self.block.should_reset = False

        # reconstruction block
        # self.upsample = nn.Upsample(scale_factor=upscale_factor, mode='bilinear')

        self.out = DeconvBlock(num_features,
                               num_features,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               act_type='prelu',
                               norm_type=norm_type)
        self.conv_out = ConvBlock(num_features,
                                  out_channels,
                                  kernel_size=3,
                                  act_type=None,
                                  norm_type=norm_type)

        self.HG = FeedbackHourGlass(hg_num_feature, hg_num_keypoints)
Example #10
 def __init__(self, n_chan, factor=2):
     super(UpSample, self).__init__()
     out_chan = n_chan * factor * factor
     self.proj = nn.Conv2d(n_chan, out_chan, 1, 1, 0)
     self.up = nn.PixelShuffle(factor)
     self.init_weight()
Example #11
    def __init__(self,
                 upscale_factor,
                 in_channels=3,
                 out_channels=3,
                 num_features=64,
                 num_blocks=20,
                 num_layers=6):
        super(RDN, self).__init__()
        r = upscale_factor
        G0 = num_features
        kSize = 3

        # number of RDB blocks, conv layers, out channels
        self.D, C, G = [num_blocks, num_layers, num_features]

        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(in_channels,
                                 G0,
                                 kSize,
                                 padding=(kSize - 1) // 2,
                                 stride=1)
        self.SFENet2 = nn.Conv2d(G0,
                                 G0,
                                 kSize,
                                 padding=(kSize - 1) // 2,
                                 stride=1)

        # Residual dense blocks and dense feature fusion
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(RDB(growRate0=G0, growRate=G, nConvLayers=C))

        # Global Feature Fusion
        self.GFF = nn.Sequential(*[
            nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        ])

        # Up-sampling net
        if r == 2 or r == 3:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(
                    G0, G * r * r, kSize, padding=(kSize - 1) // 2, stride=1),
                nn.PixelShuffle(r),
                nn.Conv2d(
                    G, out_channels, kSize, padding=(kSize - 1) // 2, stride=1)
            ])
        elif r == 4:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize - 1) //
                          2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(
                    G, out_channels, kSize, padding=(kSize - 1) // 2, stride=1)
            ])
        elif r == 8:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize - 1) //
                          2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(
                    G, out_channels, kSize, padding=(kSize - 1) // 2, stride=1)
            ])
        else:
            raise ValueError("scale must be 2, 3, 4 or 8.")
Example #12
    def __init__(self, dropout=None, version=None, **kwargs):
        super().__init__()
        self.version = version[1:]
        # Input/output channels
        in_channels = 3
        out_channels = 1
        # Hyper-parameters
        ni, no = 64, out_channels
        n1, n2, n3, n4, n5 = 64, 64, 128, 256, 512
        num_blocks = [2, 2, 3, 3]
        pack_kernel = [5, 3, 3, 3, 3]
        unpack_kernel = [3, 3, 3, 3, 3]
        iconv_kernel = [3, 3, 3, 3, 3]
        # Initial convolutional layer
        self.pre_calc = Conv2D(in_channels, ni, 5, 1)
        # Support for different versions
        if self.version == 'A':  # Channel concatenation
            n1o, n1i = n1, n1 + ni + no
            n2o, n2i = n2, n2 + n1 + no
            n3o, n3i = n3, n3 + n2 + no
            n4o, n4i = n4, n4 + n3
            n5o, n5i = n5, n5 + n4
        elif self.version == 'B':  # Channel addition
            n1o, n1i = n1, n1 + no
            n2o, n2i = n2, n2 + no
            n3o, n3i = n3//2, n3//2 + no
            n4o, n4i = n4//2, n4//2
            n5o, n5i = n5//2, n5//2
        else:
            raise ValueError('Unknown PackNet version {}'.format(version))

        # Encoder

        self.pack1 = PackLayerConv3d(n1, pack_kernel[0])
        self.pack2 = PackLayerConv3d(n2, pack_kernel[1])
        self.pack3 = PackLayerConv3d(n3, pack_kernel[2])
        self.pack4 = PackLayerConv3d(n4, pack_kernel[3])
        self.pack5 = PackLayerConv3d(n5, pack_kernel[4])

        self.conv1 = Conv2D(ni, n1, 7, 1)
        self.conv2 = ResidualBlock(n1, n2, num_blocks[0], 1, dropout=dropout)
        self.conv3 = ResidualBlock(n2, n3, num_blocks[1], 1, dropout=dropout)
        self.conv4 = ResidualBlock(n3, n4, num_blocks[2], 1, dropout=dropout)
        self.conv5 = ResidualBlock(n4, n5, num_blocks[3], 1, dropout=dropout)

        # Decoder

        self.unpack5 = UnpackLayerConv3d(n5, n5o, unpack_kernel[0])
        self.unpack4 = UnpackLayerConv3d(n5, n4o, unpack_kernel[1])
        self.unpack3 = UnpackLayerConv3d(n4, n3o, unpack_kernel[2])
        self.unpack2 = UnpackLayerConv3d(n3, n2o, unpack_kernel[3])
        self.unpack1 = UnpackLayerConv3d(n2, n1o, unpack_kernel[4])

        self.iconv5 = Conv2D(n5i, n5, iconv_kernel[0], 1)
        self.iconv4 = Conv2D(n4i, n4, iconv_kernel[1], 1)
        self.iconv3 = Conv2D(n3i, n3, iconv_kernel[2], 1)
        self.iconv2 = Conv2D(n2i, n2, iconv_kernel[3], 1)
        self.iconv1 = Conv2D(n1i, n1, iconv_kernel[4], 1)

        # Depth Layers

        self.unpack_disps = nn.PixelShuffle(2)
        self.unpack_disp4 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None)
        self.unpack_disp3 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None)
        self.unpack_disp2 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None)

        self.disp4_layer = InvDepth(n4, out_channels=out_channels)
        self.disp3_layer = InvDepth(n3, out_channels=out_channels)
        self.disp2_layer = InvDepth(n2, out_channels=out_channels)
        self.disp1_layer = InvDepth(n1, out_channels=out_channels)

        self.init_weights()
Example #13
 def __init__(self, inplanes, planes, color_fc_out, block_num):
     super(DecoderBlock, self).__init__()
     self.secat_layer = make_secat_layer(SECatBottleneckX, inplanes,
                                         planes // 4, color_fc_out,
                                         block_num)
     self.ps = nn.PixelShuffle(2)
Example #14
 def __init__(self, in_c, scale_factor):
     super().__init__()
     self.conv = nn.Conv2d(in_c, in_c * scale_factor ** 2, 3, 1, 1)
     self.ps = nn.PixelShuffle(scale_factor)  # in_c * 4, H, W --> in_c, H*2, W*2
     self.act = nn.PReLU(num_parameters=in_c)
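
A self-contained sketch of the block above, using a hypothetical class name (UpsampleBlock) and an assumed forward in the usual conv -> pixel shuffle -> activation order, to show that the channel count is preserved while resolution grows by scale_factor:

import torch
import torch.nn as nn

class UpsampleBlock(nn.Module):  # hypothetical name; mirrors the __init__ shown above
    def __init__(self, in_c, scale_factor):
        super().__init__()
        self.conv = nn.Conv2d(in_c, in_c * scale_factor ** 2, 3, 1, 1)
        self.ps = nn.PixelShuffle(scale_factor)
        self.act = nn.PReLU(num_parameters=in_c)

    def forward(self, x):  # assumed order: conv -> pixel shuffle -> PReLU
        return self.act(self.ps(self.conv(x)))

x = torch.randn(1, 32, 24, 24)
print(UpsampleBlock(32, 2)(x).shape)  # torch.Size([1, 32, 48, 48])
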
Example #15
    def __init__(self,
                 depth,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 gcb=None,
                 stage_with_gcb=(False, False, False, False),
                 gen_attention=None,
                 stage_with_gen_attention=((), (), (), ()),
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNetDCT, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.gen_attention = gen_attention
        self.gcb = gcb
        self.stage_with_gcb = stage_with_gcb
        if gcb is not None:
            assert len(stage_with_gcb) == num_stages
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64

        self._make_stem_layer()

        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            gcb = self.gcb if self.stage_with_gcb[i] else None
            planes = 64 * 2**i
            res_layer = make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention,
                gen_attention_blocks=stage_with_gen_attention[i])
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        def make_input_layer(type='conv', in_ch=64, out_ch=64):
            if type == 'conv':
                model = nn.Sequential(
                    nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, stride=1, padding=1, bias=False),
                    nn.BatchNorm2d(out_ch),
                    nn.ReLU6(inplace=True)
                )
            elif type == 'deconv':
                model = nn.Sequential(
                    nn.ConvTranspose2d(in_channels=in_ch, out_channels=out_ch, kernel_size=4, stride=2, padding=1, bias=False),
                    nn.BatchNorm2d(out_ch),
                    nn.ReLU6(inplace=True)
                )
            else: raise NotImplementedError
            return model

        upscale_factor_y, upscale_factor_cb, upscale_factor_cr = 2, 4, 4
        self.upconv_y = nn.Conv2d(in_channels=64, out_channels=22*upscale_factor_y*upscale_factor_y,
                                  kernel_size=3, stride=1, padding=1, bias=False)
        self.upconv_cb = nn.Conv2d(in_channels=64, out_channels=21*upscale_factor_cb*upscale_factor_cb,
                                  kernel_size=3, stride=1, padding=1, bias=False)
        self.upconv_cr = nn.Conv2d(in_channels=64, out_channels=21*upscale_factor_cr*upscale_factor_cr,
                                   kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_y = nn.BatchNorm2d(22)
        self.bn_cb = nn.BatchNorm2d(21)
        self.bn_cr = nn.BatchNorm2d(21)
        self.pixelshuffle_y = nn.PixelShuffle(2)
        self.pixelshuffle_cb = nn.PixelShuffle(4)
        self.pixelshuffle_cr = nn.PixelShuffle(4)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        self._freeze_stages()

        self.feat_dim = self.block.expansion * 64 * 2**(
            len(self.stage_blocks) - 1)
Example #16
    def __init__(self, ngf=64):
        super(Generator, self).__init__()

        self.feature_conv = FeatureConv()

        self.to0 = self._make_encoder_block_first(5, 32)
        self.to1 = self._make_encoder_block(32, 64)
        self.to2 = self._make_encoder_block(64, 128)
        self.to3 = self._make_encoder_block(128, 256)
        self.to4 = self._make_encoder_block(256, 512)

        self.deconv_for_decoder = nn.Sequential(
            nn.ConvTranspose2d(256,
                               128,
                               3,
                               stride=2,
                               padding=1,
                               output_padding=1),  # output is 64 * 64
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(128,
                               64,
                               3,
                               stride=2,
                               padding=1,
                               output_padding=1),  # output is 128 * 128
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(64,
                               32,
                               3,
                               stride=2,
                               padding=1,
                               output_padding=1),  # output is 256 * 256
            nn.LeakyReLU(0.2),
            nn.ConvTranspose2d(32, 3, 3, stride=1, padding=1,
                               output_padding=0),  # output is 256 * 256
            nn.Tanh(),
        )

        tunnel4 = nn.Sequential(*[
            ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1)
            for _ in range(20)
        ])

        self.tunnel4 = nn.Sequential(
            nn.Conv2d(ngf * 8 + 512,
                      ngf * 8,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.LeakyReLU(0.2, True), tunnel4,
            nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3,
                      stride=1, padding=1), nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, True))  # 64

        depth = 2
        tunnel = [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=4)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=2),
            ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=32, dilate=1)
        ]
        tunnel3 = nn.Sequential(*tunnel)

        self.tunnel3 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel3,
            nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3,
                      stride=1, padding=1), nn.PixelShuffle(2),
            nn.LeakyReLU(0.2, True))  # 128

        tunnel = [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=4)
            for _ in range(depth)
        ]
        tunnel += [
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=2),
            ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=32, dilate=1)
        ]
        tunnel2 = nn.Sequential(*tunnel)

        self.tunnel2 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel2,
            nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),
            nn.PixelShuffle(2), nn.LeakyReLU(0.2, True))

        tunnel = [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2)]
        tunnel += [ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=4)]
        tunnel += [
            ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=2),
            ResNeXtBottleneck(ngf, ngf, cardinality=16, dilate=1)
        ]
        tunnel1 = nn.Sequential(*tunnel)

        self.tunnel1 = nn.Sequential(
            nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, True), tunnel1,
            nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
            nn.PixelShuffle(2), nn.LeakyReLU(0.2, True))

        self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)
Example #17
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(FinalConv, self).__init__()
     self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=9, bias=False)
     # self.bn1 = nn.BatchNorm2d(planes)
     self.pixel_shuffle = nn.PixelShuffle(upscale_factor=2)
     self.stride = stride
Example #18
 def test_pixelshuffle(self):
     arr = np.random.randn(2,4,224,224)
     check_equal(arr, jnn.PixelShuffle(upscale_factor=2), tnn.PixelShuffle(upscale_factor=2))
     arr = np.random.randn(1,3*3,224,224)
     check_equal(arr, jnn.PixelShuffle(upscale_factor=3), tnn.PixelShuffle(upscale_factor=3))
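
For reference, the operation both frameworks are being compared on is equivalent to a plain reshape and permute; a minimal PyTorch-only check of that equivalence (an illustration, not part of the test above):

import torch

def pixel_shuffle_ref(x, r):
    # (N, C*r*r, H, W) -> (N, C, H*r, W*r) via reshape + permute
    n, crr, h, w = x.shape
    c = crr // (r * r)
    x = x.view(n, c, r, r, h, w).permute(0, 1, 4, 2, 5, 3)
    return x.reshape(n, c, h * r, w * r)

x = torch.randn(2, 4, 8, 8)
assert torch.allclose(pixel_shuffle_ref(x, 2), torch.nn.PixelShuffle(2)(x))
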
Example #19
 def __init__(self, in_c, factor=2):
     super(PixelShuffle, self).__init__()
     self.conv = nn.Conv2d(in_c, in_c * factor**2, 3, padding=1)
     self.out = nn.PixelShuffle(factor)
Example #20
 def __init__(self, in_channel, out_channel, upscale_factor, kernel=3, stride=1, padding=1):
     super(PixelShuffleBlock, self).__init__()
     self.conv1 = nn.Conv2d(in_channel, out_channel * upscale_factor ** 2, kernel, stride, padding)
     self.ps = nn.PixelShuffle(upscale_factor)
Example #21
    def __init__(self, channel_in):
        super(_up, self).__init__()

        self.conv = nn.PixelShuffle(2)
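        # note: a bare PixelShuffle(2) maps (N, channel_in, H, W) to (N, channel_in // 4, 2H, 2W)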
        self.relu = nn.PReLU()
Example #22
def pixelshuffle_block(in_channels, out_channels, upscale_factor=2, kernel_size=3, stride=1):
    conv = conv_layer(in_channels, out_channels * (upscale_factor ** 2), kernel_size, stride)
    pixel_shuffle = nn.PixelShuffle(upscale_factor)
    return sequential(conv, pixel_shuffle)
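
A quick usage sketch for pixelshuffle_block. The conv_layer and sequential helpers come from the surrounding module and are not shown here, so the stand-ins below are assumptions (a plain padded Conv2d and nn.Sequential):

import torch
import torch.nn as nn

# Assumed stand-ins for the helpers used by pixelshuffle_block (not shown above).
def conv_layer(in_channels, out_channels, kernel_size, stride=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                     padding=kernel_size // 2)

def sequential(*modules):
    return nn.Sequential(*modules)

up = pixelshuffle_block(in_channels=64, out_channels=64, upscale_factor=2)
x = torch.randn(1, 64, 12, 12)
print(up(x).shape)  # torch.Size([1, 64, 24, 24])
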
Example #23
    def __init__(self,
                 scale=3,
                 num_of_ch_enc=16,
                 num_of_ch_dec=8,
                 num_of_res_blocks=4):
        super(SmallModel, self).__init__()

        self.conv_input = nn.Conv2d(in_channels=3,
                                    out_channels=num_of_ch_enc,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    bias=True)
        self.relu = nn.ReLU(inplace=False)
        self.sigmoid = nn.Sigmoid()

        self.conv_cubic1 = nn.Conv2d(in_channels=3,
                                     out_channels=num_of_ch_dec,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     bias=True)
        self.conv_cubic2 = nn.Conv2d(in_channels=num_of_ch_dec,
                                     out_channels=1,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     bias=True)

        self.residual1 = SmallBlock(num_of_ch_enc)
        self.residual2 = SmallBlock(num_of_ch_enc)
        self.residual3 = SmallBlock(num_of_ch_enc)
        self.residual4 = SmallBlock(num_of_ch_enc)

        self.conv_mid = nn.Conv2d(in_channels=num_of_ch_enc *
                                  (num_of_res_blocks + 1),
                                  out_channels=num_of_ch_dec,
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias=True)

        if scale == 4:
            factor = 2
            self.upscale = nn.Sequential(
                nn.Conv2d(in_channels=num_of_ch_dec,
                          out_channels=num_of_ch_dec * factor * factor,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=True), nn.PixelShuffle(factor),
                nn.ReLU(inplace=True),
                nn.Conv2d(num_of_ch_dec,
                          num_of_ch_dec * factor * factor,
                          kernel_size=3,
                          padding=1,
                          stride=1,
                          bias=True), nn.PixelShuffle(factor),
                nn.ReLU(inplace=True))
        elif scale == 3:
            self.upscale = nn.Sequential(
                nn.Conv2d(in_channels=num_of_ch_dec,
                          out_channels=num_of_ch_dec * scale * scale,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=True), nn.PixelShuffle(scale),
                nn.ReLU(inplace=True))
        else:
            raise NotImplementedError

        self.conv_output = nn.Conv2d(in_channels=num_of_ch_dec,
                                     out_channels=3,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     bias=True)
Example #24
    def __init__(self, args):
        super(MODEL, self).__init__()

        C_in = args.n_colors
        n_feats = args.n_feats
        block_feats = args.block_feats
        n_layers = args.n_layers
        scale = int(args.scale)
        use_hsigmoid = args.use_hsigmoid
        use_ca = args.use_ca
        #print('scale:{0}'.format(scale))
        res_s = args.res_scale

        self.rgb_mean = torch.autograd.Variable(
            torch.FloatTensor([0.4488, 0.4371, 0.4040])).view([1, 3, 1, 1])

        wn = lambda x: torch.nn.utils.weight_norm(x)

        # define head module

        head = []
        head.append(wn(nn.Conv2d(C_in, n_feats, 3, padding=3 // 2, bias=True)))

        # define body module
        body = []
        self.x_scale_list = nn.ModuleList()
        self.res_scale_list = nn.ModuleList()
        self.auxilary_scale_list = nn.ModuleList()
        for _ in range(n_layers):
            body.append(Block_a(n_feats, 3, block_feats, wn=wn))
            self.x_scale_list.append(Scale(res_s))
            self.res_scale_list.append(Scale(res_s))
            self.auxilary_scale_list.append(Scale(res_s))

        # define tail module
        out_feats = scale * scale * C_in
        tail = []
        tail.append(
            wn(nn.Conv2d(n_feats, out_feats, 3, padding=3 // 2, bias=True)))
        tail.append(nn.PixelShuffle(scale))

        # define skip module
        skip = []
        skip.append(
            wn(nn.Conv2d(C_in, out_feats, 3, padding=3 // 2, bias=True)))
        skip.append(nn.PixelShuffle(scale))

        # make object members
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = nn.Sequential(*tail)
        self.skip = nn.Sequential(*skip)

        # auxiliary features
        self.fusion_conv_list = nn.ModuleList()
        for j in range(n_layers):
            if use_ca:
                tmp = nn.Sequential(*[
                    nn.Conv2d(
                        n_feats * (j + 1), n_feats, 1, padding=0, bias=False),
                    CALayer(n_feats, 1, use_hsigmoid=use_hsigmoid)
                ])
            else:
                tmp = nn.Sequential(*[
                    nn.Conv2d(
                        n_feats * (j + 1), n_feats, 1, padding=0, bias=False)
                ])
            self.fusion_conv_list.append(tmp)

        self.test_flops = False
Example #25
 def __init__(self, num_features, out_channels, kSize, scale):
     super(Upsampler, self).__init__()
     # Up-sampling net
     if scale == 2 or scale == 3:
         self.UPNet = nn.Sequential(*[
             nn.Conv2d(num_features,
                       num_features * scale * scale,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(scale),
             nn.Conv2d(num_features,
                       out_channels,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1)
         ])
     elif scale == 4:
         self.UPNet = nn.Sequential(*[
             nn.Conv2d(num_features,
                       num_features * 4,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(2),
             nn.Conv2d(num_features,
                       num_features * 4,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(2),
             nn.Conv2d(num_features,
                       out_channels,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1)
         ])
     elif scale == 8:
         self.UPNet = nn.Sequential(*[
             nn.Conv2d(num_features,
                       num_features * 4,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(2),
             nn.Conv2d(num_features,
                       num_features * 4,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(2),
             nn.Conv2d(num_features,
                       num_features * 4,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1),
             nn.PixelShuffle(2),
             nn.Conv2d(num_features,
                       out_channels,
                       kSize,
                       padding=(kSize - 1) // 2,
                       stride=1)
         ])
     else:
          raise ValueError("scale must be 2, 3, 4 or 8.")
Example #26
    def __init__(self):
        super(NetA, self).__init__()
        self.in_padding = NN.Sequential(NN.ReplicationPad2d((8, 8, 8, 8)))
        self.conv_in = NN.Sequential(NN.Conv2d(1, 32, 7, 1, 3),
                                     NN.GroupNorm(4, 32),
                                     NN.Conv2d(32, 32, 7, 1, 3),
                                     NN.GroupNorm(4, 32), NN.ELU(inplace=True))
        self.conv0 = NN.Sequential(
            NN.Conv2d(32, 64, 3, 2, 1),
            NN.GroupNorm(8, 64),
            NN.ELU(inplace=True),
        )
        self.conv1 = NN.Sequential(
            NN.Conv2d(64, 64, 3, 1, 1),
            NN.GroupNorm(8, 64),
            NN.ELU(inplace=True),
            NN.Conv2d(64, 128, 3, 2, 1),
            #NN.GroupNorm(8,128),
            NN.SELU(inplace=True),
        )

        self.conv2 = NN.Sequential(
            NN.Conv2d(128, 128, 3, 1, 1),
            NN.GroupNorm(8, 128),
            NN.ELU(inplace=True),
            NN.Conv2d(128, 128, 3, 1, 1),
            #NN.GroupNorm(8,128),
            NN.SELU(inplace=True),
        )

        self.conv3 = NN.Sequential(
            NN.Conv2d(128, 128, 3, 1, 1),
            NN.GroupNorm(8, 128),
            NN.ELU(inplace=True),
            NN.Conv2d(128, 256, 3, 2, 1),
            #NN.GroupNorm(16,256),
            NN.SELU(inplace=True),
        )
        self.conv4 = NN.Sequential(NN.Conv2d(256, 256, 3, 1, 1),
                                   NN.GroupNorm(16, 256), NN.ELU(inplace=True))
        self.conv_in_up = NN.Sequential(
            NN.Conv2d(32, 256, 1, 1, 0),
            #NN.GroupNorm(16, 256),
            NN.SELU(inplace=True))
        self.conv0_up = NN.Sequential(
            NN.Conv2d(64, 256, 1, 1, 0),
            #NN.GroupNorm(16, 256),
            NN.SELU(inplace=True))

        self.conv1_up = NN.Sequential(
            NN.Conv2d(128, 128, 1, 1, 0),
            #NN.GroupNorm(8, 128),
            NN.SELU(inplace=True))
        self.conv2_up = NN.Sequential(
            NN.Conv2d(128, 128, 1, 1, 0),
            #NN.GroupNorm(8, 128),
            NN.SELU(inplace=True))

        self.conv3_up = NN.Sequential(
            NN.Conv2d(256, 256, 1, 1, 0),
            #NN.GroupNorm(8, 64),
            NN.SELU(inplace=True))
        self.conv4_up = NN.Sequential(
            NN.Conv2d(256, 256, 1, 1, 0),
            NN.SELU(inplace=True)
            #NN.GroupNorm(16, 256),
            #NN.ELU(inplace=True)
        )

        self.convUp_0 = NN.Sequential(
            NN.Conv2d(256, 256, 1, 1, 0),
            NN.SELU(inplace=True),
            NN.PixelShuffle(2),
            NN.Conv2d(64, 128, 3, 1, 1),
            #NN.ReplicationPad2d((0, 1, 0, 1)),
            NN.SELU(inplace=True))
        self.convUp_1 = NN.Sequential(
            NN.Conv2d(128, 256, 1, 1, 0),
            NN.SELU(inplace=True),
            NN.PixelShuffle(2),
            NN.Conv2d(64, 256, 3, 1, 1),
            #NN.ReplicationPad2d((1, 0, 1, 0)),
            NN.SELU(inplace=True))
        self.convUp_2 = NN.Sequential(
            NN.Conv2d(256, 512, 1, 1, 0),
            NN.SELU(inplace=True),
            NN.PixelShuffle(2),
            NN.Conv2d(128, 256, 3, 1, 1),
            # NN.ReplicationPad2d((1, 0, 1, 0)),
            NN.SELU(inplace=True))

        self.res1 = NN.Sequential(NN.AvgPool2d(3, 2, 1),
                                  NN.Conv2d(64, 128, 1, 1, 0),
                                  NN.SELU(inplace=True))
        self.res2 = NN.Sequential(
            # Nothing is Everything
        )
        self.res3 = NN.Sequential(NN.AvgPool2d(3, 2, 1),
                                  NN.Conv2d(128, 256, 1, 1, 0),
                                  NN.SELU(inplace=True))
        self.res4 = NN.Sequential(
            # Nothing is Everything
        )

        self.end = NN.Sequential(NN.Conv2d(256, 64, 3, 1, 1),
                                 NN.GroupNorm(8, 64), NN.ELU(inplace=True),
                                 NN.Conv2d(64, 1, 3, 1, 1),
                                 NN.SELU(inplace=True))
        #self.end_ssim = NN.Sequential(
        #
        #)
        # self.output=NN.Sigmoid()
        self.criterion = NN.L1Loss()
        #self.criterion_psnr = NN.SmoothL1Loss()
        #self.criterion_ssim = NN.SmoothL1Loss()
        self.weightInit(self.conv_in)
        self.weightInit(self.conv0)
        self.weightInit(self.conv1)
        self.weightInit(self.conv2)
        self.weightInit(self.conv3)
        self.weightInit(self.conv4)
        self.weightInit(self.res1)
        self.weightInit(self.res2)
        self.weightInit(self.res3)
        self.weightInit(self.res4)
        self.weightInit(self.conv_in_up)
        self.weightInit(self.conv0_up)
        self.weightInit(self.conv1_up)
        self.weightInit(self.conv2_up)
        self.weightInit(self.conv3_up)
        self.weightInit(self.conv4_up)
        self.weightInit(self.convUp_0)
        self.weightInit(self.convUp_1)
        self.weightInit(self.convUp_2)
        self.weightInit(self.end)
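
# Minimal standalone sketch (not from the original source; the 16x16 input size
# is arbitrary): the channel/shape bookkeeping behind convUp_0 above.
# NN.PixelShuffle(2) rearranges 256 channels into 256 / 4 = 64 channels at twice
# the spatial size, which is why the next convolution expects 64 input channels.
import torch
import torch.nn as NN

up = NN.Sequential(
    NN.Conv2d(256, 256, 1, 1, 0),
    NN.SELU(inplace=True),
    NN.PixelShuffle(2),            # (1, 256, 16, 16) -> (1, 64, 32, 32)
    NN.Conv2d(64, 128, 3, 1, 1),
    NN.SELU(inplace=True),
)
print(up(torch.randn(1, 256, 16, 16)).shape)  # torch.Size([1, 128, 32, 32])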
Example #27
0
    def __init__(self, args):
        super(TSSN, self).__init__()
        r = args.scale[0]
        G0 = args.G0
        kSize = args.TSSNkSize
        
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

        # number of conv layers (C) and growth channels (G) per SRB
        C = 16
        G = 64

        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)

        # multi-branch
        self.branch1 = nn.Sequential(*[
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C)
        ])
        self.branch2 = nn.Sequential(*[
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C),
            SRB(growRate0 = G0, growRate = G, nConvLayers = C)
        ])

        # Global Feature Fusion
        self.GFF = nn.Sequential(*[
            common.SELayer(2*G0),
            nn.Conv2d(2*G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        ])

        # Up-sampling net
        if r == 2 or r == 3:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(r),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        elif r == 4:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        elif r == 8:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])

        else:
            raise ValueError("scale must be 2, 3, 4, or 8.")

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
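
# Minimal standalone sketch (assumed values, not the original config): how the
# r == 2 or 3 branch of UPNet above upsamples. G0 = 64, G = 64, kSize = 3, r = 3
# and the 24x24 input are assumptions for illustration; PixelShuffle(r) turns
# G*r*r channels into G channels at r times the resolution before the RGB conv.
import torch
import torch.nn as nn

G0, G, kSize, r, n_colors = 64, 64, 3, 3, 3
upnet = nn.Sequential(
    nn.Conv2d(G0, G * r * r, kSize, padding=(kSize - 1) // 2, stride=1),
    nn.PixelShuffle(r),            # (1, G*9, 24, 24) -> (1, G, 72, 72)
    nn.Conv2d(G, n_colors, kSize, padding=(kSize - 1) // 2, stride=1),
)
print(upnet(torch.randn(1, G0, 24, 24)).shape)  # torch.Size([1, 3, 72, 72])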
Example #28
0
    def __init__(self, bnd):
        super(VAEDecoder, self).__init__()

        self.dec_bin = nn.Sequential(
            nn.Conv2d(in_channels=bnd,
                      out_channels=512,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      dilation=1,
                      bias=True), nn.ReLU())

        self.decoder = nn.Sequential(

            # H/16 x W/16
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      dilation=1,
                      bias=True),
            nn.ReLU(),

            # Depth-to-Space
            nn.PixelShuffle(upscale_factor=2),

            # H/8 x W/8
            nn.Conv2d(in_channels=128,
                      out_channels=512,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      dilation=1,
                      bias=True),
            nn.ReLU(),

            # Depth-to-Space
            nn.PixelShuffle(upscale_factor=2),

            # H/4 x W/4
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      dilation=1,
                      bias=True),
            nn.ReLU(),

            # Depth-to-Space
            nn.PixelShuffle(upscale_factor=2),

            # H/2 x W/2
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      dilation=1,
                      bias=True),
            nn.ReLU(),

            # Depth-to-Space
            nn.PixelShuffle(upscale_factor=2),

            # H x W
            nn.Conv2d(in_channels=32,
                      out_channels=3,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      dilation=1,
                      bias=True),
            nn.Tanh())
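
# Minimal usage sketch for the VAEDecoder above (bnd = 64 and the 16x16 latent
# size are assumptions; no forward() is shown, so the submodules are called
# directly): four Conv + PixelShuffle(2) stages upsample an H/16 x W/16
# bottleneck by 16x, and Tanh squashes the 3-channel output to [-1, 1].
import torch

decoder = VAEDecoder(bnd=64)                  # hypothetical bottleneck depth
z = torch.randn(1, 64, 16, 16)                # H/16 x W/16 latent
out = decoder.decoder(decoder.dec_bin(z))
print(out.shape)                              # torch.Size([1, 3, 256, 256])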
Example #29
0
    def __init__(self,
                 num_in_ch=3,
                 num_out_ch=3,
                 num_feat=64,
                 num_frame=5,
                 deformable_groups=8,
                 num_extract_block=5,
                 num_reconstruct_block=10,
                 center_frame_idx=None,
                 hr_in=False,
                 with_predeblur=False,
                 with_tsa=True,
                 scale=4,
                 do_twice=False):
        super(EDVR, self).__init__()
        if center_frame_idx is None:
            self.center_frame_idx = num_frame // 2
        else:
            self.center_frame_idx = center_frame_idx
        self.hr_in = hr_in
        self.with_predeblur = with_predeblur
        self.with_tsa = with_tsa
        self.scale = scale
        self.do_twice = do_twice

        # extract features for each frame
        if self.with_predeblur:
            self.predeblur = PredeblurModule(num_feat=num_feat,
                                             hr_in=self.hr_in)
            self.conv_1x1 = nn.Conv2d(num_feat, num_feat, 1, 1)
        else:
            self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)

        # extract pyramid features
        self.feature_extraction = make_layer(ResidualBlockNoBN,
                                             num_extract_block,
                                             num_feat=num_feat)
        self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
        self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
        self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

        # pcd and tsa module
        self.pcd_align = PCDAlignment(num_feat=num_feat,
                                      deformable_groups=deformable_groups)
        if self.with_tsa:
            self.fusion = TSAFusion(num_feat=num_feat,
                                    num_frame=num_frame,
                                    center_frame_idx=self.center_frame_idx)
        else:
            self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)

        # reconstruction
        self.reconstruction = make_layer(ResidualBlockNoBN,
                                         num_reconstruct_block,
                                         num_feat=num_feat)
        # upsample
        self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
        if self.scale == 4:
            self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
        elif self.scale == 8:
            self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv3 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
        elif self.scale == 16:
            self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv3 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv4 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
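
# Minimal standalone sketch (the forward pass is not shown above, so this wiring
# of the scale == 4 tail is an assumption): each upconv expands channels 4x and
# pixel_shuffle(2) converts that into a 2x spatial upscale, giving 4x overall
# before conv_hr / conv_last.
import torch
import torch.nn as nn

num_feat = 64
upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
pixel_shuffle = nn.PixelShuffle(2)
lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

feat = torch.randn(1, num_feat, 32, 32)
feat = lrelu(pixel_shuffle(upconv1(feat)))    # -> (1, 64, 64, 64)
feat = lrelu(pixel_shuffle(upconv2(feat)))    # -> (1, 64, 128, 128)
print(feat.shape)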
Example #30
0
    def __init__(self, args):
        super(DSADCSR, self).__init__()
        self.args = args
        self.n_resblocks = n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        block_feats = args.block_feats
        kernel_size = 3
        if args.act in ['relu', 'RELU', 'Relu']:
            act = nn.ReLU(True)
        elif args.act in ['leakyrelu', 'Leakyrelu', 'LeakyReLU']:
            act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            raise ValueError('unsupported activation: {}'.format(args.act))
        print(act)
        
        weight_init = 0.1
        
        scale = args.scale[0] #8,16
        conv=common.default_conv

        wn = lambda x: torch.nn.utils.weight_norm(x)

        self.rgb_mean = torch.autograd.Variable(torch.FloatTensor(
            [args.r_mean, args.g_mean, args.b_mean])).view([1, 3, 1, 1])

        # Shallow feature extraction net
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        # Redidual dense blocks and dense feature fusion
        self.WRDBs = nn.ModuleList()
        for _ in range(n_resblocks):
            self.WRDBs.append(
                WDN_B(n_feats, kernel_size, block_feats, wn, alpha=args.alpha,beta=args.beta, act=act,weight_init=weight_init)
            )

        self.GFF = nn.Sequential(*[
            wn(nn.Conv2d(n_resblocks * n_feats, n_feats, 1, padding=0, stride=1),),
            wn(nn.Conv2d(n_feats, n_feats, kernel_size, padding=(kernel_size-1)//2, stride=1))
        ])


        tail = FFSC(args, scale, n_feats, kernel_size, wn,weight_init=weight_init)

        skip = []
        if scale == 16:
            for _ in range(int(math.log(scale, 4))):
                skip.append(
                    wn(nn.Conv2d(args.n_colors, args.n_colors * 16, 5, padding=5//2)))
                skip.append(nn.PixelShuffle(4))

        elif scale == 8:
            skip.append(wn(nn.Conv2d(args.n_colors, args.n_colors * 16, 5, padding=5//2)))
            skip.append(nn.PixelShuffle(4))
            skip.append(wn(nn.Conv2d(args.n_colors, args.n_colors * 4, 5, padding=5//2)))
            skip.append(nn.PixelShuffle(2))


        self.head = nn.Sequential(*m_head)
        self.tail = tail
        # self.tail = nn.Sequential(*tail)
        self.skip = nn.Sequential(*skip)

        a01,a12,a23,a34 = args.alpha,args.alpha,args.alpha,args.alpha
        b01,b12,b23,b34 = args.beta,args.beta,args.beta,args.beta
        self.A01 = Scale(a01)
        self.A12 = Scale(a12)
        self.A23 = Scale(a23)
        self.A34 = Scale(a34)
        self.A = [self.A01,self.A12,self.A23,self.A34]
        self.B01 = Scale(b01)
        self.B12 = Scale(b12)
        self.B23 = Scale(b23)
        self.B34 = Scale(b34)
        self.B = [self.B01,self.B12,self.B23,self.B34]
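
# Minimal standalone sketch of the scale == 8 skip branch above (weight_norm is
# dropped and the 32x32 input is assumed for brevity): PixelShuffle(4) followed
# by PixelShuffle(2) upsamples the RGB input by 8x in total, with each shuffle
# dividing the channel count by its factor squared.
import torch
import torch.nn as nn

n_colors = 3
skip = nn.Sequential(
    nn.Conv2d(n_colors, n_colors * 16, 5, padding=5 // 2),
    nn.PixelShuffle(4),            # (1, 48, 32, 32) -> (1, 3, 128, 128)
    nn.Conv2d(n_colors, n_colors * 4, 5, padding=5 // 2),
    nn.PixelShuffle(2),            # (1, 12, 128, 128) -> (1, 3, 256, 256)
)
print(skip(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 3, 256, 256])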