Example #1
0
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
        """SRResNet-style super-resolution network (BN-free residual trunk).

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunk.
            nb: number of residual blocks.
            upscale: upsampling factor; must be 2, 3 or 4.

        Raises:
            NotImplementedError: if ``upscale`` is not 2, 3 or 4.
        """
        super(SRResNet, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        basic_block = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)
        self.recon_trunk = arch_util.make_layer(basic_block, nb)
        self.LRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)

        # upsampling: each PixelShuffle(r) consumes nf * r**2 channels
        if self.upscale == 2:
            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif self.upscale == 3:
            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(3)
        elif self.upscale == 4:
            # x4 is realized as two cascaded x2 sub-pixel stages.
            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        else:
            # BUG FIX: an unsupported factor previously fell through and
            # crashed later with an opaque AttributeError on self.upconv1.
            raise NotImplementedError(
                'upscale {} is not supported (use 2, 3 or 4)'.format(upscale))

        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        # activation function
        self.relu = nn.ReLU(inplace=True)

        # initialization (small weights stabilize residual learning)
        arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],
                                     0.1)
        if self.upscale == 4:
            arch_util.initialize_weights(self.upconv2, 0.1)
Example #2
0
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, adafm_ksize=None):
        """AdaFM restoration network.

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunk.
            nb: number of residual blocks.
            adafm_ksize: kernel size of the depthwise AdaFM filters, or
                None to disable AdaFM entirely (plain residual blocks,
                ``self.adafm`` is set to None).
        """
        super(AdaFMNet, self).__init__()

        # Stride-2 first conv halves resolution; PixelShuffle below restores it.
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 2, 1, bias=True)

        if adafm_ksize is not None:
            basic_block = functools.partial(ResidualBlock_adafm,
                                            nf=nf,
                                            adafm_ksize=adafm_ksize)
        else:
            basic_block = functools.partial(arch_util.ResidualBlock_noBN,
                                            nf=nf)
        self.recon_trunk = arch_util.make_layer(basic_block, nb)

        self.LR_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        # BUG FIX: this conv was built unconditionally, so the documented
        # adafm_ksize=None default crashed on kernel_size=None. Only build
        # the trunk-level AdaFM filter when a kernel size is given.
        # NOTE(review): forward() must also skip self.adafm when it is None
        # -- confirm against the forward implementation.
        if adafm_ksize is not None:
            self.adafm = nn.Conv2d(nf,
                                   nf,
                                   kernel_size=adafm_ksize,
                                   padding=(adafm_ksize - 1) // 2,
                                   groups=nf,
                                   bias=True)
        else:
            self.adafm = None

        # x2 sub-pixel upsampling
        self.upconv = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        arch_util.initialize_weights(
            [self.conv_first, self.upconv, self.HRconv, self.conv_last], 0.1)
        # AdaFM filter starts with scale 0 (only when it exists).
        if self.adafm is not None:
            arch_util.initialize_weights([self.adafm], 0.)

        self.relu = nn.ReLU(inplace=True)
Example #3
0
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=32, cond_dim=2):
        """CResMD network: condition-modulated residual restoration net.

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunk.
            nb: number of ResidualBlock_CRes blocks in the trunk.
            cond_dim: dimensionality of the degradation-condition vector.
        """
        super(CResMDNet, self).__init__()

        # Maps the condition vector to a per-image, per-channel global scale.
        self.global_scale = nn.Linear(cond_dim, out_nc, bias=True)

        # Stride-2 first conv halves resolution; PixelShuffle below restores it.
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 2, 1, bias=True)

        block_fn = functools.partial(ResidualBlock_CRes,
                                     nf=nf,
                                     cond_dim=cond_dim)
        self.recon_trunk = arch_util.make_layer(block_fn, nb)

        # x2 sub-pixel upsampling.
        self.upconv = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        self.act = nn.ReLU(inplace=True)

        # Small-weight init for both the conv stack and the condition mapping.
        for group in ([self.conv_first, self.upconv, self.HRconv, self.conv_last],
                      [self.global_scale]):
            arch_util.initialize_weights(group, 0.1)
Example #4
0
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4, n_condition=32):
        """MSRResNet variant carrying an SFT-style condition MLP.

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunk.
            nb: number of residual blocks.
            upscale: upsampling factor (2, 3 or 4).
            n_condition: dimensionality of the condition vector fed to the
                fc1/fc2/fc3 mapping.
        """
        super(MSRResNet_withSFT, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(in_nc, nf, kernel_size=3, stride=1,
                                    padding=1, bias=True)
        trunk_block = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)
        self.recon_trunk = arch_util.make_layer(trunk_block, nb)

        # Sub-pixel upsampling; x4 is realized as two cascaded x2 stages.
        if self.upscale == 2:
            self.upconv1 = nn.Conv2d(nf, nf * 4, kernel_size=3, stride=1,
                                     padding=1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif self.upscale == 3:
            self.upconv1 = nn.Conv2d(nf, nf * 9, kernel_size=3, stride=1,
                                     padding=1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(3)
        elif self.upscale == 4:
            self.upconv1 = nn.Conv2d(nf, nf * 4, kernel_size=3, stride=1,
                                     padding=1, bias=True)
            self.upconv2 = nn.Conv2d(nf, nf * 4, kernel_size=3, stride=1,
                                     padding=1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)

        self.HRconv = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                                bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, kernel_size=3, stride=1,
                                   padding=1, bias=True)

        # Activation used throughout the forward pass.
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # Small-weight initialization helps residual learning converge.
        arch_util.initialize_weights(
            [self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
        if self.upscale == 4:
            arch_util.initialize_weights(self.upconv2, 0.1)

        # Condition-mapping MLP: n_condition -> 400 -> 400 -> n_condition.
        self.fc1 = nn.Linear(n_condition, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, n_condition)
Example #5
0
    def __init__(self, nf=64):
        """Residual block (no BN) followed by a pixel-attention module.

        Args:
            nf: number of feature channels (input == output).
        """
        super(ResidualBlock_noBN_PA, self).__init__()
        # Two 3x3 convs form the residual branch.
        self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                               bias=True)
        self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                               bias=True)
        # Pixel attention over the branch output.
        self.pa = PA(nf)

        # Small weights stabilize residual learning.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)
Example #6
0
    def __init__(self, nf=64, cond_dim=2):
        """Residual block with a condition-driven local scaling (CResMD).

        Args:
            nf: number of feature channels (input == output).
            cond_dim: size of the condition vector fed to ``local_scale``.
        """
        super(ResidualBlock_CRes, self).__init__()
        # Residual branch: two 3x3 convs with a ReLU in between.
        self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                               bias=True)
        self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                               bias=True)
        self.act = nn.ReLU(inplace=True)

        # Projects the condition vector to a per-channel scale.
        self.local_scale = nn.Linear(cond_dim, nf, bias=True)

        # Small-weight init for both the convs and the condition mapping.
        for group in ([self.conv1, self.conv2], [self.local_scale]):
            arch_util.initialize_weights(group, 0.1)
Example #7
0
    def __init__(self):
        """Small convolutional classifier producing 3 output logits."""
        super(Classifier, self).__init__()
        # Final linear head over the 32-channel condition features.
        self.lastOut = nn.Linear(32, 3)

        # Condition network: one strided 4x4 conv, then three 1x1
        # conv/LeakyReLU pairs, then a 1x1 projection to 32 channels.
        cond_layers = [nn.Conv2d(3, 128, 4, 4), nn.LeakyReLU(0.1, True)]
        for _ in range(3):
            cond_layers.extend([nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True)])
        cond_layers.append(nn.Conv2d(128, 32, 1))
        self.CondNet = nn.Sequential(*cond_layers)
        arch_util.initialize_weights([self.CondNet], 0.1)
Example #8
0
    def __init__(self, nf=64, gc=32, bias=True):
        """Dense block of five 3x3 convs with dense channel concatenation.

        Args:
            nf: input/output channels of the block.
            gc: growth channels produced by each intermediate conv.
            bias: whether the convs carry a bias term.
        """
        super(ResidualDenseBlock_5C, self).__init__()
        # conv_i consumes nf + (i-1)*gc channels (dense connectivity).
        for i in range(1, 5):
            setattr(self, 'conv%d' % i,
                    nn.Conv2d(nf + (i - 1) * gc, gc, 3, 1, 1, bias=bias))
        # Final conv fuses everything back down to nf channels.
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # Small-weight initialization.
        arch_util.initialize_weights(
            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
Example #9
0
    def __init__(self,
                 n_resgroups=10,
                 n_resblocks=20,
                 n_feats=64,
                 res_scale=1,
                 n_colors=3,
                 rgb_range=1,
                 scale=4,
                 reduction=16,
                 conv=arch_util.default_conv):
        """RCAN with pixel attention: head conv, residual groups, upsampler.

        Args:
            n_resgroups: number of ResidualGroup modules in the body.
            n_resblocks: residual blocks per group.
            n_feats: feature channels.
            res_scale: residual scaling passed to each group.
            n_colors: image channels.
            rgb_range: dynamic range of pixel values (for MeanShift).
            scale: upsampling factor.
            reduction: channel-attention reduction ratio.
            conv: conv-layer factory.
        """
        super(RCAN_PA, self).__init__()

        # (Removed the no-op self-assignments `n_resgroups = n_resgroups`,
        # `n_feats = n_feats`, etc. that previously cluttered this method.)
        kernel_size = 3
        act = nn.ReLU(True)

        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = arch_util.MeanShift(rgb_range, rgb_mean, rgb_std)

        # define head module
        modules_head = [conv(n_colors, n_feats, kernel_size)]

        # define body module
        modules_body = [
            ResidualGroup(
                conv, n_feats, kernel_size, reduction, act=act, res_scale=res_scale, n_resblocks=n_resblocks) \
            for _ in range(n_resgroups)]

        modules_body.append(conv(n_feats, n_feats, kernel_size))

        # define tail module
        modules_tail = [
            Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, n_colors, kernel_size)
        ]

        # Sign 1 => this shift adds the mean back after reconstruction.
        self.add_mean = arch_util.MeanShift(rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.tail = nn.Sequential(*modules_tail)

        arch_util.initialize_weights([self.head, self.body, self.tail], 0.1)
Example #10
0
    def __init__(self, nf=64):
        """Residual block (no BN) with squeeze-and-excite channel attention.

        Args:
            nf: number of feature channels (input == output).
        """
        super(ResidualBlock_noBN_CA, self).__init__()
        reduction_ratio = 16  # bottleneck factor of the SE fc layers
        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        # 1x1 fusion conv over a 2*nf-channel input.
        # BUG FIX: padding was 1; for a 1x1 kernel that pads the input and
        # grows the output by 2 pixels per side, which would break any
        # residual add downstream. A 1x1 conv needs padding 0.
        self.conv3 = nn.Conv2d(2 * nf, nf, 1, 1, 0, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Squeeze-and-excitation gate: pooled vector -> bottleneck -> sigmoid.
        self.fc = nn.Sequential(
            nn.Linear(nf, nf // reduction_ratio, bias=False),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Linear(nf // reduction_ratio, nf, bias=False), nn.Sigmoid())
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization: residual learning
        arch_util.initialize_weights([self.conv1, self.conv2, self.conv3], 0.1)
        arch_util.initialize_weights([self.fc], 0.1)
Example #11
0
    def __init__(self, input_channels, upscale, d=64, s=12, m=4):
        """FSRCNN: feature extraction, shrink, m mapping convs, expand, and a
        single transposed conv that performs the upsampling.

        Args:
            input_channels: image channels in and out.
            upscale: upsampling factor (stride of the deconvolution).
            d: feature channels of the head/expand stages.
            s: shrunk channels of the mapping stage.
            m: number of 3x3 mapping convs.
        """
        super(FSRCNN_net, self).__init__()
        # Head: 5x5 feature extraction.
        self.head_conv = nn.Sequential(
            nn.Conv2d(input_channels, d, kernel_size=5, stride=1, padding=2),
            nn.PReLU())

        # Body: shrink -> m mapping convs -> expand.
        # FIX: the modules were previously collected in `self.layers`, a
        # plain-list attribute. Plain Python lists do not register their
        # submodules with nn.Module, and the list merely duplicated what
        # body_conv already holds; a local list avoids the stray attribute.
        layers = [
            nn.Sequential(
                nn.Conv2d(d, s, kernel_size=1, stride=1, padding=0),
                nn.PReLU())
        ]
        for _ in range(m):
            layers.append(nn.Conv2d(s, s, kernel_size=3, stride=1, padding=1))
        # NOTE(review): a single PReLU after the whole mapping stack — the
        # reference FSRCNN activates after each mapping conv; behavior kept
        # as-is to preserve checkpoints.
        layers.append(nn.PReLU())
        layers.append(
            nn.Sequential(
                nn.Conv2d(s, d, kernel_size=1, stride=1, padding=0),
                nn.PReLU()))
        self.body_conv = nn.Sequential(*layers)

        # Tail: transposed conv does the actual x`upscale` enlargement.
        self.tail_conv = nn.ConvTranspose2d(in_channels=d,
                                            out_channels=input_channels,
                                            kernel_size=9,
                                            stride=upscale,
                                            padding=3,
                                            output_padding=1)

        arch_util.initialize_weights(
            [self.head_conv, self.body_conv, self.tail_conv], 0.1)
Example #12
0
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=32):
        """Baseline restoration network: conv, residual trunk, x2 upsample.

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunk.
            nb: number of residual blocks.
        """
        super(BaseNet, self).__init__()

        # Stride-2 first conv halves resolution; PixelShuffle below restores it.
        self.conv_first = nn.Conv2d(in_nc, nf, kernel_size=3, stride=2,
                                    padding=1, bias=True)

        block_fn = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)
        self.recon_trunk = arch_util.make_layer(block_fn, nb)

        # x2 sub-pixel upsampling.
        self.upconv = nn.Conv2d(nf, nf * 4, kernel_size=3, stride=1,
                                padding=1, bias=True)
        self.pixel_shuffle = nn.PixelShuffle(2)

        self.HRconv = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1,
                                bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, kernel_size=3, stride=1,
                                   padding=1, bias=True)

        self.act = nn.ReLU(inplace=True)

        # Small-weight initialization for residual learning.
        arch_util.initialize_weights(
            [self.conv_first, self.upconv, self.HRconv, self.conv_last], 0.1)
Example #13
0
    def __init__(self, nf=64, adafm_ksize=1):
        """Residual block whose convs are each followed by an AdaFM filter.

        Args:
            nf: number of feature channels (input == output).
            adafm_ksize: (odd) kernel size of the depthwise AdaFM filters.
        """
        super(ResidualBlock_adafm, self).__init__()

        def adafm_layer():
            # Depthwise (groups == channels) filter with SAME padding.
            return nn.Conv2d(nf, nf,
                             kernel_size=adafm_ksize,
                             padding=(adafm_ksize - 1) // 2,
                             groups=nf,
                             bias=True)

        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.adafm1 = adafm_layer()
        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.adafm2 = adafm_layer()

        # Convs get small weights (residual learning); AdaFM filters are
        # initialized with scale 0.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)
        arch_util.initialize_weights([self.adafm1, self.adafm2], 0.)
Example #14
0
    def __init__(self, nf=64, bias=True):
        """Depthwise-separable dense block.

        Channel layout of the four convs:
            dwconv1: nf/2 -> nf/2  (3x3 depthwise)
            pwconv1: nf   -> nf    (1x1 pointwise)
            dwconv2: 2nf  -> 2nf   (3x3 depthwise)
            pwconv2: 4nf  -> nf/2  (1x1 pointwise)
        NOTE(review): the growing input widths imply earlier features are
        concatenated between stages — confirm against forward().

        Args:
            nf: base channel count.
            bias: whether the convs carry a bias term.
        """
        super(ResidualDenseBlock, self).__init__()

        def _dw(ch):
            # 3x3 depthwise conv (groups == channels), stride 1, SAME padding.
            return nn.Conv2d(ch, ch, 3, 1, 1, groups=ch, bias=bias)

        def _pw(c_in, c_out):
            # 1x1 pointwise conv.
            return nn.Conv2d(c_in, c_out, 1, 1, 0, bias=bias)

        # BUG FIX: the `bias` parameter was accepted but every conv
        # hard-coded bias=True; the convs now honor it (default unchanged).
        self.dwconv1 = _dw(nf // 2)
        self.pwconv1 = _pw(nf, nf)
        self.dwconv2 = _dw(nf * 2)
        self.pwconv2 = _pw(nf * 4, nf // 2)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # Small-weight initialization.
        arch_util.initialize_weights(
            [self.dwconv1, self.pwconv1, self.dwconv2, self.pwconv2], 0.1)
Example #15
0
    def __init__(self, use_sigmoid):
        """Proxy IQA network: four conv/pool stages over a 6-channel input
        (presumably a concatenated image pair — confirm with the caller),
        flattened into a single quality score.

        Args:
            use_sigmoid: if True, a Sigmoid module is created for squashing
                the score.
        """
        super(ProxIQANet, self).__init__()

        def _stage(c_in, c_out):
            # 5x5 conv (SAME padding), ReLU, 2x2 max-pool.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, 5, 1, 2),
                nn.ReLU(),
                nn.MaxPool2d(2))

        self.conv1 = _stage(6, 16)
        self.conv2 = _stage(16, 32)
        self.conv3 = _stage(32, 64)
        self.conv4 = _stage(64, 128)

        # 32768 = 128 channels * 16 * 16 spatial — implies a 256x256 input
        # after four halvings; TODO confirm expected input size.
        self.fc = nn.Linear(32768, 1)

        self.use_sigmoid = use_sigmoid
        if self.use_sigmoid:
            self.sigmoid = nn.Sigmoid()

        # initialization
        # BUG FIX: self.conv4 was missing from this list while conv1-conv3
        # and fc were included — presumably an oversight, now initialized
        # consistently with its siblings.
        arch_util.initialize_weights(
            [self.conv1, self.conv2, self.conv3, self.conv4, self.fc], 0.1)
    def __init__(self, in_nc=3, out_nc=3, nf=64, upscale=4):
        """Coarse-to-fine SR network with three channel-attention trunks.

        Args:
            in_nc: number of input image channels.
            out_nc: number of output image channels.
            nf: feature channels of the trunks.
            upscale: upsampling factor (2, 3, 4 or 8).
        """
        super(Coarse_Fine_RCANNet, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        self.conv_second = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_third = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        basic_block = functools.partial(network_block.ResidualBlock_noBN_CA,
                                        nf=nf)

        # Three trunks of 5 + 5 + 6 channel-attention residual blocks.
        self.recon_trunk1 = arch_util.make_layer(basic_block, 5)
        self.recon_trunk2 = arch_util.make_layer(basic_block, 5)
        self.recon_trunk3 = arch_util.make_layer(basic_block, 6)

        # upsampling: x4 / x8 use cascaded x2 sub-pixel stages
        if self.upscale == 2:
            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif self.upscale == 3:
            self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(3)
        elif self.upscale == 4:
            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif self.upscale == 8:
            self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.upconv3 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)

        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization
        arch_util.initialize_weights([
            self.conv_first, self.conv_second, self.conv_third, self.upconv1,
            self.HRconv, self.conv_last
        ], 0.1)
        # BUG FIX: for upscale == 8 only upconv3 was re-initialized and
        # upconv2 was left at PyTorch defaults; initialize it on both the
        # x4 and x8 paths.
        if self.upscale in (4, 8):
            arch_util.initialize_weights(self.upconv2, 0.1)
        if self.upscale == 8:
            arch_util.initialize_weights(self.upconv3, 0.1)