Example #1
    def __init__(self,
                 num_in_ch=3,
                 num_out_ch=3,
                 num_feat=64,
                 num_block=16,
                 upscale=4):
        super(MSRResNet, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = arch_util.make_layer(
            arch_util.ResidualBlockNoBN, num_block, num_feat=num_feat)

        # upsampling
        if self.upscale in [2, 3]:
            self.upconv1 = nn.Conv2d(num_feat,
                                     num_feat * self.upscale * self.upscale, 3,
                                     1, 1)
            self.pixel_shuffle = nn.PixelShuffle(self.upscale)
        elif self.upscale == 4:
            self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.pixel_shuffle = nn.PixelShuffle(2)

        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization
        arch_util.default_init_weights(
            [self.conv_first, self.upconv1, self.conv_hr, self.conv_last], 0.1)
        if self.upscale == 4:
            arch_util.default_init_weights(self.upconv2, 0.1)
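For context, a forward pass consistent with this constructor might look like the sketch below. It is an assumption (the example only shows __init__); it presumes torch.nn.functional is imported as F and that the network adds a bilinearly upsampled copy of the input as a global residual.

    # Hypothetical forward pass for the constructor above -- a sketch, not the
    # repository's actual implementation.  Assumes `import torch.nn.functional as F`.
    def forward(self, x):
        feat = self.lrelu(self.conv_first(x))
        out = self.body(feat)

        # pixel-shuffle upsampling: one stage for x2/x3, two x2 stages for x4
        if self.upscale == 4:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
        elif self.upscale in [2, 3]:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))

        out = self.conv_last(self.lrelu(self.conv_hr(out)))

        # global residual: add a bilinearly upsampled copy of the input (assumed)
        base = F.interpolate(
            x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
        return out + base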
Example #2
    def __init__(self, num_feat=64, num_grow_ch=32):
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1,
                               1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1,
                               1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        default_init_weights(
            [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
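The growing input channel counts (num_feat, num_feat + num_grow_ch, ...) encode dense connections: each conv sees the block input concatenated with every earlier output. A forward-pass sketch consistent with the constructor follows; it is assumed, and the 0.2 residual scaling is the usual ESRGAN convention rather than something shown in this example.

    # Sketch of a dense forward pass matching the channel growth above (assumed).
    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # local residual with 0.2 scaling (common ESRGAN choice; an assumption here)
        return x5 * 0.2 + x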
Example #3
    def __init__(self,
                 num_in_ch=3,
                 num_out_ch=3,
                 num_feat=64,
                 num_block=16,
                 upscale=4):
        super(MSRResNet_WT_Pixel, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = arch_util.make_layer(
            arch_util.ResidualBlockNoBN, num_block, num_feat=num_feat)

        # upsampling
        if self.upscale in [2, 3]:
            self.upconv1 = nn.Conv2d(num_feat,
                                     num_feat * self.upscale * self.upscale, 3,
                                     1, 1)
            self.pixel_shuffle = nn.PixelShuffle(self.upscale)
        elif self.upscale == 4:
            self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.pixel_shuffle = nn.PixelShuffle(2)

        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization
        arch_util.default_init_weights(
            [self.conv_first, self.upconv1, self.conv_hr, self.conv_last], 0.1)
        if self.upscale == 4:
            arch_util.default_init_weights(self.upconv2, 0.1)

        # WT filter
        inv_filters = arch_util.create_inv_filters()
        self.register_buffer('inv_filters', inv_filters)

        # Normalization buffers
        self.register_buffer('shift', torch.Tensor([3.0]))
        self.register_buffer('scale', torch.Tensor([10.0]))
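The register_buffer calls make the wavelet filters and the shift/scale constants non-trainable tensors that still move with the module and are saved in its state_dict. A quick illustration, assuming the class and arch_util are importable as in the example:

    # Buffers are excluded from the optimizer but travel with the module.
    model = MSRResNet_WT_Pixel(upscale=4)
    assert 'shift' in model.state_dict()                  # saved with checkpoints
    assert 'shift' not in dict(model.named_parameters())  # but not trainable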
Example #4
    def __init__(
        self,
        num_in_ch=3,
        num_out_ch=3,
        num_feat=64,
        num_block=16,
    ):
        super().__init__()

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = arch_util.make_layer(arch_util.ResidualBlockNoBN,
                                         num_block,
                                         num_feat=num_feat)

        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization
        arch_util.default_init_weights([self.conv_first, self.conv_last], 0.1)
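This variant keeps the input resolution: there is no pixel-shuffle stage, only the residual body between conv_first and conv_last. A forward pass consistent with the constructor could look like the sketch below; the identity skip assumes num_in_ch == num_out_ch and is not confirmed by the example.

    # Hypothetical resolution-preserving forward pass (the example only shows
    # the constructor).  The identity skip needs num_in_ch == num_out_ch.
    def forward(self, x):
        feat = self.lrelu(self.conv_first(x))
        out = self.conv_last(self.body(feat))
        return out + x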
Example #5
    def __init__(self, num_feat=64, num_grow_ch=32):
        super(LowrankResidualDenseBlock, self).__init__()
        #self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv1_u = nn.Conv2d(num_feat, int(num_grow_ch / 4), 3, 1, 1)
        self.conv1_v = nn.Conv2d(int(num_grow_ch / 4), num_grow_ch, 1, 1)

        #self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv2_u = nn.Conv2d(num_feat + num_grow_ch, int(num_grow_ch / 4),
                                 3, 1, 1)
        self.conv2_v = nn.Conv2d(int(num_grow_ch / 4), num_grow_ch, 1, 1)

        #self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1,
        #                       1)
        self.conv3_u = nn.Conv2d(num_feat + 2 * num_grow_ch,
                                 int(num_grow_ch / 4), 3, 1, 1)
        self.conv3_v = nn.Conv2d(int(num_grow_ch / 4), num_grow_ch, 1, 1)

        #self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1,
        #                       1)
        self.conv4_u = nn.Conv2d(num_feat + 3 * num_grow_ch,
                                 int(num_grow_ch / 4), 3, 1, 1)
        self.conv4_v = nn.Conv2d(int(num_grow_ch / 4), num_grow_ch, 1, 1)

        #self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
        self.conv5_u = nn.Conv2d(num_feat + 4 * num_grow_ch, int(num_feat / 4),
                                 3, 1, 1)
        self.conv5_v = nn.Conv2d(int(num_feat / 4), num_feat, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        #default_init_weights(
        #    [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
        default_init_weights([
            self.conv1_u, self.conv1_v, self.conv2_u, self.conv2_v,
            self.conv3_u, self.conv3_v, self.conv4_u, self.conv4_v,
            self.conv5_u, self.conv5_v
        ], 0.1)
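Each 3x3 convolution of the plain ResidualDenseBlock is factored into a 3x3 conv into a quarter-width bottleneck (the *_u layers) followed by a 1x1 expansion (the *_v layers), presumably applied as conv_v(conv_u(x)). With the default sizes the saving is roughly 3.8x per factored conv, e.g. for conv1 (biases ignored):

    # Weight count for conv1 with num_feat=64, num_grow_ch=32 (biases ignored).
    full = 64 * 32 * 3 * 3                               # original 3x3 conv: 18,432
    lowrank = 64 * (32 // 4) * 3 * 3 + (32 // 4) * 32    # u + v: 4,608 + 256 = 4,864
    print(full, lowrank, round(full / lowrank, 1))       # 18432 4864 3.8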