Example #1
    def __init__(self, in_nc=3, out_nc=3, nc=64, nb=17, act_mode='BR'):
        """
        # ------------------------------------
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: total number of conv layers
        act_mode: batch norm + activation function; 'BR' means BN+ReLU.
        # ------------------------------------
        Batch normalization and residual learning are
        beneficial to Gaussian denoising (especially
        for a single noise level).
        The residual of a noisy image corrupted by additive white
        Gaussian noise (AWGN) follows a constant
        Gaussian distribution, which stabilizes batch
        normalization during training.
        # ------------------------------------
        """
        super(DnCNN, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True

        m_head = B.conv(in_nc, nc, mode='C' + act_mode[-1], bias=bias)
        m_body = [
            B.conv(nc, nc, mode='C' + act_mode, bias=bias)
            for _ in range(nb - 2)
        ]
        m_tail = B.conv(nc, out_nc, mode='C', bias=bias)

        self.model = B.sequential(m_head, *m_body, m_tail)
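
A usage sketch for the constructor above: with nb=17 the model stacks one Conv+ReLU head, fifteen Conv+BN+ReLU body layers, and one plain Conv tail. The import path and the forward() convention are assumptions based on a KAIR-style repository layout; they are not shown in the snippet.

import torch
from models.network_dncnn import DnCNN  # assumed KAIR-style import path

net = DnCNN(in_nc=3, out_nc=3, nc=64, nb=17, act_mode='BR').eval()
noisy = torch.rand(1, 3, 64, 64)   # N x C x H x W noisy patch in [0, 1]
with torch.no_grad():
    out = net(noisy)               # denoised estimate or predicted residual,
                                   # depending on how forward() is defined
print(out.shape)                   # torch.Size([1, 3, 64, 64])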
Example #2
    def __init__(self, in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R'):
        """
        # ------------------------------------
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: total number of conv layers
        act_mode: batch norm + activation function; 'BR' means BN+ReLU.
        # ------------------------------------
        """
        super(FFDNet, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True
        sf = 2

        self.m_down = B.PixelUnShuffle(upscale_factor=sf)

        m_head = B.conv(in_nc * sf * sf + 1,
                        nc,
                        mode='C' + act_mode[-1],
                        bias=bias)
        m_body = [
            B.conv(nc, nc, mode='C' + act_mode, bias=bias)
            for _ in range(nb - 2)
        ]
        m_tail = B.conv(nc, out_nc * sf * sf, mode='C', bias=bias)

        self.model = B.sequential(m_head, *m_body, m_tail)

        self.m_up = nn.PixelShuffle(upscale_factor=sf)
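
The extra +1 input channel of the head is there for a noise-level map concatenated with the pixel-unshuffled image. A shape-bookkeeping sketch follows; the two-argument forward() call is an assumption inferred from that extra channel and is not shown above.

import torch
from models.network_ffdnet import FFDNet  # assumed KAIR-style import path

net = FFDNet(in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R').eval()
noisy = torch.rand(1, 1, 64, 64)               # grayscale noisy image
sigma = torch.full((1, 1, 1, 1), 25 / 255.0)   # per-image noise level
with torch.no_grad():
    out = net(noisy, sigma)                    # assumed call convention
# Inside: PixelUnshuffle with sf=2 gives 1*2*2 = 4 channels at 32x32; the
# broadcast sigma map adds one more, matching in_nc*sf*sf + 1 = 5.
print(out.shape)                               # torch.Size([1, 1, 64, 64])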
Example #3
    def __init__(self,
                 in_nc=3,
                 out_nc=3,
                 nc=64,
                 nb=23,
                 gc=32,
                 upscale=4,
                 act_mode='L',
                 upsample_mode='upconv'):
        super(RRDB, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'

        n_upscale = int(math.log(upscale, 2))
        if upscale == 3:
            n_upscale = 1

        m_head = B.conv(in_nc, nc, mode='C')

        m_body = [B.RRDB(nc, gc=gc, mode='C' + act_mode) for _ in range(nb)]  # pass the gc argument instead of a hard-coded 32
        m_body.append(B.conv(nc, nc, mode='C'))

        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError(
                'upsample mode [{:s}] is not found'.format(upsample_mode))

        if upscale == 3:
            m_uper = upsample_block(nc, nc, mode='3' + act_mode)
        else:
            m_uper = [
                upsample_block(nc, nc, mode='2' + act_mode)
                for _ in range(n_upscale)
            ]

        H_conv0 = B.conv(nc, nc, mode='C' + act_mode)
        H_conv1 = B.conv(nc, out_nc, mode='C')
        m_tail = B.sequential(H_conv0, H_conv1)

        self.model = B.sequential(m_head,
                                  B.ShortcutBlock(B.sequential(*m_body)),
                                  *m_uper, m_tail)
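
Because n_upscale = log2(upscale), the default upscale=4 chains two x2 upsampling blocks after the residual-in-residual trunk. A minimal 4x super-resolution sketch, with the import path assumed from a KAIR-style layout:

import torch
from models.network_rrdb import RRDB  # assumed KAIR-style import path

net = RRDB(in_nc=3, out_nc=3, nc=64, nb=23, gc=32, upscale=4,
           act_mode='L', upsample_mode='upconv').eval()
lr = torch.rand(1, 3, 32, 32)       # low-resolution RGB patch
with torch.no_grad():
    sr = net(lr)
print(sr.shape)                     # torch.Size([1, 3, 128, 128]) for upscale=4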
Example #4
    def __init__(self,
                 in_nc=3,
                 out_nc=3,
                 nc=64,
                 nb=8,
                 upscale=4,
                 act_mode='L',
                 upsample_mode='pixelshuffle',
                 negative_slope=0.05):
        """
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: number of IMD blocks
        upscale: up-scale factor
        act_mode: activation function
        upsample_mode: 'upconv' | 'pixelshuffle' | 'convtranspose'
        negative_slope: negative slope of the leaky ReLU activation
        """
        super(IMDN, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        m_head = B.conv(in_nc, nc, mode='C')
        m_body = [
            B.IMDBlock(nc,
                       nc,
                       mode='C' + act_mode,
                       negative_slope=negative_slope) for _ in range(nb)
        ]
        m_body.append(B.conv(nc, nc, mode='C'))
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError(
                'upsample mode [{:s}] is not found'.format(upsample_mode))
        m_uper = upsample_block(nc, out_nc, mode=str(upscale))
        self.model = B.sequential(m_head,
                                  B.ShortcutBlock(B.sequential(*m_body)),
                                  *m_uper)
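
Unlike the RRDB network, this IMDN uses a single upsampling block with mode=str(upscale), which takes the nc-channel features straight to the out_nc high-resolution output. A minimal sketch, with the import path assumed from a KAIR-style layout:

import torch
from models.network_imdn import IMDN  # assumed KAIR-style import path

net = IMDN(in_nc=3, out_nc=3, nc=64, nb=8, upscale=4,
           act_mode='L', upsample_mode='pixelshuffle').eval()
lr = torch.rand(1, 3, 48, 48)
with torch.no_grad():
    sr = net(lr)
print(sr.shape)                     # torch.Size([1, 3, 192, 192]) for upscale=4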
Example #5
    def __init__(self,
                 in_nc=3,
                 out_nc=3,
                 nc=[64, 128, 256, 512],
                 nb=2,
                 act_mode='R',
                 downsample_mode='strideconv',
                 upsample_mode='convtranspose'):
        super(ResUNet, self).__init__()

        self.m_head = B.conv(in_nc, nc[0], bias=False, mode='C')

        # downsample
        if downsample_mode == 'avgpool':
            downsample_block = B.downsample_avgpool
        elif downsample_mode == 'maxpool':
            downsample_block = B.downsample_maxpool
        elif downsample_mode == 'strideconv':
            downsample_block = B.downsample_strideconv
        else:
            raise NotImplementedError(
                'downsample mode [{:s}] is not found'.format(downsample_mode))

        self.m_down1 = B.sequential(
            *[
                B.ResBlock(nc[0], nc[0], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ], downsample_block(nc[0], nc[1], bias=False, mode='2'))
        self.m_down2 = B.sequential(
            *[
                B.ResBlock(nc[1], nc[1], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ], downsample_block(nc[1], nc[2], bias=False, mode='2'))
        self.m_down3 = B.sequential(
            *[
                B.ResBlock(nc[2], nc[2], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ], downsample_block(nc[2], nc[3], bias=False, mode='2'))

        self.m_body = B.sequential(*[
            B.ResBlock(nc[3], nc[3], bias=False, mode='C' + act_mode + 'C')
            for _ in range(nb)
        ])

        # upsample
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError(
                'upsample mode [{:s}] is not found'.format(upsample_mode))

        self.m_up3 = B.sequential(
            upsample_block(nc[3], nc[2], bias=False, mode='2'), *[
                B.ResBlock(nc[2], nc[2], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ])
        self.m_up2 = B.sequential(
            upsample_block(nc[2], nc[1], bias=False, mode='2'), *[
                B.ResBlock(nc[1], nc[1], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ])
        self.m_up1 = B.sequential(
            upsample_block(nc[1], nc[0], bias=False, mode='2'), *[
                B.ResBlock(nc[0], nc[0], bias=False, mode='C' + act_mode + 'C')
                for _ in range(nb)
            ])

        self.m_tail = B.conv(nc[0], out_nc, bias=False, mode='C')
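
The constructor above only wires up the modules; the three stride-2 downsampling stages imply the input height and width should be multiples of 8. Below is one plausible forward wiring with additive skip connections between matching resolutions, offered purely as a sketch since forward() is not part of the snippet.

    # Sketch only: additive skips are an assumption, not taken from the source.
    def forward(self, x):
        x1 = self.m_head(x)      # nc[0] channels, full resolution
        x2 = self.m_down1(x1)    # nc[1] channels, H/2 x W/2
        x3 = self.m_down2(x2)    # nc[2] channels, H/4 x W/4
        x4 = self.m_down3(x3)    # nc[3] channels, H/8 x W/8
        x = self.m_body(x4)
        x = self.m_up3(x + x4)   # skip connection by element-wise addition
        x = self.m_up2(x + x3)
        x = self.m_up1(x + x2)
        x = self.m_tail(x + x1)
        return x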