Example no. 1: RBPN
    def __init__(self,
                 cl=32,
                 cm=32,
                 ch=16,
                 nframes=7,
                 input_nc=3,
                 output_nc=3,
                 upscale_factor=4):
        super(RBPN, self).__init__()
        self.nframes = nframes
        self.upscale_factor = upscale_factor
        # Initial feature extraction
        self.conv1 = M.Sequential(
            M.Conv2d(input_nc, cl, kernel_size=3, stride=1, padding=1),
            M.PReLU(),
        )
        self.conv2 = M.Sequential(
            M.Conv2d(input_nc * 2, cm, kernel_size=3, stride=1, padding=1),
            M.PReLU(),
        )
        # projection module
        self.Projection = Projection_Module(cl, cm, ch)

        # reconstruction module
        self.reconstruction = M.Conv2d((self.nframes-1)*ch, output_nc, kernel_size=3, stride=1, padding=1)
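The reconstruction layer above expects (nframes - 1) * ch input channels, i.e. 6 * 16 = 96 with the defaults, which is consistent with concatenating one ch-channel projection output per neighbouring frame before the final convolution. A minimal arithmetic check (plain Python, names purely illustrative):

nframes, ch = 7, 16
neighbour_maps = nframes - 1       # one projected feature map per neighbouring frame
assert neighbour_maps * ch == 96   # channels fed into self.reconstruction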
Example no. 2: FSRCNN
    def __init__(self,
                 in_channels=3,
                 out_channels=3,
                 d=56,
                 s=12,
                 upscale_factor=4):
        super(FSRCNN, self).__init__()

        l = []
        l.append(
            M.Sequential(Conv2d(in_channels, d, 5, 1, 2),
                         M.PReLU(num_parameters=1, init=0.25)))
        l.append(
            M.Sequential(Conv2d(d, s, 1, 1, 0),
                         M.PReLU(num_parameters=1, init=0.25)))
        for i in range(4):
            l.append(
                M.Sequential(Conv2d(s, s, 3, 1, 1),
                             M.PReLU(num_parameters=1, init=0.25)))
        l.append(
            M.Sequential(Conv2d(s, d, 1, 1, 0),
                         M.PReLU(num_parameters=1, init=0.25)))
        l.append(ConvTranspose2d(d, out_channels, 8, upscale_factor,
                                 padding=2))
        self.convs = M.Sequential(*l)
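The final ConvTranspose2d(d, out_channels, 8, upscale_factor, padding=2) produces the resolution increase. With the standard transposed-convolution size formula, kernel 8 and padding 2 yield an exact x4 output only when upscale_factor is 4; for other factors the kernel is usually chosen as upscale_factor + 4, as in the DBPN example below. A quick check of that formula (plain Python, illustrative only):

def deconv_out(h_in, kernel=8, stride=4, padding=2):
    # transposed-convolution output size, assuming dilation=1 and no output_padding
    return (h_in - 1) * stride - 2 * padding + kernel

assert deconv_out(24) == 96   # 24 -> 96: exactly 4x with the defaults above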
Example no. 3: UPU
    def __init__(self, num_channels, filter_size, stride, padding):
        super(UPU, self).__init__()
        self.deconv1 = M.Sequential(
            ConvTranspose2d(num_channels, num_channels, filter_size, stride,
                            padding), M.PReLU(num_parameters=1, init=0.25))
        self.conv1 = M.Sequential(
            Conv2d(num_channels, num_channels, filter_size, stride, padding),
            M.PReLU(num_parameters=1, init=0.25))
        self.deconv2 = M.Sequential(
            ConvTranspose2d(num_channels, num_channels, filter_size, stride,
                            padding), M.PReLU(num_parameters=1, init=0.25))
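The three layers above match the layout of a DBPN-style up-projection unit. The snippet omits the forward pass; a plausible sketch, assuming it follows the back-projection pattern of the DBPN paper rather than the repository's actual code, would be:

    def forward(self, x):             # x: low-resolution feature map
        h0 = self.deconv1(x)          # upsample
        l0 = self.conv1(h0)           # project back to low resolution
        h1 = self.deconv2(l0 - x)     # upsample the back-projection residual
        return h0 + h1                # refined high-resolution features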
Example no. 4: ResBlock
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 activation='prelu'):
        super(ResBlock, self).__init__()

        if activation == 'relu':
            self.act = M.ReLU()
        elif activation == 'prelu':
            self.act = M.PReLU(num_parameters=1, init=0.25)
        else:
            raise NotImplementedError("unsupported activation: {}".format(activation))

        m = []
        m.append(
            M.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=1,
                     padding=(kernel_size // 2)))
        m.append(self.act)
        m.append(
            M.Conv2d(out_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=1,
                     padding=(kernel_size // 2)))
        self.body = M.Sequential(*m)
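The forward pass is not shown; for a residual block with in_channels == out_channels the usual pattern is an identity skip connection around self.body. A hedged sketch under that assumption:

    def forward(self, x):
        # identity skip connection; assumes in_channels == out_channels
        return x + self.body(x)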
Example no. 5: Decoder
    def __init__(self, ch, cl):
        super(Decoder, self).__init__()
        self.model = M.Sequential(
            ResBlocks(channel_num=ch, resblock_num=5, kernel_size=3),
            M.Conv2d(ch, cl, kernel_size=8, stride=4, padding=2),
            M.PReLU(),
        )
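The strided 8x8 convolution (stride 4, padding 2) maps the high-resolution feature map back to 1/4 of its spatial size, mirroring the x4 transposed convolutions used elsewhere. A quick check with the standard convolution output-size formula (plain Python, illustrative only):

def conv_out(h_in, kernel=8, stride=4, padding=2):
    # convolution output size, assuming dilation=1
    return (h_in + 2 * padding - kernel) // stride + 1

assert conv_out(96) == 24   # 96 -> 24: the decoder returns features to 1/4 resolution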
Example no. 6: MISR_Block
    def __init__(self, cm, ch):
        super(MISR_Block, self).__init__()
        self.model = M.Sequential(
            ResBlocks(channel_num=cm, resblock_num=5, kernel_size=3),
            M.ConvTranspose2d(cm, ch, kernel_size=8, stride=4, padding=2),
            M.PReLU(),
        )
Example no. 7: Residual_Blocks
    def __init__(self, ch):
        super(Residual_Blocks, self).__init__()
        self.model = M.Sequential(
            ResBlocks(channel_num=ch, resblock_num=5, kernel_size=3),
            M.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
            M.PReLU(),
        )
Example no. 8: EESP
    def __init__(self, in_ch, out_ch, stride=1, r_lim=7, K=4):
        """
        Implementation of the Extremely Efficient Spatial Pyramid module introduced in
        "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network"
        <https://arxiv.org/pdf/1811.11431.pdf>
        Parameters
        ----------
        in_ch (int): number of input channels
        out_ch (int): number of output channels
        stride (int): stride of the convolutions
        r_lim (int): maximum receptive field allowed for the EESP block
        K (int): number of parallel branches
        """
        super(EESP, self).__init__()
        hidden_ch = int(out_ch // K)
        hidden_ch1 = out_ch - hidden_ch * (K - 1)
        assert hidden_ch1 == hidden_ch, \
            "out_ch must be divisible by K: got hidden sizes {} and {}".format(hidden_ch, hidden_ch1)
        self.g_conv1 = Conv2d(in_ch,
                              hidden_ch,
                              1,
                              stride=1,
                              groups=K,
                              activation=M.PReLU(hidden_ch))

        self.spp_convs = []
        for i in range(K):
            ksize = int(3 + i * 2)
            dilation = int((ksize - 1) / 2) if ksize <= r_lim else 1
            self.spp_convs.append(
                M.Conv2d(hidden_ch,
                         hidden_ch,
                         3,
                         stride=stride,
                         padding=dilation,
                         dilation=dilation,
                         groups=hidden_ch,
                         bias=False))

        self.conv_concat = Conv2d(out_ch, out_ch, groups=K, activation=None)
        self.bn_pr = M.Sequential(M.BatchNorm2d(out_ch), M.PReLU(out_ch))
        self.module_act = M.PReLU(out_ch)
        self.K = K
        self.stride = stride
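The forward computation is not part of the snippet. Based on the layer names above and the reference ESPNetv2 design (grouped 1x1 reduction, K parallel dilated depthwise 3x3 convolutions merged by hierarchical feature fusion, a grouped 1x1 expansion, and an optional residual), one plausible sketch is the following; the actual code may order bn_pr and conv_concat differently:

    def forward(self, x):
        # assumes `import megengine.functional as F` at module level
        y = self.g_conv1(x)                    # grouped 1x1 channel reduction
        outs = [self.spp_convs[0](y)]
        for k in range(1, self.K):
            out_k = self.spp_convs[k](y)       # dilated depthwise 3x3 branch
            outs.append(out_k + outs[k - 1])   # hierarchical feature fusion (HFF)
        merged = self.conv_concat(self.bn_pr(F.concat(outs, axis=1)))
        if self.stride == 1 and merged.shape == x.shape:
            merged = merged + x                # residual connection when not downsampling
        return self.module_act(merged)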
Example no. 9: SISR_Block
    def __init__(self, cl, ch):
        super(SISR_Block, self).__init__()
        self.num_stages = 3
        self.pre_deal = M.Conv2d(cl, ch, kernel_size=1, stride=1, padding=0)
        self.prelu = M.PReLU(num_parameters=1, init=0.25)
        self.UPU1 = UPU(ch, 8, stride=4, padding=2)
        self.UPU2 = UPU(ch, 8, stride=4, padding=2)
        self.UPU3 = UPU(ch, 8, stride=4, padding=2)
        self.DPU1 = DPU(ch, 8, stride=4, padding=2)
        self.DPU2 = DPU(ch, 8, stride=4, padding=2)
        self.reconstruction = M.Conv2d(self.num_stages * ch, ch, kernel_size=1, stride=1, padding=0)
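Only the layers are shown here; with num_stages = 3 the 1x1 reconstruction takes 3 * ch channels, which suggests the usual DBPN-style alternation of up- and down-projection units with the three up-projected maps concatenated. A hedged sketch of such a forward pass (assumes `import megengine.functional as F`; the actual code may differ):

    def forward(self, x):
        x = self.prelu(self.pre_deal(x))   # cl -> ch channels
        h1 = self.UPU1(x)
        h2 = self.UPU2(self.DPU1(h1))
        h3 = self.UPU3(self.DPU2(h2))
        return self.reconstruction(F.concat([h3, h2, h1], axis=1))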
Example no. 10: DBPN
    def __init__(self,
                 in_channels=3,
                 out_channels=3,
                 n_0=256,
                 n_R=64,
                 iterations_num=10,
                 upscale_factor=4):
        super(DBPN, self).__init__()

        filter_size = upscale_factor + 4
        stride = upscale_factor
        padding = 2
        self.iterations_num = iterations_num
        self.conv1 = M.Sequential(Conv2d(in_channels, n_0, 3, 1, 1),
                                  M.PReLU(num_parameters=1, init=0.25))
        self.conv2 = M.Sequential(Conv2d(n_0, n_R, 1, 1, 0),
                                  M.PReLU(num_parameters=1, init=0.25))
        self.UPU = UPU(n_R, filter_size, stride, padding)
        self.DPU = DPU(n_R, filter_size, stride, padding)
        self.conv3 = M.Sequential(
            Conv2d(n_R * iterations_num, out_channels, 3, 1, 1),
            M.PReLU(num_parameters=1, init=0.25))
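The choice filter_size = upscale_factor + 4, stride = upscale_factor, padding = 2 makes the projection units resize by exactly the upscale factor in both directions: the transposed convolution multiplies the spatial size by the factor and the matching convolution divides it back. A quick check (plain Python, illustrative only):

def up(h, s):     # transposed conv with kernel s + 4, stride s, padding 2
    return (h - 1) * s - 2 * 2 + (s + 4)

def down(h, s):   # convolution with the same kernel, stride and padding
    return (h + 2 * 2 - (s + 4)) // s + 1

for s in (2, 4, 8):
    assert up(24, s) == 24 * s and down(24 * s, s) == 24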
Example no. 11: SESSP
    def __init__(self,
                 in_ch,
                 out_ch,
                 stride=2,
                 r_lim=7,
                 K=4,
                 refin=True,
                 refin_ch=3):
        """
        Strided (downsampling) variant of the Extremely Efficient Spatial Pyramid module introduced in
        "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network"
        <https://arxiv.org/pdf/1811.11431.pdf>
        Parameters
        ----------
        in_ch (int): number of input channels
        out_ch (int): number of output channels
        stride (int): stride of the convolutions
        r_lim (int): maximum receptive field allowed for the EESP block
        K (int): number of parallel branches
        refin (bool): whether to reinforce the features with the downsampled input image
        refin_ch (int): number of channels of the reinforcement input (the original image)
        """
        super(SESSP, self).__init__()
        eesp_out = out_ch - in_ch
        self.eesp = EESP(in_ch, eesp_out, stride=stride, r_lim=r_lim, K=K)
        self.avg_pool = M.AvgPool2d(3, stride=stride, padding=1)
        self.refin = refin
        self.stride = stride
        self.activation = M.PReLU(out_ch)
        if refin:
            self.refin_conv = M.Sequential(
                Conv2d(refin_ch,
                       refin_ch,
                       ksize=3,
                       stride=1,
                       padding=1,
                       activation=M.PReLU(refin_ch)),
                Conv2d(refin_ch, out_ch, activation=None))
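Setting eesp_out = out_ch - in_ch ensures that concatenating the strided EESP branch with the average-pooled identity branch yields exactly out_ch channels, which is what the PReLU activation above is sized for. The arithmetic, spelled out:

in_ch, out_ch = 32, 64
eesp_out = out_ch - in_ch           # channels produced by the strided EESP branch
assert in_ch + eesp_out == out_ch   # avg-pooled identity + EESP branch after concat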
Example no. 12: EspNetV2
    def __init__(self, in_ch=3, num_classes=1000, scale=1.0):
        """
            Implementation of the ESPNetV2 introduced in
            "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network"
            <https://arxiv.org/pdf/1811.11431.pdf>
            Parameters
            ----------
            in_ch (int): number of input channels
            num_classes (int): number of classes
            scale (float): width scaling factor for the network
        """
        super(EspNetV2, self).__init__()
        reps = [0, 3, 7, 3]  # number of times the EESP block is repeated at each stage
        r_lims = [13, 11, 9, 7, 5]
        K = [4] * len(r_lims)

        base = 32
        config_len = 5
        config = [base] * config_len
        base_s = 0
        for i in range(config_len):
            if i == 0:
                base_s = int(base * scale)
                base_s = math.ceil(base_s / K[0]) * K[0]
                config[i] = base if base_s > base else base_s
            else:
                config[i] = base_s * pow(2, i)
        if scale <= 1.5:
            config.append(1024)
        elif scale <= 2.0:
            config.append(1280)
        else:
            raise ValueError(
                "Configuration for scale={} not supported".format(scale))

        ref_input = in_ch
        self.reinf = True

        self.level1 = Conv2d(in_ch,
                             config[0],
                             3,
                             stride=2,
                             padding=1,
                             activation=M.PReLU(config[0]))
        self.level2_0 = SESSP(config[0],
                              config[1],
                              stride=2,
                              r_lim=r_lims[0],
                              K=K[0],
                              refin=self.reinf,
                              refin_ch=ref_input)

        self.level3_0 = SESSP(config[1],
                              config[2],
                              stride=2,
                              r_lim=r_lims[1],
                              K=K[1],
                              refin=self.reinf,
                              refin_ch=ref_input)

        self.level3 = []
        for i in range(reps[1]):
            self.level3.append(
                EESP(config[2], config[2], stride=1, r_lim=r_lims[2], K=K[2]))

        self.level4_0 = SESSP(config[2],
                              config[3],
                              stride=2,
                              r_lim=r_lims[2],
                              K=K[2],
                              refin=self.reinf,
                              refin_ch=ref_input)
        self.level4 = []
        for i in range(reps[2]):
            self.level4.append(
                EESP(config[3], config[3], stride=1, r_lim=r_lims[3], K=K[3]))

        self.level5_0 = SESSP(config[3],
                              config[4],
                              stride=2,
                              r_lim=r_lims[3],
                              K=K[3],
                              refin=self.reinf,
                              refin_ch=ref_input)
        self.level5 = []
        for i in range(reps[3]):
            self.level5.append(
                EESP(config[4], config[4], stride=1, r_lim=r_lims[4], K=K[4]))

        self.level5.append(
            Conv2d(config[4],
                   config[4],
                   ksize=3,
                   stride=1,
                   padding=1,
                   groups=config[4],
                   activation=M.PReLU(config[4])))
        self.level5.append(
            Conv2d(config[4],
                   config[5],
                   ksize=1,
                   stride=1,
                   padding=0,
                   groups=K[3],
                   activation=M.PReLU(config[5])))
        self.classifier = M.Linear(config[5], num_classes)

        self.init_params()
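For reference, the width configuration computed by the loop above can be reproduced in isolation; the helper below simply mirrors that logic (the function name is illustrative, not part of the repository):

import math

def espnetv2_widths(scale, base=32, K=4, config_len=5):
    # mirrors the channel-width computation in EspNetV2.__init__
    config = [base] * config_len
    for i in range(config_len):
        if i == 0:
            base_s = math.ceil(int(base * scale) / K) * K
            config[i] = base if base_s > base else base_s
        else:
            config[i] = base_s * pow(2, i)
    config.append(1024 if scale <= 1.5 else 1280)  # the original rejects scale > 2.0
    return config

print(espnetv2_widths(1.0))   # [32, 64, 128, 256, 512, 1024]
print(espnetv2_widths(0.5))   # [16, 32, 64, 128, 256, 1024]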