# Beispiel #1 (Example #1)
    def __init__(self, args, conv=common.default_conv, BBlock = common.BBlock):
        """Assemble BSR: an optional FlatCam measurement simulator, a
        KronConv initial reconstruction, and a 3-level MWCNN-style
        encoder/decoder operating in the 2-D wavelet (DWT/IWT) domain."""
        super(BSR, self).__init__()
        self.out_channels = 4
        self.scale_idx = 0
        self.is_fcSim = args.is_fcSim
        self.toRGB = common.ApplyBayer2RGB(normalize=False)
        colors = args.n_colors
        feats = args.n_feats

        # Optional measurement-simulation front end (FlatCam + noise).
        if args.is_fcSim:
            self.fc_sim = common.FlatCamSampSim(args.batch_size)
            self.add_noise = common.AddNoise(nSig=args.sigma)

        self.toBayer = common.ApplyRaw2Bayer()

        # Initial reconstruction from the 4-channel raw input.
        self.init_recon = common.KronConv(in_channels=4,
                                          out_channels=self.out_channels,
                                          mid_channels=args.mid_channels,
                                          act=args.is_act)
        self.conv = nn.Conv2d(4, 3, kernel_size=1, stride=1, padding=0,
                              bias=False)

        # Enhancement network (U-shaped, in the wavelet domain).
        relu = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        depth = 3
        head_blocks = [BBlock(conv, 4 * colors, 160, 3, act=relu)]
        enc1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        enc2 = [BBlock(conv, 640, feats * 4, 3, act=relu)]
        enc2 += [BBlock(conv, feats * 4, feats * 4, 3, act=relu)
                 for _ in range(depth)]

        bottleneck = [BBlock(conv, feats * 16, feats * 4, 3, act=relu)]
        bottleneck += [BBlock(conv, feats * 4, feats * 4, 3, act=relu)
                       for _ in range(depth * 2)]
        bottleneck.append(BBlock(conv, feats * 4, feats * 16, 3, act=relu))

        dec2 = [BBlock(conv, feats * 4, feats * 4, 3, act=relu)
                for _ in range(depth)]
        dec2.append(BBlock(conv, feats * 4, 640, 3, act=relu))

        dec1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        tail_blocks = [conv(160, 4 * colors, 3)]

        self.head = nn.Sequential(*head_blocks)
        self.d_l2 = nn.Sequential(*enc2)
        self.d_l1 = nn.Sequential(*enc1)
        self.pro_l3 = nn.Sequential(*bottleneck)
        self.i_l2 = nn.Sequential(*dec2)
        self.i_l1 = nn.Sequential(*dec1)
        self.tail = nn.Sequential(*tail_blocks)
# Beispiel #2 (Example #2)
    def __init__(self, args, conv=common.default_conv):
        """Kernel-estimation network: a conv head over wavelet-domain
        input (4*n_colors subbands plus 20 extra channels — purpose of
        the extra channels not visible here), ten BBlock layers, and a
        conv tail back to 4*n_colors channels.

        Cleanup: the unused local `n_resblocks` was removed.

        Note: the feature width is fixed at 48; args.n_feats is
        deliberately ignored (the original carried this as a
        commented-out override).
        """
        super(KERNEL_EST, self).__init__()
        n_feats = 48  # fixed width; intentionally overrides args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        m_head = [conv(args.n_colors * 4 + 20, n_feats * 2, kernel_size)]

        m_body = [
            common.BBlock(conv, n_feats * 2, n_feats * 2, kernel_size,
                          act=act)
            for _ in range(10)
        ]

        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #3 (Example #3)
    def __init__(self, in_channels, out_channels, scale, up=True):
        """Projection convolution used for up/down projection.

        Args:
            in_channels: channels entering the projection.
            out_channels: channels leaving the projection.
            scale: spatial scale factor; must be 2, 4 or 8.
            up: build the up-projection (two convs) when True, otherwise
                the single-conv down-projection.

        Cleanup: the original unpacked kernel_size/stride/padding from a
        per-scale table but never used them (all convs are fixed at 3x3,
        stride 1, padding 1); only the channel ratio is kept.
        """
        super(projection_conv, self).__init__()
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        self.up = up
        self.scale = int(math.log2(scale))

        # Channel ratio per scale (== scale**2). An unsupported scale
        # still raises KeyError, matching the original behavior.
        ratio = {2: 4, 4: 16, 8: 64}[scale]

        if up:
            # Expand channels by `ratio` (presumably redistributed
            # spatially by the wavelet transforms in forward() — confirm),
            # then refine at the target channel count.
            self.conv_2 = nn.Conv2d(in_channels, out_channels * ratio, 3,
                                    stride=1, padding=1)
            self.conv_1 = nn.Conv2d(out_channels, out_channels, 3,
                                    stride=1, padding=1)
        else:
            self.conv_1 = nn.Conv2d(in_channels * ratio, out_channels, 3,
                                    stride=1, padding=1)
# Beispiel #4 (Example #4)
    def __init__(self, args, conv=common.default_conv):
        """Build the BSR network: a three-level MWCNN-style U-shaped
        encoder/decoder working in the 2-D discrete wavelet domain
        (DWT to descend a level, IWT to come back up), with a 4-channel
        input and output.

        Cleanup: the unused locals `n_resblocks` and `kernel_size` were
        removed; the original Chinese comments are translated below.
        """
        super(BSR, self).__init__()
        n_feats = args.n_feats
        self.scale_idx = 0

        act = nn.ReLU(True)  # shared activation

        self.DWT = common.DWT()  # 2-D discrete wavelet transform
        self.IWT = common.IWT()  # inverse 2-D discrete wavelet transform

        depth = 3
        # Encoder level 1: 4 -> 160 channels.
        m_head = [common.BBlock(conv, 4, 160, 3, act=act)]
        d_l1 = [common.BBlock(conv, 160, 160, 3, act=act)
                for _ in range(depth)]

        # Encoder level 2: 640 (= 160 * 4 wavelet subbands) -> n_feats*4.
        d_l2 = [common.BBlock(conv, 640, n_feats * 4, 3, act=act)]
        d_l2 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                 for _ in range(depth)]

        # Level-3 bottleneck, shared between the down and up paths:
        # n_feats*16 -> n_feats*4 -> ... -> n_feats*16.
        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act)]
        pro_l3 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                   for _ in range(depth * 2)]
        pro_l3.append(common.BBlock(conv, n_feats * 4, n_feats * 16, 3,
                                    act=act))

        # Decoder level 2: n_feats*4 -> 640.
        i_l2 = [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                for _ in range(depth)]
        i_l2.append(common.BBlock(conv, n_feats * 4, 640, 3, act=act))

        # Decoder level 1 and the final projection back to 4 channels.
        i_l1 = [common.BBlock(conv, 160, 160, 3, act=act)
                for _ in range(depth)]
        m_tail = [conv(160, 4, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l2 = nn.Sequential(*d_l2)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #5 (Example #5)
 def __init__(self):
     """Level-1 multi-wavelet residual network: a head, a nested
     level-2 sub-network, and a tail, with DWT/IWT transforms held as
     submodules (presumably used between them in forward() — confirm).
     """
     super(MWRN_lv1, self).__init__()
     # Number of image color channels (RGB).
     self.color_channel = 3
     self.lv1_head = MWRN_lv1_head()
     # Nested level-2 network.
     self.lv2 = MWRN_lv2()
     self.lv1_tail = MWRN_lv1_tail()
     self.DWT = common.DWT()
     self.IWT = common.IWT()
# Beispiel #6 (Example #6)
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock):
        """BSR with a FlatCam simulation front end and a learned initial
        reconstruction, followed by a 3-level wavelet-domain (MWCNN-style)
        encoder/decoder whose per-level depth is args.n_resblocks.

        Cleanup: the unused local `kernel_size` was removed.
        """
        super(BSR, self).__init__()
        n_feats = args.n_feats
        n_colors = args.n_colors
        self.scale_idx = 0
        act = nn.ReLU(True)
        self.is_fcSim = args.is_fcSim

        # Optional measurement-simulation front end (FlatCam + noise).
        if args.is_fcSim:
            self.fc_sim = common.FlatCamSampSim(args.batch_size)
            self.add_noise = common.AddNoise(nSig=args.sigma)

        # Learned initial reconstruction from the measurement.
        self.init_recon = common.FlatCamSimInitConv1()

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        n = args.n_resblocks

        # Encoder level 1: 4*n_colors -> 160 channels.
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=act)]
        d_l1 = [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        # Encoder level 2: 640 (= 160 * 4 subbands) -> n_feats*4.
        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=act)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                 for _ in range(n)]

        # Level-3 bottleneck: n_feats*16 -> n_feats*4 -> n_feats*16.
        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act)]
        pro_l3 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                   for _ in range(n * 2)]
        pro_l3.append(BBlock(conv, n_feats * 4, n_feats * 16, 3, act=act))

        # Decoder level 2: n_feats*4 -> 640.
        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                for _ in range(n)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=act))

        # Decoder level 1 and the projection back to 4*n_colors.
        i_l1 = [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]
        m_tail = [conv(160, 4 * n_colors, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #7 (Example #7)
    def __init__(self, args, conv=common.default_conv, BBlock = common.BBlock, CALayer = common.CALayer):
        """BSR variant with channel-attention (CALayer) modules around
        the 3-level wavelet-domain MWCNN encoder/decoder.

        Cleanup: the unused locals `n_resblocks` and `kernel_size` were
        removed.
        """
        super(BSR, self).__init__()
        n_feats = args.n_feats
        n_colors = args.n_colors

        self.scale_idx = 0

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Channel-attention layers; `reduction` is the squeeze ratio.
        # (Naming FCA*/ICA* — presumably forward/inverse path attention;
        # confirm against forward().)
        self.FCA2 = CALayer(channel=160, reduction=16)
        self.FCA3 = CALayer(channel=n_feats * 4, reduction=16)
        self.ICA1 = CALayer(channel=n_feats * 16, reduction=16)
        self.ICA2 = CALayer(channel=640, reduction=16)
        self.ICA3 = CALayer(channel=4 * n_colors, reduction=2)

        n = 3
        # Encoder level 1: 4*n_colors -> 160 channels.
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=act)]
        d_l1 = [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        # Encoder level 2: 640 (= 160 * 4 subbands) -> n_feats*4.
        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=act)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                 for _ in range(n)]

        # Level-3 bottleneck: n_feats*16 -> n_feats*4 -> n_feats*16.
        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act)]
        pro_l3 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                   for _ in range(n * 2)]
        pro_l3.append(BBlock(conv, n_feats * 4, n_feats * 16, 3, act=act))

        # Decoder level 2: n_feats*4 -> 640.
        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                for _ in range(n)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=act))

        # Decoder level 1 and the projection back to 4*n_colors.
        i_l1 = [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        m_tail = [conv(160, 4 * n_colors, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #8 (Example #8)
    def __init__(self, args, conv=common.default_conv):
        """MWCNN-style network built from dense/dilated blocks: four
        encoder levels (head + d_l0..d_l2), a bottleneck (pro_l3), and
        mirrored decoder levels (i_l2..i_l0 + tail), linked via DWT/IWT.

        Cleanup: the unused locals `n_resblocks` and `n` were removed.
        """
        super().__init__()
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        nColor = args.n_colors

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Level 0: color input -> n_feats.
        m_head = [common.BBlock(conv, nColor, n_feats, kernel_size, act=act)]
        d_l0 = [common.DBlock_com1(conv, n_feats, n_feats, kernel_size,
                                   act=act, bn=False)]

        # Level 1: channels quadruple (n_feats*4) before reduction to
        # n_feats*2 — consistent with a DWT between the levels.
        d_l1 = [common.BBlock(conv, n_feats * 4, n_feats * 2, kernel_size,
                              act=act, bn=False),
                common.DBlock_com1(conv, n_feats * 2, n_feats * 2,
                                   kernel_size, act=act, bn=False)]

        # Level 2: n_feats*8 -> n_feats*4.
        d_l2 = [common.BBlock(conv, n_feats * 8, n_feats * 4, kernel_size,
                              act=act, bn=False),
                common.DBlock_com1(conv, n_feats * 4, n_feats * 4,
                                   kernel_size, act=act, bn=False)]

        # Bottleneck: contract then expand around the deepest level.
        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 8, kernel_size,
                                act=act, bn=False),
                  common.DBlock_com(conv, n_feats * 8, n_feats * 8,
                                    kernel_size, act=act, bn=False),
                  common.DBlock_inv(conv, n_feats * 8, n_feats * 8,
                                    kernel_size, act=act, bn=False),
                  common.BBlock(conv, n_feats * 8, n_feats * 16, kernel_size,
                                act=act, bn=False)]

        # Decoder levels mirror the encoder.
        i_l2 = [common.DBlock_inv1(conv, n_feats * 4, n_feats * 4,
                                   kernel_size, act=act, bn=False),
                common.BBlock(conv, n_feats * 4, n_feats * 8, kernel_size,
                              act=act, bn=False)]

        i_l1 = [common.DBlock_inv1(conv, n_feats * 2, n_feats * 2,
                                   kernel_size, act=act, bn=False),
                common.BBlock(conv, n_feats * 2, n_feats * 4, kernel_size,
                              act=act, bn=False)]

        i_l0 = [common.DBlock_inv1(conv, n_feats, n_feats, kernel_size,
                                   act=act, bn=False)]

        m_tail = [conv(n_feats, nColor, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l0 = nn.Sequential(*d_l0)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.i_l0 = nn.Sequential(*i_l0)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #9 (Example #9)
    def __init__(self, args, conv=common.default_conv):
        """BSR: a 3-level MWCNN-style encoder/decoder in the 2-D wavelet
        domain with 4-channel input and output.

        Cleanup: the unused locals `n_resblocks` and `kernel_size` were
        removed.
        """
        super(BSR, self).__init__()
        n_feats = args.n_feats
        self.scale_idx = 0

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        n = 3
        # Encoder level 1: 4 -> 160 channels.
        m_head = [common.BBlock(conv, 4, 160, 3, act=act)]
        d_l1 = [common.BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        # Encoder level 2: 640 (= 160 * 4 subbands) -> n_feats*4.
        d_l2 = [common.BBlock(conv, 640, n_feats * 4, 3, act=act)]
        d_l2 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                 for _ in range(n)]

        # Level-3 bottleneck: n_feats*16 -> n_feats*4 -> n_feats*16.
        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act)]
        pro_l3 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                   for _ in range(n * 2)]
        pro_l3.append(common.BBlock(conv, n_feats * 4, n_feats * 16, 3,
                                    act=act))

        # Decoder level 2: n_feats*4 -> 640.
        i_l2 = [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                for _ in range(n)]
        i_l2.append(common.BBlock(conv, n_feats * 4, 640, 3, act=act))

        # Decoder level 1 and the projection back to 4 channels.
        i_l1 = [common.BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        m_tail = [conv(160, 4, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #10 (Example #10)
    def __init__(self):
        """Level-1 tail of MWRN: expand 256 -> 640 channels, apply the
        inverse DWT, refine with four 160-channel residual blocks, and
        map to 4 * color_channel output channels.

        Cleanup: the unused comprehension index `i` is now `_`.
        """
        super(MWRN_lv1_tail, self).__init__()
        # Number of image color channels (RGB).
        self.color_channel = 3

        self.conv1_tail_1 = common.BBlock(common.default_conv0, 256, 640, 3,
                                          bn=True)
        self.IWT = common.IWT()
        self.res1_tail = nn.Sequential(
            *[common.ResBlock(common.default_conv0, 160, 3)
              for _ in range(4)])
        self.conv1_tail_2 = common.BBlock(common.default_conv0, 160,
                                          4 * self.color_channel, 3,
                                          bn=True)
# Beispiel #11 (Example #11)
    def __init__(self):
        """Level-2 tail of MWRN: expand 512 -> 1024 channels, apply the
        inverse DWT, refine with four 256-channel residual blocks, and
        map to 16 * color_channel output channels.

        Cleanup: the unused comprehension index `i` is now `_`.
        """
        super(MWRN_lv2_tail, self).__init__()
        # Number of image color channels (RGB).
        self.color_channel = 3

        self.conv2_tail_1 = common.BBlock(common.default_conv0, 512, 1024, 3,
                                          bn=True)
        self.IWT = common.IWT()
        self.res2_tail = nn.Sequential(
            *[common.ResBlock(common.default_conv0, 256, 3)
              for _ in range(4)])
        self.conv2_tail_img = common.BBlock(common.default_conv0, 256,
                                            16 * self.color_channel, 3,
                                            bn=True)
# Beispiel #12 (Example #12)
    def __init__(self, args, conv=common.default_conv):
        """Noise-level estimation network: a conv head over wavelet-domain
        input (4*n_colors subbands plus 4 extra channels — purpose of the
        extra channels not visible here), ten BBlock layers, and a conv
        tail back to 4*n_colors channels.

        Cleanup: removed the dead `n_feats = args.n_feats` assignment
        (immediately overwritten by 48), the unused local `n_resblocks`,
        and the large commented-out code blocks.
        """
        super(NL_EST, self).__init__()
        n_feats = 48  # fixed width; intentionally overrides args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        m_head = [conv(args.n_colors * 4 + 4, n_feats * 2, kernel_size)]

        m_body = [
            common.BBlock(conv, n_feats * 2, n_feats * 2, kernel_size,
                          act=act)
            for _ in range(10)
        ]

        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #13 (Example #13)
    def __init__(self, args):
        """Multi-wavelet PDO network: encoder levels (head, d_l0..d_l2),
        a bottleneck (pro_l3), and mirrored decoder levels (i_l2..i_l0)
        built from partial-differential-operator layers (mwpdo_layers,
        all with p=8), plus a plain conv tail.

        Cleanup: the unused locals `n_resblocks` and `n` and the
        commented-out alternative tail were removed.
        """
        super(MWPDO, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        nColor = args.n_colors

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Level 0: color input -> n_feats.
        m_head = [mwpdo_layers.BBlock(nColor, n_feats, p=8, act=act,
                                      bn=False)]
        d_l0 = [mwpdo_layers.DBlock_com1(n_feats, n_feats, p=8, act=act,
                                         bn=False)]

        # Level 1: channels quadruple before reduction — consistent with
        # a DWT between levels.
        d_l1 = [mwpdo_layers.BBlock1(n_feats * 4, n_feats * 2, p=8, act=act,
                                     bn=False),
                mwpdo_layers.DBlock_com1(n_feats * 2, n_feats * 2, p=8,
                                         act=act, bn=False)]

        # Level 2: n_feats*8 -> n_feats*4.
        d_l2 = [mwpdo_layers.BBlock1(n_feats * 8, n_feats * 4, p=8, act=act,
                                     bn=False),
                mwpdo_layers.DBlock_com1(n_feats * 4, n_feats * 4, p=8,
                                         act=act, bn=False)]

        # Bottleneck: contract then expand around the deepest level.
        pro_l3 = [mwpdo_layers.BBlock1(n_feats * 16, n_feats * 8, p=8,
                                       act=act, bn=False),
                  mwpdo_layers.DBlock_com(n_feats * 8, n_feats * 8, p=8,
                                          act=act, bn=False),
                  mwpdo_layers.DBlock_inv(n_feats * 8, n_feats * 8, p=8,
                                          act=act, bn=False),
                  mwpdo_layers.BBlock1(n_feats * 8, n_feats * 16, p=8,
                                       act=act, bn=False)]

        # Decoder levels mirror the encoder.
        i_l2 = [mwpdo_layers.DBlock_inv1(n_feats * 4, n_feats * 4, p=8,
                                         act=act, bn=False),
                mwpdo_layers.BBlock1(n_feats * 4, n_feats * 8, p=8, act=act,
                                     bn=False)]

        i_l1 = [mwpdo_layers.DBlock_inv1(n_feats * 2, n_feats * 2, p=8,
                                         act=act, bn=False),
                mwpdo_layers.BBlock1(n_feats * 2, n_feats * 4, p=8, act=act,
                                     bn=False)]

        i_l0 = [mwpdo_layers.DBlock_inv1(n_feats, n_feats, p=8, act=act,
                                         bn=False)]

        # NOTE(review): the tail consumes n_feats*8 channels while i_l0
        # emits n_feats — forward() presumably concatenates features
        # before the tail; confirm.
        m_tail = [common.default_conv(n_feats * 8, nColor, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l0 = nn.Sequential(*d_l0)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.i_l0 = nn.Sequential(*i_l0)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #14 (Example #14)
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock):
        """BSR super-resolution variant: gradual up-scaling branches
        (u2/u1/u0) plus a main 3-level MWCNN encoder/decoder in the
        wavelet domain, with a shallow bottleneck.

        Cleanup: the unused locals `n_resblocks` and `kernel_size` and
        the commented-out code were removed.
        """
        super(BSR, self).__init__()
        n_feats = args.n_feats
        n_colors = args.n_colors
        self.scale_idx = 0
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        act = nn.ReLU(True)
        n = 3
        self.scale = args.scale

        # =============== Gradual up-scaling branches ===================
        u2_ = [BBlock(conv, n_colors, 256, 3, act=act)]
        u2_ += [BBlock(conv, 256, 256, 3, act=act) for _ in range(n)]
        u2_.append(BBlock(conv, 256, 640, 3, act=act))
        self.u2 = nn.Sequential(*u2_)

        u1_ = [BBlock(conv, 160, 160, 3, act=act)]
        u1_ += [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]
        self.u1 = nn.Sequential(*u1_)

        u0_ = [BBlock(conv, 160, n_colors * 4, 3, act=act)]
        self.u0 = nn.Sequential(*u0_)

        # =============== Main MWCNN ====================================
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=act)]

        d_l1 = [BBlock(conv, 160, 160, 3, act=act)]  # always present
        d_l1 += [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=act)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                 for _ in range(n)]

        # Shallow bottleneck: contract to n_feats*4 and expand back.
        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act),
                  BBlock(conv, n_feats * 4, n_feats * 16, 3, act=act)]

        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act)
                for _ in range(n)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=act))

        i_l1 = [BBlock(conv, 160, 160, 3, act=act) for _ in range(n)]

        m_tail = [conv(160, 4, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
# Beispiel #15 (Example #15)
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12),
                 num_init_features=4,
                 bn_size=4,
                 drop_rate=0):
        """Dense wavelet U-net: DenseBlocks at two resolutions connected
        by DWT/IWT, with 1x1 conv + BN + ReLU transition layers.

        Args:
            growth_rate: channels added by each dense layer.
            block_config: dense-layer counts for (level 1, level 2).
            num_init_features: channels after the input 1x1 conv.
            bn_size: bottleneck multiplier inside dense layers.
            drop_rate: dropout rate inside dense layers.

        Fix: `num_features3` was hard-coded as `220 + ...`, which is only
        correct for the default configuration (196 + 24); it is now
        derived from `num_features1 + 24` so non-default `block_config` /
        `num_init_features` values keep the channel math consistent.
        """
        super(DenseWTUnet, self).__init__()

        self.DWT = common.DWT()  # 2-D discrete wavelet transform
        self.IWT = common.IWT()  # inverse 2-D discrete wavelet transform

        # Input stem: 1x1 conv + BN + ReLU over a 4-channel input.
        self.features = nn.Sequential(
            OrderedDict([("conv0",
                          nn.Conv2d(4,
                                    num_init_features,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm0", nn.BatchNorm2d(num_init_features)),
                         ("relu0", nn.ReLU(inplace=True))]))
        num_layers1 = block_config[0]
        num_layers2 = block_config[1]
        # Level-1 dense block; a DenseBlock emits in + layers*growth.
        self.block1 = common._DenseBlock(num_layers1, num_init_features,
                                         bn_size, growth_rate, drop_rate)
        num_features1 = num_init_features + growth_rate * num_layers1  # 196 w/ defaults
        # Level-2 dense block; input channels are x4 (presumably after a
        # DWT in forward() — confirm).
        self.block2 = common._DenseBlock(num_layers2, num_features1 * 4,
                                         bn_size, growth_rate, drop_rate)
        num_features2 = num_features1 * 4 + growth_rate * num_layers2
        # Transition: compress level-2 features to 256 channels.
        self.features1 = nn.Sequential(
            OrderedDict([("conv11",
                          nn.Conv2d(num_features2,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm11", nn.BatchNorm2d(256)),
                         ("relu11", nn.ReLU(inplace=True))]))
        self.block3 = common._DenseBlock(num_layers2, 256, bn_size,
                                         growth_rate, drop_rate)
        # Compress to 24 channels; the 160-channel input here is not
        # derivable from this block alone (presumably 640/4 after IWT in
        # forward() — confirm).
        self.features2 = nn.Sequential(
            OrderedDict([("conv22",
                          nn.Conv2d(160,
                                    24,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm22", nn.BatchNorm2d(24)),
                         ("relu22", nn.ReLU(inplace=True))]))
        # Level-1 fusion block over skip (num_features1) + 24 channels.
        self.block4 = common._DenseBlock(num_layers1, num_features1 + 24,
                                         bn_size, growth_rate, drop_rate)
        # Was: 220 + growth_rate * num_layers1 (default-only constant).
        num_features3 = num_features1 + 24 + growth_rate * num_layers1  # 412 w/ defaults

        # Final projection back to 4 channels.
        self.features3 = nn.Sequential(
            OrderedDict([("conv33",
                          nn.Conv2d(num_features3,
                                    4,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm33", nn.BatchNorm2d(4)),
                         ("relu33", nn.ReLU(inplace=True))]))

        # Parameter initialization: He init for conv weights, (1, 0) for
        # BatchNorm. NOTE(review): the nn.Linear branch initializes only
        # the bias; the weight keeps PyTorch's default init.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
# Beispiel #16 (Example #16)
# Datei (file): cbsr.py — Projekt (project): lpj0/CBSR
    def __init__(self, args, conv=common.default_conv):
        """CBSR network: mean-shift normalization, a 3-level residual
        encoder/decoder operating in the wavelet domain, and a
        de-normalizing mean shift on the way out.

        Fix: `rgb_mean` / `rgb_std` were defined twice with identical
        values; the duplicate definitions are removed and both MeanShift
        modules now share the single pair.
        """
        super(CBSR, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        act = nn.ReLU(True)

        # Channel statistics shared by sub_mean and add_mean.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)

        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        n = args.n_resblocks

        # Head: 4*n_colors wavelet subbands plus 20 extra input channels
        # (purpose not visible here), 5x5 conv.
        m_head = [conv(args.n_colors * 4 + 20, n_feats * 2, kernel_size + 2)]

        # Each level interleaves ResBlocks with the shared ReLU module.
        d_l1 = []
        for _ in range(n):
            d_l1.append(common.ResBlock(conv, n_feats * 2, 3, act=act))
            d_l1.append(act)

        d_l2 = [common.BBlock(conv, n_feats * 8, n_feats * 4, 3, act=act)]
        for _ in range(n):
            d_l2.append(common.ResBlock(conv, n_feats * 4, 3, act=act))
            d_l2.append(act)

        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 8, 3, act=act)]
        for _ in range(n * 2):
            pro_l3.append(common.ResBlock(conv, n_feats * 8, 3, act=act))
            pro_l3.append(act)
        pro_l3.append(conv(n_feats * 8, n_feats * 16, 3))

        i_l2 = []
        for _ in range(n):
            i_l2.append(common.ResBlock(conv, n_feats * 4, 3, act=act))
            i_l2.append(act)
        i_l2.append(conv(n_feats * 4, n_feats * 8, 3))

        i_l1 = []
        for _ in range(n):
            i_l1.append(common.ResBlock(conv, n_feats * 2, 3, act=act))
            i_l1.append(act)

        # Tail: 7x7 conv back to 4*n_colors wavelet subbands.
        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size + 4)]

        # Final MeanShift takes an extra argument 1 (presumably sign, to
        # add the mean back — confirm against common.MeanShift).
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*m_head)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l2 = nn.Sequential(*d_l2)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
        self.act = nn.Sequential(act)