Example #1
0
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock):
        """Build the BSR model: optional FlatCam measurement simulation, an
        initial reconstruction stage, and an MWCNN-style wavelet
        encoder/decoder for enhancement."""
        super(BSR, self).__init__()
        self.out_channels = 4
        self.scale_idx = 0
        self.is_fcSim = args.is_fcSim
        self.toRGB = common.ApplyBayer2RGB(normalize=False)
        n_colors = args.n_colors
        n_feats = args.n_feats

        # Optional measurement simulation (FlatCam sampling + sensor noise).
        if args.is_fcSim:
            self.fc_sim = common.FlatCamSampSim(args.batch_size)
            self.add_noise = common.AddNoise(nSig=args.sigma)

        self.toBayer = common.ApplyRaw2Bayer()

        # Initial reconstruction from the raw measurements.
        self.init_recon = common.KronConv(in_channels=4,
                                          out_channels=self.out_channels,
                                          mid_channels=args.mid_channels,
                                          act=args.is_act)
        self.conv = nn.Conv2d(4, 3, kernel_size=1, stride=1, padding=0,
                              bias=False)

        # Enhancement stage: wavelet U-shape built from BBlocks.
        relu = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        depth = 3
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=relu)]
        d_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=relu)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                 for _ in range(depth)]

        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=relu)]
        pro_l3 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                   for _ in range(depth * 2)]
        pro_l3.append(BBlock(conv, n_feats * 4, n_feats * 16, 3, act=relu))

        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                for _ in range(depth)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=relu))

        i_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        m_tail = [conv(160, 4 * n_colors, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #2
0
    def __init__(self, args, conv=common.default_conv):
        """Kernel-estimation network: one head conv, ten BBlocks, one tail
        conv, plus DWT/IWT modules used by the forward pass."""
        super(KERNEL_EST, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = 48  # fixed internal width; args.n_feats is ignored here
        kernel_size = 3
        self.scale_idx = 0

        relu = nn.ReLU(True)


        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Input: 4 wavelet sub-bands per color channel plus 20 extra channels.
        m_head = [conv(args.n_colors * 4 + 20, n_feats * 2, kernel_size)]
        m_body = [
            common.BBlock(conv, n_feats * 2, n_feats * 2, kernel_size,
                          act=relu)
            for _ in range(10)
        ]
        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
Example #3
0
    def __init__(self, in_channels, out_channels, scale, up=True):
        """Projection convolution used by up/down-projection units.

        Args:
            in_channels: channels of the incoming feature map.
            out_channels: channels of the produced feature map.
            scale: spatial scale factor; must be one of 2, 4 or 8.
            up: when True build the up-projection pair (conv_2 then conv_1);
                otherwise a single down-projection conv.

        Raises:
            ValueError: if ``scale`` is not 2, 4 or 8 (previously an opaque
                KeyError).
        """
        super(projection_conv, self).__init__()
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        self.up = up
        self.scale = int(math.log2(scale))

        # Per-scale hyper-parameters. Only `ratio` is used by the convs
        # below; kernel_size/stride/padding are kept for reference.
        scale_params = {
            2: (6, 2, 2, 4),
            4: (8, 4, 2, 16),
            8: (12, 8, 2, 64),
        }
        if scale not in scale_params:
            raise ValueError(
                "projection_conv: unsupported scale %r (expected 2, 4 or 8)"
                % (scale,))
        kernel_size, stride, padding, ratio = scale_params[scale]

        if up:
            # Expand channels by `ratio` first, then refine at target width.
            self.conv_2 = nn.Conv2d(in_channels,
                                    out_channels * ratio,
                                    3,
                                    stride=1,
                                    padding=1)
            self.conv_1 = nn.Conv2d(out_channels,
                                    out_channels,
                                    3,
                                    stride=1,
                                    padding=1)
        else:
            self.conv_1 = nn.Conv2d(in_channels * ratio,
                                    out_channels,
                                    3,
                                    stride=1,
                                    padding=1)
Example #4
0
    def __init__(self, args, conv=common.default_conv):
        """Build an MWCNN-style BSR: a wavelet encoder/decoder over a
        4-channel (wavelet sub-band) input."""
        super(BSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3  # convolution kernel size
        self.scale_idx = 0

        act = nn.ReLU(True)  # activation function

        self.DWT = common.DWT()  # 2-D discrete wavelet transform
        self.IWT = common.IWT()  # inverse 2-D discrete wavelet transform

        n = 3
        # First downsampling level; channels go 4 -> 160.
        m_head = [common.BBlock(conv, 4, 160, 3, act=act)]
        d_l1 = []
        for _ in range(n):
            d_l1.append(common.BBlock(conv, 160, 160, 3, act=act))

        # Second downsampling level; channels go 640 -> n_feats * 4
        # (256 with the default n_feats == 64).
        d_l2 = [common.BBlock(conv, 640, n_feats * 4, 3, act=act)]
        for _ in range(n):
            d_l2.append(
                common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act))

        # Third (bottleneck) level, shared between the down and up paths;
        # channels go n_feats*16 -> n_feats*4 and back (1024 -> 256 -> 1024).
        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 4, 3, act=act)]
        for _ in range(n * 2):
            pro_l3.append(
                common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act))
        pro_l3.append(
            common.BBlock(conv, n_feats * 4, n_feats * 16, 3, act=act))

        # Second upsampling level; channels go n_feats * 4 -> 640.
        i_l2 = []
        for _ in range(n):
            i_l2.append(
                common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=act))
        i_l2.append(common.BBlock(conv, n_feats * 4, 640, 3, act=act))

        # First upsampling level plus tail; channels go 160 -> 4.
        i_l1 = []
        for _ in range(n):
            i_l1.append((common.BBlock(conv, 160, 160, 3, act=act)))
        m_tail = [conv(160, 4, 3)]

        # First downsampling level.
        self.head = nn.Sequential(*m_head)
        self.d_l1 = nn.Sequential(*d_l1)
        # Second downsampling level.
        self.d_l2 = nn.Sequential(*d_l2)
        # Bottleneck connection level.
        self.pro_l3 = nn.Sequential(*pro_l3)
        # Second upsampling level.
        self.i_l2 = nn.Sequential(*i_l2)
        # First upsampling level.
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #5
0
 def __init__(self):
     """Assemble the level-1 MWRN pipeline: head, nested level-2 network,
     tail, plus the wavelet transform pair used between them."""
     super(MWRN_lv1, self).__init__()
     self.color_channel = 3  # RGB input
     self.lv1_head = MWRN_lv1_head()
     self.lv2 = MWRN_lv2()
     self.lv1_tail = MWRN_lv1_tail()
     self.DWT = common.DWT()
     self.IWT = common.IWT()
Example #6
0
    def __init__(self):
        """Head of the level-2 MWRN branch: two BBlocks, a stack of four
        ResBlocks, a closing BBlock, and a DWT module."""
        super(MWRN_lv2_head, self).__init__()
        self.color_channel = 3

        conv0 = common.default_conv0
        self.conv2_head_1 = common.BBlock(conv0, 16 * self.color_channel,
                                          256, 3, bn=True)
        self.conv2_head_2 = common.BBlock(conv0, 512, 256, 3, bn=True)
        self.res2_head = nn.Sequential(
            *[common.ResBlock(conv0, 256, 3) for _ in range(4)])
        self.conv2_head_3 = common.BBlock(conv0, 1024, 512, 3, bn=True)

        self.DWT = common.DWT()
Example #7
0
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock):
        """BSR with FlatCam measurement simulation, an initial reconstruction
        conv, and an MWCNN-style enhancement stage whose depth comes from
        args.n_resblocks."""
        super(BSR, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        n_colors = args.n_colors
        self.scale_idx = 0
        relu = nn.ReLU(True)
        self.is_fcSim = args.is_fcSim

        # Optional measurement simulation (FlatCam sampling + sensor noise).
        if args.is_fcSim:
            self.fc_sim = common.FlatCamSampSim(args.batch_size)
            self.add_noise = common.AddNoise(nSig=args.sigma)

        self.init_recon = common.FlatCamSimInitConv1()

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        depth = args.n_resblocks

        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=relu)]
        d_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=relu)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                 for _ in range(depth)]

        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=relu)]
        pro_l3 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                   for _ in range(depth * 2)]
        pro_l3.append(BBlock(conv, n_feats * 4, n_feats * 16, 3, act=relu))

        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                for _ in range(depth)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=relu))

        i_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        m_tail = [conv(160, 4 * n_colors, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #8
0
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock, CALayer=common.CALayer):
        """BSR variant with channel-attention (CALayer) modules at several
        channel widths alongside the wavelet encoder/decoder."""
        super(BSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        n_colors = args.n_colors

        self.scale_idx = 0

        relu = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Channel-attention layers, one per feature width used in the trunk.
        self.FCA2 = CALayer(channel=160, reduction=16)
        self.FCA3 = CALayer(channel=n_feats * 4, reduction=16)
        self.ICA1 = CALayer(channel=n_feats * 16, reduction=16)
        self.ICA2 = CALayer(channel=640, reduction=16)
        self.ICA3 = CALayer(channel=4 * n_colors, reduction=2)

        depth = 3
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=relu)]
        d_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=relu)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                 for _ in range(depth)]

        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=relu)]
        pro_l3 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                   for _ in range(depth * 2)]
        pro_l3.append(BBlock(conv, n_feats * 4, n_feats * 16, 3, act=relu))

        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                for _ in range(depth)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=relu))

        i_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        m_tail = [conv(160, 4 * n_colors, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #9
0
    def __init__(self, args, conv=common.default_conv):
        """Three-level wavelet U-net: DWT downsampling path (d_l0..d_l2),
        bottleneck (pro_l3), and mirrored IWT upsampling path (i_l2..i_l0).

        Args:
            args: namespace providing ``n_feats`` (base channel width) and
                ``n_colors`` (input/output image channels).
            conv: convolution factory used by every block.

        (Removed the unused locals ``n_resblocks`` and ``n`` from the
        original; behavior is otherwise unchanged.)
        """
        super().__init__()
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        nColor = args.n_colors

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Level 0: nColor -> n_feats, then one "com" block.
        m_head = [common.BBlock(conv, nColor, n_feats, kernel_size, act=act)]
        d_l0 = [common.DBlock_com1(conv, n_feats, n_feats, kernel_size,
                                   act=act, bn=False)]

        # Level 1: DWT quadruples channels; compress 4x -> 2x.
        d_l1 = [common.BBlock(conv, n_feats * 4, n_feats * 2, kernel_size,
                              act=act, bn=False)]
        d_l1.append(common.DBlock_com1(conv, n_feats * 2, n_feats * 2,
                                       kernel_size, act=act, bn=False))

        # Level 2: 8x -> 4x.
        d_l2 = [common.BBlock(conv, n_feats * 8, n_feats * 4, kernel_size,
                              act=act, bn=False)]
        d_l2.append(common.DBlock_com1(conv, n_feats * 4, n_feats * 4,
                                       kernel_size, act=act, bn=False))

        # Bottleneck: 16x -> 8x, process, then 8x -> 16x for the IWT.
        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 8, kernel_size,
                                act=act, bn=False)]
        pro_l3.append(common.DBlock_com(conv, n_feats * 8, n_feats * 8,
                                        kernel_size, act=act, bn=False))
        pro_l3.append(common.DBlock_inv(conv, n_feats * 8, n_feats * 8,
                                        kernel_size, act=act, bn=False))
        pro_l3.append(common.BBlock(conv, n_feats * 8, n_feats * 16,
                                    kernel_size, act=act, bn=False))

        # Decoder levels mirror the encoder, expanding before each IWT.
        i_l2 = [common.DBlock_inv1(conv, n_feats * 4, n_feats * 4,
                                   kernel_size, act=act, bn=False)]
        i_l2.append(common.BBlock(conv, n_feats * 4, n_feats * 8, kernel_size,
                                  act=act, bn=False))

        i_l1 = [common.DBlock_inv1(conv, n_feats * 2, n_feats * 2,
                                   kernel_size, act=act, bn=False)]
        i_l1.append(common.BBlock(conv, n_feats * 2, n_feats * 4, kernel_size,
                                  act=act, bn=False))

        i_l0 = [common.DBlock_inv1(conv, n_feats, n_feats, kernel_size,
                                   act=act, bn=False)]

        m_tail = [conv(n_feats, nColor, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l0 = nn.Sequential(*d_l0)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.i_l0 = nn.Sequential(*i_l0)
        self.tail = nn.Sequential(*m_tail)
Example #10
0
    def __init__(self, args, conv=common.default_conv):
        """MWCNN-style BSR operating on a 4-channel (wavelet sub-band)
        input; mirrors the encoder/decoder layout used elsewhere in this
        project."""
        super(BSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        relu = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        depth = 3
        m_head = [common.BBlock(conv, 4, 160, 3, act=relu)]
        d_l1 = [common.BBlock(conv, 160, 160, 3, act=relu)
                for _ in range(depth)]

        d_l2 = [common.BBlock(conv, 640, n_feats * 4, 3, act=relu)]
        d_l2 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                 for _ in range(depth)]

        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 4, 3, act=relu)]
        pro_l3 += [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                   for _ in range(depth * 2)]
        pro_l3.append(
            common.BBlock(conv, n_feats * 4, n_feats * 16, 3, act=relu))

        i_l2 = [common.BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                for _ in range(depth)]
        i_l2.append(common.BBlock(conv, n_feats * 4, 640, 3, act=relu))

        i_l1 = [common.BBlock(conv, 160, 160, 3, act=relu)
                for _ in range(depth)]

        m_tail = [conv(160, 4, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #11
0
File: nl_est.py Project: lpj0/CBSR
    def __init__(self, args, conv=common.default_conv):
        """Noise-level estimation network: one head conv, ten BBlocks, one
        tail conv, all in the wavelet domain.

        Args:
            args: namespace providing ``n_colors``.
            conv: convolution factory.

        (Cleanup: the original read ``args.n_feats`` and ``args.n_resblocks``
        and then never used them — ``n_feats`` was immediately overwritten
        with 48. The dead reads and commented-out code are removed; the
        constructed network is identical.)
        """
        super(NL_EST, self).__init__()
        n_feats = 48  # fixed internal width; intentionally not args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Input: 4 wavelet sub-bands per color channel plus 4 extra channels.
        m_head = [conv(args.n_colors * 4 + 4, n_feats * 2, kernel_size)]

        m_body = []
        for _ in range(10):
            m_body.append(
                common.BBlock(conv,
                              n_feats * 2,
                              n_feats * 2,
                              kernel_size,
                              act=act))

        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)

        self.tail = nn.Sequential(*m_tail)
Example #12
0
    def __init__(self, conf, exp_id=0, status='train', loader=default_loader):
        """Set up an IQA dataset split for yl360Dataset1.

        Loads DMOS scores, creates (or reloads) a persisted random
        permutation of the impairment indices, and slices it into
        train / val / test partitions by conf['train_ratio'] and
        conf['test_ratio'].

        Args:
            conf: configuration dict (imp_num, dataset paths, patch_size,
                stride, batch_size, train_ratio, test_ratio).
            exp_id: experiment id (unused in this constructor).
            status: 'train', 'val' or 'test' — selects which partition this
                instance exposes as ``self.index``.
            loader: image-loading callable.

        NOTE(review): depends on a module-level ``args`` (log_dir_MW,
        log_dir_IQA) — confirm it is defined in this module's scope.
        """
        self.imp_num = conf['imp_num']
        self.loader = loader
        self.imrefID = conf['yl360Dataset1']['refimpID_pth']
        self.im_dir = conf['yl360Dataset1']['img_ref_IMG_pth']
        self.im_dmos = conf['yl360Dataset1']['impDMOS_reg']
        self.patch_size = conf['patch_size']
        self.stride = conf['stride']
        self.bz = conf['batch_size']
        self.dwt = common.DWT()
        self.indexData = np.arange(self.imp_num)
        self.dmos = np.loadtxt(self.im_dmos)

        # Persist the shuffle so every run (and every split instance) sees
        # the same train/val/test assignment.
        rand_list_path = os.path.join(args.log_dir_MW,
                                      "train_test_randList.txt")
        if os.path.exists(rand_list_path):
            self.indexData = np.loadtxt(rand_list_path)
        else:
            np.random.shuffle(self.indexData)
            np.savetxt(rand_list_path, self.indexData)

        test_ratio = conf['test_ratio']
        train_ratio = conf['train_ratio']
        n_train = int(train_ratio * self.imp_num)
        n_test_start = int((1 - test_ratio) * self.imp_num)
        trainindex = self.indexData[:n_train]
        # Fix: the original val slice ended at n_test_start + 1, so one
        # sample appeared in BOTH the val and test splits (data leak).
        valindex = self.indexData[n_train:n_test_start]
        testindex = self.indexData[n_test_start:]

        if status == 'train':
            self.index = trainindex
            np.savetxt(os.path.join(args.log_dir_IQA, 'train_score.txt'), self.index)
            print(len(self.index))
            print("# Train Images: {}".format(len(self.index)))
            print(trainindex)
        elif status == 'test':
            self.index = testindex
            np.savetxt(os.path.join(args.log_dir_IQA, 'test_score.txt'), self.index)
            print(len(self.index))
            print("# Test Images:  {}".format(len(self.index)))
            print(testindex)
        elif status == 'val':
            self.index = valindex
            np.savetxt(os.path.join(args.log_dir_IQA, 'validate_score.txt'), self.index)
            print("# Val Images: {}".format(len(self.index)))
Example #13
0
    def __init__(self, args, conv=common.default_conv, BBlock=common.BBlock):
        """BSR variant with gradual up-scaling branches (u0/u1/u2) in front
        of a shallow-bottleneck MWCNN trunk."""
        super(BSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        n_colors = args.n_colors
        self.scale_idx = 0
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        relu = nn.ReLU(True)
        depth = 3
        self.scale = args.scale

        # ---- gradual up-scaling branches ----
        u2_ = [BBlock(conv, n_colors, 256, 3, act=relu)]
        u2_ += [BBlock(conv, 256, 256, 3, act=relu) for _ in range(depth)]
        u2_.append(BBlock(conv, 256, 640, 3, act=relu))
        self.u2 = nn.Sequential(*u2_)

        u1_ = [BBlock(conv, 160, 160, 3, act=relu)
               for _ in range(depth + 1)]
        self.u1 = nn.Sequential(*u1_)

        self.u0 = nn.Sequential(BBlock(conv, 160, n_colors * 4, 3, act=relu))

        # ---- main MWCNN trunk ----
        m_head = [BBlock(conv, 4 * n_colors, 160, 3, act=relu)]

        d_l1 = [BBlock(conv, 160, 160, 3, act=relu)
                for _ in range(depth + 1)]

        d_l2 = [BBlock(conv, 640, n_feats * 4, 3, act=relu)]
        d_l2 += [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                 for _ in range(depth)]

        # The bottleneck here is only an expand/contract pair (no middle
        # blocks), unlike the deeper BSR variants.
        pro_l3 = [BBlock(conv, n_feats * 16, n_feats * 4, 3, act=relu),
                  BBlock(conv, n_feats * 4, n_feats * 16, 3, act=relu)]

        i_l2 = [BBlock(conv, n_feats * 4, n_feats * 4, 3, act=relu)
                for _ in range(depth)]
        i_l2.append(BBlock(conv, n_feats * 4, 640, 3, act=relu))

        i_l1 = [BBlock(conv, 160, 160, 3, act=relu) for _ in range(depth)]

        m_tail = [conv(160, 4, 3)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
Example #14
0
File: cbsr.py Project: lpj0/CBSR
    def __init__(self, args, conv=common.default_conv):
        """CBSR: mean-shift normalization plus a wavelet U-shape built from
        ResBlocks (interleaved with a shared ReLU) instead of BBlocks.

        Args:
            args: namespace providing ``n_resblocks``, ``n_feats``,
                ``n_colors`` and ``rgb_range``.
            conv: convolution factory.

        (Cleanup: the original defined ``rgb_mean``/``rgb_std`` twice with
        identical values and carried large commented-out code blocks; both
        removed. The constructed network is identical.)
        """
        super(CBSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0

        act = nn.ReLU(True)

        # RGB statistics used by BOTH the subtracting and re-adding
        # MeanShift layers.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)

        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        self.DWT = common.DWT()
        self.IWT = common.IWT()
        n = n_resblocks

        # Head: 4 wavelet sub-bands per color plus 20 extra channels;
        # uses a larger kernel (kernel_size + 2 == 5) than the interior.
        m_head = [conv(args.n_colors * 4 + 20, n_feats * 2, kernel_size + 2)]

        # The SAME ReLU module instance is appended after every ResBlock;
        # ReLU holds no state, so sharing it is safe.
        d_l1 = []
        for _ in range(n):
            d_l1.append(common.ResBlock(conv, n_feats * 2, 3, act=act))
            d_l1.append(act)

        d_l2 = [common.BBlock(conv, n_feats * 8, n_feats * 4, 3, act=act)]
        for _ in range(n):
            d_l2.append(common.ResBlock(conv, n_feats * 4, 3, act=act))
            d_l2.append(act)

        pro_l3 = [common.BBlock(conv, n_feats * 16, n_feats * 8, 3, act=act)]
        for _ in range(n * 2):
            pro_l3.append(common.ResBlock(conv, n_feats * 8, 3, act=act))
            pro_l3.append(act)
        pro_l3.append(conv(n_feats * 8, n_feats * 16, 3))

        i_l2 = []
        for _ in range(n):
            i_l2.append(common.ResBlock(conv, n_feats * 4, 3, act=act))
            i_l2.append(act)
        i_l2.append(conv(n_feats * 4, n_feats * 8, 3))

        i_l1 = []
        for _ in range(n):
            i_l1.append(common.ResBlock(conv, n_feats * 2, 3, act=act))
            i_l1.append(act)

        # Tail uses a 7x7 kernel (kernel_size + 4).
        m_tail = [conv(n_feats * 2, args.n_colors * 4, kernel_size + 4)]

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*m_head)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l2 = nn.Sequential(*d_l2)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.tail = nn.Sequential(*m_tail)
        self.act = nn.Sequential(act)
Example #15
0
    def __init__(self, args):
        """MWPDO: a three-level wavelet U-net whose blocks come from
        ``mwpdo_layers`` (all constructed with p=8) instead of plain convs.

        Args:
            args: namespace providing ``n_feats`` and ``n_colors``.

        (Cleanup: removed the unused locals ``n_resblocks`` and ``n`` from
        the original; the constructed network is identical.)
        """
        super(MWPDO, self).__init__()
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        nColor = args.n_colors

        act = nn.ReLU(True)

        self.DWT = common.DWT()
        self.IWT = common.IWT()

        # Level 0: nColor -> n_feats.
        m_head = [mwpdo_layers.BBlock(nColor, n_feats, p=8, act=act,
                                      bn=False)]
        d_l0 = [mwpdo_layers.DBlock_com1(n_feats, n_feats, p=8, act=act,
                                         bn=False)]

        # Level 1: post-DWT 4x width compressed to 2x.
        d_l1 = [mwpdo_layers.BBlock1(n_feats * 4, n_feats * 2, p=8, act=act,
                                     bn=False)]
        d_l1.append(mwpdo_layers.DBlock_com1(n_feats * 2, n_feats * 2, p=8,
                                             act=act, bn=False))

        # Level 2: 8x -> 4x.
        d_l2 = [mwpdo_layers.BBlock1(n_feats * 8, n_feats * 4, p=8, act=act,
                                     bn=False)]
        d_l2.append(mwpdo_layers.DBlock_com1(n_feats * 4, n_feats * 4, p=8,
                                             act=act, bn=False))

        # Bottleneck: 16x -> 8x, process, then back to 16x for the IWT.
        pro_l3 = [mwpdo_layers.BBlock1(n_feats * 16, n_feats * 8, p=8,
                                       act=act, bn=False)]
        pro_l3.append(mwpdo_layers.DBlock_com(n_feats * 8, n_feats * 8, p=8,
                                              act=act, bn=False))
        pro_l3.append(mwpdo_layers.DBlock_inv(n_feats * 8, n_feats * 8, p=8,
                                              act=act, bn=False))
        pro_l3.append(mwpdo_layers.BBlock1(n_feats * 8, n_feats * 16, p=8,
                                           act=act, bn=False))

        # Decoder levels mirror the encoder, expanding before each IWT.
        i_l2 = [mwpdo_layers.DBlock_inv1(n_feats * 4, n_feats * 4, p=8,
                                         act=act, bn=False)]
        i_l2.append(mwpdo_layers.BBlock1(n_feats * 4, n_feats * 8, p=8,
                                         act=act, bn=False))

        i_l1 = [mwpdo_layers.DBlock_inv1(n_feats * 2, n_feats * 2, p=8,
                                         act=act, bn=False)]
        i_l1.append(mwpdo_layers.BBlock1(n_feats * 2, n_feats * 4, p=8,
                                         act=act, bn=False))

        i_l0 = [mwpdo_layers.DBlock_inv1(n_feats, n_feats, p=8, act=act,
                                         bn=False)]

        # NOTE(review): the tail consumes n_feats * 8 channels rather than
        # n_feats — presumably the forward pass feeds it stacked features;
        # confirm against forward().
        m_tail = [common.default_conv(n_feats * 8, nColor, kernel_size)]

        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l0 = nn.Sequential(*d_l0)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.i_l0 = nn.Sequential(*i_l0)
        self.tail = nn.Sequential(*m_tail)
Example #16
0
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12),
                 num_init_features=4,
                 bn_size=4,
                 drop_rate=0):
        """DenseNet-style U-net that downsamples with DWT and upsamples with
        IWT rather than pooling / transposed convolutions.

        Args:
            growth_rate: channels added by each dense layer.
            block_config: (num_layers1, num_layers2) layer counts for the
                dense blocks.
            num_init_features: channels produced by the stem 1x1 conv.
            bn_size: bottleneck width multiplier inside the dense layers.
            drop_rate: dropout rate inside the dense layers.

        The ``out: b, C, H, W`` comments below come from the original author
        and assume the default arguments with a 4x128x128 input — confirm
        against forward().
        """
        super(DenseWTUnet, self).__init__()

        self.DWT = common.DWT()  # 2-D discrete wavelet transform
        self.IWT = common.IWT()  # inverse 2-D discrete wavelet transform

        # Stem: 1x1 conv + BN + ReLU. out: b, 4, 128, 128
        self.features = nn.Sequential(
            OrderedDict([("conv0",
                          nn.Conv2d(4,
                                    num_init_features,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm0", nn.BatchNorm2d(num_init_features)),
                         ("relu0", nn.ReLU(inplace=True))]))
        num_layers1 = block_config[0]
        num_layers2 = block_config[1]
        # out: b, 196, 128, 128
        self.block1 = common._DenseBlock(num_layers1, num_init_features,
                                         bn_size, growth_rate, drop_rate)
        # 4 + 32 * 6 = 196 with the defaults.
        num_features1 = num_init_features + growth_rate * num_layers1  # 196
        # Input width is num_features1 * 4 because a DWT precedes this block.
        # out: b, 1168, 64, 64
        self.block2 = common._DenseBlock(num_layers2, num_features1 * 4,
                                         bn_size, growth_rate, drop_rate)
        num_features2 = num_features1 * 4 + growth_rate * num_layers2
        # 1x1 bottleneck down to 256 channels. out: b, 256, 64, 64
        self.features1 = nn.Sequential(
            OrderedDict([("conv11",
                          nn.Conv2d(num_features2,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm11", nn.BatchNorm2d(256)),
                         ("relu11", nn.ReLU(inplace=True))]))
        self.block3 = common._DenseBlock(num_layers2, 256, bn_size,
                                         growth_rate, drop_rate)
        # block3 output: 256 + growth_rate * num_layers2 (640 with defaults).
        # 1x1 bottleneck down to 24 channels. out: b, 24, 128, 128
        # NOTE(review): in channels are 160 here, i.e. 640 / 4 — presumably
        # an IWT sits between block3 and this conv; confirm in forward().
        self.features2 = nn.Sequential(
            OrderedDict([("conv22",
                          nn.Conv2d(160,
                                    24,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm22", nn.BatchNorm2d(24)),
                         ("relu22", nn.ReLU(inplace=True))]))
        # Fuses the 24 upsampled channels with the num_features1 skip path.
        # out: b, 256, 64, 64
        self.block4 = common._DenseBlock(num_layers1, num_features1 + 24,
                                         bn_size, growth_rate, drop_rate)
        # 220 == num_features1 + 24 with defaults; total 412 after block4.
        num_features3 = 220 + growth_rate * num_layers1  # 412

        # Final 1x1 projection back to 4 channels (wavelet sub-bands).
        self.features3 = nn.Sequential(
            OrderedDict([("conv33",
                          nn.Conv2d(num_features3,
                                    4,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0,
                                    bias=False)),
                         ("norm33", nn.BatchNorm2d(4)),
                         ("relu33", nn.ReLU(inplace=True))]))

        # Parameter initialization.
        # NOTE(review): for nn.Linear only the bias is initialized; the
        # weight keeps PyTorch's default init — confirm this is intentional.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
Example #17
0
def train(args):
    """Train MWRN_lv3 on level-3 wavelet coefficients.

    Builds the dataloader, restores the latest checkpoint when one exists,
    then optimizes the level-3 loss with Adam + MultiStepLR, periodically
    saving 'latest'/'best' checkpoints.

    Fixes vs. original: ``device == 'cuda'`` compared a torch.device to a
    string — replaced with ``device.type == 'cuda'``; removed a duplicate
    ``device = ...`` assignment; narrowed the bare ``except:`` so
    KeyboardInterrupt is no longer swallowed.

    Args:
        args: namespace with noise_dir/gt_dir, image_size, batch_size,
            num_workers, checkpoint dir, epoch count, and the
            save_every/loss_every logging intervals.
    """
    data_set = SingleLoader(noise_dir=args.noise_dir,
                            gt_dir=args.gt_dir,
                            image_size=args.image_size)
    data_loader = DataLoader(data_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    loss_basic = BasicLoss()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint_dir = args.checkpoint
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    model = MWRN_lv3().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               [5, 10, 15, 20, 25, 30], 0.5)
    optimizer.zero_grad()
    average_loss = MovingAverage(args.save_every)
    try:
        checkpoint = load_checkpoint(checkpoint_dir, device.type == 'cuda',
                                     'latest')
        start_epoch = checkpoint['epoch']
        global_step = checkpoint['global_iter']
        best_loss = checkpoint['best_loss']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('=> loaded checkpoint (epoch {}, global_step {})'.format(
            start_epoch, global_step))
    except Exception:
        # Best-effort resume: any load failure means starting from scratch.
        start_epoch = 0
        global_step = 0
        best_loss = np.inf
        print('=> no checkpoint file to be loaded.')
    DWT = common.DWT()
    param = [x for _, x in model.named_parameters()]
    clip_grad_D = 1e4
    grad_norm_D = 0
    for epoch in range(start_epoch, args.epoch):
        for step, (noise, gt) in enumerate(data_loader):
            noise = noise.to(device)
            gt = gt.to(device)
            # Three DWT levels for clean and noisy images; only the
            # coarsest level (x3 / y3) feeds the level-3 model and loss.
            x1 = DWT(gt).to(device)
            x2 = DWT(x1).to(device)
            x3 = DWT(x2).to(device)

            y1 = DWT(noise).to(device)
            y2 = DWT(y1).to(device)
            y3 = DWT(y2).to(device)
            lv3_out, img_lv3 = model(y3, None)
            loss = loss_basic(x3, img_lv3)
            optimizer.zero_grad()
            loss.backward()
            # Running mean of gradient norms drives an adaptive clip bound
            # (tightened once per epoch below).
            total_norm_D = nn.utils.clip_grad_norm_(param, clip_grad_D)
            grad_norm_D = (grad_norm_D * (step / (step + 1)) + total_norm_D /
                           (step + 1))
            optimizer.step()
            average_loss.update(loss)
            if global_step % args.save_every == 0:
                print("Save : epoch ", epoch,
                      " step : ", global_step, " with avg loss : ",
                      average_loss.get_value(), ",   best loss : ", best_loss)
                is_best = average_loss.get_value() < best_loss
                if is_best:
                    best_loss = average_loss.get_value()
                save_dict = {
                    'epoch': epoch,
                    'global_iter': global_step,
                    'state_dict': model.state_dict(),
                    'best_loss': best_loss,
                    'optimizer': optimizer.state_dict(),
                }
                save_checkpoint(save_dict, is_best, checkpoint_dir,
                                global_step)
            if global_step % args.loss_every == 0:
                print(global_step, ": ", average_loss.get_value())
            global_step += 1
        clip_grad_D = min(clip_grad_D, grad_norm_D)
        scheduler.step()
        print("Epoch : ", epoch, "end at step: ", global_step)