Example #1
    def test_video_truth(self, path, name='result', reuse=False, part=50):
        save_path=join(path,name)
        automkdir(save_path)
        inp_path=join(path,'truth')
        imgs=sorted(glob.glob(join(inp_path,'*.png')))
        max_frame=len(imgs)
        imgs=np.array([cv2_imread(i) for i in imgs])/255.

        if part>max_frame:
            part=max_frame
        if max_frame%part ==0 :
            num_once=max_frame//part
        else:
            num_once=max_frame//part+1
        
        h,w,c=imgs[0].shape

        L_test = tf.placeholder(tf.float32, shape=[num_once, self.num_frames, h//self.scale, w//self.scale, 3], name='L_test')
        SR_test=self.forward(L_test)
        if not reuse:
            self.img_hr=tf.placeholder(tf.float32, shape=[None, None, None, 3], name='H_truth')
            self.img_lr=DownSample_4D(self.img_hr, BLUR, scale=self.scale)
            config = tf.ConfigProto() 
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config) 
            #sess=tf.Session()
            self.sess=sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)
        
        lrs=self.sess.run(self.img_lr,feed_dict={self.img_hr:imgs})

        lr_list=[]
        max_frame=lrs.shape[0]
        for i in range(max_frame):
            index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
            index=np.clip(index,0,max_frame-1).tolist()
            lr_list.append(np.array([lrs[j] for j in index]))
        lr_list=np.array(lr_list)
        
        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(lrs.shape[0],lrs.shape[1:]))
        h,w,c=lrs.shape[1:]
        
        
        all_time=[]
        for i in trange(part):
            st_time=time.time()
            sr=self.sess.run(SR_test,feed_dict={L_test : lr_list[i*num_once:(i+1)*num_once]})
            all_time.append(time.time()-st_time)
            for j in range(sr.shape[0]):
                img=sr[j][0]*255.
                img=np.clip(img,0,255)
                img=np.round(img,0).astype(np.uint8)
                cv2_imsave(join(save_path, '{:0>4}.png'.format(i*num_once+j)),img)
        if max_frame>0:
            all_time=np.array(all_time)
            # the first run is excluded from the average (graph warm-up)
            print('spent {} s in total and {} s in average'.format(np.sum(all_time),np.mean(all_time[1:])))
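Example #1 (like Examples #2, #3 and #7 below) builds its per-frame network input with a clipped sliding window: each frame is stacked with its num_frames temporal neighbours, and out-of-range indices are clamped so the first and last frames are simply repeated at the clip borders. A minimal standalone sketch of that indexing, with made-up sizes:

import numpy as np

num_frames = 7                 # temporal window size (odd, as in the snippets)
max_frame = 5                  # pretend the clip has only five frames
half = num_frames // 2

for i in range(max_frame):
    window = np.clip(np.arange(i - half, i + half + 1), 0, max_frame - 1)
    print(i, window.tolist())
# 0 [0, 0, 0, 0, 1, 2, 3]      <- leading neighbours replicated
# 4 [1, 2, 3, 4, 4, 4, 4]      <- trailing neighbours replicated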
Example #2
    def test_video_lr(self, path, name='result', reuse=False, part=8):
        save_path = join(path, name)
        automkdir(save_path)

        inp_path = join(path, 'blur{}'.format(self.scale))
        imgs = sorted(glob.glob(join(inp_path, '*.png')))
        imgs = np.array([cv2_imread(i) / 255. for i in imgs])

        lr_list = []
        max_frame = imgs.shape[0]
        for i in range(max_frame):
            index = np.array([
                i for i in range(i - self.num_frames // 2, i +
                                 self.num_frames // 2 + 1)
            ])
            index = np.clip(index, 0, max_frame - 1).tolist()
            lr_list.append(np.array([imgs[j] for j in index]))
        lr_list = np.array(lr_list)

        if not reuse:
            self.build()
            sess = tf.Session()
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100,
                                        keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)

        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(lr_list.shape[0],
                                               lr_list.shape[1:]))

        part = min(part, max_frame)
        if max_frame % part == 0:
            num_once = max_frame // part
        else:
            num_once = max_frame // part + 1

        all_time = 0
        for i in trange(part):
            st_time = time.time()
            sr = self.sess.run(self.SR,
                               feed_dict={
                                   self.L:
                                   lr_list[i * num_once:(i + 1) * num_once],
                                   self.is_train: False
                               })
            once_time = time.time() - st_time
            if i > 0:
                all_time += once_time
            for j in range(sr.shape[0]):
                img = sr[j][0] * 255.
                img = np.clip(img, 0, 255).astype(np.uint8)
                imgname = '{:0>4}.png'.format(i * num_once + j)
                cv2_imsave(join(save_path, imgname), img)
        # the first (warm-up) batch is excluded from the timing
        print('spent {} s in total and {} s in average'.format(
            all_time, all_time / (max_frame - 1)))
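The part/num_once arithmetic in Examples #1, #2 and #7 is a plain ceiling division: the clip is split into part chunks of num_once frames each. A one-line equivalent is sketched below; note that when max_frame is not a multiple of part the trailing chunks come out shorter (or empty), which Example #2 tolerates but the fixed-shape L_test placeholder of Example #1 would not.

max_frame, part = 55, 8              # hypothetical values
# ceil(max_frame / part), equivalent to the if/else above
num_once = -(-max_frame // part)     # or: (max_frame + part - 1) // part
assert num_once == 7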
Example #3
    def test_video_lr(self, path, output_path, exp_name='PFNL_result'):
        num_once = 1
        _, video_name = os.path.split(path)
        save_path = join(output_path, exp_name, video_name)
        automkdir(save_path)
        imgs = sorted(glob.glob(join(path, '*.png')))
        max_frame = len(imgs)
        lrs = np.array([cv2_imread(i) for i in imgs]) / 255.
        lrs = lrs.astype('float32')
        h, w, _ = lrs[0].shape
        lr_list = []

        for i in range(max_frame):
            index = np.array([
                i for i in range(i - self.num_frames // 2, i +
                                 self.num_frames // 2 + 1)
            ])
            index = np.clip(index, 0, max_frame - 1).tolist()
            lr_list.append(np.array([lrs[j] for j in index]))
        del lrs
        lr_list = np.array(lr_list)

        # L_test = tf.placeholder(tf.float32, shape=[1, self.num_frames, h, w, 3], name='L_test')
        # SR_test=self.forward(L_test)

        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(max_frame, [h, w]))

        all_time = []
        for i in trange(max_frame):
            st_time = time.time()
            sr = self.sess.run(self.SR_test,
                               feed_dict={
                                   self.L_test:
                                   lr_list[i * num_once:(i + 1) * num_once]
                               })
            all_time.append(time.time() - st_time)
            for j in range(sr.shape[0]):
                img = sr[j] * 255.
                img = np.clip(img, 0, 255)
                img = np.round(img, 0).astype(np.uint8)
                cv2_imsave(
                    join(save_path, '{:0>4}.jpg'.format(i * num_once + j)),
                    img, 100)

        if max_frame > 0:
            all_time = np.array(all_time)
            # the first run is excluded from the average (graph warm-up)
            print('spent {} s in total and {} s in average'.format(
                np.sum(all_time), np.mean(all_time[1:])))

        del imgs
        # del L_test
        # del SR_test
        # del lrs
        del lr_list
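A hedged usage sketch for the method above; the class name PFNL, the constructor call and the paths are assumptions, since the snippets show individual methods rather than the surrounding class or checkpoint handling:

# minimal sketch, assuming a model class (here called PFNL) that exposes
# test_video_lr() as in Example #3 and already holds a restored tf.Session
# in self.sess plus the self.L_test / self.SR_test graph nodes it references
model = PFNL()                              # hypothetical constructor
model.test_video_lr('/data/clips/clip_000', # directory of LR *.png frames
                    '/data/output',         # results land in <output_path>/<exp_name>/<video_name>/
                    exp_name='PFNL_result')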
Example #4
    def test_video(self, path, name='result', reuse=False):
        save_path = join(path, name)
        automkdir(save_path)

        inp_path = join(path, 'blur{}'.format(self.scale))
        imgs = sorted(glob.glob(join(inp_path, '*.png')))
        imgs = np.array([cv2_imread(i) / 255. for i in imgs])
        n, h, w, c = imgs.shape
        max_frame = n

        self.L = tf.placeholder(tf.float32, shape=[1, h, w, 3], name='L_input')
        self.LP = tf.placeholder(tf.float32,
                                 shape=[1, h, w, 3],
                                 name='Previous_L_input')
        self.est = tf.placeholder(tf.float32,
                                  shape=[1, h * self.scale, w * self.scale, 3],
                                  name='est')
        self.sr0 = self.forward(self.L)
        self.sr1 = self.forward(self.L, self.LP, self.est)
        if not reuse:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            #sess=tf.Session()
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100,
                                        keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)

        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(imgs.shape[0], imgs.shape[1:]))

        all_time = []
        for i in trange(max_frame):
            st_time = time.time()
            # the first frame has no previous estimate; later frames feed the
            # previous SR output back in (recurrent inference)
            if i == 0:
                SR = self.sess.run(self.sr0, feed_dict={self.L: imgs[i:i + 1]})
            else:
                SR = self.sess.run(self.sr1,
                                   feed_dict={
                                       self.L: imgs[i:i + 1],
                                       self.LP: imgs[i - 1:i],
                                       self.est: SR
                                   })
            all_time.append(time.time() - st_time)
            img = SR[0] * 255.
            img = np.clip(img, 0, 255).astype(np.uint8)
            cv2_imsave(join(save_path, '{:0>4}.png'.format(i)), img)
        if max_frame > 0:
            all_time = np.array(all_time)
            print('spent {} s in total and {} s in average'.format(
                np.sum(all_time), np.mean(all_time[1:])))
Example #5
    def testvideo(self,
                  dataPath=None,
                  savename='result',
                  reuse=False,
                  scale=4,
                  num_frames=3):
        # the scale / num_frames arguments are overridden by the model's own settings
        scale = self.scale
        num_frames = self.num_frames
        inList = sorted(
            glob.glob(os.path.join(dataPath, 'blur{}/*.png').format(scale)))
        inp = [cv2_imread(i).astype(np.float32) / 255.0 for i in inList]
        max_frame = len(inList)

        print('Testing path: {}'.format(dataPath))
        print('# of testing frames: {}'.format(len(inList)))

        savepath = os.path.join(dataPath, savename)
        automkdir(savepath)

        all_time = []
        for idx0 in trange(len(inList)):
            T = num_frames // 2

            imgs = [inp[0] for i in np.arange(idx0 - T, 0)]
            imgs.extend([inp[i] for i in np.arange(max(0, idx0 - T), idx0)])
            imgs.extend([
                inp[i] for i in np.arange(idx0, min(len(inList), idx0 + T + 1))
            ])
            imgs.extend(
                [inp[-1] for i in np.arange(idx0 + T,
                                            len(inList) - 1, -1)])

            dims = imgs[0].shape
            if len(dims) == 2:
                imgs = [np.expand_dims(i, -1) for i in imgs]
            h, w, c = imgs[0].shape
            out_h = h * scale
            out_w = w * scale
            padh = int(ceil(h / 4.0) * 4.0 - h)
            padw = int(ceil(w / 4.0) * 4.0 - w)
            imgs = [
                np.pad(i, [[0, padh], [0, padw], [0, 0]], 'edge') for i in imgs
            ]
            imgs = np.expand_dims(np.stack(imgs, axis=0), 0)

            if idx0 == 0:
                # frames_lr = tf.convert_to_tensor(imgs, tf.float32)
                frames_lr = tf.placeholder(dtype=tf.float32, shape=imgs.shape)
                frames_ref_ycbcr = rgb2ycbcr(frames_lr[:, T:T + 1, :, :, :])
                # frames_ref_ycbcr = tf.tile(frames_ref_ycbcr, [1, num_frames, 1, 1, 1])
                output = self.forward(frames_lr,
                                      is_training=False,
                                      reuse=reuse)
                # print (frames_lr_ycbcr.get_shape(), h, w, padh, padw)
                if len(dims) == 3:
                    output_rgb = ycbcr2rgb(
                        tf.concat([
                            output,
                            resize_images(
                                frames_ref_ycbcr, [(h + padh) * scale,
                                                   (w + padw) * scale],
                                method=2)[:, :, :, :, 1:3]
                        ], -1))
                else:
                    output_rgb = output
                output = output[:, :, :out_h, :out_w, :]
                output_rgb = output_rgb[:, :, :out_h, :out_w, :]

            if not reuse:
                config = tf.ConfigProto()
                config.gpu_options.allow_growth = True
                sess = tf.Session(config=config)
                #sess=tf.Session()
                self.sess = sess
                self.saver = tf.train.Saver(max_to_keep=50,
                                            keep_checkpoint_every_n_hours=1)
                self.load(self.sess, self.save_dir)
                reuse = True

            st_time = time.time()
            [imgs_hr, imgs_hr_rgb,
             uv] = self.sess.run([output, output_rgb, self.uv],
                                 feed_dict={frames_lr: imgs})
            all_time.append(time.time() - st_time)

            if len(dims) == 3:
                cv2_imsave(os.path.join(savepath, 'rgb_%03d.png' % (idx0)),
                           im2uint8(imgs_hr_rgb[0, -1, :, :, :]))

        print('SR results path: {}'.format(savepath))
        if max_frame > 0:
            all_time = np.array(all_time)
            print('spent {} s in total and {} s in average'.format(
                np.sum(all_time), np.mean(all_time[1:])))
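Example #5 super-resolves only the luma (Y) channel and recombines it with bicubically upsampled chroma from the reference frame before converting back to RGB (the tf.concat / ycbcr2rgb branch above). A rough OpenCV sketch of that recombination idea, using cv2 as a stand-in for the repo's TensorFlow colour helpers and assuming uint8 frames:

import cv2

def combine_luma_chroma(sr_y, lr_bgr):
    """Pair a super-resolved Y plane (uint8, HxW) with chroma taken from the
    low-resolution frame and upsampled bicubically."""
    h, w = sr_y.shape[:2]
    ycrcb = cv2.cvtColor(lr_bgr, cv2.COLOR_BGR2YCrCb)
    ycrcb = cv2.resize(ycrcb, (w, h), interpolation=cv2.INTER_CUBIC)
    ycrcb[:, :, 0] = sr_y                  # keep the network's luma plane
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)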
Example #6
    def testvideo(self,
                  dataPath=None,
                  savename='result',
                  reuse=False,
                  scale_factor=4,
                  num_frames=3):
        inList = sorted(
            glob.glob(
                os.path.join(dataPath, 'blur{}/*.png').format(scale_factor)))
        inp = [cv2_imread(i).astype(np.float32) / 255.0 for i in inList]

        print('Testing path: {}'.format(dataPath))
        print('# of testing frames: {}'.format(len(inList)))
        max_frame = len(inList)

        savepath = os.path.join(dataPath, savename)
        if not os.path.exists(savepath):
            os.mkdir(savepath)

        self.scale_factor = scale_factor

        all_time = []
        for idx0 in trange(len(inList)):
            T = num_frames // 2

            imgs = [inp[0] for i in np.arange(idx0 - T, 0)]
            imgs.extend([inp[i] for i in np.arange(max(0, idx0 - T), idx0)])
            imgs.extend([
                inp[i] for i in np.arange(idx0, min(len(inList), idx0 + T + 1))
            ])
            imgs.extend(
                [inp[-1] for i in np.arange(idx0 + T,
                                            len(inList) - 1, -1)])

            dims = imgs[0].shape
            if len(dims) == 2:
                imgs = [np.expand_dims(i, -1) for i in imgs]
            h, w, c = imgs[0].shape
            out_h = h * scale_factor
            out_w = w * scale_factor
            padh = int(ceil(h / 4.0) * 4.0 - h)
            padw = int(ceil(w / 4.0) * 4.0 - w)
            imgs = [
                np.pad(i, [[0, padh], [0, padw], [0, 0]], 'edge') for i in imgs
            ]
            imgs = np.expand_dims(np.stack(imgs, axis=0), 0)

            if idx0 == 0:
                # frames_lr = tf.convert_to_tensor(imgs, tf.float32)
                frames_lr = tf.placeholder(dtype=tf.float32, shape=imgs.shape)
                frames_ref_ycbcr = rgb2ycbcr(frames_lr[:, T:T + 1, :, :, :])
                frames_ref_ycbcr = tf.tile(frames_ref_ycbcr,
                                           [1, num_frames, 1, 1, 1])
                output, frame_i_fw = self.forward(frames_lr,
                                                  is_training=False,
                                                  reuse=reuse)
                # print (frames_lr_ycbcr.get_shape(), h, w, padh, padw)
                if len(dims) == 3:
                    output_rgb = ycbcr2rgb(
                        tf.concat([
                            output,
                            resize_images(
                                frames_ref_ycbcr, [(h + padh) * scale_factor,
                                                   (w + padw) * scale_factor],
                                method=2)[:, :, :, :, 1:3]
                        ], -1))
                else:
                    output_rgb = output
                output = output[:, :, :out_h, :out_w, :]
                output_rgb = output_rgb[:, :, :out_h, :out_w, :]

            if not reuse:
                config = tf.ConfigProto()
                config.gpu_options.allow_growth = True
                sess = tf.Session(config=config)
                #sess=tf.Session()
                self.sess = sess
                # summary_op = tf.summary.merge_all()
                # summary_writer = tf.summary.FileWriter('E:/PyProject/out0', sess.graph, flush_secs=30)
                self.saver = tf.train.Saver(max_to_keep=50,
                                            keep_checkpoint_every_n_hours=1)
                self.load(self.sess, self.save_dir)
                #self.flownets.load_easyflow(self.sess, os.path.join('./easyflow_log/model1', 'checkpoints'))
                reuse = True
            # frames_ref_warp = imwarp_backward(self.uv, tf.tile(tf.expand_dims(self.frame_ref_y, 1), [1, num_frames, 1, 1, 1]),
            #                                   [200, 244])
            # warp_err = tf.reduce_mean((self.frames_y - frames_ref_warp) ** 2, axis=4)
            case_path = dataPath.split('/')[-1]
            # print('Testing - ', case_path, len(imgs))
            st_time = time.time()
            [imgs_hr, imgs_hr_rgb, uv, frame_fw
             ] = self.sess.run([output, output_rgb, self.uv, frame_i_fw],
                               feed_dict={frames_lr: imgs})
            all_time.append(time.time() - st_time)
            # print('spent {} s'.format(ed_time-st_time))
            '''cv2_imsave(os.path.join(savepath, 'y_%03d.png' % (idx0)),
                              im2uint8(imgs_hr[0, -1, :, :, 0]))'''
            '''for q in range(0, 3):
                cv2_imsave(os.path.join(savepath, 'y_ff%03d_%01d.png' % (idx0, q)),
                                  im2uint8(frame_fw[q, :, :, 0]))'''
            if len(dims) == 3:
                cv2_imsave(os.path.join(savepath, '%04d.png' % (idx0)),
                           im2uint8(imgs_hr_rgb[0, -1, :, :, :]))

                # summary_str = self.sess.run(summary_op)
                # summary_writer.add_summary(summary_str, idx0)
        print('SR results path: {}'.format(savepath))
        if max_frame > 0:
            all_time = np.array(all_time)
            print('spent {} s in total and {} s in average'.format(
                np.sum(all_time), np.mean(all_time[1:])))
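Examples #5 and #6 edge-pad every frame so that height and width become multiples of 4 before inference (presumably to satisfy the network's internal downsampling), then crop the output back to out_h x out_w. The padding arithmetic in isolation:

from math import ceil
import numpy as np

h, w = 123, 250                        # hypothetical frame size
padh = int(ceil(h / 4.0) * 4.0 - h)    # 1 -> padded height 124
padw = int(ceil(w / 4.0) * 4.0 - w)    # 2 -> padded width  252
frame = np.zeros((h, w, 3), np.float32)
padded = np.pad(frame, [[0, padh], [0, padw], [0, 0]], 'edge')
assert padded.shape[:2] == (124, 252)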
Example #7
    def test_video_lr(self, path, output_path, name='PFNL_result', reuse=False, part=10000):
        _, video_name = os.path.split(path)
        save_path=join(output_path, video_name, name)
        automkdir(save_path)
        # inp_path=join(path,'blur{}'.format(self.scale))
        imgs=sorted(glob.glob(join(path,'*.png')))
        # names = os.listdir(path)
        # imgs = []
        # for name_ in names:
        #     if name_[-3]
        #     temp_path = os.path.join(path, name_)
        #     imgs.append(temp_path)
        max_frame=len(imgs)
        # lrs=np.array([cv2_imread(i) for i in imgs])/255.
        lrs = []
        for i in imgs:
            img = cv2_imread(i)
            # halve the input resolution before super-resolving
            img = cv2.resize(img, (int(img.shape[1]*0.5), int(img.shape[0]*0.5)))
            img = img / 255.
            lrs.append(img)
        lrs = np.array(lrs)
        
        if part>max_frame:
            part=max_frame
        if max_frame%part ==0 :
            num_once=max_frame//part
        else:
            num_once=max_frame//part+1
        
        h,w,c=lrs[0].shape

        L_test = tf.placeholder(tf.float32, shape=[num_once, self.num_frames, h, w, 3], name='L_test')
        SR_test=self.forward(L_test)
        if not reuse:
            config = tf.ConfigProto() 
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config) 
            #sess=tf.Session()
            self.sess=sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)
        

        lr_list=[]
        max_frame=lrs.shape[0]
        for i in range(max_frame):
            index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
            index=np.clip(index,0,max_frame-1).tolist()
            lr_list.append(np.array([lrs[j] for j in index]))
        lr_list=np.array(lr_list)
        
        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(lrs.shape[0],lrs.shape[1:]))
        h,w,c=lrs.shape[1:]
        
        all_time=[]
        for i in trange(part):
            st_time=time.time()
            sr=self.sess.run(SR_test,feed_dict={L_test : lr_list[i*num_once:(i+1)*num_once]})
            all_time.append(time.time()-st_time)
            for j in range(sr.shape[0]):
                img=sr[j][0]*255.
                img=np.clip(img,0,255)
                img=np.round(img,0).astype(np.uint8)
                cv2_imsave(join(save_path, '{:0>4}.png'.format(i*num_once+j)),img)

        if max_frame>0:
            all_time=np.array(all_time)
            # the first run is excluded from the average (graph warm-up)
            print('spent {} s in total and {} s in average'.format(np.sum(all_time),np.mean(all_time[1:])))
Example #8
    def makeup(self,
               path,
               output_path,
               exp_name='PFNL_result',
               method='replicate'):
        print(' [**] Using makeup method: ' + method)
        _, video_name = os.path.split(path)
        save_path = join(output_path, exp_name, method, video_name)
        automkdir(save_path)
        imgs = sorted(glob.glob(join(path, '*.png')))
        max_frame = len(imgs)
        lrs = np.array([cv2_imread(i) for i in imgs]) / 255.
        lrs = lrs.astype('float32')
        h, w, _ = lrs[0].shape
        lr_list = []

        deal_index_1 = np.linspace(0,
                                   self.num_frames // 2 - 1,
                                   self.num_frames // 2,
                                   dtype=int)
        deal_index_2 = np.linspace(max_frame - self.num_frames // 2,
                                   max_frame - 1,
                                   self.num_frames // 2,
                                   dtype=int)
        deal_index = np.concatenate((deal_index_1, deal_index_2), axis=0)
        # max_frame = len(deal_index)
        for i in deal_index:
            index = np.array([
                i for i in range(i - self.num_frames // 2, i +
                                 self.num_frames // 2 + 1)
            ])
            if method == 'replicate':
                # index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
                index = np.clip(index, 0, max_frame - 1)
            elif method == 'reflection':
                # index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
                for ii in range(len(index)):
                    index[ii] = index[ii] if index[ii] >= 0 else index[ii] * -1
                    index[ii] = index[ii] if index[ii] < max_frame else 2 * (
                        max_frame - 1) - index[ii]
            elif method == 'new_info':
                # index=np.array([i for i in range(i-self.num_frames//2,i+self.num_frames//2+1)])
                for ii in range(len(index)):
                    index[ii] = index[
                        ii] if index[ii] >= 0 else index[ii] * -1 + index[-1]
                    index[ii] = index[ii] if index[ii] < max_frame else index[
                        0] - (index[ii] - max_frame + 1)
            elif method == 'circle':
                for ii in range(len(index)):
                    index[ii] = index[
                        ii] if index[ii] >= 0 else self.num_frames + index[ii]
                    index[ii] = index[ii] if index[ii] < max_frame else index[
                        ii] - i + index[0] - self.num_frames // 2 - 1

            index = index.tolist()
            lr_list.append(np.array([lrs[j] for j in index]))

        del lrs
        lr_list = np.array(lr_list)

        L_test = tf.placeholder(tf.float32,
                                shape=[1, self.num_frames, h, w, 3],
                                name='L_test')
        SR_test = self.forward(L_test)

        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(max_frame, [h, w]))

        all_time = []

        for i in trange(len(deal_index)):
            # i = deal_index[cc]
            st_time = time.time()
            sr = self.sess.run(SR_test, feed_dict={L_test: lr_list[i:(i + 1)]})
            all_time.append(time.time() - st_time)
            for j in range(sr.shape[0]):
                img = sr[j][0] * 255.
                img = np.clip(img, 0, 255)
                img = np.round(img, 0).astype(np.uint8)
                cv2_imsave(join(save_path, '{:0>4}.png'.format(deal_index[i])),
                           img, 100)

        if max_frame > 0:
            all_time = np.array(all_time)
            # the first run is excluded from the average (graph warm-up)
            print('spent {} s in total and {} s in average'.format(
                np.sum(all_time), np.mean(all_time[1:])))

        del imgs
        del L_test
        del SR_test
        # del lrs
        del lr_list
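The makeup pass above re-processes only the first and last num_frames // 2 frames of a clip; the four method options differ solely in how out-of-range neighbour indices are remapped. A standalone sketch of the two simplest mappings (replicate vs. reflection), matching the logic above for a hypothetical clip:

import numpy as np

max_frame, num_frames = 10, 7
half = num_frames // 2
i = 1                                          # a frame near the left border
raw = np.arange(i - half, i + half + 1)        # [-2 -1  0  1  2  3  4]

replicate = np.clip(raw, 0, max_frame - 1)     # [ 0  0  0  1  2  3  4]
reflection = np.abs(raw)                       # mirror negative indices
reflection = np.where(reflection < max_frame,  # mirror indices past the end
                      reflection, 2 * (max_frame - 1) - reflection)
# reflection -> [ 2  1  0  1  2  3  4]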