def run_video(self, video):
    """Super-resolve every frame of a video and reassemble an output clip.

    Args:
        video (str): Path to the input video file.

    Returns:
        tuple(str, str): (printf-style pattern of the predicted frames,
        path of the rendered ``*_realsr_out.mp4`` video).
    """
    # NOTE: keeps the historical naming rule (truncate at the first dot),
    # matching the sibling predictors in this file.
    base_name = os.path.basename(video).split('.')[0]
    output_path = os.path.join(self.output, base_name)
    pred_frame_path = os.path.join(output_path, 'frames_pred')
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(pred_frame_path, exist_ok=True)

    # The capture is opened only to query the frame rate; release it so
    # the underlying file handle is not leaked (it was never released).
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()

    out_path = video2frames(video, output_path)
    frames = sorted(glob.glob(os.path.join(out_path, '*.png')))

    for frame in tqdm(frames):
        pred_img = self.run_image(frame)
        frame_name = os.path.basename(frame)
        pred_img.save(os.path.join(pred_frame_path, frame_name))

    frame_pattern_combined = os.path.join(pred_frame_path, '%08d.png')
    vid_out_path = os.path.join(output_path,
                                '{}_realsr_out.mp4'.format(base_name))
    frames2video(frame_pattern_combined, vid_out_path, str(int(fps)))

    return frame_pattern_combined, vid_out_path
def run(self, video_path):
    """Run EDVR inference on every frame of a video and rebuild the clip.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        tuple(str, str): (printf-style pattern of the predicted frames,
        path of the rendered ``*_edvr_out.mp4`` video).
    """
    vid = video_path
    base_name = os.path.basename(vid).split('.')[0]
    output_path = os.path.join(self.output, base_name)
    pred_frame_path = os.path.join(output_path, 'frames_pred')
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(pred_frame_path, exist_ok=True)

    # Query the source frame rate, then release the capture so the file
    # handle is not leaked (it was never released).
    cap = cv2.VideoCapture(vid)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()

    out_path = video2frames(vid, output_path)
    frames = sorted(glob.glob(os.path.join(out_path, '*.png')))

    dataset = EDVRDataset(frames)

    # Per-iteration wall-clock timings (kept for profiling).
    periods = []
    cur_time = time.time()
    for infer_iter, data in enumerate(tqdm(dataset)):
        data_feed_in = [data[0]]
        outs = self.base_forward(np.array(data_feed_in))
        infer_result_list = [item for item in outs]
        frame_path = data[1]

        img_i = get_img(infer_result_list[0])
        save_img(
            img_i,
            os.path.join(pred_frame_path, os.path.basename(frame_path)))

        prev_time = cur_time
        cur_time = time.time()
        periods.append(cur_time - prev_time)

    frame_pattern_combined = os.path.join(pred_frame_path, '%08d.png')
    vid_out_path = os.path.join(self.output,
                                '{}_edvr_out.mp4'.format(base_name))
    frames2video(frame_pattern_combined, vid_out_path, str(int(fps)))

    return frame_pattern_combined, vid_out_path
def run(self, video_path):
    """Interpolate intermediate frames of a single video (DAIN-style).

    Extracts frames, optionally removes duplicate frames, pads each frame
    pair to a multiple of 128 pixels, synthesizes intermediate frames
    with the network, then combines originals and interpolations into a
    video at ``times_interp`` times the original frame rate.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        tuple(str, str) | None: (combined frame pattern, output video
        path), or ``None`` when the first frame is not 3-channel.
    """
    frame_path_input = os.path.join(self.output_path, 'frames-input')
    frame_path_interpolated = os.path.join(self.output_path,
                                           'frames-interpolated')
    frame_path_combined = os.path.join(self.output_path, 'frames-combined')
    video_path_output = os.path.join(self.output_path, 'videos-output')

    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(self.output_path, exist_ok=True)
    os.makedirs(frame_path_input, exist_ok=True)
    os.makedirs(frame_path_interpolated, exist_ok=True)
    os.makedirs(frame_path_combined, exist_ok=True)
    os.makedirs(video_path_output, exist_ok=True)

    timestep = self.time_step
    num_frames = int(1.0 / timestep) - 1

    # Query the source frame rate, then release the capture so the file
    # handle is not leaked (it was never released).
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    print("Old fps (frame rate): ", fps)

    times_interp = int(1.0 / timestep)
    r2 = str(int(fps) * times_interp)
    print("New fps (frame rate): ", r2)

    out_path = video2frames(video_path, frame_path_input)

    vidname = video_path.split('/')[-1].split('.')[0]

    frames = sorted(glob.glob(os.path.join(out_path, '*.png')))
    orig_frames = len(frames)
    need_frames = orig_frames * times_interp

    if self.remove_duplicates:
        # After dropping duplicates the effective timestep must be
        # recomputed so the output still reaches the target frame count.
        frames = self.remove_duplicate_frames(out_path)
        left_frames = len(frames)
        timestep = left_frames / need_frames
        num_frames = int(1.0 / timestep) - 1

    img = imread(frames[0])

    int_width = img.shape[1]
    int_height = img.shape[0]
    channel = img.shape[2]
    if not channel == 3:
        # Only 3-channel input is supported.
        return

    # Pad both dimensions up to a multiple of 128; already-aligned
    # dimensions still receive a symmetric 32-pixel border.
    if int_width != ((int_width >> 7) << 7):
        int_width_pad = (((int_width >> 7) + 1) << 7)  # more than necessary
        padding_left = int((int_width_pad - int_width) / 2)
        padding_right = int_width_pad - int_width - padding_left
    else:
        int_width_pad = int_width
        padding_left = 32
        padding_right = 32

    if int_height != ((int_height >> 7) << 7):
        int_height_pad = (((int_height >> 7) + 1) << 7)  # more than necessary
        padding_top = int((int_height_pad - int_height) / 2)
        padding_bottom = int_height_pad - int_height - padding_top
    else:
        int_height_pad = int_height
        padding_top = 32
        padding_bottom = 32

    frame_num = len(frames)

    os.makedirs(os.path.join(frame_path_interpolated, vidname),
                exist_ok=True)
    os.makedirs(os.path.join(frame_path_combined, vidname), exist_ok=True)

    for i in tqdm(range(frame_num - 1)):
        first = frames[i]
        second = frames[i + 1]

        img_first = imread(first)
        img_second = imread(second)

        # CHW float arrays scaled to [0, 1].
        X0 = img_first.astype('float32').transpose((2, 0, 1)) / 255
        X1 = img_second.astype('float32').transpose((2, 0, 1)) / 255

        assert (X0.shape[1] == X1.shape[1])
        assert (X0.shape[2] == X1.shape[2])

        X0 = np.pad(X0, ((0, 0), (padding_top, padding_bottom),
                         (padding_left, padding_right)), mode='edge')
        X1 = np.pad(X1, ((0, 0), (padding_top, padding_bottom),
                         (padding_left, padding_right)), mode='edge')

        # Stack the pair as shape (2, 1, C, H, W) for the network.
        X0 = np.expand_dims(X0, axis=0)
        X1 = np.expand_dims(X1, axis=0)
        X0 = np.expand_dims(X0, axis=0)
        X1 = np.expand_dims(X1, axis=0)
        X = np.concatenate((X0, X1), axis=0)

        o = self.base_forward(X)

        y_ = o[0]
        # Undo the padding and scale back to the 8-bit HWC range.
        y_ = [
            np.transpose(
                255.0 * item.clip(
                    0, 1.0)[0, :, padding_top:padding_top + int_height,
                            padding_left:padding_left + int_width],
                (1, 2, 0)) for item in y_
        ]
        time_offsets = [
            kk * timestep for kk in range(1, 1 + num_frames, 1)
        ]

        count = 1
        # zip truncates to the shorter sequence, so the number of saved
        # frames is bounded by both the outputs and the offsets.
        for item, time_offset in zip(y_, time_offsets):
            out_dir = os.path.join(frame_path_interpolated, vidname,
                                   "{:0>6d}_{:0>4d}.png".format(i, count))
            count = count + 1
            imsave(out_dir, np.round(item).astype(np.uint8))

    num_frames = int(1.0 / timestep) - 1

    input_dir = os.path.join(frame_path_input, vidname)
    interpolated_dir = os.path.join(frame_path_interpolated, vidname)
    combined_dir = os.path.join(frame_path_combined, vidname)
    self.combine_frames(input_dir, interpolated_dir, combined_dir,
                        num_frames)

    frame_pattern_combined = os.path.join(frame_path_combined, vidname,
                                          '%08d.png')
    video_pattern_output = os.path.join(video_path_output, vidname + '.mp4')
    if os.path.exists(video_pattern_output):
        os.remove(video_pattern_output)
    frames2video(frame_pattern_combined, video_pattern_output, r2)

    return frame_pattern_combined, video_pattern_output
def run(self):
    """Interpolate frames for one video or a directory of ``.mp4`` files.

    For each video: frames are extracted, consecutive frame pairs are
    padded to a multiple of 128 pixels and fed to the network to
    synthesize intermediate frames (pairs whose grayscale correlation
    falls below ``self.key_frame_thread`` are treated as scene cuts and
    the first frame is repeated instead), then originals and
    interpolations are combined into a video at ``times_interp`` times
    the original frame rate.

    Returns:
        tuple(str, str) | None: (combined frame pattern, output video
        path) of the last processed video, or ``None`` when no video
        was processed.
    """
    frame_path_input = os.path.join(self.output_path, 'frames-input')
    frame_path_interpolated = os.path.join(self.output_path,
                                           'frames-interpolated')
    frame_path_combined = os.path.join(self.output_path, 'frames-combined')
    video_path_output = os.path.join(self.output_path, 'videos-output')

    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(self.output_path, exist_ok=True)
    os.makedirs(frame_path_input, exist_ok=True)
    os.makedirs(frame_path_interpolated, exist_ok=True)
    os.makedirs(frame_path_combined, exist_ok=True)
    os.makedirs(video_path_output, exist_ok=True)

    timestep = self.time_step
    num_frames = int(1.0 / timestep) - 1

    if self.video_path.endswith('.mp4'):
        videos = [self.video_path]
    else:
        videos = sorted(glob.glob(os.path.join(self.video_path, '*.mp4')))

    result = None  # (frame pattern, video path) of the last video done
    for vid in videos:
        print("Interpolating video:", vid)
        # Query the frame rate, then release the capture so the file
        # handle is not leaked (it was never released).
        cap = cv2.VideoCapture(vid)
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        print("Old fps (frame rate): ", fps)

        times_interp = int(1.0 / timestep)
        r2 = str(int(fps) * times_interp)
        print("New fps (frame rate): ", r2)

        out_path = video2frames(vid, frame_path_input)
        vidname = vid.split('/')[-1].split('.')[0]

        tot_timer = AverageMeter()
        proc_timer = AverageMeter()
        end = time.time()

        frames = sorted(glob.glob(os.path.join(out_path, '*.png')))

        img = imread(frames[0])
        int_width = img.shape[1]
        int_height = img.shape[0]
        channel = img.shape[2]
        if not channel == 3:
            # Only 3-channel input is supported; skip this video.
            continue

        # Pad both dimensions up to a multiple of 128; already-aligned
        # dimensions still receive a symmetric 32-pixel border.
        if int_width != ((int_width >> 7) << 7):
            int_width_pad = (
                ((int_width >> 7) + 1) << 7)  # more than necessary
            padding_left = int((int_width_pad - int_width) / 2)
            padding_right = int_width_pad - int_width - padding_left
        else:
            int_width_pad = int_width
            padding_left = 32
            padding_right = 32

        if int_height != ((int_height >> 7) << 7):
            int_height_pad = (
                ((int_height >> 7) + 1) << 7)  # more than necessary
            padding_top = int((int_height_pad - int_height) / 2)
            padding_bottom = int_height_pad - int_height - padding_top
        else:
            int_height_pad = int_height
            padding_top = 32
            padding_bottom = 32

        frame_num = len(frames)
        print('processing {} frames, from video: {}'.format(frame_num, vid))

        os.makedirs(os.path.join(frame_path_interpolated, vidname),
                    exist_ok=True)
        os.makedirs(os.path.join(frame_path_combined, vidname),
                    exist_ok=True)

        for i in tqdm(range(frame_num - 1)):
            first = frames[i]
            second = frames[i + 1]

            img_first = imread(first)
            img_second = imread(second)

            # Scene-cut test: low correlation between consecutive
            # grayscale frames marks a key frame.
            img_first_gray = np.dot(
                img_first[..., :3],
                [0.299, 0.587, 0.114]).flatten(order='C')
            img_second_gray = np.dot(
                img_second[..., :3],
                [0.299, 0.587, 0.114]).flatten(order='C')
            corr = np.corrcoef(img_first_gray, img_second_gray)[0, 1]
            key_frame = corr < self.key_frame_thread

            X0 = img_first.astype('float32').transpose((2, 0, 1)) / 255
            X1 = img_second.astype('float32').transpose((2, 0, 1)) / 255

            if key_frame:
                # Interpolating across a cut is meaningless: repeat the
                # first frame for every intermediate slot instead.
                y_ = [
                    np.transpose(255.0 * X0.clip(0, 1.0), (1, 2, 0))
                    for _ in range(num_frames)
                ]
            else:
                assert (X0.shape[1] == X1.shape[1])
                assert (X0.shape[2] == X1.shape[2])

                X0 = np.pad(X0, ((0, 0), (padding_top, padding_bottom),
                                 (padding_left, padding_right)),
                            mode='edge')
                X1 = np.pad(X1, ((0, 0), (padding_top, padding_bottom),
                                 (padding_left, padding_right)),
                            mode='edge')

                # Stack the pair as shape (2, 1, C, H, W).
                X0 = np.expand_dims(X0, axis=0)
                X1 = np.expand_dims(X1, axis=0)
                X0 = np.expand_dims(X0, axis=0)
                X1 = np.expand_dims(X1, axis=0)
                X = np.concatenate((X0, X1), axis=0)

                proc_end = time.time()
                o = self.exe.run(self.program,
                                 fetch_list=self.fetch_targets,
                                 feed={"image": X})
                y_ = o[0]

                proc_timer.update(time.time() - proc_end)
                tot_timer.update(time.time() - end)
                end = time.time()

                # Undo the padding and scale back to the 8-bit HWC range.
                y_ = [
                    np.transpose(
                        255.0 * item.clip(
                            0, 1.0)[0, :,
                                    padding_top:padding_top + int_height,
                                    padding_left:padding_left + int_width],
                        (1, 2, 0)) for item in y_
                ]

            time_offsets = [
                kk * timestep for kk in range(1, 1 + num_frames, 1)
            ]

            count = 1
            for item, time_offset in zip(y_, time_offsets):
                out_dir = os.path.join(
                    frame_path_interpolated, vidname,
                    "{:0>6d}_{:0>4d}.png".format(i, count))
                count = count + 1
                imsave(out_dir, np.round(item).astype(np.uint8))

        num_frames = int(1.0 / timestep) - 1

        input_dir = os.path.join(frame_path_input, vidname)
        interpolated_dir = os.path.join(frame_path_interpolated, vidname)
        combined_dir = os.path.join(frame_path_combined, vidname)
        combine_frames(input_dir, interpolated_dir, combined_dir,
                       num_frames)

        frame_pattern_combined = os.path.join(frame_path_combined, vidname,
                                              '%08d.png')
        video_pattern_output = os.path.join(video_path_output,
                                            vidname + '.mp4')
        if os.path.exists(video_pattern_output):
            os.remove(video_pattern_output)
        frames2video(frame_pattern_combined, video_pattern_output, r2)

        # BUG FIX: the original `return` lived inside this loop, so only
        # the first video of a directory was ever processed. Remember the
        # result and keep iterating instead.
        result = (frame_pattern_combined, video_pattern_output)

    return result