def main():
    """Test-mode entry point: load a trained InGAN checkpoint and run the
    requested evaluations (video retargeting, collage, non-rectangular homo
    test) on the configured input image.

    Results are written to a fresh directory under the config's output path;
    the run name is prefixed with 'TEST_' to distinguish it from training runs.
    """
    conf = Config().parse(create_dir_flag=False)
    conf.name = 'TEST_' + conf.name
    conf.output_dir_path = util.prepare_result_dir(conf)
    gan = InGAN(conf)

    try:
        # Restore generator/discriminator weights from the checkpoint.
        gan.resume(conf.test_params_path, test_flag=True)
        # util.read_data returns a one-element list; unpack the single tensor.
        [input_tensor] = util.read_data(conf)

        if conf.test_video:
            retarget_video(gan, input_tensor,
                           define_video_scales(conf.test_vid_scales),
                           8, conf.output_dir_path)
        if conf.test_collage:
            generate_collage_and_outputs(conf, gan, input_tensor)
        if conf.test_non_rect:
            test_homo(conf, gan, input_tensor)

        # Parenthesized single expression: prints identically on Py2 and Py3.
        print('Done with %s' % conf.input_image_path)
    except KeyboardInterrupt:
        # Let the user abort cleanly.
        raise
    except Exception:
        # Best-effort batch behavior: log the traceback and return so other
        # inputs / callers are not taken down by one failing image.
        print_exc()
def generate_full_video(video_script, frame_shape):
    """Render the supplementary-results videos described by video_script.

    video_script: sequence of (nameses, scene_script_names, scene_script_params)
        entries, one per output .mp4; each component may be a scalar or a list
        (scalars are normalized to one-element lists below).
    frame_shape: mutable sequence [height, width] of the target frame; it is
        copied per scene and halved along one axis for split-screen scenes.

    Writes one vid<i>_<names>.mp4 per script entry into a fresh result dir.
    """
    conf = Config().parse(create_dir_flag=False)
    conf.name = 'supp_vid'
    conf.output_dir_path = util.prepare_result_dir(conf)
    n_scenes = len(video_script)
    for i, (nameses, scene_script_names, scene_script_params) in enumerate(video_script):
        # Normalize scalar script entries to lists so zip() below is uniform.
        if not isinstance(nameses, list):
            nameses = [[nameses]]
        if not isinstance(scene_script_names, list):
            scene_script_names = [scene_script_names]
            scene_script_params = [scene_script_params]
        scenes = []
        for names, scene_script_name, scene_script_param in zip(
                nameses, scene_script_names, scene_script_params):
            partial_screen_scenes = []
            for name in names:
                # INPUT_DICT[name] presumably maps to (relative image path,
                # checkpoint path starting with '/') -- TODO confirm; note the
                # image path gets an explicit '/' join but the params path does not.
                conf.input_image_path = [
                    os.path.dirname(os.path.abspath(__file__)) + '/' + INPUT_DICT[name][0]
                ]
                conf.test_params_path = os.path.dirname(
                    os.path.abspath(__file__)) + INPUT_DICT[name][1]
                # Fresh generator per input image; only the 'G' weights are loaded.
                gan = InGAN(conf)
                gan.G.load_state_dict(
                    torch.load(conf.test_params_path, map_location='cuda:0')['G'])
                # util.read_data returns a one-element list; unpack the tensor.
                [input_tensor] = util.read_data(conf)
                cur_frame_shape = frame_shape[:]  # copy: mutated below
                # 'resize_round' scenes are concatenated along axis 2, all
                # others along axis 1 (axis meaning depends on the frame
                # layout generate_one_scene produces -- TODO confirm).
                concat_axis = 2 if scene_script_name == 'resize_round' else 1
                if len(names) > 1:
                    # Split-screen: each input gets half the frame along the
                    # concat axis. NOTE(review): Python-2 integer division;
                    # under Python 3 this '/=' would leave a float in the
                    # shape -- confirm before porting.
                    cur_frame_shape[concat_axis - 1] /= 2
                cur_scene_script_param = scene_script_param[:]  # copy: filled in below
                # A None max-scale means "fill the frame": derive it from the
                # frame/input size ratio. Assumes input_tensor is NCHW
                # (shape[2]=H, shape[3]=W) -- TODO confirm.
                if scene_script_param[1] is None:
                    cur_scene_script_param[1] = cur_frame_shape[0] * 1.0 / input_tensor.shape[2]
                    print 'max scale vertical:', cur_scene_script_param[1]
                if cur_scene_script_param[3] is None:
                    cur_scene_script_param[3] = cur_frame_shape[1] * 1.0 / input_tensor.shape[3]
                    print 'max scale horizontal:', cur_scene_script_param[3]
                scene_script = make_scene_script(scene_script_name,
                                                 *cur_scene_script_param)
                # param[4] present => centered rendering (presumably a center
                # offset/flag -- verify against make_scene_script).
                center = (cur_scene_script_param[4] is not None)
                scene = generate_one_scene(
                    gan, input_tensor, scene_script,
                    np.array([cur_frame_shape[0], cur_frame_shape[1]]), center)
                partial_screen_scenes.append(scene)
                print 'Done with %s, (scene %d/%d)' % (name, i + 1, n_scenes)
            # Combine per-input renders into one split-screen scene.
            scene = np.concatenate(
                partial_screen_scenes, axis=concat_axis
            ) if len(partial_screen_scenes) > 1 else partial_screen_scenes[0]
            scenes.append(scene)
        # Chain the scenes in time (axis 0 = frame index).
        scene = np.concatenate(scenes, axis=0)
        # ffmpeg settings: high bitrate, 100 fps, 'Input image' caption drawn
        # for the first 2 seconds of the clip.
        outputdict = {
            '-b:v': '30000000',
            '-r': '100.0',
            '-vf': 'drawtext="text=\'Input image\':fontcolor=red:fontsize=48:x=(w-text_w)/2:y=(h-text_h)*7/8:enable=\'between(t,0,2)\'"',
            '-preset': 'slow',
            '-profile:v': 'high444',
            '-level:v': '4.0',
            '-crf': '22'
        }
        # NOTE(review): 'names' here is the loop variable left over from the
        # last scene of this entry -- the caption choice follows the final
        # scene only; confirm that is intended.
        if len(names) > 1:
            # Multi-input clip: plural caption, positioned lower.
            outputdict['-vf'] = 'drawtext="text=\'Input images\':fontcolor=red:fontsize=48:x=(w-text_w)/2:y=(h-text_h)/2.5:enable=\'between(t,0,2)\'"'
            # Falsy last script param => caption effectively disabled
            # (between(t,0,0) never fires).
            # NOTE(review): nesting reconstructed from flattened source --
            # this check may have been a sibling of the outer if; confirm.
            if not scene_script_params[-1]:
                outputdict['-vf'] = 'drawtext="text=\'Input images\':fontcolor=red:fontsize=48:x=(w-text_w)/2:y=(h-text_h)/2.5:enable=\'between(t,0,0)\'"'
        writer = FFmpegWriter(conf.output_dir_path + '/vid%d_%s.mp4' % (i, '_'.join(names)),
                              verbosity=1,
                              outputdict=outputdict)
        # Each rendered frame is written 3 times -- presumably to slow the
        # apparent motion at the fixed 100 fps output rate.
        for frame in scene:
            for j in range(3):
                writer.writeFrame(frame)
        writer.close()