import glob
import os

import cv2
import numpy as np
from tqdm import tqdm

# NOTE: project-specific helpers used below (Imitator, Viewer, VisdomVisualizer,
# adaptive_personalize, load_mixamo_smpl, create_T_pose_novel_view_smpl,
# parse_view_params, tensor2cv2, make_video, mkdir, clean, and the
# MIXAMO_*_ACTION_IDX_LIST constants) are assumed to be imported from the
# surrounding repository.


def generate_actor_result(test_opt, src_img_path):
    imitator = Imitator(test_opt)
    src_img_name = os.path.split(src_img_path)[-1][:-4]
    test_opt.src_path = src_img_path

    # personalize the generator to the source image
    if test_opt.post_tune:
        adaptive_personalize(test_opt, imitator, visualizer=None)
    else:
        imitator.personalize(test_opt.src_path, visualizer=None)

    action_list_dict = {
        'dance': MIXAMO_DANCE_ACTION_IDX_LIST,
        'base': MIXAMO_BASE_ACTION_IDX_LIST,
        'acrobat': MIXAMO_ACROBAT_ACTION_IDX_LIST
    }

    for action_type in ['dance', 'base', 'acrobat']:
        for i in action_list_dict[action_type]:
            # render each action's frames into a fresh prediction directory
            if test_opt.output_dir:
                pred_output_dir = os.path.join(test_opt.output_dir, 'mixamo_preds')
                if os.path.exists(pred_output_dir):
                    os.system("rm -r %s" % pred_output_dir)
                mkdir(pred_output_dir)
            else:
                pred_output_dir = None

            print(pred_output_dir)
            tgt_smpls = load_mixamo_smpl(i)

            imitator.inference_by_smpls(tgt_smpls, cam_strategy='smooth',
                                        output_dir=pred_output_dir, visualizer=None)

            save_dir = os.path.join(test_opt.output_dir, src_img_name, action_type)
            mkdir(save_dir)

            # assemble the rendered frames into an mp4 for this action
            output_mp4_path = os.path.join(save_dir, 'mixamo_%.4d_%s.mp4' % (i, src_img_name))
            img_path_list = sorted(glob.glob('%s/*.jpg' % pred_output_dir))
            make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=30)
def generate_T_pose_novel_view_result(test_opt, src_img_path):
    imitator = Imitator(test_opt)
    src_img_name = os.path.split(src_img_path)[-1][:-4]
    test_opt.src_path = src_img_path

    # personalize the generator to the source image
    if test_opt.post_tune:
        adaptive_personalize(test_opt, imitator, visualizer=None)
    else:
        imitator.personalize(test_opt.src_path, visualizer=None)

    # render T-pose novel-view frames into a fresh prediction directory
    if test_opt.output_dir:
        pred_output_dir = os.path.join(test_opt.output_dir, 'T_novel_view_preds')
        if os.path.exists(pred_output_dir):
            os.system("rm -r %s" % pred_output_dir)
        mkdir(pred_output_dir)
    else:
        pred_output_dir = None

    print(pred_output_dir)
    tgt_smpls = create_T_pose_novel_view_smpl()

    imitator.inference_by_smpls(tgt_smpls, cam_strategy='smooth',
                                output_dir=pred_output_dir, visualizer=None)

    save_dir = os.path.join(test_opt.output_dir, src_img_name)
    mkdir(save_dir)

    output_mp4_path = os.path.join(save_dir, 'T_novel_view_%s.mp4' % src_img_name)
    img_path_list = sorted(glob.glob('%s/*.jpg' % pred_output_dir))
    make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=30)

    # clean up the remaining intermediate outputs
    clean(test_opt.output_dir)
def generate_orig_pose_novel_view_result(opt, src_path):
    opt.src_path = src_path

    # set up the viewer (novel-view synthesis of the original pose)
    viewer = Viewer(opt)

    if opt.ip:
        visualizer = VisdomVisualizer(env=opt.name, ip=opt.ip, port=opt.port)
    else:
        visualizer = None

    if opt.post_tune:
        adaptive_personalize(opt, viewer, visualizer)

    viewer.personalize(opt.src_path, visualizer=visualizer)
    print('\n\t\t\tPersonalization: completed...')

    view_params = opt.view_params
    params = parse_view_params(view_params)

    length = 180
    delta = 360 / length  # degrees of rotation per frame
    logger = tqdm(range(length))

    src_img_true_name = os.path.split(opt.src_path)[-1][:-4]
    save_dir = os.path.join(opt.output_dir, src_img_true_name)
    mkdir(os.path.join(save_dir, 'imgs'))

    print('\n\t\t\tSynthesizing {} novel views'.format(length))
    for i in logger:
        # rotate around the vertical axis only; convert degrees to radians
        params['R'][0] = 0
        params['R'][1] = delta * i / 180.0 * np.pi
        params['R'][2] = 0

        preds = viewer.view(params['R'], params['t'], visualizer=None, name=str(i))
        # pred_outs.append(preds)

        # zero-pad the rotation angle so a lexicographic sort keeps frames in order
        save_img_name = '%s.%03d.jpg' % (os.path.split(opt.src_path)[-1], delta * i)
        cv2.imwrite('%s/imgs/%s' % (save_dir, save_img_name), tensor2cv2(preds))

    # make video
    img_path_list = sorted(glob.glob('%s/imgs/*.jpg' % save_dir))
    output_mp4_path = '%s/%s.mp4' % (save_dir, src_img_true_name)
    make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=30)

    clean(opt.output_dir)
    clean(save_dir)
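# A minimal, hypothetical driver sketch showing how the three generate_* helpers
# above might be invoked from a script entry point. `TestOptions` and the exact
# option fields (src_path, output_dir, post_tune, view_params, ...) are
# assumptions about the surrounding repository's option parser, not part of the
# original code.
if __name__ == '__main__':
    test_opt = TestOptions().parse()   # assumed option parser
    src_img = test_opt.src_path        # source identity image chosen via options

    # Mixamo action imitation, a T-pose turntable, and a novel-view sweep of the
    # original pose, all rendered for the same source image.
    generate_actor_result(test_opt, src_img)
    generate_T_pose_novel_view_result(test_opt, src_img)
    generate_orig_pose_novel_view_result(test_opt, src_img)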
# restore weights
g.saver.restore(g.sess, opt.weight_path)

sample = opt.sample
noise_seed = opt.noise_seed

if conf.model == 'biggan':
    graph_inputs = graph_util.graph_input(g, sample + 1, seed=noise_seed,
                                          category=opt.category, trunc=opt.truncation)
    filename = os.path.join(output_dir, '{}_seed{}_sample{}.mp4'.format(
        opt.category, noise_seed, sample))
else:
    graph_inputs = graph_util.graph_input(g, sample + 1, seed=noise_seed)
    filename = os.path.join(output_dir, 'seed{}_sample{}.mp4'.format(
        noise_seed, sample))

if opt.filename:
    # override default filename
    filename = opt.filename

# get the appropriate sample from the inputs
for k, v in graph_inputs.items():
    if isinstance(v, np.ndarray) and v.shape[0] == sample + 1:
        graph_inputs[k] = v[sample][None]

# create video
video.make_video(g, graph_inputs, filename, num_frames=opt.num_frames,
                 max_alpha=opt.max_alpha, min_alpha=opt.min_alpha,
                 channel=opt.channel)
if test_opt.post_tune:
    adaptive_personalize(test_opt, imitator, visualizer)

imitator.personalize(test_opt.src_path, visualizer=visualizer)
print('\n\t\t\tPersonalization: completed...')

if test_opt.save_res:
    pred_output_dir = mkdir(os.path.join(test_opt.output_dir, 'imitators'))
    pred_output_dir = clear_dir(pred_output_dir)
else:
    pred_output_dir = None

print('\n\t\t\tImitating `{}`'.format(test_opt.tgt_path))
tgt_paths = scan_tgt_paths(test_opt.tgt_path, itv=1)
imitator.inference(tgt_paths, tgt_smpls=None, cam_strategy='smooth',
                   output_dir=pred_output_dir, visualizer=visualizer, verbose=True)

src_img_name = os.path.split(test_opt.src_path)[-1]
if test_opt.save_res:
    # '%.4d' requires an integer index
    output_mp4_path = os.path.join(
        test_opt.save_res, 'test_%.4d_%s.mp4' % (1, src_img_name))
    img_path_list = sorted(glob.glob('%s/*.jpg' % pred_output_dir))
    make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=30)
visualizer = None

# set imitator
imitator = Imitator(test_opt)

if test_opt.post_tune:
    adaptive_personalize(test_opt, imitator, visualizer)

imitator.personalize(test_opt.src_path, visualizer=visualizer)
print('\n\t\t\tPersonalization: completed...')

if test_opt.save_res:
    pred_output_dir = mkdir(os.path.join(test_opt.output_dir, 'imitators'))
    pred_output_dir = clear_dir(pred_output_dir)
else:
    pred_output_dir = None

print('\n\t\t\tImitating `{}`'.format(test_opt.tgt_path))
tgt_paths = scan_tgt_paths(test_opt.tgt_path, itv=1)
imitator.inference(tgt_paths, tgt_smpls=None, cam_strategy='smooth',
                   output_dir=pred_output_dir, visualizer=visualizer, verbose=True)

# name the final video after the last two directory components of output_dir,
# i.e. <output_dir>/<parent>_<leaf>.mp4
parts = test_opt.output_dir.rstrip('/').split('/')
mp4_path = os.path.join(test_opt.output_dir, '%s_%s.mp4' % (parts[-2], parts[-1]))
imgs = sorted(glob.glob('%s/*.jpg' % pred_output_dir))
make_video(mp4_path, imgs, save_frames_dir=None, fps=30)