Example #1
    def __init__(self, opt, output_dir):
        super().__init__(output_dir)

        self.opt = opt

        if self.opt.ip:
            visualizer = VisdomVisualizer(env=self.opt.name,
                                          ip=self.opt.ip,
                                          port=self.opt.port)
        else:
            visualizer = None

        self.visualizer = visualizer
        self.model = None
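
A minimal usage sketch, assuming opt is a parsed TestOptions object as in the later examples, and that this __init__ belongs to a runner class whose name is not shown in the excerpt (RunnerClass below is a hypothetical placeholder):

opt = TestOptions().parse()
runner = RunnerClass(opt, opt.output_dir)  # RunnerClass is a hypothetical name
if runner.visualizer is not None:
    print('Visdom connected to %s:%s' % (opt.ip, opt.port))
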
Example #2

def generate_orig_pose_novel_view_result(opt, src_path):
    opt.src_path = src_path
    # set imitator
    viewer = Viewer(opt)

    if opt.ip:
        visualizer = VisdomVisualizer(env=opt.name, ip=opt.ip, port=opt.port)
    else:
        visualizer = None

    if opt.post_tune:
        adaptive_personalize(opt, viewer, visualizer)

    viewer.personalize(opt.src_path, visualizer=visualizer)
    print('\n\t\t\tPersonalization: completed...')

    view_params = opt.view_params
    params = parse_view_params(view_params)

    length = 180                  # number of novel views to render
    delta = 360 / length          # angular step between views, in degrees
    logger = tqdm(range(length))  # progress bar over the views

    src_img_true_name = os.path.splitext(os.path.basename(opt.src_path))[0]
    save_dir = os.path.join(opt.output_dir, src_img_true_name)
    mkdir(os.path.join(save_dir, 'imgs'))

    print('\n\t\t\tSynthesizing {} novel views'.format(length))
    for i in logger:
        params['R'][0] = 0
        params['R'][1] = delta * i / 180.0 * np.pi  # rotate about the y-axis, in radians
        params['R'][2] = 0

        preds = viewer.view(params['R'], params['t'], visualizer=None, name=str(i))
        # pred_outs.append(preds)

        save_img_name = '%s.%d.jpg' % (src_img_true_name, int(delta * i))

        cv2.imwrite('%s/imgs/%s' % (save_dir, save_img_name), tensor2cv2(preds))

    """
    make video
    """
    img_path_list = glob.glob("%s/imgs/*.jpg" % save_dir)
    output_mp4_path = '%s/%s.mp4' % (save_dir, src_img_true_name)
    make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=30)

    clean(opt.output_dir)
    clean(save_dir)
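
The loop above saves each frame with tensor2cv2(preds), a helper that is not shown in the excerpt. A minimal sketch of such a converter, assuming preds is a (C, H, W) float tensor in [-1, 1] (both the layout and the value range are assumptions):

import numpy as np

def tensor2cv2(img_tensor):
    # Assumed helper: map a (C, H, W) or (1, C, H, W) float tensor in
    # [-1, 1] to an H x W x 3 uint8 BGR image that cv2.imwrite accepts.
    img = img_tensor.detach().cpu().numpy()
    if img.ndim == 4:
        img = img[0]
    img = np.transpose(img, (1, 2, 0))                  # CHW -> HWC
    img = ((img + 1) / 2 * 255).clip(0, 255).astype(np.uint8)
    return np.ascontiguousarray(img[:, :, ::-1])        # RGB -> BGR for OpenCV
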
Example #3
    preds = preds.cpu().numpy()

    filepath = os.path.join(output_dir,
                            '{}->{}.png'.format(src_name, tgt_name))
    cv_utils.save_cv2_img(preds, filepath, normalize=True)
    print('\n\t\t\tSaving results to {}'.format(filepath))


if __name__ == "__main__":
    opt = TestOptions().parse()

    # set imitator
    swapper = Swapper(opt=opt)

    if opt.ip:
        visualizer = VisdomVisualizer(env=opt.name, ip=opt.ip, port=opt.port)
    else:
        visualizer = None

    src_path = opt.src_path
    tgt_path = opt.tgt_path

    swapper.swap_setup(src_path, tgt_path)

    if opt.post_tune:
        print('\n\t\t\tPersonalization: meta cycle finetune...')
        swapper.post_personalize(opt.output_dir,
                                 visualizer=None,
                                 verbose=False)

    print('\n\t\t\tPersonalization: completed...')
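
The filename pattern '{}->{}.png' at the top of this example uses src_name and tgt_name, which the excerpt never defines. A plausible derivation from the CLI paths (an assumption, not shown in the source):

import os

src_name = os.path.splitext(os.path.basename(src_path))[0]
tgt_name = os.path.splitext(os.path.basename(tgt_path))[0]
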
Example #4
    # post tune
    print('\n\t\t\tPersonalization: meta cycle finetune...')
    loader = make_dataset(opt)
    imitator.post_personalize(opt.output_dir,
                              loader,
                              visualizer=None,
                              verbose=False)


if __name__ == "__main__":
    # meta imitator
    test_opt = TestOptions().parse()

    if test_opt.ip:
        visualizer = VisdomVisualizer(env=test_opt.name,
                                      ip=test_opt.ip,
                                      port=test_opt.port)
    else:
        visualizer = None

    # set imitator
    imitator = Imitator(test_opt)

    if test_opt.post_tune:
        adaptive_personalize(test_opt, imitator, visualizer)

    imitator.personalize(test_opt.src_path, visualizer=visualizer)
    print('\n\t\t\tPersonalization: completed...')

    if test_opt.save_res:
        pred_output_dir = mkdir(os.path.join(test_opt.output_dir, 'imitators'))
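
Both this example and the novel-view one call mkdir(...), and here its return value is assigned, so the helper presumably returns the path it creates. A minimal sketch under that assumption (the real implementation is not in the excerpt):

import os

def mkdir(path):
    # Create the directory (and any parents) if missing, then return
    # the path so the result of mkdir(...) can be assigned directly.
    os.makedirs(path, exist_ok=True)
    return path
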
Example #5
import h5py
import numpy as np
import os
import os.path as osp
import glob
from tqdm import tqdm
import time
from utils.visdom_visualizer import VisdomVisualizer
from utils.nmr import SMPLRenderer
from networks.hmr import HumanModelRecovery

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

IMG_SIZE = 256
visualizer = VisdomVisualizer(env='visual',
                              ip='http://10.10.10.100',
                              port=31102)


def visual(model, out_dir):
    global visualizer

    render = SMPLRenderer(image_size=IMG_SIZE).cuda()

    texs = render.debug_textures().cuda()[None]

    with h5py.File(osp.join(out_dir, 'smpl_infos.h5'), 'r') as reader:
        cams_crop = reader['cam_crop']
        poses = reader['pose']
        shapes = reader['shape']
        frame_ids = reader['f_id']
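
Note that reader['pose'] and the other lookups return lazy h5py dataset handles, which become invalid once the with-block exits. If the arrays are needed afterwards, they should be copied into memory inside the block, for example:

with h5py.File(osp.join(out_dir, 'smpl_infos.h5'), 'r') as reader:
    poses = reader['pose'][...]  # [...] reads the dataset into a numpy array
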
Example #6
        params[name] = np.array(vals, dtype=np.float32)

    params['R'] = params['R'] / 180 * np.pi
    return params
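
The lines above are only the tail of parse_view_params. A hedged reconstruction of the whole helper, assuming the view string looks like 'R=0,90,0/t=0,0,0' with angles given in degrees (the exact input format is an assumption):

import numpy as np

def parse_view_params(view_params):
    params = {}
    for segment in view_params.split('/'):
        name, vals = segment.split('=')
        params[name] = np.array([float(v) for v in vals.split(',')],
                                dtype=np.float32)
    params['R'] = params['R'] / 180 * np.pi  # degrees -> radians
    return params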


if __name__ == "__main__":

    opt = TestOptions().parse()

    # set imitator
    viewer = Viewer(opt=opt)

    if opt.ip:
        visualizer = VisdomVisualizer(env=opt.name, ip=opt.ip, port=opt.port)
    else:
        visualizer = None

    if opt.post_tune:
        adaptive_personalize(opt, viewer, visualizer)

    viewer.personalize(opt.src_path, visualizer=visualizer)
    print('\n\t\t\tPersonalization: completed...')

    src_path = opt.src_path
    view_params = opt.view_params
    params = parse_view_params(view_params)

    length = 16
    delta = 360 / length