Example #1
import os

import torch
from tqdm import tqdm
from zen_renderer.renderer.smpl_renderer import SMPL_Renderer


def render_job_videos(job_list, image_size, gpu_index):
    # Render one SMPL pose video per (pose, output path) job, skipping files that already exist.
    device = torch.device('cuda', index=gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_renderer = SMPL_Renderer(camera_mode="look_at", device=device, image_size=image_size)
    for curr_pose, out_file_name in tqdm(job_list):
        if not os.path.isfile(out_file_name):
            curr_pose = torch.tensor(curr_pose)
            smpl_renderer.render_pose_vid(curr_pose, out_file_name, random_camera=False, random_shape=True, frame_chunk=50)
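
A hedged usage sketch for render_job_videos: the poses.pkl path, the output naming scheme, and the two-GPU fan-out below are illustrative assumptions, not part of the original code.

import pickle as pk
from multiprocessing import get_context

if __name__ == "__main__":
    poses = pk.load(open("poses.pkl", "rb"))  # placeholder path (assumption)
    jobs = [(p, "output/videos/{:04d}.mp4".format(i)) for i, p in enumerate(poses)]
    num_gpus = 2  # assumption: one worker process per GPU
    ctx = get_context("spawn")  # spawn avoids forking an initialized CUDA context
    workers = [ctx.Process(target=render_job_videos, args=(jobs[g::num_gpus], 400, g))
               for g in range(num_gpus)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()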
Example #2
import time

import torch

from zen_renderer.smpl_parser import SMPL_Parser
from zen_renderer.renderer.smpl_renderer import SMPL_Renderer


def render_amass_videos(job_list, image_size, gpu_index):
    device = torch.device('cuda', index=gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_renderer = SMPL_Renderer(device=device, image_size=image_size)
    # smpl_renderer.set_render_angles(3, 89, 0)

    # vertizalize_smpl_root and write_frames_to_video come from the repo's
    # utilities; their import paths are not shown in the source.
    for curr_pose, out_file_name in job_list:
        t_s = time.time()
        curr_pose = vertizalize_smpl_root(curr_pose)
        images, _ = smpl_renderer._render_pose_images(curr_pose, smpl_p)
        write_frames_to_video(images, out_file_name)
        dt = time.time() - t_s
        print(out_file_name, "Time {:.4f}".format(dt))
Example #3
        shutil.rmtree("output/rid/same_cloth")
    os.makedirs("output/rid/same_cloth")
    if os.path.isdir("output/rid/diff_cloth"):
        shutil.rmtree("output/rid/diff_cloth")
    os.makedirs("output/rid/diff_cloth")

    jobs = [
        (idt[0], idt[1], "same_cloth", False) for idt in texture_fixed_ids
    ] + [(idt[0], idt[1], "diff_cloth", True) for idt in texture_varies_ids]
    for i, (p_id, gender, out_dir, texture_fix) in enumerate(jobs):
        # Use the person id (not the loop index) to build ids like "007m".
        pid_str = "".join(["{:03d}".format(p_id), str(gender)[0]])
        print("Generating for: ", pid_str)

        smpl_parser = SMPL_Parser(device=device, gender=gender)
        smpl_render = SMPL_Renderer(device=device, gender=gender)

        pose_list = []
        bg_list = []
        for a in range(len(angles)):
            smpl_render.set_render_angles(2.5, angles[a], 0)
            counter = 0
            for poses, trans in seq_generator:
                # print(poses.shape, trans.shape)
                poses = torch.tensor(poses).to(device).type(dtype)
                trans = torch.tensor(trans).to(device).type(dtype)
                pose_list.append(poses[0])
                counter += 1
                bg_list.append((a, angles[a]))
                if counter >= pose_num:
                    break
Example #4
def read_data(amass_data, set, debug=False, max_samples=-1):

    dataset = {
        'vid_name': [],
        'frame_id': [],
        'joints3D': [],
        'joints2D': [],
        'shape': [],
        'pose': [],
        'bbox': [],
        'img_name': [],
        'features': [],
        'valid': [],
    }
    device = (
        torch.device("cuda", index=0)
        if torch.cuda.is_available()
        else torch.device("cpu")
    )
    model = spin.get_pretrained_hmr()
    smpl_renderer = SMPL_Renderer(device=device, image_size=400, camera_mode="look_at")
    for i, (k, v) in tqdm(enumerate(amass_data)):
        vid_name, frame_id, j3d, j2d, shape, pose, bbox, img_name, features, valid = amass_to_dataset(k, v, set = set, smpl_renderer = smpl_renderer)

        if vid_name is not None:
            bbox_params, time_pt1, time_pt2 = get_smooth_bbox_params(j2d, vis_thresh=VIS_THRESH, sigma=8)

            c_x = bbox_params[:, 0]
            c_y = bbox_params[:, 1]
            scale = bbox_params[:, 2]
            # Turn the smoothed scale into a square box with a 10% margin.
            w = h = 150. / scale
            w = h = h * 1.1
            bbox = np.vstack([c_x, c_y, w, h]).T
            # print('campose', campose_valid[time_pt1:time_pt2].shape)

            img_paths_array = img_name
            dataset['vid_name'].append(vid_name)
            dataset['frame_id'].append(frame_id)
            dataset['img_name'].append(img_name)
            dataset['joints3D'].append(j3d)
            dataset['joints2D'].append(j2d)
            dataset['shape'].append(shape)
            dataset['pose'].append(pose)
            dataset['bbox'].append(bbox)
            dataset['valid'].append(valid)

            features = extract_features(model, img_paths_array, bbox,
                                        kp_2d=j2d[time_pt1:time_pt2], debug=debug, dataset='3dpw', scale=1.2)
            dataset['features'].append(features)
                
        if max_samples != -1 and i > max_samples:
            break
    for k in dataset.keys():
        dataset[k] = np.concatenate(dataset[k])
        print(k, dataset[k].shape)

    # Filter out keypoints
    indices_to_use = np.where((dataset['joints2D'][:, :, 2] > VIS_THRESH).sum(-1) > MIN_KP)[0]
    for k in dataset.keys():
        dataset[k] = dataset[k][indices_to_use]

    return dataset
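
A hedged usage sketch for read_data: the annotation path and the joblib round-trip are illustrative assumptions; only the function signature comes from the code above.

import joblib

if __name__ == "__main__":
    amass_data = joblib.load("data/amass_annotations.pkl")  # placeholder path (assumption)
    db = read_data(amass_data, set="train", debug=False, max_samples=-1)
    joblib.dump(db, "data/amass_train_db.pt")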
Example #5
import argparse
import pickle as pk

import numpy as np
import torch

from zen_renderer.smpl_parser import SMPL_Parser
from zen_renderer.dataloaders.dataset_amass import Dataset_AMASS
from zen_renderer.renderer.smpl_renderer import SMPL_Renderer


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    dtype = torch.FloatTensor

    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_render = SMPL_Renderer(device=device)
    smpl_render.set_render_angles(3, 150, 0)
    
    dsf_res_path = "/hdd/zen/dev/ActMix/actmix/DataGen/MotionSyn/gen.pk"
    dsf_ress = pk.load(open(dsf_res_path, "rb"))
    org_seq = dsf_ress["org_seq"]
    gen_seq =dsf_ress["gen_seq"]
    label = np.argmax(dsf_ress["label"], 1)
    gen_seq = gen_seq.reshape(org_seq.shape[0], gen_seq.shape[0]//org_seq.shape[0], org_seq.shape[1], org_seq.shape[2])
    
    print(org_seq.shape)
    print(gen_seq.shape)
    print(label.shape)

    for i in range(org_seq.shape[0]):
        org_seqs = org_seq[i]
Example #6
    idx = np.argmax(label)
    return label_map[idx]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    dtype = torch.FloatTensor

    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_renderer = SMPL_Renderer(device=device)
    smpl_renderer.set_render_angles(2, 89, 0)

    ################## Load BG images ##################
    # bg_base = "/hdd/zen/data/lsun/imgs/"
    # bg_imgs = [ os.path.join(bg_base, i) for i in os.listdir(bg_base) if i.endswith("jpg")]
    bg_imgs = [
        "/hdd/zen/data/NTU/images/S010C002P025R001A030_rgb/frame000001.jpg",
        "/hdd/zen/data/NTU/images/S012C003P017R001A048_rgb/frame000001.jpg",
        "/hdd/zen/data/NTU/images/S006C003P007R001A041_rgb/frame000001.jpg",
    ]

    data_path = "/hdd/zen/data/ActBound/Aug/gen_aug_6d.pk"

    pose_data = pk.load(open(data_path, "rb"))
    for k, v in pose_data.items():
Example #7
    # # print(amass_dataset[0][0]["trans"])
    # pose_list = []
    # num_poses = 0
    # for poses, trans in seq_generator:
    #     # print(pose.shape, trans.shape)
    #     curr_pose = vertizalize_smpl_root(poses[0])
    #     smpl_renderer.render_pose_vid(curr_pose, smpl_p, "output/smpl/test.mp4")

    #     num_poses += 1
    #     if num_poses > 10:
    #         break
        
    #################### AMASS 6d ####################
    
    amass_6d = pk.load(open("/hdd/zen/data/ActBound/AMASS/amass_take3.pkl", "rb"))
    smpl_renderer = SMPL_Renderer(camera_mode="look_at", device = device, image_size = image_size)
    for idx, (k, v) in enumerate(amass_6d.items()):
        curr_pose = v['pose']
        curr_pose = convert_orth_6d_to_aa(torch.tensor(curr_pose).float())
        output_path = "/hdd/zen/data/ActmixGenenerator/output/amass/test{}.mp4".format(idx)
        smpl_renderer.render_pose_vid(curr_pose, output_path, random_camera=False, random_shape=True)
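
The convert_orth_6d_to_aa call above presumably inverts the continuous 6D rotation representation of Zhou et al. (CVPR 2019). Below is a minimal sketch of the forward 6D-to-rotation-matrix step, assuming the standard Gram-Schmidt construction; orth_6d_to_rotmat is an illustrative name, not the repo's API.

import torch
import torch.nn.functional as F

def orth_6d_to_rotmat(x6d):
    # x6d: (..., 6) holding the first two (possibly unnormalized) columns of a rotation matrix.
    a1, a2 = x6d[..., 0:3], x6d[..., 3:6]
    b1 = F.normalize(a1, dim=-1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)  # Gram-Schmidt step
    b3 = torch.cross(b1, b2, dim=-1)  # completes a right-handed orthonormal frame
    return torch.stack([b1, b2, b3], dim=-1)  # (..., 3, 3)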

    #################### VIBE ####################
    # import joblib
    # ntu_data_path = "/hdd/zen/data/VIBE_NTU/A029_rnd/S008C001P033R001A029_rgb.pkl"
    # ntu_data = joblib.load(open(ntu_data_path, "rb"))
    # key = list(ntu_data.keys())[0]
    # pose = ntu_data[key]['pose']
    # # pose[:, :3] = np.tile([1.20919958, -1.20919958, -1.20919958], [pose.shape[0], 1])
    # pose = vertizalize_smpl_root(torch.tensor(pose).double())
    # # pose[:, :3] = np.tile([-1.74393425e-16, -2.22144147e+00, -2.22144147e+00], [pose.shape[0], 1])
Example #8
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    parser.add_argument('--output_dir', type=str)
    parser.add_argument('--image_size', type=int, default=400)
    args = parser.parse_args()

    output_dir = args.output_dir
    image_size = args.image_size
    os.makedirs(output_dir, exist_ok=True)
    dtype = torch.FloatTensor
    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_render = SMPL_Renderer(device=device,
                                image_size=image_size,
                                camera_mode="look_at")

    vae_res_path = "/hdd/zen/dev/ActMix/actmix/DataGen/MotionSyn/res_rf.pk"

    vae_ress = pk.load(open(vae_res_path, "rb"))
    gt_seqs = vae_ress["gt_seq"]
    fk_seqs = vae_ress["fake_seq"]
    rec_seqs = vae_ress['rec']
    rec_rf_seqs = vae_ress["rec_rf"]
    labels = vae_ress["label"]

    print(gt_seqs.shape, rec_seqs.shape, labels.shape, rec_rf_seqs.shape)

    ################## Rendering #################
    with torch.no_grad():
Example #9
    torch.cuda.empty_cache()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    dtype = torch.FloatTensor

    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    print(device)
    smpl_p = SMPL_Parser(device=device)
    smpl_renderer = SMPL_Renderer(device=device, image_size=512)
    smpl_renderer.set_render_angles(3, 89, 0)
    
    
    ################## NTU ##################
    # from glob import glob
    # class_num = "A009"
    # vid_names = glob("/hdd/zen/data/HMMR/results/ntu/{}/*.avi".format(class_num))
    # for vid_name in vid_names:
    #     vid_name = vid_name.split("/")[-1][:-4]
    #     vibe_data_path = "/hdd/zen/data/VIBE_NTU/{}/{}.pkl".format(class_num, vid_name)
    #     hmmr_path = "/hdd/zen/data/HMMR/results/ntu/{}/{}.avi/hmmr_output/hmmr_output.pkl".format(class_num, vid_name)
    #     vid_path = "/hdd/zen/data/NTU/videos/{}/{}.avi".format(class_num, vid_name)
    #     render_compare_video(vibe_data_path, hmmr_path, vid_path)
    
    ################## Self vids ##################
Example #10
    label_map = {0: "fake", 1: "real"}
    idx = np.argmax(label)
    return label_map[idx]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    dtype = torch.FloatTensor
    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_render = SMPL_Renderer(device=device, image_size=512)
    smpl_render.set_render_angles(3, 89, 0)

    vae_res_path = "/hdd/zen/dev/ActMix/actmix/DataGen/MotionSyn/smpl_vis.pk"

    vae_ress = pk.load(open(vae_res_path, "rb"))
    org_seqs = vae_ress["org_seq"]
    rec_seqs = vae_ress["rec_seq"]
    labels = vae_ress["label"]
    print(org_seqs.shape, rec_seqs.shape, labels.shape)

    ################## Rendering ##################
    for k in range(org_seqs.shape[0]):
        org_seq = org_seqs[k]
        rec_seq = rec_seqs[k]
        label = labels[k]