Example #1
0
    def __init__(self, camera_mode='projection', K=None, R=None, t=None,
                 smpl_model_path="/hdd/zen/dev/ActMix/actmix/DataGen/MotionRender/ActmixTorchGenerator/smpl_models/",
                 texture_path="/hdd/zen/data/SURREAL/smpl_data/",
                 dtype=torch.float32, gender="male", image_size=256,
                 device=(torch.device("cuda", index=0) if torch.cuda.is_available() else torch.device("cpu")),
                 background_color=None,
                 ):
        """Build an SMPL renderer with camera, mesh, and texture assets.

        Args:
            camera_mode: 'projection' (explicit K/R/t) or 'look_at'.
            K, R, t: optional batched camera intrinsics/extrinsics; when
                omitted, defaults are used (500px focal length, 512px
                principal point, identity rotation, camera at z=1.5).
            smpl_model_path: directory containing smpl_faces.npy.
            texture_path: directory with SURREAL texture data and
                final_faces_uv_mapping.npy.
            dtype: torch dtype for the UV sampler tensor.
            gender: 'male' or 'female'; selects the active parser/texture set.
            image_size: square output resolution in pixels.
            device: torch device the renderer and tensors are moved to.
            background_color: RGB triple; defaults to black ([0, 0, 0]).

        Raises:
            ValueError: if camera_mode is neither 'look_at' nor 'projection'.
        """
        # None sentinel avoids sharing one mutable list across all instances.
        if background_color is None:
            background_color = [0, 0, 0]

        self.dtype = dtype
        self.camera_mode = camera_mode
        self.image_size = image_size
        self.device = device
        self.gender = gender

        # Default extrinsics: identity rotation, camera 1.5 units along +z.
        vet_rot = sciR.from_euler('zxy', [0, 0, 0], degrees=True).as_matrix()
        fx = 500.0
        fy = 500.0
        cx = 512.0
        cy = 512.0
        # Pinhole intrinsics. Fixed: the [1, 1] entry previously used fx even
        # though fy was defined; values are identical here, so behavior is
        # unchanged, but the matrix is now written correctly.
        def_K = np.array([[fx, 0., cx],
                          [0., fy, cy],
                          [0., 0., 1.]])[None, :]
        def_R = vet_rot[None, :]
        def_t = np.array([0, 0, 1.5])

        if K is None:
            K = def_K
        if R is None:
            R = def_R
        if t is None:
            t = def_t

        if camera_mode == "look_at":
            self.renderer = nr.Renderer(camera_mode='look_at', viewing_angle=30,
                                        image_size=image_size,
                                        background_color=background_color)
            self.set_render_angles(2.5, 0, 0)
        elif camera_mode == "projection":
            self.renderer = nr.Renderer(K=K, R=R, t=t, camera_mode='projection',
                                        image_size=image_size,
                                        light_intensity_ambient=1,
                                        background_color=background_color)
        else:
            # Fail fast: previously an unknown mode crashed later with an
            # AttributeError on self.renderer.
            raise ValueError("Unsupported camera_mode: {}".format(camera_mode))

        self.renderer = self.renderer.to(device)

        # SMPL template faces; a leading axis is added for batched rendering.
        self.faces = np.load(os.path.join(smpl_model_path, "smpl_faces.npy"))[np.newaxis, :, :]
        # Per-face UV mapping reshaped into a (-1, 13776, 4, 2) sampling grid.
        self.faces_uv = np.load(os.path.join(texture_path, 'final_faces_uv_mapping.npy'))
        self.uv_sampler = torch.from_numpy(self.faces_uv.reshape(-1, 2, 2, 2)).to(device).type(self.dtype)
        self.uv_sampler = self.uv_sampler.view(-1, 13776, 2 * 2, 2)

        # Keep both genders resident; the active pair is picked by self.gender.
        self.male_texture_dataset = TextureDataset(texture_path, "male")
        self.male_smpl_p = SMPL_Parser(device=device, gender="male")

        self.female_texture_dataset = TextureDataset(texture_path, "female")
        self.female_smpl_p = SMPL_Parser(device=device, gender="female")

        self.smpl_p = self.male_smpl_p if self.gender == 'male' else self.female_smpl_p
        self.texture_dataset = self.male_texture_dataset if self.gender == 'male' else self.female_texture_dataset
Example #2
0
def render_amass_videos(job_list, image_size, gpu_index):
    """Render each AMASS pose sequence in job_list to a video file.

    Args:
        job_list: iterable of (pose, out_file_name) pairs.
        image_size: square output resolution in pixels.
        gpu_index: CUDA device index; falls back to CPU when CUDA is absent.
    """
    import time  # hoisted: was re-imported on every loop iteration

    device = torch.device('cuda', index=gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_renderer = SMPL_Renderer(device=device, image_size=image_size)
    # smpl_renderer.set_render_angles(3, 89, 0)

    for curr_pose, out_file_name in job_list:
        t_s = time.time()
        # presumably normalizes the root-joint orientation before rendering
        # (named vertizalize_smpl_root) — TODO confirm against its definition
        curr_pose = vertizalize_smpl_root(curr_pose)
        images, _ = smpl_renderer._render_pose_images(curr_pose, smpl_p)
        write_frames_to_video(images, out_file_name)

        dt = time.time() - t_s
        print(out_file_name, "Time {:.4f}".format(dt))
Example #3
0
    # Reset output directories: wipe any previous run, then recreate empty.
    if os.path.isdir("output/rid/same_cloth"):
        shutil.rmtree("output/rid/same_cloth")
    os.makedirs("output/rid/same_cloth")
    if os.path.isdir("output/rid/diff_cloth"):
        shutil.rmtree("output/rid/diff_cloth")
    os.makedirs("output/rid/diff_cloth")

    # One job per identity: (person_id, gender, output subdir, texture_fix flag);
    # texture_fix is False for the same_cloth set and True for diff_cloth.
    jobs = [
        (idt[0], idt[1], "same_cloth", False) for idt in texture_fixed_ids
    ] + [(idt[0], idt[1], "diff_cloth", True) for idt in texture_varies_ids]
    for i in range(len(jobs)):
        p_id, gender, out_dir, texture_fix = jobs[i]
        # NOTE(review): pid_str encodes the loop index i, not p_id — p_id is
        # otherwise unused in the visible lines, so this looks like a bug;
        # confirm the intended naming scheme before changing it.
        pid_str = "".join(["{:03d}".format(i), str(gender)[0]])
        print("Generating for: ", pid_str)

        smpl_parser = SMPL_Parser(device=device, gender=gender)
        smpl_render = SMPL_Renderer(device=device, gender=gender)

        pose_list = []
        bg_list = []
        # For each camera angle, collect up to pose_num poses; bg_list records
        # which angle index/value produced each collected pose.
        for a in range(len(angles)):
            smpl_render.set_render_angles(2.5, angles[a], 0)
            counter = 0
            for posese, trans in seq_generator:
                # print(pose.shape, trans.shape)
                posese, trans = torch.tensor(posese).to(device).type(
                    dtype), torch.tensor(trans).to(device).type(dtype)
                pose_list.append(posese[0])
                counter += 1
                bg_list.append((a, angles[a]))
                if counter >= pose_num:
Example #4
0
import argparse

from zen_renderer.smpl_parser import SMPL_Parser
from zen_renderer.dataloaders.dataset_amass import Dataset_AMASS
from zen_renderer.renderer.smpl_renderer import SMPL_Renderer


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    # NOTE(review): torch.FloatTensor is a tensor *type*, not a torch.dtype;
    # unused in the visible lines — presumably consumed via Tensor.type(dtype)
    # further down. Confirm before removing.
    dtype = torch.FloatTensor

    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    smpl_p = SMPL_Parser(device = device) 
    smpl_render = SMPL_Renderer(device = device)
    smpl_render.set_render_angles(3, 150, 0)
    
    # Load original vs. generated motion sequences produced by the DSF step.
    dsf_res_path = "/hdd/zen/dev/ActMix/actmix/DataGen/MotionSyn/gen.pk"
    dsf_ress = pk.load(open(dsf_res_path, "rb"))
    org_seq = dsf_ress["org_seq"]
    gen_seq =dsf_ress["gen_seq"]
    # One-hot labels -> integer class ids.
    label = np.argmax(dsf_ress["label"], 1)
    # Regroup generated sequences so axis 0 aligns with org_seq and axis 1
    # counts the generated variants per original sequence.
    gen_seq = gen_seq.reshape(org_seq.shape[0], gen_seq.shape[0]//org_seq.shape[0], org_seq.shape[1], org_seq.shape[2])
    
    print(org_seq.shape)
    print(gen_seq.shape)
    print(label.shape)

    for i in range(org_seq.shape[0]):
Example #5
0
        open("/hdd/zen/data/VIBE_NTU/ntu_class_labels.pk", "rb"))
    idx = np.argmax(label)
    return label_map[idx]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    # NOTE(review): torch.FloatTensor is a tensor *type*, not a torch.dtype;
    # unused in the visible lines — confirm before removing.
    dtype = torch.FloatTensor

    device = torch.device('cuda',
                          index=args.gpu_index) if torch.cuda.is_available(
                          ) else torch.device('cpu')
    smpl_p = SMPL_Parser(device=device)
    smpl_renderer = SMPL_Renderer(device=device)
    smpl_renderer.set_render_angles(2, 89, 0)

    ################## Load BG images ##################
    # bg_base = "/hdd/zen/data/lsun/imgs/"
    # bg_imgs = [ os.path.join(bg_base, i) for i in os.listdir(bg_base) if i.endswith("jpg")]
    # Fixed set of NTU RGB frames used as render backgrounds.
    bg_imgs = [
        "/hdd/zen/data/NTU/images/S010C002P025R001A030_rgb/frame000001.jpg",
        "/hdd/zen/data/NTU/images/S012C003P017R001A048_rgb/frame000001.jpg",
        "/hdd/zen/data/NTU/images/S006C003P007R001A041_rgb/frame000001.jpg",
    ]

    data_path = "/hdd/zen/data/ActBound/Aug/gen_aug_6d.pk"

    # NOTE(review): the file handle from open() is never closed — prefer a
    # with-block. Left as-is since this chunk is truncated mid-script.
    pose_data = pk.load(open(data_path, "rb"))
Example #6
0
    write_frames_to_video(output_frames, ouput_dir)
    torch.cuda.empty_cache()




if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_index', type=int, default=0)
    args = parser.parse_args()

    # NOTE(review): torch.FloatTensor is a tensor *type*, not a torch.dtype;
    # unused in the visible lines — confirm before removing.
    dtype = torch.FloatTensor

    device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
    print(device)
    smpl_p = SMPL_Parser(device = device)
    smpl_renderer = SMPL_Renderer(device = device, image_size=512)
    smpl_renderer.set_render_angles(3, 89, 0)
    
    
    ################## NTU ##################
    # Disabled batch job: rendered VIBE/HMMR comparison videos for every clip
    # of one NTU action class. Kept for reference; this chunk is truncated, so
    # the currently active code presumably follows below.
    # from glob import glob
    # class_num = "A009"
    # vid_names = glob("/hdd/zen/data/HMMR/results/ntu/{}/*.avi".format(class_num))
    # for vid_name in vid_names:
    #     vid_name = vid_name.split("/")[-1][:-4]
    #     vibe_data_path = "/hdd/zen/data/VIBE_NTU/{}/{}.pkl".format(class_num, vid_name)
    #     hmmr_path = "/hdd/zen/data/HMMR/results/ntu/{}/{}.avi/hmmr_output/hmmr_output.pkl".format(class_num, vid_name)
    #     vid_path = "/hdd/zen/data/NTU/videos/{}/{}.avi".format(class_num, vid_name)
    #     render_compare_video(vibe_data_path, hmmr_path, vid_path)