Example #1
    def vis_results(dorig, vposer_model, bm, imgpath):
        import numpy as np
        import trimesh

        from human_body_prior.mesh import MeshViewer
        from human_body_prior.tools.omni_tools import copy2cpu as c2c
        from human_body_prior.tools.omni_tools import colors
        from human_body_prior.tools.omni_tools import apply_mesh_tranfsormations_
        from human_body_prior.tools.visualization_tools import imagearray2file
        from human_body_prior.train.vposer_smpl import VPoser

        view_angles = [0, 180, 90, -90]
        imw, imh = 800, 800
        batch_size = len(dorig['pose_aa'])

        mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
        mv.render_wireframe = True

        dorig_aa = dorig['pose_aa']

        # Reconstruct the original poses through VPoser (encode-decode round trip).
        prec_aa = vposer_model(dorig_aa, output_type='aa')['pose_aa'].view(batch_size, -1)

        # Sample novel poses from the VPoser latent space (unwrap DataParallel if present).
        if hasattr(vposer_model, 'module'):
            pgen_aa = vposer_model.module.sample_poses(num_poses=batch_size, output_type='aa')
        else:
            pgen_aa = vposer_model.sample_poses(num_poses=batch_size, output_type='aa')

        pgen_aa = pgen_aa.view(batch_size,-1)
        dorig_aa = dorig_aa.view(batch_size, -1)

        images = np.zeros([len(view_angles), batch_size, 1, imw, imh, 3])
        images_gen = np.zeros([len(view_angles), batch_size, 1, imw, imh, 3])
        for cId in range(0, batch_size):
            # Pose the body model with the original, reconstructed, and sampled poses
            # and build a colored mesh for each.
            bm.pose_body.data[:] = bm.pose_body.new(dorig_aa[cId])
            orig_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['grey'], (6890, 1)))

            bm.pose_body.data[:] = bm.pose_body.new(prec_aa[cId])
            rec_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['blue'], (6890, 1)))

            bm.pose_body.data[:] = bm.pose_body.new(pgen_aa[cId])
            gen_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['blue'], (6890, 1)))

            all_meshes = [orig_body_mesh, rec_body_mesh, gen_body_mesh]

            for rId, angle in enumerate(view_angles):
                # Rotate the meshes to the current view angle, render, then undo the rotation below.
                if angle != 0: apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(angle), (0, 1, 0)))
                mv.set_meshes([orig_body_mesh, rec_body_mesh], group_name='static')
                images[rId, cId, 0] = mv.render()
                mv.set_meshes([gen_body_mesh], group_name='static')
                images_gen[rId, cId, 0] = mv.render()

                if angle != 0: apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(-angle), (0, 1, 0)))

        imagearray2file(images, imgpath)
        imagearray2file(images_gen, imgpath.replace('.png','_gen.png'))
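A hypothetical way to call the helper above, assuming the v1 human_body_prior API (load_vposer, BodyModel); the paths, batch size, and pose-tensor shape are illustrative only:

import torch

from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.tools.model_loader import load_vposer  # v1 API; module path assumed

# Illustrative paths -- point these at a trained VPoser experiment dir and a SMPL-H model file.
vposer_model, _ = load_vposer('path/to/vposer_expr_dir', vp_model='snapshot')
bm = BodyModel(bm_path='path/to/smplh/neutral/model.npz', num_betas=10)

# A batch of axis-angle body poses; the exact tensor shape should follow whatever the
# data loader provides under 'pose_aa' (here assumed to be N x 1 x 21 x 3).
dorig = {'pose_aa': torch.zeros([5, 1, 21, 3])}
vis_results(dorig, vposer_model, bm, imgpath='vposer_recon_vs_samples.png')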
Example #2
import os

import numpy as np
import trimesh

# The import paths below follow Example #1; makepath and smpl_params2ply are assumed to live in
# human_body_prior.tools.omni_tools and human_body_prior.tools.visualization_tools respectively.
from human_body_prior.mesh import MeshViewer
from human_body_prior.tools.omni_tools import copy2cpu as c2c, colors, apply_mesh_tranfsormations_, makepath
from human_body_prior.tools.visualization_tools import imagearray2file, smpl_params2ply


def dump_vposer_samples(bm, pose_body, out_imgpath=False, save_ply=False):
    '''
    Render each body pose with the given body model from several view angles.

    :param bm: the BodyModel instance to pose
    :param pose_body: Nx63 axis-angle body poses to apply to the body
    :param out_imgpath: output png path; if given, the rendered image grid and the poses are saved
    :param save_ply: if True, also dump the posed bodies as ply files
    :return: the rendered images as a numpy array
    '''

    view_angles = [0, 90, -90]
    imw, imh = 400, 400
    mv = MeshViewer(width=imw, height=imh, use_offscreen=True)

    images = np.zeros([len(view_angles), len(pose_body), 1, imw, imh, 3])
    for cId in range(0, len(pose_body)):

        bm.pose_body.data[:] = bm.pose_body.new(pose_body[cId].reshape(-1))

        body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]),
                                    faces=c2c(bm.f),
                                    vertex_colors=np.tile(
                                        colors['grey'], (6890, 1)))

        for rId, angle in enumerate(view_angles):
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(angle), (0, 1, 0)))
            mv.set_meshes([body_mesh], group_name='static')
            images[rId, cId, 0] = mv.render()
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(-angle), (0, 1, 0)))

    if out_imgpath:
        imagearray2file(images, out_imgpath)

        np.savez(out_imgpath.replace('.png', '.npz'), pose=pose_body)

        if save_ply:
            im_id = os.path.basename(out_imgpath).split('.')[0]
            out_dir = makepath(
                os.path.join(os.path.dirname(out_imgpath), '%s_ply' % im_id))
            smpl_params2ply(bm, out_dir=out_dir, pose_body=pose_body)

        print('Saved image: %s' % out_imgpath)

    return images
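A minimal usage sketch, assuming a trained VPoser model (vposer_model) and a BodyModel instance (bm) are already in scope; sample_poses follows the call seen in Example #1, and the output filename is illustrative:

# Draw a few body poses from the VPoser prior and render/save them.
num_poses = 5
sampled_aa = vposer_model.sample_poses(num_poses=num_poses, output_type='aa')
pose_body = c2c(sampled_aa).reshape(num_poses, -1)  # Nx63 axis-angle, as the docstring expects
images = dump_vposer_samples(bm, pose_body, out_imgpath='vposer_samples.png')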
Example #3
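The snippet below comes from an AMASS-style visualization loop; it relies on surrounding setup roughly like the following sketch (variable names match the snippet; the paths, T, and step values are illustrative, and the points_to_spheres import path is an assumption):

import numpy as np
import torch
import trimesh
from tqdm import tqdm

from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.mesh import MeshViewer
from human_body_prior.mesh.sphere import points_to_spheres  # import path assumed
from human_body_prior.tools.omni_tools import copy2cpu as c2c, colors, apply_mesh_tranfsormations_

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
bdata = np.load('path/to/amass_sequence.npz')  # npz with 'poses', 'betas', 'trans', 'marker_data'
bm = BodyModel(bm_path='path/to/smplh/model.npz', num_betas=10).to(device)
faces = c2c(bm.f)

root_orient = torch.Tensor(bdata['poses'][:, :3]).to(device)   # global orientation
pose_body = torch.Tensor(bdata['poses'][:, 3:66]).to(device)   # 21 body joints
pose_hand = torch.Tensor(bdata['poses'][:, 66:]).to(device)    # finger articulation
betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(device)

imw, imh = 400, 400
T, step = 10, 30  # number of frames to keep and the stride through the sequence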
mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
images = np.zeros([2, 3, T, imh, imw, 3], dtype=np.float32)

count = 0
for fId in tqdm(range(1, bdata['poses'].shape[0], step)):
    if count >= T: break  # stop once T frames have been collected
    body = bm(pose_body=pose_body[fId:fId+1], pose_hand=pose_hand[fId:fId+1], betas=betas, root_orient=root_orient[fId:fId+1])

    body_mesh = trimesh.Trimesh(vertices=c2c(body.v[0]), faces=faces, vertex_colors=np.tile(colors['grey'], (6890, 1)))
    joints_mesh = points_to_spheres(c2c(body.Jtr[0]), vc=colors['red'])
    mrks = bdata['marker_data'][fId] - bdata['trans'][fId]
    mrks_mesh = points_to_spheres(mrks, vc=colors['blue'])

    all_meshes = [body_mesh] + joints_mesh + mrks_mesh
    # Bring the meshes into the viewer's frame and render three front views:
    # markers only, body only, and body with joints (wireframe).
    apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(-90), (1, 0, 0)))
    apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(-90), (0, 1, 0)))
    mv.set_static_meshes(mrks_mesh)
    images[0, 0, count] = mv.render()
    mv.set_static_meshes([body_mesh])
    images[0, 1, count] = mv.render()
    mv.set_static_meshes([body_mesh]+joints_mesh)
    images[0, 2, count] = mv.render(render_wireframe=True)

    # Turn the meshes 90 degrees about the vertical axis and render the same three views from the side.
    apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(90), (0, 1, 0)))
    mv.set_static_meshes(mrks_mesh)
    images[1, 0, count] = mv.render()
    mv.set_static_meshes([body_mesh])
    images[1, 1, count] = mv.render()
    mv.set_static_meshes([body_mesh]+joints_mesh)
    images[1, 2, count] = mv.render(render_wireframe=True)

    count += 1  # advance to the next frame slot
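To write the collected frames out as a video instead of keeping them in memory, the FFmpegWriter pattern from Example #4 can be reused; a brief sketch with an illustrative filename, using only the body-only column of the frame grid:

import skvideo.io

# Mirror Example #4's writer settings; the images array stores uint8 render output as float32.
writer = skvideo.io.FFmpegWriter('amass_body.mp4',
                                 outputdict={'-vcodec': 'libx264', '-pix_fmt': 'yuv420p'})
for t in range(count):
    writer.writeFrame(images[0, 1, t].astype(np.uint8))
writer.close()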
Example #4
    def nakedgen(self,
                 output_file,
                 subject_id,
                 pose_id,
                 bg_image=None,
                 bg_color='white',
                 color='grey',
                 rotation=0,
                 imw=300,
                 imh=300,
                 frame_skip=2,
                 scale=1,
                 translation=[0, 0],
                 rotate_to=0,
                 translation_to=[0, 0]):
        '''Render a BMLmovi MoSh sequence as a SMPL+H body mesh over a static scene and write it to a video file.'''
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        npz_bdata_path = "{}/Subject_{}_F_MoSh/Subject_{}_F_{}_poses.npz".format(
            self.bmlmovi_path, subject_id, subject_id, pose_id)
        bdata = np.load(npz_bdata_path)
        gender = bdata["gender"]
        bm_path = "{}/{}/model.npz".format(self.smplh_path, gender)
        dmpl_path = "{}/{}/model.npz".format(self.dmpl_path, gender)
        poses = torch.Tensor(bdata["poses"]).to(device)
        betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to(device)
        dmpls = torch.Tensor(bdata["dmpls"]).to(device)
        num_betas = 10
        num_dmpls = 8
        bm = BodyModel(bm_path=bm_path,
                       num_betas=num_betas,
                       num_dmpls=num_dmpls,
                       path_dmpl=dmpl_path).to(device)
        wall_face = torch.IntTensor([[0, 1, 2, 3]]).to(device)
        faces = c2c(bm.f)
        mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
        writer = skvideo.io.FFmpegWriter(output_file,
                                         outputdict={
                                             "-vcodec": "libx264",
                                             "-pix_fmt": "yuv420p"
                                         })
        sq_scale = 2.1
        #wall = torch.cat((
        #    torch.FloatTensor([[-sq_scale, sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[-sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, sq_scale, -1]]).to(device),
        #)).to(device)
        #uv = np.array([
        #    [0., 1],
        #    [0., 0],
        #    [1., 0],
        #    [1., 1.],
        #])
        #if bg_image:
        #    im = Image.open(bg_image)
        #    texture = TextureVisuals(image=im, uv=uv)
        #else:
        #    texture = None
        #wall_mesh = trimesh.Trimesh(vertices=c2c(wall), faces=wall_face, visual=texture, vertex_colors=np.tile(colors[bg_color], (4, 1)))

        # Linearly interpolate translation and rotation from their start values to the
        # *_to targets across the length of the sequence.
        translation = np.array(translation)
        translation_to = np.array(translation_to)
        translation_diff = translation_to - translation
        rotation_diff = rotate_to - rotation
        frames = len(poses)

        # Load the scanned scene mesh used as the static background.
        living_trimesh = as_mesh(trimesh.load('integrated.ply'))

        # Rotate & Move the object
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        7 / 6 * pi, (0, 0, 1)))
        living_trimesh.vertices[:, 1] += 1
        living_trimesh.vertices[:, 2] -= 1
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        pi, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 6 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -1 / 7 * pi, (1, 0, 0)))
        living_trimesh.vertices[:, 0] += 2
        living_trimesh.vertices[:, 1] -= 0.2
        living_trimesh.vertices[:, 2] -= 1.7
        living_trimesh.vertices *= 1.3
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -pi / 6, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 18 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        rotation, (0, 1, 0)))
        # End

        for fId in range(0, len(poses), frame_skip):
            # Per-frame interpolated rotation/translation (only consumed by the commented-out lines below).
            f_rotation = rotation + rotation_diff / frames * fId
            f_translation = translation + translation_diff / frames * fId
            # Split the SMPL+H pose vector into root orientation, body pose, and hand pose.
            root_orient = poses[fId:fId + 1, :3]
            pose_body = poses[fId:fId + 1, 3:66]
            pose_hand = poses[fId:fId + 1, 66:]
            dmpl = dmpls[fId:fId + 1]  # loaded but not passed to the body model below
            body = bm(pose_body=pose_body,
                      pose_hand=pose_hand,
                      betas=betas,
                      root_orient=root_orient)
            body_mesh_wfingers = trimesh.Trimesh(vertices=c2c(body.v[0]),
                                                 faces=faces,
                                                 vertex_colors=np.tile(
                                                     colors[color], (6890, 1)))
            #living_trimesh.visual.vertex_colors = np.tile(colors[bg_color], (24829, 1))
            #living_trimesh.vertices[:, 2] -= 1.3
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(-pi / 1.9, (1, 0, 0)))
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(rotation, (0, 1, 0)))
            # Scale the body about its base point and keep the feet just above the ground plane.
            basepoint = body_mesh_wfingers.vertices[:, 2].max().item()
            ground = body_mesh_wfingers.vertices[:, 1].min().item()
            measure = (body_mesh_wfingers.vertices[:, 1].max().item() - ground)
            body_mesh_wfingers.vertices[:, 1] -= (ground - 0.2)
            body_mesh_wfingers.vertices[:, 2] -= basepoint
            body_mesh_wfingers.vertices *= scale
            body_mesh_wfingers.vertices[:, 2] += basepoint
            #body_mesh_wfingers.vertices[:, :2] += f_translation * measure
            #living_trimesh.vertices[:, :2] += f_translation * measure
            mv.set_static_meshes([body_mesh_wfingers, living_trimesh])
            body_image_wfingers = mv.render(render_wireframe=False)
            writer.writeFrame(body_image_wfingers)
        writer.close()
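A hypothetical invocation of the method above, assuming the enclosing class (called Renderer here purely for illustration) is constructed with the bmlmovi_path, smplh_path, and dmpl_path attributes the method reads, and that the scene scan integrated.ply sits in the working directory:

renderer = Renderer(bmlmovi_path='data/BMLmovi',
                    smplh_path='models/smplh',
                    dmpl_path='models/dmpls')
renderer.nakedgen(output_file='subject_12_walking.mp4',
                  subject_id=12,
                  pose_id='walking',
                  color='grey',
                  rotation=0,
                  imw=300, imh=300,
                  frame_skip=2)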