def main(**args):
    """Load a body model and a VPoser checkpoint, then endlessly render
    random latent-pose samples for the first person found in the input image.

    Expected keys in ``args``: ``input_media`` (path to an image) and
    ``vposer_ckpt`` (path to a VPoser checkpoint); the remaining keys are
    forwarded to ``easy_configuration.configure``.
    """
    input_media = args.pop('input_media')
    config = easy_configuration.configure(**args)
    device = torch.device('cpu')
    dtype = torch.float32

    body_model = config['neutral_model']
    body_model.to(device=device)
    camera = config['camera']

    # Load the VPoser latent-pose prior and freeze it in eval mode.
    vposer_ckpt = osp.expandvars(args['vposer_ckpt'])
    vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
    vposer = vposer.to(device=device)
    vposer.eval()

    viewer = MeshViewer()

    body_color = (1.0, 1.0, 0.9, 1.0)
    img_np = cv2.imread(input_media)
    # NOTE(review): `img` is never used below — candidate for removal,
    # kept in case get_img() has side effects. TODO confirm.
    img = get_img(input_media)
    # Keypoints for the first person only; `[[0]]` keeps the batch dim.
    keypoints = get_keypoints(img_np)[[0]]

    keypoint_data = torch.tensor(keypoints, dtype=dtype)
    gt_joints = keypoint_data[:, :, :2]
    gt_joints = gt_joints.to(device=device, dtype=dtype)

    def render_embedding(vposer, pose_embedding, body_model, viewer):
        """Decode a latent pose and push the resulting mesh to the viewer."""
        body_pose = vposer.decode(pose_embedding, output_type='aa').view(1, -1)
        # BUG FIX: Tensor.to() is NOT in-place; the original discarded the
        # result, leaving body_pose on whatever device decode() produced.
        body_pose = body_pose.to(device=device)
        body_model_output = body_model(body_pose=body_pose)

        vertices = body_model_output.vertices.detach().cpu().numpy().squeeze()

        viewer.update_mesh(vertices, body_model.faces)

    while True:
        # The huge multiplier deliberately pushes the embedding far outside
        # the prior's support to visualize extreme poses.
        pose_embedding = torch.randn(
            [1, 32], dtype=torch.float32, device=device) * 10929292929
        render_embedding(vposer, pose_embedding, body_model, viewer)
# ===== Example #2 =====
 def __enter__(self):
     # Context-manager entry: reset the optimizer step counter and, when
     # visualization is enabled, open the mesh viewer window.
     # (Fragment of a larger class — self.visualize / self.body_color are
     # presumably set in __init__; see the full class below.)
     self.steps = 0
     if self.visualize:
         self.mv = MeshViewer(body_color=self.body_color)
     return self
# ===== Example #3 =====
class FittingMonitor(object):
    """Context manager that drives an optimization-based body-model fit.

    Tracks convergence via a maximum iteration count, a relative-loss
    tolerance (``ftol``) and a gradient-magnitude tolerance (``gtol``),
    and optionally renders the current body mesh every ``summary_steps``
    optimizer steps.
    """

    def __init__(self, summary_steps=1, visualize=False,
                 maxiters=100, ftol=2e-09, gtol=1e-05,
                 body_color=(1.0, 1.0, 0.9, 1.0),
                 model_type='smpl',
                 **kwargs):
        super(FittingMonitor, self).__init__()

        # Convergence criteria.
        self.maxiters = maxiters
        self.ftol = ftol
        self.gtol = gtol

        # Visualization settings.
        self.visualize = visualize
        self.summary_steps = summary_steps
        self.body_color = body_color
        self.model_type = model_type

    def __enter__(self):
        # Reset the step counter; open the viewer only when visualizing.
        self.steps = 0
        if self.visualize:
            self.mv = MeshViewer(body_color=self.body_color)
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if self.visualize:
            self.mv.close_viewer()

    def set_colors(self, vertex_color):
        """Tile a single RGB ``vertex_color`` across the current batch.

        NOTE(review): this reads ``self.colors`` before any code path in
        this class assigns it, so it raises AttributeError unless a caller
        sets ``self.colors`` first — confirm intended usage.
        """
        batch_size = self.colors.shape[0]

        self.colors = np.tile(
            np.array(vertex_color).reshape(1, 3),
            [batch_size, 1])

    def run_fitting(self, optimizer, closure, params, body_model,
                    use_vposer=True, pose_embedding=None, vposer=None,
                    **kwargs):
        ''' Helper function for running an optimization process
            Parameters
            ----------
                optimizer: torch.optim.Optimizer
                    The PyTorch optimizer object
                closure: function
                    The function used to calculate the gradients
                params: list
                    List containing the parameters that will be optimized
                body_model: nn.Module
                    The body model PyTorch module
                use_vposer: bool
                    Flag on whether to use VPoser (default=True).
                pose_embedding: torch.tensor, BxN
                    The tensor that contains the latent pose variable.
                vposer: nn.Module
                    The VPoser module
            Returns
            -------
                loss: float
                The final loss value (None if the first step diverged
                or converged before any loss was recorded)
        '''
        # VPoser's decoded pose omits the wrists for SMPL, so zero wrist
        # rotations are appended before evaluating the body model.
        append_wrists = self.model_type == 'smpl' and use_vposer
        prev_loss = None
        for n in range(self.maxiters):
            loss = optimizer.step(closure)

            if torch.isnan(loss).sum() > 0:
                print('NaN loss value, stopping!')
                break

            if torch.isinf(loss).sum() > 0:
                print('Infinite loss value, stopping!')
                break

            # Relative-loss convergence test (skipped when ftol <= 0).
            if n > 0 and prev_loss is not None and self.ftol > 0:
                loss_rel_change = utils.rel_change(prev_loss, loss.item())

                if loss_rel_change <= self.ftol:
                    break

            # BUG FIX: converge on the maximum *absolute* gradient entry.
            # The original used torch.abs(grad.max()), which wrongly
            # reports convergence when large negative gradients remain.
            if all([var.grad.view(-1).abs().max().item() < self.gtol
                    for var in params if var.grad is not None]):
                break

            if self.visualize and n % self.summary_steps == 0:
                body_pose = vposer.decode(
                    pose_embedding, output_type='aa').view(
                        1, -1) if use_vposer else None

                if append_wrists:
                    wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                             dtype=body_pose.dtype,
                                             device=body_pose.device)
                    body_pose = torch.cat([body_pose, wrist_pose], dim=1)
                model_output = body_model(
                    return_verts=True, body_pose=body_pose)
                vertices = model_output.vertices.detach().cpu().numpy()

                self.mv.update_mesh(vertices.squeeze(),
                                    body_model.faces)

            prev_loss = loss.item()

        return prev_loss

    def create_fitting_closure(self,
                               optimizer, body_model,
                               gt_joints=None, loss=None,
                               joints_conf=None,
                               joint_weights=None,
                               return_verts=True, return_full_pose=False,
                               use_vposer=False, vposer=None,
                               pose_embedding=None,
                               create_graph=False,
                               **kwargs):
        """Build the closure passed to ``optimizer.step``: evaluates the
        body model, computes the total loss and (optionally) backprops
        and renders the current mesh every ``summary_steps`` calls."""
        faces_tensor = body_model.faces_tensor.view(-1)
        append_wrists = self.model_type == 'smpl' and use_vposer

        def fitting_func(backward=True):
            if backward:
                optimizer.zero_grad()

            # Decode the latent pose only when the VPoser prior is active.
            body_pose = vposer.decode(
                pose_embedding, output_type='aa').view(
                    1, -1) if use_vposer else None

            if append_wrists:
                wrist_pose = torch.zeros([body_pose.shape[0], 6],
                                         dtype=body_pose.dtype,
                                         device=body_pose.device)
                body_pose = torch.cat([body_pose, wrist_pose], dim=1)

            body_model_output = body_model(return_verts=return_verts,
                                           body_pose=body_pose,
                                           return_full_pose=return_full_pose)
            total_loss = loss(body_model_output,
                              gt_joints=gt_joints,
                              body_model_faces=faces_tensor,
                              joints_conf=joints_conf,
                              joint_weights=joint_weights,
                              pose_embedding=pose_embedding,
                              use_vposer=use_vposer,
                              **kwargs)

            if backward:
                total_loss.backward(create_graph=create_graph)

            self.steps += 1
            if self.visualize and self.steps % self.summary_steps == 0:
                model_output = body_model(return_verts=True,
                                          body_pose=body_pose)
                vertices = model_output.vertices.detach().cpu().numpy()

                self.mv.update_mesh(vertices.squeeze(),
                                    body_model.faces)

            return total_loss

        return fitting_func
# ===== Example #4 =====
    # Collect mesh paths: a directory contributes every .obj found by a
    # recursive walk, a plain file is used as-is.  (The enclosing loop and
    # the definitions of mesh_fn / mesh_fns are outside this chunk.)
    if osp.isdir(mesh_fn):
        mesh_fns += [
            osp.join(root, fn) for (root, dirs, files) in os.walk(mesh_fn)
            for fn in files if fn.endswith('.obj')
        ]
    elif osp.isfile(mesh_fn):
        mesh_fns.append(mesh_fn)
mesh_fns.sort()

# Interactive viewer loop: q quits, +/- cycle through the sorted meshes.
key_handler = KeyHandler(mesh_fns)
registered_keys = {
    'q': key_handler.quit_viewer,
    '+': key_handler.next_mesh,
    '-': key_handler.prev_mesh
}
mv = MeshViewer(registered_keys=registered_keys)

print('Press q to exit')
print('Press + to open next mesh')
print('Press - to open previous mesh')

close = False
while True:
    # Exit when the viewer window closes or the key handler requests quit.
    if not mv.is_active():
        break
    if key_handler.close:
        break

    # Fetch the currently selected mesh; the loop body continues past the
    # end of this chunk.
    mesh_fn = key_handler.get_mesh_fn()
    #  if prev_idx == idx:
    #  continue
# ===== Example #5 =====
    def nakedgen(self,
                 output_file,
                 subject_id,
                 pose_id,
                 bg_image=None,
                 bg_color='white',
                 color='grey',
                 rotation=0,
                 imw=300,
                 imh=300,
                 frame_skip=2,
                 scale=1,
                 translation=None,
                 rotate_to=0,
                 translation_to=None):
        """Render a BMLmovi motion-capture sequence of an unclothed SMPL+H
        body over a static scene mesh and write it to ``output_file`` as an
        H.264 video.

        Parameters are render controls: ``rotation``/``rotate_to`` and
        ``translation``/``translation_to`` interpolate pose placement over
        the sequence, ``frame_skip`` subsamples frames, ``scale`` resizes
        the body.  ``bg_image``/``bg_color`` feed the (currently disabled)
        textured-wall background — see the commented block below.
        Relies on ``self.bmlmovi_path`` / ``self.smplh_path`` /
        ``self.dmpl_path`` being set on the instance.
        """
        # BUG FIX: the original used mutable list defaults ([0, 0]) for
        # translation / translation_to — a classic shared-default pitfall.
        # None sentinels preserve the same effective defaults.
        if translation is None:
            translation = [0, 0]
        if translation_to is None:
            translation_to = [0, 0]
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        npz_bdata_path = "{}/Subject_{}_F_MoSh/Subject_{}_F_{}_poses.npz".format(
            self.bmlmovi_path, subject_id, subject_id, pose_id)
        bdata = np.load(npz_bdata_path)
        gender = bdata["gender"]
        bm_path = "{}/{}/model.npz".format(self.smplh_path, gender)
        dmpl_path = "{}/{}/model.npz".format(self.dmpl_path, gender)
        poses = torch.Tensor(bdata["poses"]).to(device)
        betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to(device)
        dmpls = torch.Tensor(bdata["dmpls"]).to(device)
        num_betas = 10
        num_dmpls = 8
        bm = BodyModel(bm_path=bm_path,
                       num_betas=num_betas,
                       num_dmpls=num_dmpls,
                       path_dmpl=dmpl_path).to(device)
        # wall_face / sq_scale support the disabled wall-background feature
        # in the commented block below; kept so it can be re-enabled.
        wall_face = torch.IntTensor([[0, 1, 2, 3]]).to(device)
        faces = c2c(bm.f)
        mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
        writer = skvideo.io.FFmpegWriter(output_file,
                                         outputdict={
                                             "-vcodec": "libx264",
                                             "-pix_fmt": "yuv420p"
                                         })
        sq_scale = 2.1
        #wall = torch.cat((
        #    torch.FloatTensor([[-sq_scale, sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[-sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, sq_scale, -1]]).to(device),
        #)).to(device)
        #uv = np.array([
        #    [0., 1],
        #    [0., 0],
        #    [1., 0],
        #    [1., 1.],
        #])
        #if bg_image:
        #    im = Image.open(bg_image)
        #    texture = TextureVisuals(image=im, uv=uv)
        #else:
        #    texture = None
        #wall_mesh = trimesh.Trimesh(vertices=c2c(wall), faces=wall_face, visual=texture, vertex_colors=np.tile(colors[bg_color], (4, 1)))

        # Per-frame interpolation deltas for placement.
        translation = np.array(translation)
        translation_to = np.array(translation_to)
        translation_diff = translation_to - translation
        rotation_diff = rotate_to - rotation
        frames = len(poses)

        living_trimesh = as_mesh(trimesh.load('integrated.ply'))

        # Rotate & Move the scene mesh into place (hand-tuned transforms).
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        7 / 6 * pi, (0, 0, 1)))
        living_trimesh.vertices[:, 1] += 1
        living_trimesh.vertices[:, 2] -= 1
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        pi, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 6 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -1 / 7 * pi, (1, 0, 0)))
        living_trimesh.vertices[:, 0] += 2
        living_trimesh.vertices[:, 1] -= 0.2
        living_trimesh.vertices[:, 2] -= 1.7
        living_trimesh.vertices *= 1.3
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -pi / 6, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 18 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        rotation, (0, 1, 0)))
        # End

        for fId in range(0, len(poses), frame_skip):
            # Interpolated placement for this frame (used by the disabled
            # translation lines below).
            f_rotation = rotation + rotation_diff / frames * fId
            f_translation = translation + translation_diff / frames * fId
            # SMPL+H pose layout: [0:3] root orient, [3:66] body, [66:] hands.
            root_orient = poses[fId:fId + 1, :3]
            pose_body = poses[fId:fId + 1, 3:66]
            pose_hand = poses[fId:fId + 1, 66:]
            dmpl = dmpls[fId:fId + 1]
            body = bm(pose_body=pose_body,
                      pose_hand=pose_hand,
                      betas=betas,
                      root_orient=root_orient)
            body_mesh_wfingers = trimesh.Trimesh(vertices=c2c(body.v[0]),
                                                 faces=faces,
                                                 vertex_colors=np.tile(
                                                     colors[color], (6890, 1)))
            #living_trimesh.visual.vertex_colors = np.tile(colors[bg_color], (24829, 1))
            #living_trimesh.vertices[:, 2] -= 1.3
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(-pi / 1.9, (1, 0, 0)))
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(rotation, (0, 1, 0)))
            # Ground the body at y=0.2 and scale about its z basepoint.
            basepoint = body_mesh_wfingers.vertices[:, 2].max().item()
            ground = body_mesh_wfingers.vertices[:, 1].min().item()
            measure = (body_mesh_wfingers.vertices[:, 1].max().item() - ground)
            body_mesh_wfingers.vertices[:, 1] -= (ground - 0.2)
            body_mesh_wfingers.vertices[:, 2] -= basepoint
            body_mesh_wfingers.vertices *= scale
            body_mesh_wfingers.vertices[:, 2] += basepoint
            #body_mesh_wfingers.vertices[:, :2] += f_translation * measure
            #living_trimesh.vertices[:, :2] += f_translation * measure
            mv.set_static_meshes([body_mesh_wfingers, living_trimesh])
            body_image_wfingers = mv.render(render_wireframe=False)
            writer.writeFrame(body_image_wfingers)
        writer.close()