Example #1
    def load_mesh(self, data_dict):

        gender_type = "male" if data_dict["gender"] == -1 else "female"

        with torch.no_grad():
            bm = BodyModel(bm_path=self.bm_path % gender_type, num_betas=self.num_betas, batch_size=1)
            body = bm.forward(pose_body=data_dict['pose_body'].unsqueeze(0),
                              betas=data_dict['betas'].unsqueeze(0))

        mesh_ori = trimesh.Trimesh(vertices=c2c(body.v)[0], faces=c2c(body.f))

        # move the mesh to the origin (root joint at zero)
        joints = c2c(body.Jtr)[0]
        root_xyz = joints[0]
        mesh_ori.vertices -= root_xyz

        verts = mesh_ori.vertices
        vert_normals = mesh_ori.vertex_normals
        face_normals = mesh_ori.face_normals
        faces = mesh_ori.faces

        mesh = HoppeMesh(
            verts=verts, 
            faces=faces, 
            vert_normals=vert_normals, 
            face_normals=face_normals)

        return {'mesh': mesh, 
                'A': body.A[0,:self.num_poses+1],
                'weights': body.weights}
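A minimal sketch of the input this method expects, inferred from the accesses above; the concrete tensor sizes and the owning instance (`loader`) are hypothetical:

import torch

data_dict = {
    'gender': -1,                    # -1 selects the male model, anything else female
    'pose_body': torch.zeros(63),    # axis-angle body pose, unsqueezed to (1, 63) inside
    'betas': torch.zeros(10),        # shape coefficients; length should equal self.num_betas
}
result = loader.load_mesh(data_dict)  # 'loader' is a hypothetical instance of the owning class
mesh, A, weights = result['mesh'], result['A'], result['weights']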
Example #2
def amass_fk(npz_data_path, bm_path):
    if torch.cuda.is_available():
        comp_device = torch.device("cuda")
    else:
        comp_device = torch.device("cpu")
    bm = BodyModel(bm_path=bm_path, batch_size=1, num_betas=10).to(
        comp_device
    )
    bdata = np.load(npz_data_path)
    root_orient = torch.Tensor(bdata["poses"][:, :3]).to(comp_device)
    pose_body = torch.Tensor(bdata["poses"][:, 3:66]).to(comp_device)
    pose_hand = torch.Tensor(bdata["poses"][:, 66:]).to(comp_device)
    betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to(comp_device)
    rootTranslation = bdata["trans"]
    trans = np.expand_dims(rootTranslation, 1)  # (num_frames, 1, 3) for broadcasting
    joints = np.zeros((bdata["poses"].shape[0], 52, 3))
    verts = np.zeros((bdata["poses"].shape[0], 6890, 3))
    num_frames = bdata["poses"].shape[0]
    # iterate over every frame; indexing by fId keeps each pose aligned with
    # its own root translation (the original loop skipped frame 0 and paired
    # frame fId with trans[fId - 1])
    for fId in tqdm(range(num_frames)):
        body = bm(
            pose_body=pose_body[fId : fId + 1],
            pose_hand=pose_hand[fId : fId + 1],
            betas=betas,
            root_orient=root_orient[fId : fId + 1],
        )
        joints[fId] = c2c(body.Jtr[0]) + trans[fId]
        verts[fId] = c2c(body.v[0]) + trans[fId]
    return joints, verts
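A hedged usage sketch for amass_fk; both paths are placeholders for an AMASS sequence file and a SMPL-H model archive:

joints, verts = amass_fk(
    npz_data_path='amass_sample.npz',               # placeholder AMASS sequence
    bm_path='body_models/smplh/male/model.npz',     # placeholder SMPL-H model
)
print(joints.shape, verts.shape)  # (num_frames, 52, 3) and (num_frames, 6890, 3)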
Example #3
    def _load_parametric_body_model(
            data_path: str, used_body_model_gender: str, num_betas: int,
            num_dmpls: int) -> Tuple[BodyModel, np.ndarray]:
        """ Loads the parametric body model that is used to generate the mesh object.

        :return: the body model and its faces. Type: tuple.
        """
        bm_path = os.path.join(data_path, 'body_models', 'smplh',
                               used_body_model_gender,
                               'model.npz')  # body model
        dmpl_path = os.path.join(data_path, 'body_models', 'dmpls',
                                 used_body_model_gender,
                                 'model.npz')  # deformation model
        if not os.path.exists(bm_path) or not os.path.exists(dmpl_path):
            raise Exception(
                "Parametric Body model doesn't exist, please follow download instructions section in AMASS Example"
            )
        comp_device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        body_model = BodyModel(bm_path=bm_path,
                               num_betas=num_betas,
                               num_dmpls=num_dmpls,
                               path_dmpl=dmpl_path).to(comp_device)
        faces = body_model.f.detach().cpu().numpy()
        return body_model, faces
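A usage sketch, assuming data_path points at a folder with the body_models/smplh/<gender>/model.npz and body_models/dmpls/<gender>/model.npz layout the method builds its paths from (the root folder name is a placeholder):

body_model, faces = _load_parametric_body_model(
    data_path='resources',            # hypothetical root folder
    used_body_model_gender='male',
    num_betas=10,
    num_dmpls=8,
)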
Example #4
    def forward(self, poZ_body=None, **kwargs):

        if self.poser_type == 'vposer':
            if self.model_type in ['smpl', 'smplh', 'smplx']:
                if poZ_body is None:  poZ_body = self.poZ_body

                pose = self.poser_body_pt.decode(poZ_body, output_type='aa').view(self.batch_size, -1)

                if pose.shape[1] > 63:
                    pose_body = pose[:, 3:66]
                    root_orient = pose[:, :3]
                else:
                    pose_body = pose[:, :63]
                    root_orient = None

                if self.use_hands and self.model_type in ['smplh', 'smplx']:
                    pose_handL = self.poser_handL_pt.decode(self.poZ_handL, output_type='aa').view(self.batch_size, -1)
                    pose_handR = self.poser_handR_pt.decode(self.poZ_handR, output_type='aa').view(self.batch_size, -1)
                    pose_hand = torch.cat([pose_handL, pose_handR], dim=1)
                else:
                    pose_hand = None

                new_body = super(BodyModelWithPoser, self).forward(pose_body=pose_body, root_orient=root_orient, pose_hand=pose_hand, **kwargs)
                new_body.poZ_body = poZ_body


            if self.model_type in ['mano_left', 'mano_right']:
                pose_hand = self.poser_hand_pt.decode(self.poZ_hand, output_type='aa').view(self.batch_size, -1)
                new_body = super(BodyModelWithPoser, self).forward(pose_hand=pose_hand, **kwargs)

        else:
            new_body = BodyModel.forward(self)

        return new_body
Example #5
def load_body_model(bm_path, num_betas=10, model_type="smplh"):
    comp_device = torch.device("cpu")
    bm = BodyModel(
        bm_path=bm_path,
        num_betas=num_betas,
        # model_type=model_type
    ).to(comp_device)
    return bm
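A short follow-up showing a forward pass through the returned model; the model path is a placeholder, and the sizes assume SMPL-H (63 body-pose and 10 shape parameters):

import torch

bm = load_body_model('body_models/smplh/male/model.npz')  # placeholder path
body = bm(pose_body=torch.zeros(1, 63), betas=torch.zeros(1, 10))
print(body.v.shape, body.Jtr.shape)  # vertices and joints of the posed body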
Example #6
def load(file, bm=None, bm_path=None):
    if bm is None:
        # Download the required body model. For SMPL-H download it from
        # http://mano.is.tue.mpg.de/.
        assert bm_path is not None, "Please provide SMPL body model path"
        comp_device = torch.device("cpu")
        num_betas = 10  # number of body parameters
        bm = BodyModel(bm_path=bm_path, num_betas=num_betas).to(comp_device)
    return create_motion_from_amass_data(filename=file, bm=bm)
Example #7
    def perform_training(self, num_epochs=None, message=None):
        starttime = datetime.now().replace(microsecond=0)
        if num_epochs is None: num_epochs = self.ps.num_epochs

        self.logger(
            'Started Training at %s for %d epochs' % (datetime.strftime(starttime, '%Y-%m-%d_%H:%M:%S'), num_epochs))

        vis_bm = BodyModel(self.ps.bm_path, 'smplh', num_betas=16).to(self.comp_device)
        prev_lr = np.inf
        scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=int(num_epochs // 3), gamma=0.5)
        for epoch_num in range(1, num_epochs + 1):
            scheduler.step()
            cur_lr = self.optimizer.param_groups[0]['lr']
            if cur_lr != prev_lr:
                self.logger('--- Optimizer learning rate changed from %.2e to %.2e ---' % (prev_lr, cur_lr))
                prev_lr = cur_lr
            self.epochs_completed += 1
            train_loss_dict = self.train()
            eval_loss_dict = self.evaluate()

            with torch.no_grad():
                eval_msg = VPoserTrainer.creat_loss_message(eval_loss_dict, expr_code=self.ps.expr_code,
                                                            epoch_num=self.epochs_completed, it=len(self.ds_val),
                                                            try_num=self.try_num, mode='evald')
                if eval_loss_dict['loss_total'] < self.best_loss_total:
                    self.ps.best_model_fname = makepath(os.path.join(self.ps.work_dir, 'snapshots', 'TR%02d_E%03d.pt' % (
                    self.try_num, self.epochs_completed)), isfile=True)
                    self.logger(eval_msg + ' ** ')
                    self.best_loss_total = eval_loss_dict['loss_total']
                    torch.save(self.vposer_model.module.state_dict() if isinstance(self.vposer_model, torch.nn.DataParallel) else self.vposer_model.state_dict(), self.ps.best_model_fname)

                    imgname = '[%s]_TR%02d_E%03d.png' % (self.ps.expr_code, self.try_num, self.epochs_completed)
                    imgpath = os.path.join(self.ps.work_dir, 'images', imgname)
                    try:
                        VPoserTrainer.vis_results(self.vis_dorig, self.vposer_model, bm=vis_bm, imgpath=imgpath)
                    except Exception:
                        print('The visualization failed.')
                else:
                    self.logger(eval_msg)

                self.swriter.add_scalars('total_loss/scalars', {'train_loss_total': train_loss_dict['loss_total'],
                                                                'evald_loss_total': eval_loss_dict['loss_total'], },
                                         self.epochs_completed)

            # if early_stopping(eval_loss_dict['loss_total']):
            #     self.logger("Early stopping at epoch %d"%self.epochs_completed)
            #     break

        endtime = datetime.now().replace(microsecond=0)

        self.logger('Finished Training at %s\n' % (datetime.strftime(endtime, '%Y-%m-%d_%H:%M:%S')))
        self.logger(
            'Training done in %s! Best val total loss achieved: %.2e\n' % (endtime - starttime, self.best_loss_total))
        self.logger('Best model path: %s\n' % self.ps.best_model_fname)
Example #8
    def __init__(
        self,
        bm: Union[str, BodyModel],
        vids: Iterable[int],
        kpts_colors: Union[np.ndarray, None] = None,
    ):
        super(SourceKeyPoints, self).__init__()

        self.bm = BodyModel(bm, persistant_buffer=False) if isinstance(
            bm, str) else bm
        self.bm_f = []  #self.bm.f
        self.vids = vids
        self.kpts_colors = np.array([Color('grey').rgb for _ in vids
                                     ]) if kpts_colors is None else kpts_colors
Example #9
    def __init__(
        self,
        bm: Union[str, BodyModel],
        n_joints: int = 22,
        kpts_colors: Union[np.ndarray, None] = None,
    ):
        super(SourceKeyPoints, self).__init__()

        self.bm = BodyModel(bm, persistant_buffer=False) if isinstance(
            bm, str) else bm
        self.bm_f = []  #self.bm.f
        self.n_joints = n_joints
        self.kpts_colors = np.array(
            [Color('grey').rgb
             for _ in range(n_joints)]) if kpts_colors is None else kpts_colors
Example #10
def load(file,
         bm=None,
         bm_path=None,
         num_betas=10,
         model_type="smplh",
         override_betas=None):
    if bm is None:
        # Download the required body model. For SMPL-H download it from
        # http://mano.is.tue.mpg.de/.
        assert bm_path is not None, "Please provide SMPL body model path"
        comp_device = torch.device("cpu")
        bm = BodyModel(bm_path=bm_path,
                       num_betas=num_betas,
                       model_type=model_type).to(comp_device)
    return create_motion_from_amass_data(filename=file,
                                         bm=bm,
                                         override_betas=override_betas)
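A hedged usage sketch; the sequence and model paths are placeholders, and override_betas stays at its default:

motion = load(
    file='amass_sample.npz',                       # placeholder AMASS sequence
    bm_path='body_models/smplh/male/model.npz',    # placeholder SMPL-H model
)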
Example #11
    def __init__(self, _config):
        super(VPoserTrainer, self).__init__()

        _support_data_dir = get_support_data_dir()

        vp_ps = load_config(**_config)

        make_deterministic(vp_ps.general.rnd_seed)

        self.expr_id = vp_ps.general.expr_id
        self.dataset_id = vp_ps.general.dataset_id

        self.work_dir = vp_ps.logging.work_dir = makepath(
            vp_ps.general.work_basedir, self.expr_id)
        self.dataset_dir = vp_ps.logging.dataset_dir = osp.join(
            vp_ps.general.dataset_basedir, vp_ps.general.dataset_id)

        self._log_prefix = '[{}]'.format(self.expr_id)
        self.text_logger = log2file(prefix=self._log_prefix)

        self.seq_len = vp_ps.data_parms.num_timeseq_frames

        self.vp_model = VPoser(vp_ps)

        with torch.no_grad():

            self.bm_train = BodyModel(vp_ps.body_model.bm_fname)

        if vp_ps.logging.render_during_training:
            self.renderer = vposer_trainer_renderer(
                self.bm_train, vp_ps.logging.num_bodies_to_display)
        else:
            self.renderer = None

        self.example_input_array = {
            'pose_body': torch.ones(vp_ps.train_parms.batch_size, 63),
        }
        self.vp_ps = vp_ps
Example #12
bm_fname = osp.join(
    support_dir, 'models/smplx/neutral/model.npz'
)  #'PATH_TO_SMPLX_model.npz'  obtain from https://smpl-x.is.tue.mpg.de/downloads
sample_amass_fname = osp.join(
    support_dir, 'amass_sample.npz')  # a sample npz file from AMASS

comp_device = torch.device('cuda')

sample_amass = np.load(sample_amass_fname)
print('sample_amass keys: ', list(sample_amass.keys()))
n_joints = 22

target_bm = BodyModel(bm_fname)(
    **{
        'pose_body': torch.tensor(sample_amass['poses'][:, 3:66]).type(
            torch.float),
        'root_orient': torch.tensor(sample_amass['poses'][:, :3]).type(
            torch.float),
        'trans': torch.tensor(sample_amass['trans']).type(torch.float),
    })

red = Color("red")
blue = Color("blue")
kpts_colors = [c.rgb for c in list(red.range_to(blue, n_joints))]

# create source and target key points and make sure they are index aligned
data_loss = torch.nn.MSELoss(reduction='sum')

stepwise_weights = [
    {
        'data': 10.,
        'poZ_body': .01,
Example #13
expr_code = 'V1_S1_T1'  # VERSION_SUBVERSION_TRY
work_dir = os.path.join(os.getcwd(), expr_code)
num_betas = 16  # number of body parameters
num_dmpls = 8  # number of DMPL parameters
batch_size = 1
# path to the body models
# can be downloaded at http://mano.is.tue.mpg.de/
bm_path = os.path.join(os.getcwd(), expr_code, "body_models", "smplh", "male",
                       "model.npz")
# can be downloaded at http://smpl.is.tue.mpg.de/downloads
dmpl_path = os.path.join(os.getcwd(), expr_code, "body_models", "dmpls",
                         "male", "model.npz")

bm = BodyModel(bm_path=bm_path,
               num_betas=num_betas,
               path_dmpl=dmpl_path,
               num_dmpls=num_dmpls,
               batch_size=batch_size)  # .to(comp_device)
faces = c2c(bm.f)

# Choose the device to run the body model on.
#comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

for fold in ["train", "test", "vald"]:

    path_split_dump = os.path.join("amass_dump", fold)
    if not os.path.isdir(path_split_dump):
        os.mkdir(path_split_dump)

    path_split_dump_images = os.path.join("amass_dump", fold + "_images")
    if not os.path.isdir(path_split_dump_images):
Example #14
    def nakedgen(self,
                 output_file,
                 subject_id,
                 pose_id,
                 bg_image=None,
                 bg_color='white',
                 color='grey',
                 rotation=0,
                 imw=300,
                 imh=300,
                 frame_skip=2,
                 scale=1,
                 translation=[0, 0],
                 rotate_to=0,
                 translation_to=[0, 0]):
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        npz_bdata_path = "{}/Subject_{}_F_MoSh/Subject_{}_F_{}_poses.npz".format(
            self.bmlmovi_path, subject_id, subject_id, pose_id)
        bdata = np.load(npz_bdata_path)
        gender = bdata["gender"]
        bm_path = "{}/{}/model.npz".format(self.smplh_path, gender)
        dmpl_path = "{}/{}/model.npz".format(self.dmpl_path, gender)
        poses = torch.Tensor(bdata["poses"]).to(device)
        betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to(device)
        dmpls = torch.Tensor(bdata["dmpls"]).to(device)
        num_betas = 10
        num_dmpls = 8
        bm = BodyModel(bm_path=bm_path,
                       num_betas=num_betas,
                       num_dmpls=num_dmpls,
                       path_dmpl=dmpl_path).to(device)
        wall_face = torch.IntTensor([[0, 1, 2, 3]]).to(device)
        faces = c2c(bm.f)
        mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
        writer = skvideo.io.FFmpegWriter(output_file,
                                         outputdict={
                                             "-vcodec": "libx264",
                                             "-pix_fmt": "yuv420p"
                                         })
        sq_scale = 2.1
        #wall = torch.cat((
        #    torch.FloatTensor([[-sq_scale, sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[-sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, -sq_scale, -1]]).to(device),
        #    torch.FloatTensor([[sq_scale, sq_scale, -1]]).to(device),
        #)).to(device)
        #uv = np.array([
        #    [0., 1],
        #    [0., 0],
        #    [1., 0],
        #    [1., 1.],
        #])
        #if bg_image:
        #    im = Image.open(bg_image)
        #    texture = TextureVisuals(image=im, uv=uv)
        #else:
        #    texture = None
        #wall_mesh = trimesh.Trimesh(vertices=c2c(wall), faces=wall_face, visual=texture, vertex_colors=np.tile(colors[bg_color], (4, 1)))

        translation = np.array(translation)
        translation_to = np.array(translation_to)
        translation_diff = translation_to - translation
        rotation_diff = rotate_to - rotation
        frames = len(poses)

        living_trimesh = as_mesh(trimesh.load('integrated.ply'))

        # Rotate & Move the object
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        7 / 6 * pi, (0, 0, 1)))
        living_trimesh.vertices[:, 1] += 1
        living_trimesh.vertices[:, 2] -= 1
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        pi, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 6 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -1 / 7 * pi, (1, 0, 0)))
        living_trimesh.vertices[:, 0] += 2
        living_trimesh.vertices[:, 1] -= 0.2
        living_trimesh.vertices[:, 2] -= 1.7
        living_trimesh.vertices *= 1.3
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        -pi / 6, (0, 1, 0)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        1 / 18 * pi, (0, 0, 1)))
        apply_mesh_tranfsormations_([living_trimesh],
                                    trimesh.transformations.rotation_matrix(
                                        rotation, (0, 1, 0)))
        # End

        for fId in range(0, len(poses), frame_skip):
            f_rotation = rotation + rotation_diff / frames * fId
            f_translation = translation + translation_diff / frames * fId
            root_orient = poses[fId:fId + 1, :3]
            pose_body = poses[fId:fId + 1, 3:66]
            pose_hand = poses[fId:fId + 1, 66:]
            dmpl = dmpls[fId:fId + 1]
            body = bm(pose_body=pose_body,
                      pose_hand=pose_hand,
                      betas=betas,
                      dmpls=dmpl,  # the original sliced dmpl but never passed it in
                      root_orient=root_orient)
            body_mesh_wfingers = trimesh.Trimesh(vertices=c2c(body.v[0]),
                                                 faces=faces,
                                                 vertex_colors=np.tile(
                                                     colors[color], (6890, 1)))
            #living_trimesh.visual.vertex_colors = np.tile(colors[bg_color], (24829, 1))
            #living_trimesh.vertices[:, 2] -= 1.3
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(-pi / 1.9, (1, 0, 0)))
            apply_mesh_tranfsormations_(
                [body_mesh_wfingers],
                trimesh.transformations.rotation_matrix(rotation, (0, 1, 0)))
            basepoint = body_mesh_wfingers.vertices[:, 2].max().item()
            ground = body_mesh_wfingers.vertices[:, 1].min().item()
            measure = (body_mesh_wfingers.vertices[:, 1].max().item() - ground)
            body_mesh_wfingers.vertices[:, 1] -= (ground - 0.2)
            body_mesh_wfingers.vertices[:, 2] -= basepoint
            body_mesh_wfingers.vertices *= scale
            body_mesh_wfingers.vertices[:, 2] += basepoint
            #body_mesh_wfingers.vertices[:, :2] += f_translation * measure
            #living_trimesh.vertices[:, :2] += f_translation * measure
            mv.set_static_meshes([body_mesh_wfingers, living_trimesh])
            body_image_wfingers = mv.render(render_wireframe=False)
            writer.writeFrame(body_image_wfingers)
        writer.close()
Example #15
    def __init__(self, work_dir, ps):
        from tensorboardX import SummaryWriter

        from human_body_prior.data.dataloader import VPoserDS

        self.pt_dtype = torch.float64 if ps.fp_precision == '64' else torch.float32

        torch.manual_seed(ps.seed)

        ps.work_dir = makepath(work_dir, isfile=False)

        logger = log2file(os.path.join(work_dir, '%s.log' % ps.expr_code))

        summary_logdir = os.path.join(work_dir, 'summaries')
        self.swriter = SummaryWriter(log_dir=summary_logdir)
        logger('tensorboard --logdir=%s' % summary_logdir)
        logger('Torch Version: %s\n' % torch.__version__)

        shutil.copy2(os.path.realpath(__file__), work_dir)

        use_cuda = torch.cuda.is_available()
        if use_cuda: torch.cuda.empty_cache()
        self.comp_device = torch.device(
            "cuda:%d" % ps.cuda_id if torch.cuda.is_available() else "cpu")

        logger('%d CUDAs available!' % torch.cuda.device_count())

        gpu_brand = torch.cuda.get_device_name(
            ps.cuda_id) if use_cuda else None
        logger('Training with %s [%s]' %
               (self.comp_device,
                gpu_brand) if use_cuda else 'Training on CPU!!!')
        logger('Base dataset_dir is %s' % ps.dataset_dir)

        kwargs = {'num_workers': ps.n_workers}
        ds_train = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'train'))
        self.ds_train = DataLoader(ds_train,
                                   batch_size=ps.batch_size,
                                   shuffle=True,
                                   drop_last=True,
                                   **kwargs)
        ds_val = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'vald'))
        self.ds_val = DataLoader(ds_val,
                                 batch_size=ps.batch_size,
                                 shuffle=True,
                                 drop_last=True,
                                 **kwargs)
        ds_test = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'test'))
        self.ds_test = DataLoader(ds_test,
                                  batch_size=ps.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  **kwargs)
        logger('Train dataset size %.2f M' %
               (len(self.ds_train.dataset) * 1e-6))
        logger('Validation dataset size %d' % len(self.ds_val.dataset))
        logger('Test dataset size %d' % len(self.ds_test.dataset))

        ps.data_shape = list(ds_val[0]['pose_aa'].shape)
        self.vposer_model = VPoser(num_neurons=ps.num_neurons,
                                   latentD=ps.latentD,
                                   data_shape=ps.data_shape,
                                   use_cont_repr=ps.use_cont_repr)

        if ps.use_multigpu:
            self.vposer_model = nn.DataParallel(self.vposer_model)

        self.vposer_model.to(self.comp_device)

        varlist = [var[1] for var in self.vposer_model.named_parameters()]

        params_count = sum(p.numel() for p in varlist if p.requires_grad)
        logger('Total Trainable Parameters Count is %2.2f M.' %
               ((params_count) * 1e-6))

        self.optimizer = optim.Adam(varlist,
                                    lr=ps.base_lr,
                                    weight_decay=ps.reg_coef)

        self.logger = logger
        self.best_loss_total = np.inf
        self.try_num = ps.try_num
        self.epochs_completed = 0
        self.ps = ps

        if ps.best_model_fname is not None:
            if isinstance(self.vposer_model, torch.nn.DataParallel):
                self.vposer_model.module.load_state_dict(
                    torch.load(ps.best_model_fname,
                               map_location=self.comp_device))
            else:
                self.vposer_model.load_state_dict(
                    torch.load(ps.best_model_fname,
                               map_location=self.comp_device))

            logger('Restored model from %s' % ps.best_model_fname)

        chosen_ids = np.random.choice(list(range(len(ds_val))),
                                      size=ps.num_bodies_to_display,
                                      replace=False,
                                      p=None)
        data_all = {}
        for sample_id in chosen_ids:
            for k, v in ds_val[sample_id].items():
                if k in data_all.keys():
                    data_all[k] = torch.cat([data_all[k], v[np.newaxis]],
                                            dim=0)
                else:
                    data_all[k] = v[np.newaxis]

        self.vis_dorig = {
            k: data_all[k].to(self.comp_device)
            for k in data_all.keys()
        }

        self.bm = BodyModel(self.ps.bm_path,
                            'smplh',
                            batch_size=self.ps.batch_size,
                            use_posedirs=True).to(self.comp_device)
Example #16
# torch.autograd.set_detect_anomaly(True)
mode = "smplx"
if mode == "smplx":
    model_params = dict(
        model_path="assets/models",
        model_type="smplx",
        gender="female",
        # create_body_pose=True,
        dtype=torch.float32,
        use_face=False,
    )
    model = smplx.create(**model_params)
    model.cuda()
else:
    bm_path = "assets/models/smplx/SMPLX_FEMALE.npz"
    bm = BodyModel(bm_path=bm_path, batch_size=1).to("cuda")
    bm.cuda()
vp, ps = load_vposer("assets/vposer")
vp = vp.to("cuda")
vp.eval()

# Start from a 32-dimensional latent vector (zeros here; sample from a
# Normal distribution instead to get a random pose)
poZ_body_sample = torch.zeros(1, 32).cuda()
pose_body = vp.decode(poZ_body_sample, output_type="aa").view(-1, 63)


pose_embedding = torch.zeros(
    [1, 32], requires_grad=True, device=poZ_body_sample.device
)
pose_embedding = torch.rand(
    [1, 32], requires_grad=True, device=poZ_body_sample.device
)
Example #17
        if not self.use_offscreen:
            sys.stderr.write(
                'Currently saving snapshots only works with off-screen renderer!\n'
            )
            return
        color_img = self.render()
        cv2.imwrite(fname, color_img)


if __name__ == '__main__':
    from human_body_prior.tools.omni_tools import copy2cpu as c2c
    from human_body_prior.body_model.body_model import BodyModel
    from supercap.marker_layout_detection.tools import marker_layout_as_points, equal_aspect_ratio, visualize3DData

    bodymodel_fname = '/ps/project/common/moshpp/smplx/unlocked_head/neutral/model.npz'
    body = BodyModel(bodymodel_fname)()

    superset_fname = '/ps/project/supercap/support_files/marker_layouts/superset_smplx_95.json'
    superset_data = marker_layout_as_points(c2c(body.v[0]),
                                            c2c(body.f))(superset_fname)

    markers = superset_data['markers']
    mv = MeshViewer(use_offscreen=False)
    body_v = c2c(body.v[0])
    faces = c2c(body.f)
    n_verts = body_v.shape[0]
    body_mesh = trimesh.Trimesh(vertices=body_v,
                                faces=faces,
                                vertex_colors=np.tile(colors['grey'],
                                                      (n_verts, 1)))
    mv.set_dynamic_meshes([body_mesh])
Example #18
def main(args):
    comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    bm = BodyModel(model_type="smplh",
                   bm_path=args.body_model_file,
                   num_betas=10).to(comp_device)
    faces = c2c(bm.f)

    img_shape = (1600, 1600)
    motion = bvh.load(
        file=args.input_file,
        scale=0.5,
        v_up_skel=np.array([0.0, 1.0, 0.0]),
        v_face_skel=np.array([0.0, 0.0, 1.0]),
        v_up_env=np.array([0.0, 0.0, 1.0]),
    )
    motion = motion_ops.rotate(
        motion,
        conversions.Ax2R(conversions.deg2rad(-90)),
    )
    mv = prepare_mesh_viewer(img_shape)

    out = cv2.VideoWriter(args.video_output_file,
                          cv2.VideoWriter_fourcc(*"XVID"), 30, img_shape)

    parents = bm.kintree_table[0].long()[:21 + 1]
    parents = parents.cpu().numpy()
    dfs_order = get_dfs_order(parents)
    for frame in tqdm.tqdm(range(motion.num_frames())):
        pose = motion.get_pose_by_frame(frame)
        R, p = conversions.T2Rp(pose.data[0])
        root_orient = conversions.R2A(R)
        trans = p

        num_joints = len(pose.data) - 1
        body_model_pose_data = np.zeros(num_joints * 3)
        for motion_joint, amass_joint in enumerate(dfs_order):
            # motion_joint is idx of joint in Motion class order
            # amass_joint is idx of joint in AMASS skeleton
            if amass_joint == 0:
                continue
            pose_idx = amass_joint - 1
            # Convert rotation matrix to axis angle
            axis_angles = conversions.R2A(
                conversions.T2R(pose.data[motion_joint]))
            body_model_pose_data[pose_idx * 3:pose_idx * 3 + 3] = axis_angles

        pose_data_t = (
            torch.Tensor(body_model_pose_data).to(comp_device).unsqueeze(0))
        root_orient_t = torch.Tensor(root_orient).to(comp_device).unsqueeze(0)
        trans_t = torch.Tensor(trans).to(comp_device).unsqueeze(0)
        body = bm(pose_body=pose_data_t,
                  root_orient=root_orient_t,
                  trans=trans_t)

        body_mesh = trimesh.Trimesh(
            vertices=c2c(body.v[0]),
            faces=faces,
            vertex_colors=np.tile(colors["grey"], (6890, 1)),
        )
        # TODO: Add floor trimesh to the scene to display the ground plane
        mv.set_static_meshes([body_mesh])
        body_image = mv.render()
        img = body_image.astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        out.write(img)
    out.release()
Example #19
#
#
# Code Developed by:
# Nima Ghorbani <https://www.linkedin.com/in/nghorbani/>
#
# 2019.08.09
import torch
import numpy as np

from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.tools.omni_tools import copy2cpu as c2c

bm_path = '../body_models/smplh/male/model.npz' # obtain from http://mano.is.tue.mpg.de/downloads

comp_device = torch.device('cuda')
bm = BodyModel(bm_path=bm_path, batch_size=1, num_betas=10).to(comp_device)

npz_data_path = '../github_data/amass_sample.npz'
bdata = np.load(npz_data_path)
print(list(bdata.keys()))

root_orient = torch.Tensor(bdata['poses'][:, :3]).to(comp_device)
pose_body = torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device)
pose_hand = torch.Tensor(bdata['poses'][:, 66:]).to(comp_device)
betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(comp_device)

faces = c2c(bm.f)

from human_body_prior.mesh import MeshViewer
from human_body_prior.mesh.sphere import points_to_spheres
import trimesh
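The snippet above stops after the imports; a minimal continuation sketch that runs a single frame through the model (it was built with batch_size=1) and wraps the result in a mesh:

body = bm(root_orient=root_orient[:1],
          pose_body=pose_body[:1],
          pose_hand=pose_hand[:1],
          betas=betas)
body_mesh = trimesh.Trimesh(vertices=c2c(body.v[0]), faces=faces)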
Example #20
def load_file(file_name):
    npz_bdata_path = file_name
    bdata = np.load(npz_bdata_path)

    gender = bdata['gender']  # note: loaded but unused below; the male model is always selected

    bm_path = MODELS_FOLDER + "/" + 'male' + '/model.npz'

    num_betas = 10  # number of body parameters
    model_type = 'smplh'

    FRAMES = bdata['poses'].shape[0]
    MAX_BATCH_SIZE = 1000
    batch_size = min(MAX_BATCH_SIZE, FRAMES)
    # if FRAMES is an exact multiple of MAX_BATCH_SIZE the remainder would be 0,
    # so fall back to a full batch in that case
    last_batch_size = FRAMES % MAX_BATCH_SIZE or batch_size
    bm = BodyModel(bm_path=bm_path,
                   num_betas=num_betas,
                   model_type=model_type,
                   batch_size=batch_size).to(COMP_DEVICE)
    bm2 = BodyModel(bm_path=bm_path,
                    num_betas=num_betas,
                    model_type=model_type,
                    batch_size=last_batch_size).to(COMP_DEVICE)
    faces = c2c(bm.f)

    # print('Data keys available:%s'%list(bdata.keys()))
    # print('Vector poses has %d elements for each of %d frames.' % (bdata['poses'].shape[1], bdata['poses'].shape[0]))
    # print('Vector dmpls has %d elements for each of %d frames.' % (bdata['dmpls'].shape[1], bdata['dmpls'].shape[0]))
    # print('Vector trans has %d elements for each of %d frames.' % (bdata['trans'].shape[1], bdata['trans'].shape[0]))
    # print('Vector betas has %d elements constant for the whole sequence.'%bdata['betas'].shape[0])
    # print('The subject of the mocap sequence is %s.'%bdata['gender'])

    FRAME_TIME = 1.0 / bdata['mocap_framerate'].item()

    joint_names = get_joint_names()[:JOINT_COUNT]
    dependencies = get_dependencies()

    root_orient_allframes = torch.Tensor(bdata['poses'][:, :3]).to(
        COMP_DEVICE)  # controls the global root orientation
    pose_body_allframes = torch.Tensor(bdata['poses'][:, 3:66]).to(
        COMP_DEVICE)  # controls the body
    betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(COMP_DEVICE)
    trans_allframes = bdata['trans']

    zipped_global_positions = np.empty((FRAMES, JOINT_COUNT, 3))

    frame_range = FRAMES // batch_size + 1
    if FRAMES == batch_size:
        frame_range = 1
    for fId in range(frame_range):

        startIdx = fId * batch_size
        if startIdx == FRAMES:
            continue
        endIdx = min(FRAMES, (fId + 1) * (batch_size))
        root_orient = root_orient_allframes[startIdx:endIdx]
        pose_body = pose_body_allframes[startIdx:endIdx]

        if endIdx - startIdx == batch_size:
            body = bm(root_orient=root_orient,
                      pose_body=pose_body,
                      betas=betas)
        else:
            body = bm2(root_orient=root_orient,
                       pose_body=pose_body,
                       betas=betas)
        joints = (c2c(body.Jtr))[:, :JOINT_COUNT, :]

        zipped_global_positions[
            startIdx:endIdx, :, :] = joints + trans_allframes[startIdx:endIdx,
                                                              None, :]
        # if (fId / FRAMES * 100) % 10 == 0:
        #     print("processing smplh file.. " + str(fId / FRAMES * 100) + "%")

    zipped_global_positions = np.array(zipped_global_positions)[:, :,
                                                                [0, 2, 1]]
    # dependencies_ = c2c(bm.kintree_table[0])

    # rots = get_rot_matrices_from_rodrigues(np.reshape(bdata['poses'], (-1, 3)))
    # rots = np.reshape(rots, (-1, dependencies_.shape[0], 3, 3))
    # default_pose = get_default_pose(bm.v_template, betas, bm.shapedirs, bm.J_regressor)
    return FRAMES, FRAME_TIME, joint_names, dependencies, zipped_global_positions  #, dependencies_, rots, default_pose, trans_allframes
Example #21
input_root = Path(args.input_root)
output_root = Path(args.input_root) / args.identifier
output_root.mkdir(parents=True, exist_ok=True)
assert input_root.exists()

id_path_list = []
cnt = 0

model_types = ['male', 'female', 'neutral']

bm_dict, faces_dict = {}, {}
for m in model_types:
    bm_path = str(input_root / 'MANO/smplh' / m / 'model.npz')
    dmpl_path = str(input_root / 'DMPL' / m / 'model.npz')
    bm_dict[m] = BodyModel(bm_path=bm_path,
                           num_betas=NUM_BETAS,
                           num_dmpls=NUM_DMPLS,
                           path_dmpl=dmpl_path).to(comp_device)
    faces_dict[m] = c2c(bm_dict[m].f)


def process(npz_bdata_path: Path):
    global cnt

    print(cnt, npz_bdata_path)
    bdata = np.load(str(npz_bdata_path))
    # print('Data keys available:%s' % list(bdata.keys()))
    # beta means shape
    # num of elements: 156 (pose), 8 (dmpl), 16 (beta)
    # print('Vector poses has %d elements for each of %d frames.' %
    #       (bdata['poses'].shape[1], bdata['poses'].shape[0]))
    # print('Vector dmpls has %d elements for each of %d frames.' %
Example #22
def SMPLD_register(args):
    cfg = config.load_config(args.config, 'configs/default.yaml')
    out_dir = cfg['training']['out_dir']
    generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
    is_cuda = (torch.cuda.is_available() and not args.no_cuda)
    device = torch.device("cuda" if is_cuda else "cpu")

    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        logger, _ = create_logger(generation_dir, phase='reg_subject{}_sequence{}'.format(args.subject_idx, args.sequence_idx), create_tf_logs=False)
    else:
        logger, _ = create_logger(generation_dir, phase='reg_all', create_tf_logs=False)

    # Get dataset
    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        dataset = config.get_dataset('test', cfg, sequence_idx=args.sequence_idx, subject_idx=args.subject_idx)
    else:
        dataset = config.get_dataset('test', cfg)

    batch_size = cfg['generation']['batch_size']

    # Loader
    test_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, num_workers=1, shuffle=False)

    model_counter = defaultdict(int)

    # Set optimization hyperparameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    inner_dists = []
    outer_dists = []

    for it, data in enumerate(tqdm(test_loader)):
        idxs = data['idx'].cpu().numpy()
        loc = data['points.loc'].cpu().numpy()
        batch_size = idxs.shape[0]
        # Directories to load corresponding informations
        mesh_dir = os.path.join(generation_dir, 'meshes')   # directory for posed and (optionally) unposed implicit outer/inner meshes
        label_dir = os.path.join(generation_dir, 'labels')   # directory for part labels
        register_dir = os.path.join(generation_dir, 'registrations')   # directory for registered meshes

        if args.use_raw_scan:
            scan_dir = dataset.dataset_folder   # this is the folder that contains CAPE raw scans
        else:
            scan_dir = None

        all_posed_minimal_meshes = []
        all_posed_cloth_meshes = []
        all_posed_vertices = []
        all_unposed_vertices = []
        scan_part_labels = []

        for idx in idxs:
            model_dict = dataset.get_model_dict(idx)

            subset = model_dict['subset']
            subject = model_dict['subject']
            sequence = model_dict['sequence']
            gender = model_dict['gender']
            filebase = os.path.basename(model_dict['data_path'])[:-4]

            folder_name = os.path.join(subset, subject, sequence)
            # TODO: we assume batch size stays the same if one resumes the job
            # can be more flexible to support different batch sizes before and
            # after resume
            register_file = os.path.join(register_dir, folder_name, filebase + 'minimal.registered.ply')
            if os.path.exists(register_file):
                # batch already computed, break
                break

            # points_dict = np.load(model_dict['data_path'])
            # gender = str(points_dict['gender'])

            mesh_dir_ = os.path.join(mesh_dir, folder_name)
            label_dir_ = os.path.join(label_dir, folder_name)

            if scan_dir is not None:
                scan_dir_ = os.path.join(scan_dir, subject, sequence)

            # Load part labels and vertex translations
            label_file_name = filebase + '.minimal.npz'
            label_dict = dict(np.load(os.path.join(label_dir_, label_file_name)))
            labels = torch.tensor(label_dict['part_labels'].astype(np.int64)).to(device)   # part labels for each vertex (14 or 24)
            scan_part_labels.append(labels)

            # Load minimal implicit surfaces
            mesh_file_name = filebase + '.minimal.posed.ply'
            # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
            posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
            posed_vertices = np.array(posed_mesh.vertices)
            all_posed_vertices.append(posed_vertices)

            posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                    torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
            all_posed_minimal_meshes.append(posed_mesh)

            mesh_file_name = filebase + '.minimal.unposed.ply'
            if os.path.exists(os.path.join(mesh_dir_, mesh_file_name)) and args.init_pose:
                # unposed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                unposed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
                unposed_vertices = np.array(unposed_mesh.vertices)
                all_unposed_vertices.append(unposed_vertices)

            if args.use_raw_scan:
                # Load raw scans
                mesh_file_name = filebase + '.ply'
                # posed_mesh = Mesh(filename=os.path.join(scan_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(scan_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32') / 1000, requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)
            else:
                # Load clothed implicit surfaces
                mesh_file_name = filebase + '.cloth.posed.ply'
                # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)

        if args.num_joints == 24:
            bm = BodyModel(bm_path='body_models/smpl/male/model.pkl', num_betas=10, batch_size=batch_size).to(device)
            parents = bm.kintree_table[0].detach().cpu().numpy()
            labels = bm.weights.argmax(1)
            # Convert 24 parts to 14 parts
            smpl2ipnet = torch.from_numpy(SMPL2IPNET_IDX).to(device)
            labels = smpl2ipnet[labels].clone().unsqueeze(0)
            del bm
        elif args.num_joints == 14:
            with open('body_models/misc/smpl_parts_dense.pkl', 'rb') as f:
                part_labels = pkl.load(f)

            labels = np.zeros((6890,), dtype=np.int64)
            for n, k in enumerate(part_labels):
                labels[part_labels[k]] = n
            labels = torch.tensor(labels).to(device).unsqueeze(0)
        else:
            raise ValueError('Got {} joints but number of joints can only be either 14 or 24'.format(args.num_joints))

        th_faces = torch.tensor(smpl_faces.astype('float32'), dtype=torch.long).to(device)

        # We assume loaded meshes are properly scaled and offset to the original SMPL space.
        if len(all_posed_minimal_meshes) > 0 and len(all_unposed_vertices) == 0:
            # IPNet optimization without vertex translation
            # raise NotImplementedError('Optimization for IPNet is not implemented yet.')
            if args.num_joints == 24:
                for idx in range(len(scan_part_labels)):
                    scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            prior = get_prior(gender=gender, precomputed=True)
            pose_init = torch.zeros((batch_size, 72))
            pose_init[:, 3:] = prior.mean
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, pose with mean smpl pose, as in ch.registration
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        elif len(all_posed_minimal_meshes) > 0:
            # NASA+PTFs optimization with vertex translations
            # Compute poses from implicit surfaces and correspondences
            # TODO: we could also compute bone-lengths if we train PTFs to predict A-pose with a global translation
            # that equals to the centroid of the pointcloud
            poses = compute_poses(all_posed_vertices, all_unposed_vertices, scan_part_labels, parents, args)
            # Convert 24 parts to 14 parts
            for idx in range(len(scan_part_labels)):
                scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            pose_init = torch.from_numpy(poses).float()
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, pose with mean smpl pose, as in ch.registration
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        else:
            inner_vertices = outer_vertices = None

        if args.use_raw_scan:
            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
        else:
            # Evaluate registered mesh
            gt_smpl_mesh = data['points.minimal_smpl_vertices'].to(device)
            gt_smpld_mesh = data['points.smpl_vertices'].to(device)
            if inner_vertices is None:
                # if vertices are None, we assume they already exist due to previous runs
                inner_vertices = []
                outer_vertices = []
                for i, idx in enumerate(idxs):

                    model_dict = dataset.get_model_dict(idx)

                    subset = model_dict['subset']
                    subject = model_dict['subject']
                    sequence = model_dict['sequence']
                    filebase = os.path.basename(model_dict['data_path'])[:-4]

                    folder_name = os.path.join(subset, subject, sequence)
                    register_dir_ = os.path.join(register_dir, folder_name)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'minimal.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'minimal.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    inner_vertices.append(registered_v)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'cloth.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    outer_vertices.append(registered_v)

                inner_vertices = torch.stack(inner_vertices, dim=0)
                outer_vertices = torch.stack(outer_vertices, dim=0)

            inner_dist = torch.norm(gt_smpl_mesh - inner_vertices, dim=2).mean(-1)
            outer_dist = torch.norm(gt_smpld_mesh - outer_vertices, dim=2).mean(-1)

            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                logger.info('Inner distance for input {}: {} cm'.format(filebase, inner_dist[i].item()))
                logger.info('Outer distance for input {}: {} cm'.format(filebase, outer_dist[i].item()))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))

            inner_dists.extend(inner_dist.detach().cpu().numpy())
            outer_dists.extend(outer_dist.detach().cpu().numpy())

    logger.info('Mean inner distance: {} cm'.format(np.mean(inner_dists)))
    logger.info('Mean outer distance: {} cm'.format(np.mean(outer_dists)))
Example #23
print('Data keys available:%s' % list(bdata.keys()))

print('The subject of the mocap sequence is {}.'.format(subject_gender))

from human_body_prior.body_model.body_model import BodyModel

bm_fname = osp.join(support_dir,
                    'body_models/smplh/{}/model.npz'.format(subject_gender))
dmpl_fname = osp.join(support_dir,
                      'body_models/dmpls/{}/model.npz'.format(subject_gender))

num_betas = 16  # number of body parameters
num_dmpls = 8  # number of DMPL parameters

bm = BodyModel(bm_fname=bm_fname,
               num_betas=num_betas,
               num_dmpls=num_dmpls,
               dmpl_fname=dmpl_fname).to(comp_device)
faces = c2c(bm.f)

time_length = len(bdata['trans'])

body_parms = {
    'root_orient':
    torch.Tensor(bdata['poses'][:, :3]).to(
        comp_device),  # controls the global root orientation
    'pose_body':
    torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device),  # controls the body
    'pose_hand':
    torch.Tensor(bdata['poses'][:, 66:]).to(
        comp_device),  # controls the finger articulation
    'trans':
Example #24
def cape_extract(args):
    cape_subjects = args.subjects.split(',')

    if not os.path.exists(args.points_folder):
        os.makedirs(args.points_folder)

    logger, _ = create_logger(args.points_folder)

    faces = np.load(
        os.path.join(args.dataset_path, 'cape_release/misc/smpl_tris.npy'))
    with open(
            os.path.join(args.dataset_path,
                         'cape_release/misc/subj_genders.pkl'), 'rb') as f:
        genders = pkl.load(f)

    for subject in cape_subjects:
        gender = genders[subject]
        subject_dir = os.path.join(args.dataset_path, subject)

        minimal_shape_path = os.path.join(args.dataset_path, 'cape_release',
                                          'minimal_body_shape', subject,
                                          subject + '_minimal.npy')
        minimal_shape = np.load(minimal_shape_path)

        bm_path = os.path.join(args.bm_path, gender, 'model.pkl')
        # A-pose joint locations are determined by minimal body shape only
        bm = BodyModel(bm_path=bm_path,
                       num_betas=10,
                       batch_size=1,
                       v_template=minimal_shape).cuda()

        sequences = sorted(glob.glob(os.path.join(subject_dir, '*')))
        sequences = [os.path.basename(sequence) for sequence in sequences]

        # J_regressor = bm.J_regressor.detach().cpu().numpy()
        # Jtr_cano = np.dot(J_regressor, minimal_shape)
        # Jtr_cano = Jtr_cano[IPNET2SMPL_IDX, :]

        for sequence in sequences:
            sequence_dir = os.path.join(subject_dir, sequence)
            frames = sorted(glob.glob(os.path.join(sequence_dir, '*.npz')))
            frames = [os.path.basename(frame) for frame in frames]

            if not os.path.exists(
                    os.path.join(args.points_folder, subject, sequence)):
                os.makedirs(os.path.join(args.points_folder, subject,
                                         sequence))

            for f_idx in range(0, len(frames), args.sampling_rate):
                frame = frames[f_idx]

                frame_path = os.path.join(sequence_dir, frame)
                frame_name = frame[:-4]
                frame_name = os.path.join(
                    subject, sequence, sequence + '.{:06d}'.format(f_idx + 1))

                filename = os.path.join(args.points_folder,
                                        frame_name + '.npz')

                if not args.overwrite and os.path.exists(filename):
                    print('Points already exist: %s' % filename)
                    continue

                try:
                    data = np.load(frame_path)
                except Exception:
                    logger.warning(
                        'Something wrong with {}'.format(frame_path))
                    continue

                pose_body = torch.Tensor(data['pose'][3:66]).view(1, -1).cuda()
                pose_hand = torch.Tensor(data['pose'][66:72]).view(1, -1).cuda()
                root_orient = torch.Tensor(data['pose'][:3]).view(1, -1).cuda()
                trans = torch.Tensor(data['transl']).view(1, -1).cuda()
                v_cano = torch.Tensor(data['v_cano']).view(1, 6890, 3).cuda()

                with torch.no_grad():
                    body = bm(root_orient=root_orient,
                              pose_body=pose_body,
                              pose_hand=pose_hand,
                              trans=trans,
                              clothed_v_template=v_cano)

                    bone_transforms = body.bone_transforms.detach().cpu().numpy()
                    abs_bone_transforms = body.abs_bone_transforms.detach().cpu().numpy()

                    pose_body = pose_body.detach().cpu().numpy()
                    pose_hand = pose_hand.detach().cpu().numpy()
                    Jtr = body.Jtr.detach().cpu().numpy()
                    v_cano = body.v_a_pose.detach().cpu().numpy()
                    v_posed = body.v.detach().cpu().numpy()
                    trans = trans.detach().cpu().numpy()
                    root_orient = root_orient.detach().cpu().numpy()

                process_single_file(v_posed[0], v_cano[0], Jtr[0],
                                    root_orient[0], pose_body[0], pose_hand[0],
                                    bone_transforms[0], abs_bone_transforms[0],
                                    trans[0], frame_name, gender, faces, args)

        del bm