def view(opt_objs, body_v, virtual_markers, opt_it):
        if verbosity <= 0: return
        opt_objs_cpu = {k: c2c(v) for k, v in opt_objs.items()}

        total_loss = np.sum([np.sum(v) for v in opt_objs_cpu.values()])
        message = 'it {} -- [total loss = {:.2e}] - {}'.format(
            opt_it, total_loss,
            ' | '.join(['%s = %2.2e' % (k, np.sum(v)) for k, v in opt_objs_cpu.items()]))
        logger(message)
        if verbosity > 1:
            bs = body_v.shape[0]
            np.random.seed(100)
            frame_ids = list(range(bs)) if bs <= len(mvs) else np.random.choice(bs, size=len(mvs), replace=False).tolist()
            if bs > len(mvs): message += ' -- [frame_ids: {}]'.format(frame_ids)
            for dispId, fId in enumerate(frame_ids):  # show every frame if it fits into the available viewers, otherwise a random subset
                new_body_v = rotateXYZ(body_v[fId], [-90,0,0])

                orig_mrk_mesh = points_to_spheres(rotateXYZ(c2c(points[fId]), [-90,0,0]), radius=0.01, color=kpts_colors)
                virtual_markers_mesh = points_to_cubes(rotateXYZ(virtual_markers[fId], [-90,0,0]), radius=0.01, color=kpts_colors)
                new_body_mesh = Mesh(new_body_v, bm_f, vc=colors['grey'])

                # linev = rotateXYZ(np.hstack((c2c(points[fId]), virtual_markers[fId])).reshape((-1, 3)), [-90,0,0])
                # linee = np.arange(len(linev)).reshape((-1, 2))
                # ll = Lines(v=linev, e=linee)
                # ll.vc = (ll.v * 0. + 1) * np.array([0.00, 0.00, 1.00])
                # mvs[dispId].set_dynamic_lines([ll])

                # orig_mrk_mesh = points_to_spheres(data_pc, radius=0.01, vc=colors['blue'])
                mvs[dispId].set_dynamic_meshes([orig_mrk_mesh, virtual_markers_mesh])
                mvs[dispId].set_static_meshes([new_body_mesh])

            mvs[0].set_titlebar(message)
    def fit(weights, free_vars):

        fit.gstep += 1
        optimizer.zero_grad()

        free_vars['pose_body'] = vp_model.decode(free_vars['poZ_body'])['pose_body'].contiguous().view(-1, 63)
        nonan_mask = torch.isnan(free_vars['poZ_body']).sum(-1) == 0

        opt_objs = {}

        res = source_kpts_model(free_vars)

        opt_objs['data'] = data_loss(res['source_kpts'], static_vars['target_kpts'])

        opt_objs['betas'] = torch.pow(free_vars['betas'][nonan_mask], 2).sum()
        opt_objs['poZ_body'] = torch.pow(free_vars['poZ_body'][nonan_mask], 2).sum()


        opt_objs = {k: opt_objs[k] * v for k, v in weights.items() if k in opt_objs}
        loss_total = torch.sum(torch.stack(list(opt_objs.values())))

        loss_total.backward()

        if on_step is not None:
            on_step(opt_objs, c2c(res['body'].v), c2c(res['source_kpts']), fit.gstep)

        fit.free_vars = {k: v for k, v in free_vars.items()}  # if k in IK_Engine.fields_to_optimize
        # fit.nonan_mask = nonan_mask
        fit.final_loss = loss_total

        return loss_total
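    # A minimal sketch (an assumption, not part of the original engine) of how
    # a closure like `fit` is typically driven: torch.optim.LBFGS re-evaluates
    # the closure several times per step, which is why `fit` zeroes gradients
    # and returns the loss. `optimizer`, `weights` and `free_vars` are assumed
    # to be prepared by the enclosing IK engine.
    def run_fitting(optimizer, weights, free_vars, num_steps=10):
        fit.gstep = 0
        for _ in range(num_steps):
            optimizer.step(lambda: fit(weights, free_vars))
        return fit.free_vars, fit.final_loss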
Example #3
    def load_mesh(self, data_dict):

        gender_type = "male" if data_dict["gender"] == -1 else "female"

        with torch.no_grad():
            bm = BodyModel(bm_path=self.bm_path % gender_type, num_betas=self.num_betas, batch_size=1)
            body = bm.forward(pose_body=data_dict['pose_body'].unsqueeze(0), 
                            betas=data_dict['betas'].unsqueeze(0))

        mesh_ori = trimesh.Trimesh(vertices=c2c(body.v)[0], faces=c2c(body.f))

        # move the mesh to the original
        joints = c2c(body.Jtr)[0]
        root_xyz = joints[0]
        mesh_ori.vertices -= root_xyz

        verts = mesh_ori.vertices
        vert_normals = mesh_ori.vertex_normals
        face_normals = mesh_ori.face_normals
        faces = mesh_ori.faces

        mesh = HoppeMesh(
            verts=verts, 
            faces=faces, 
            vert_normals=vert_normals, 
            face_normals=face_normals)

        return {'mesh': mesh, 
                'A': body.A[0,:self.num_poses+1],
                'weights': body.weights}
def smpl_params2ply(bm, out_dir, pose_body, pose_hand=None, trans=None, betas=None, root_orient=None):
    '''
    :param bm: pytorch body model with batch_size 1
    :param pose_body: a single pose parameter vector, or a list of per-frame pose parameter vectors
    :param trans: Nx3
    :param betas: Nxnum_betas
    :return: dumps one posed body mesh per frame into out_dir as PLY files
    '''

    faces = c2c(bm.f)

    makepath(out_dir)

    for fIdx in range(0, len(pose_body)):

        bm.pose_body.data[0,:] = bm.pose_body.new(pose_body[fIdx].reshape(1,-1))
        if pose_hand is not None: bm.pose_hand.data[0,:] = bm.pose_hand.new(pose_hand[fIdx].reshape(1,-1))
        if trans is not None: bm.trans.data[0,:] = bm.trans.new(trans[fIdx].reshape(1,-1))
        if betas is not None: bm.betas.data[0,:len(betas[fIdx])] = bm.betas.new(betas[fIdx])
        if root_orient is not None: bm.root_orient.data[0,:] = bm.root_orient.new(root_orient[fIdx])

        v = c2c(bm.forward().v)[0]

        mesh = trimesh.base.Trimesh(v, faces)
        mesh.export(os.path.join(out_dir, '%03d.ply' % fIdx))
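# A minimal usage sketch for smpl_params2ply; the model path is a placeholder
# and a batch-size-1 BodyModel with registered pose_body/trans/betas parameters
# (as used above) is assumed.
from human_body_prior.body_model.body_model import BodyModel
import numpy as np

bm = BodyModel(bm_path='path/to/smplh/male/model.npz', batch_size=1)
pose_body = np.zeros((5, 63))  # five frames of the rest pose, 21 joints x 3
smpl_params2ply(bm, out_dir='./ply_dump', pose_body=pose_body)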
Example #5
def render_smpl_params(bm, body_parms):
    '''
    :param bm: pytorch body model with batch_size 1
    :param body_parms: dictionary of body parameters, e.g. pose_body (Nx21x3), trans (Nx3), betas (Nxnum_betas)
    :return: N x 400 x 400 x 3 rendered images
    '''

    from human_body_prior.tools.omni_tools import copy2cpu as c2c
    from body_visualizer.mesh.mesh_viewer import MeshViewer

    imw, imh = 400, 400

    mv = MeshViewer(width=imw, height=imh, use_offscreen=True)

    faces = c2c(bm.f)

    v = c2c(bm(**body_parms).v)

    T, num_verts = v.shape[:-1]

    images = []
    for fIdx in range(T):

        mesh = trimesh.base.Trimesh(v[fIdx],
                                    faces,
                                    vertex_colors=num_verts * colors['grey'])
        mv.set_meshes([mesh], 'static')

        images.append(mv.render())

    return np.array(images).reshape(T, imw, imh, 3)
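# A minimal usage sketch; the model path is a placeholder and the body model is
# assumed to accept batched parameters as in bm(**body_parms) above.
import torch
from human_body_prior.body_model.body_model import BodyModel

num_frames = 4
bm = BodyModel(bm_path='path/to/smplh/neutral/model.npz', num_betas=10)
body_parms = {'pose_body': torch.zeros(num_frames, 63),
              'betas': torch.zeros(num_frames, 10)}
images = render_smpl_params(bm, body_parms)  # shape (4, 400, 400, 3)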
Example #6
def amass_fk(npz_data_path, bm_path):
    if torch.cuda.is_available():
        comp_device = torch.device("cuda")
    else:
        comp_device = torch.device("cpu")
    bm = BodyModel(bm_path=bm_path, batch_size=1, num_betas=10).to(
        comp_device
    )
    bdata = np.load(npz_data_path)
    root_orient = torch.Tensor(bdata["poses"][:, :3]).to(comp_device)
    pose_body = torch.Tensor(bdata["poses"][:, 3:66]).to(comp_device)
    pose_hand = torch.Tensor(bdata["poses"][:, 66:]).to(comp_device)
    betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to(comp_device)
    trans = np.expand_dims(bdata["trans"], 1)  # (num_frames, 1, 3)
    num_frames = bdata["poses"].shape[0]
    joints = np.zeros((num_frames, 52, 3))
    verts = np.zeros((num_frames, 6890, 3))
    for fId in tqdm(range(num_frames)):
        body = bm(
            pose_body=pose_body[fId : fId + 1],
            pose_hand=pose_hand[fId : fId + 1],
            betas=betas,
            root_orient=root_orient[fId : fId + 1],
        )
        # pair each frame's pose with its own root translation
        joints[fId] = c2c(body.Jtr[0]) + trans[fId]
        verts[fId] = c2c(body.v[0]) + trans[fId]
    return joints, verts
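# A minimal usage sketch for amass_fk; both paths are placeholders for a
# downloaded AMASS sequence npz and a SMPL-H model npz.
joints, verts = amass_fk('path/to/amass_sequence.npz', 'path/to/smplh/male/model.npz')
print(joints.shape, verts.shape)  # (num_frames, 52, 3), (num_frames, 6890, 3)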
def render_smpl_params(bm, pose_body, pose_hand=None, trans=None, betas=None, root_orient=None):
    '''
    :param bm: pytorch body model with batch_size 1
    :param pose_body: Nx21x3
    :param trans: Nx3
    :param betas: Nxnum_betas
    :return: N x 400 x 400 x 3
    '''

    from human_body_prior.tools.omni_tools import copy2cpu as c2c
    from human_body_prior.tools.omni_tools import colors
    from human_body_prior.mesh.mesh_viewer import MeshViewer
    faces = c2c(bm.f)

    imw, imh = 400, 400

    mv = MeshViewer(width=imw, height=imh, use_offscreen=True)

    images = []
    for fIdx in range(0, len(pose_body)):

        bm.pose_body.data[0,:] = bm.pose_body.new(pose_body[fIdx].reshape(1,-1))
        if pose_hand is not None: bm.pose_hand.data[0,:] = bm.pose_hand.new(pose_hand[fIdx])
        if trans is not None: bm.trans.data[0,:] = bm.trans.new(trans[fIdx])
        if betas is not None: bm.betas.data[0,:len(betas[fIdx])] = bm.betas.new(betas[fIdx])
        if root_orient is not None: bm.root_orient.data[0,:] = bm.root_orient.new(root_orient[fIdx])

        v = c2c(bm.forward().v)[0]

        mesh = trimesh.base.Trimesh(v, faces, vertex_colors=np.ones_like(v)*colors['grey'])
        mv.set_meshes([mesh], 'static')

        images.append(mv.render())

    return np.array(images).reshape(len(pose_body), imw, imh, 3)
    def test_matrot2aa(self):
        np.random.seed(100)
        aa = np.random.randn(10, 3)
        matrot = c2c(VPoser.aa2matrot(torch.tensor(aa))).reshape(-1, 9)

        cv2_aa = []
        for id in range(matrot.shape[0]):
            cv2_aa.append(cv2.Rodrigues(matrot[id].reshape(3, 3))[0])
        cv2_aa = np.array(cv2_aa).reshape(-1, 3)

        vposer_aa = c2c(VPoser.matrot2aa(torch.tensor(matrot))).reshape(-1, 3)
        self.assertAlmostEqual(np.square((vposer_aa - cv2_aa)).sum(), 0.0)
Example #9
def dump_vposer_samples(bm, pose_body, out_imgpath=False, save_ply=False):
    '''
    :param bm: the BodyModel instance
    :param pose_body: Nx63 body pose parameters to pose the body with
    :param out_imgpath: path of the output png
    :param save_ply: if True, also dump each pose as a ply file
    :return: rendered images of shape (num_view_angles, N, 1, 400, 400, 3)
    '''

    view_angles = [0, 90, -90]
    imw, imh = 400, 400
    mv = MeshViewer(width=imw, height=imh, use_offscreen=True)

    images = np.zeros([len(view_angles), len(pose_body), 1, imw, imh, 3])
    for cId in range(0, len(pose_body)):

        bm.pose_body.data[:] = bm.pose_body.new(pose_body[cId].reshape(-1))

        body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]),
                                    faces=c2c(bm.f),
                                    vertex_colors=np.tile(
                                        colors['grey'], (6890, 1)))

        for rId, angle in enumerate(view_angles):
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(angle), (0, 1, 0)))
            mv.set_meshes([body_mesh], group_name='static')
            images[rId, cId, 0] = mv.render()
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(-angle), (0, 1, 0)))

    if out_imgpath:
        imagearray2file(images, out_imgpath)

        np.savez(out_imgpath.replace('.png', '.npz'), pose=pose_body)

        if save_ply:
            im_id = os.path.basename(out_imgpath).split('.')[0]
            out_dir = makepath(
                os.path.join(os.path.dirname(out_imgpath), '%s_ply' % im_id))
            smpl_params2ply(bm, out_dir=out_dir, pose_body=pose_body)

        print('Saved image: %s' % out_imgpath)

    return images
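# A minimal usage sketch; `bm` is assumed to be an already-loaded batch-size-1
# BodyModel, and the pose samples here are synthetic stand-ins for e.g. VPoser
# samples.
import numpy as np
pose_body = np.random.randn(3, 63) * 0.2  # three mildly perturbed poses
images = dump_vposer_samples(bm, pose_body, out_imgpath='./vposer_samples.png')
print(images.shape)  # (3 view angles, 3 poses, 1, 400, 400, 3)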
    def validation_step(self, batch, batch_idx):

        drec = self(batch['pose_body'].view(-1, 63))

        loss = self._compute_loss(batch, drec)
        val_loss = loss['unweighted_loss']['loss_total']

        if self.renderer is not None and self.global_rank == 0 and batch_idx % 500 == 0 and np.random.rand() > 0.5:
            out_fname = makepath(
                self.work_dir,
                'renders/vald_rec_E{:03d}_It{:04d}_val_loss_{:.2f}.png'.format(
                    self.current_epoch, batch_idx, val_loss.item()),
                isfile=True)
            self.renderer([batch, drec], out_fname=out_fname)
            dgen = self.vp_model.sample_poses(
                self.vp_ps.logging.num_bodies_to_display)
            out_fname = makepath(self.work_dir,
                                 'renders/vald_gen_E{:03d}_I{:04d}.png'.format(
                                     self.current_epoch, batch_idx),
                                 isfile=True)
            self.renderer([dgen], out_fname=out_fname)

        progress_bar = {'v2v': val_loss}
        return {
            'val_loss': c2c(val_loss),
            'progress_bar': progress_bar,
            'log': progress_bar
        }
def registration2markers(registration_dir, out_marker_dir):
    np.random.seed(100)
    m2b_distance = 0.0095

    genders = {
        '50002': 'male',
        '50004': 'female',
        '50007': 'male',
        '50009': 'male',
        '50020': 'female',
        '50021': 'female',
        '50022': 'female',
        '50025': 'female',
        '50026': 'male',
        '50027': 'male'
    }

    with open('./ssm_all_marker_placements.json') as f:
        all_marker_placements = json.load(f)
    all_mrks_keys = list(all_marker_placements.keys())

    for dfaust_subject in genders.keys():
        subject_reg_pkls = glob.glob(
            os.path.join(registration_dir, dfaust_subject, '*.pkl'))

        chosen_k = all_mrks_keys[np.random.choice(len(all_marker_placements))]
        chosen_marker_set = all_marker_placements[chosen_k]
        print('chose %s markerset for dfaust subject %s' %
              (chosen_k, dfaust_subject))
        for reg_pkl in subject_reg_pkls:
            with open(reg_pkl, 'rb') as f:
                data = pickle.load(f, encoding='latin-1')

            marker_data = np.zeros([len(data['v']), len(chosen_marker_set), 3])

            cur_m2b_distance = m2b_distance + abs(
                np.random.normal(0, m2b_distance / 3., size=[3]))  # noise in 3D

            for fIdx in range(0, len(data['v'])):
                vertices = rotate_mesh(data['v'][fIdx].copy(), 90)
                vn = c2c(
                    compute_vertex_normal(torch.Tensor(vertices),
                                          torch.Tensor(data['f'])))

                for mrk_id, vid in enumerate(chosen_marker_set.values()):
                    marker_data[fIdx, mrk_id] = vertices[vid] + cur_m2b_distance * vn[vid]

            outpath = makepath(os.path.join(out_marker_dir, dfaust_subject,
                                            os.path.basename(reg_pkl)),
                               isfile=True)
            np.savez(
                outpath, **{
                    'markers': marker_data,
                    'labels': list(chosen_marker_set.keys()),
                    'frame_rate': 60,
                    'gender': genders[dfaust_subject]
                })
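# A minimal usage sketch; both directories are placeholders. The function
# expects DFaust registration pickles under <registration_dir>/<subject_id>/*.pkl
# plus ./ssm_all_marker_placements.json, and writes one npz of synthetic markers
# per sequence under out_marker_dir.
registration2markers('/path/to/dfaust_registrations', '/path/to/synthetic_markers')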
Example #12
    def train(self):
        self.vposer_model.train()
        save_every_it = len(self.ds_train) // self.ps.log_every_epoch
        train_loss_dict = {}
        for it, dorig in enumerate(self.ds_train):
            dorig = {k: dorig[k].to(self.comp_device) for k in dorig.keys()}

            self.optimizer.zero_grad()
            drec = self.vposer_model(dorig['pose_aa'], output_type='aa')
            loss_total, cur_loss_dict = self.compute_loss(dorig, drec)
            loss_total.backward()
            self.optimizer.step()

            train_loss_dict = {k: train_loss_dict.get(k, 0.0) + v.item() for k, v in cur_loss_dict.items()}
            if it % (save_every_it + 1) == 0:
                cur_train_loss_dict = {k: v / (it + 1) for k, v in train_loss_dict.items()}
                train_msg = VPoserTrainer.creat_loss_message(cur_train_loss_dict, expr_code=self.ps.expr_code,
                                                             epoch_num=self.epochs_completed, it=it,
                                                             try_num=self.try_num, mode='train')

                self.logger(train_msg)
                self.swriter.add_histogram('q_z_sample', c2c(drec['mean']), it)

        train_loss_dict = {k: v / len(self.ds_train) for k, v in train_loss_dict.items()}
        return train_loss_dict
def get_default_pose(v_template, betas, shapedirs, J_regressor):
    from smplx.lbs import vertices2joints, blend_shapes
    v_shaped = v_template + blend_shapes(betas, shapedirs)

    # Get the joints
    # NxJx3 array
    J = vertices2joints(J_regressor, v_shaped)
    return c2c(J[0])
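# A minimal usage sketch: compute the shaped rest-pose joints of a loaded body
# model `bm` (assumed to expose v_template, shapedirs and J_regressor in smplx
# layout).
import torch
betas = torch.zeros(1, 10)
rest_joints = get_default_pose(bm.v_template, betas, bm.shapedirs, bm.J_regressor)
print(rest_joints.shape)  # (num_joints, 3)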
Example #14
    def test_samples(self):
        '''Given the same network weights, the random pose generator must produce the same pose for a given seed.'''
        ps = Configer(default_ps_fname='../human_body_prior/train/vposer_smpl_defaults.ini')
        vposer = VPoser(num_neurons=ps.num_neurons, latentD=ps.latentD, data_shape = ps.data_shape)
        body_pose_rnd = vposer.sample_poses(num_poses=1, seed=100)

        body_pose_gt = np.load('samples/body_pose_rnd.npz')['data']
        self.assertAlmostEqual(np.square((c2c(body_pose_rnd) - body_pose_gt)).sum(), 0.0)
    def test_aa2matrot(self):
        aa = np.random.randn(10, 3)
        cv2_matrot = []
        for id in range(aa.shape[0]):
            cv2_matrot.append(cv2.Rodrigues(aa[id:id + 1])[0])
        cv2_matrot = np.array(cv2_matrot).reshape(-1, 9)

        vposer_matrot = c2c(VPoser.aa2matrot(torch.tensor(aa))).reshape(-1, 9)
        self.assertAlmostEqual(
            np.square((vposer_matrot - cv2_matrot)).sum(), 0.0)
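    def test_roundtrip_sketch(self):
        # A round-trip sketch tying the two tests above together (an
        # illustrative assumption, not from the original suite): axis-angle ->
        # matrot -> axis-angle reproduces the input while rotation angles stay
        # below pi, hence the 0.5 scale.
        aa = np.random.randn(4, 3) * 0.5
        matrot = c2c(VPoser.aa2matrot(torch.tensor(aa))).reshape(-1, 9)
        aa_back = c2c(VPoser.matrot2aa(torch.tensor(matrot))).reshape(-1, 3)
        self.assertTrue(np.allclose(aa_back, aa, atol=1e-4))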
Example #16
def extract_weights_asnumpy(exp_id, vp_model=False):
    from human_body_prior.tools.omni_tools import makepath
    from human_body_prior.tools.omni_tools import copy2cpu as c2c

    vposer_pt, vposer_ps = load_vposer(exp_id, vp_model=vp_model)

    save_wt_dir = makepath(os.path.join(vposer_ps.work_dir, 'weights_npy'))

    weights = {}
    for var_name, var in vposer_pt.named_parameters():
        weights[var_name] = c2c(var)
    np.savez(os.path.join(save_wt_dir, 'vposerWeights.npz'), **weights)

    print('Dumped weights as numpy arrays to %s' % save_wt_dir)
    return vposer_ps, weights
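# A minimal usage sketch; the experiment directory is a placeholder pointing at
# a trained VPoser snapshot.
vposer_ps, weights = extract_weights_asnumpy('/path/to/vposer_expr_dir')
print(sorted(weights.keys())[:3])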
    def training_step(self, batch, batch_idx, optimizer_idx=None):

        drec = self(batch['pose_body'].view(-1, 63))

        loss = self._compute_loss(batch, drec)

        train_loss = loss['weighted_loss']['loss_total']

        tensorboard_logs = {'train_loss': train_loss}
        progress_bar = {k: c2c(v) for k, v in loss['weighted_loss'].items()}
        return {
            'loss': train_loss,
            'progress_bar': progress_bar,
            'log': tensorboard_logs
        }
Example #18
    def vis_results(dorig, vposer_model, bm, imgpath):
        from human_body_prior.mesh import MeshViewer
        from human_body_prior.tools.omni_tools import copy2cpu as c2c
        import trimesh
        from human_body_prior.tools.omni_tools import colors
        from human_body_prior.tools.omni_tools import apply_mesh_tranfsormations_

        from human_body_prior.tools.visualization_tools import imagearray2file
        from human_body_prior.train.vposer_smpl import VPoser

        view_angles = [0, 180, 90, -90]
        imw, imh = 800, 800
        batch_size = len(dorig['pose_aa'])

        mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
        mv.render_wireframe = True

        dorig_aa = dorig['pose_aa']

        prec_aa = vposer_model(dorig_aa, output_type='aa')['pose_aa'].view(batch_size,-1)
        if hasattr(vposer_model, 'module'):
            pgen_aa = vposer_model.module.sample_poses(num_poses=batch_size, output_type='aa')
        else:
            pgen_aa = vposer_model.sample_poses(num_poses=batch_size, output_type='aa')

        pgen_aa = pgen_aa.view(batch_size,-1)
        dorig_aa = dorig_aa.view(batch_size, -1)

        images = np.zeros([len(view_angles), batch_size, 1, imw, imh, 3])
        images_gen = np.zeros([len(view_angles), batch_size, 1, imw, imh, 3])
        for cId in range(0, batch_size):

            bm.pose_body.data[:] = bm.pose_body.new(dorig_aa[cId])
            orig_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['grey'], (6890, 1)))

            bm.pose_body.data[:] = bm.pose_body.new(prec_aa[cId])
            rec_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['blue'], (6890, 1)))

            bm.pose_body.data[:] = bm.pose_body.new(pgen_aa[cId])
            gen_body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]), faces=c2c(bm.f), vertex_colors=np.tile(colors['blue'], (6890, 1)))

            all_meshes = [orig_body_mesh, rec_body_mesh, gen_body_mesh]

            for rId, angle in enumerate(view_angles):
                if angle != 0: apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(angle), (0, 1, 0)))
                mv.set_meshes([orig_body_mesh, rec_body_mesh], group_name='static')
                images[rId, cId, 0] = mv.render()
                mv.set_meshes([gen_body_mesh], group_name='static')
                images_gen[rId, cId, 0] = mv.render()

                if angle != 0: apply_mesh_tranfsormations_(all_meshes, trimesh.transformations.rotation_matrix(np.radians(-angle), (0, 1, 0)))

        imagearray2file(images, imgpath)
        imagearray2file(images_gen, imgpath.replace('.png','_gen.png'))
    def render_once(body_parms,
                    body_colors=[colors['grey'], colors['brown-light']],
                    out_fname=None):
        '''

        :param body_parms: list of dictionaries of body parameters.
        :param body_colors: list of np arrays of rgb color values
        :param out_fname: an mp4 output path
        :return:
        '''

        if out_fname is not None: makepath(out_fname, isfile=True)
        assert len(body_parms) <= len(body_colors), \
            'Not enough colors provided for #{} body_parms'.format(len(body_parms))

        bs = body_parms[0]['pose_body'].shape[0]

        body_ids = np.random.choice(bs, num_bodies_to_display)

        body_evals = [
            c2c(
                bm(root_orient=v['root_orient'].view(bs, -1) if 'root_orient'
                   in v else torch.zeros(bs, 3).type_as(v['pose_body']),
                   pose_body=v['pose_body'].contiguous().view(bs, -1)).v)
            for v in body_parms
        ]
        num_verts = body_evals[0].shape[1]

        render_meshes = []
        for bId in body_ids:
            concat_cur_meshes = None
            for body, body_color in zip(body_evals, body_colors):
                cur_body_mesh = Mesh(body[bId],
                                     faces,
                                     vertex_colors=np.ones([num_verts, 3]) *
                                     body_color)
                concat_cur_meshes = cur_body_mesh if concat_cur_meshes is None else mesh_cat(
                    concat_cur_meshes, cur_body_mesh)
            render_meshes.append(concat_cur_meshes)

        img = renderer(render_meshes)

        if out_fname is not None: imagearray2file(img, out_fname, fps=10)

        return
Example #20
def process(npz_bdata_path: Path):
    global cnt

    print(cnt, npz_bdata_path)
    bdata = np.load(str(npz_bdata_path))
    # print('Data keys available:%s' % list(bdata.keys()))
    # beta means shape
    # num of elements: 156 (pose), 8 (dmpl), 16 (beta)
    # print('Vector poses has %d elements for each of %d frames.' %
    #       (bdata['poses'].shape[1], bdata['poses'].shape[0]))
    # print('Vector dmpls has %d elements for each of %d frames.' %
    #       (bdata['dmpls'].shape[1], bdata['dmpls'].shape[0]))
    # print('Vector trans has %d elements for each of %d frames.' %
    #       (bdata['trans'].shape[1], bdata['trans'].shape[0]))
    # print('Vector betas has %d elements constant for the whole sequence.' %
    #       bdata['betas'].shape[0])
    # print('The subject of the mocap sequence is %s.' % bdata['gender'])

    try:
        # frame id of the mocap sequence
        fId = np.random.randint(len(bdata['poses']))
        # gender = str(bdata['gender'])

        # gender may be stored as a bytes value, e.g. b'female'; "female" must
        # be checked first since "male" is a substring of it
        if "female" in str(bdata['gender']):
            gender = "female"
        elif "male" in str(bdata['gender']):
            gender = "male"
        elif "neutral" in str(bdata['gender']):
            gender = "neutral"
        else:
            print(cnt, npz_bdata_path, str(bdata['gender']))
            raise NotImplementedError
        # controls the global root orientation
        # root_orient = torch.Tensor(
        # bdata['poses'][fId:fId+1, :3]).to(comp_device)
        # controls the body
        pose_body = torch.Tensor(bdata['poses'][fId:fId + 1,
                                                3:66]).to(comp_device)
        # controls the finger articulation
        pose_hand = torch.Tensor(bdata['poses'][fId:fId + 1,
                                                66:]).to(comp_device)
        # controls the body shape
        betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(comp_device)
        # controls soft tissue dynamics
        dmpls = torch.Tensor(bdata['dmpls'][fId:fId + 1]).to(comp_device)
        # trans = torch.Tensor(bdata['trans'][fId:fId+1]).to(comp_device)
    except KeyError:
        return

    output_stl_name = output_root / f'{cnt:06}.stl'
    if not output_stl_name.exists():
        # ignore only rotation/translation because it is easy to augment
        body = bm_dict[gender](pose_body=pose_body,
                               betas=betas,
                               pose_hand=pose_hand,
                               dmpls=dmpls)
        body_mesh = trimesh.Trimesh(vertices=c2c(body.v[0]),
                                    faces=faces_dict[gender],
                                    vertex_colors=np.tile(
                                        colors['grey'], (6890, 1)))
        body_mesh.export(output_stl_name)

    # record and increment
    id_path_list.append((f'{cnt:06}', npz_bdata_path))
    cnt += 1
def prepare_vposer_datasets(amass_splits,
                            amass_dir,
                            vposer_datadir,
                            logger=None):

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(os.path.join(vposer_datadir, '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % vposer_datadir)

    stageI_outdir = os.path.join(vposer_datadir, 'stage_I')

    shutil.copy2(sys.argv[0],
                 os.path.join(vposer_datadir, os.path.basename(sys.argv[0])))

    logger('Stage I: Fetch data from AMASS npz files')

    for split_name, datasets in amass_splits.items():
        if os.path.exists(os.path.join(stageI_outdir, split_name, 'pose.pt')):
            continue
        dump_amass2pytroch(datasets,
                           amass_dir,
                           stageI_outdir,
                           split_name=split_name,
                           logger=logger)

    logger(
        'Stage II: augment data with noise and save into h5 files to be used in a cross-framework scenario.'
    )
    ## Writing to h5 files is also convenient since appending to them is possible
    from torch.utils.data import DataLoader
    import tables as pytables
    from tqdm import tqdm

    class AMASS_ROW(pytables.IsDescription):

        gender = pytables.Int16Col(1)  # gender encoded as a 16-bit int
        pose = pytables.Float32Col(52 * 3)  # single-precision float
        pose_matrot = pytables.Float32Col(52 * 9)  # single-precision float
        betas = pytables.Float32Col(16)  # single-precision float
        trans = pytables.Float32Col(3)  # single-precision float

    stageII_outdir = makepath(os.path.join(vposer_datadir, 'stage_II'))

    batch_size = 256
    max_num_epochs = 1  # number of augmentation passes over the data

    for split_name in amass_splits.keys():
        h5_outpath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if os.path.exists(h5_outpath): continue

        ds = AMASS_Augment(dataset_dir=os.path.join(stageI_outdir, split_name))
        logger('%s has %d data points!' % (split_name, len(ds)))
        dataloader = DataLoader(ds,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=32,
                                drop_last=False)
        with pytables.open_file(h5_outpath, mode="w") as h5file:
            table = h5file.create_table('/', 'data', AMASS_ROW)

            for epoch_num in range(max_num_epochs):
                for bId, bData in tqdm(enumerate(dataloader)):
                    for i in range(len(bData['trans'])):
                        for k in bData.keys():
                            table.row[k] = c2c(bData[k][i])
                        table.row.append()
                    table.flush()

    logger('Stage III: dump everything as final pt files')
    # pt files are preferred because they can be read from multiple threads
    stageIII_outdir = makepath(os.path.join(vposer_datadir, 'stage_III'))

    for split_name in amass_splits.keys():
        h5_filepath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if not os.path.exists(h5_filepath): continue

        with pytables.open_file(h5_filepath, mode="r") as h5file:
            data = h5file.get_node('/data')
            data_dict = {k: [] for k in data.colnames}
            for id in range(len(data)):
                cdata = data[id]
                for k in data_dict.keys():
                    data_dict[k].append(cdata[k])

        for k, v in data_dict.items():
            outfname = makepath(os.path.join(stageIII_outdir, split_name,
                                             '%s.pt' % k),
                                isfile=True)
            if os.path.exists(outfname): continue
            torch.save(torch.from_numpy(np.asarray(v)), outfname)

    logger('Dumped final pytorch dataset at %s' % stageIII_outdir)
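# A minimal usage sketch following the normal-mode split convention used in
# this file; dataset names and directories are placeholders.
amass_splits = {'train': ['CMU'], 'vald': ['HumanEva'], 'test': ['SSM_synced']}
prepare_vposer_datasets(amass_splits, '/path/to/amass_npz_dir', '/path/to/vposer_data_dir')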
Example #22
                        pose_handL = self.poser_handL_pt.decode(
                            self.poZ_handL,
                            output_type='aa').view(self.batch_size, -1)
                        pose_handR = self.poser_handR_pt.decode(
                            self.poZ_handR,
                            output_type='aa').view(self.batch_size, -1)
                        self.pose_hand.data[:] = torch.cat(
                            [pose_handL, pose_handR], dim=1)


if __name__ == '__main__':
    import trimesh
    from human_body_prior.tools.omni_tools import copy2cpu as c2c

    bm_path = '/ps/project/common/moshpp/smpl/locked_head/female/model.npz'

    smpl_exp_dir = '/ps/project/common/vposer/smpl/004_00_WO_accad'

    bm = BodyModelWithPoser(bm_path=bm_path,
                            batch_size=1,
                            model_type='smpl',
                            poser_type='vposer',
                            smpl_exp_dir=smpl_exp_dir).to('cuda')
    bm.randomize_pose()

    vertices = c2c(bm.forward().v)[0]
    faces = c2c(bm.f)

    mesh = trimesh.base.Trimesh(vertices, faces).show()
Example #23
def prepare_amass(amass_splits,
                  amass_dir,
                  work_dir,
                  logger=None,
                  betas_range=None,
                  betas_limit=None,
                  frame_len=None,
                  max_len=None,
                  downsample_rate=None):

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(os.path.join(work_dir, '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % work_dir)

    stageI_outdir = os.path.join(work_dir, 'stage_I')

    shutil.copy2(sys.argv[0],
                 os.path.join(work_dir, os.path.basename(sys.argv[0])))

    logger('Stage I: Fetch data from AMASS npz files')

    # split mode - split a single dataset into train/vald/test with specified proportions
    # e.g.
    # amass_splits = {
    #       'dataset': 'HumanEva',
    #       'splits': (.85, .05, .1)  # train, vald, test
    # }
    if 'splits' in amass_splits.keys():
        import numbers
        splits = amass_splits['splits']
        _amass_splits = {}
        assert len(splits) == 3 and all(isinstance(s, numbers.Number) for s in splits), \
            "amass_splits['splits'] must be (number, number, number)"
        assert sum(splits) <= 1., \
            "sum of amass_splits['splits'] must be at most 1.0"

        for split_idx, split_name in enumerate(('train', 'vald', 'test')):
            # if there is a zero-split, skip through the dataset creation
            if split_idx > 0 and splits[split_idx] == 0: continue

            final_splits = (0., 1.)
            outpath = makepath(os.path.join(stageI_outdir, split_name,
                                            'pose.pt'),
                               isfile=True)
            # reconstruct amass_splits as normal mode for stage II and III
            _amass_splits[split_name] = amass_splits['dataset']
            if os.path.exists(outpath): continue
            if split_name == 'train': final_splits = (0., splits[0])
            elif split_name == 'vald':
                final_splits = (splits[0], splits[0] + splits[1])
            else:
                final_splits = (splits[0] + splits[1],
                                splits[0] + splits[1] + splits[2])

            if frame_len:
                downsample_amass2pytroch(amass_splits['dataset'],
                                         amass_dir,
                                         outpath,
                                         logger=logger,
                                         betas_range=betas_range,
                                         betas_limit=betas_limit,
                                         splits=final_splits,
                                         frame_len=frame_len,
                                         max_len=max_len,
                                         downsample_rate=downsample_rate)
            else:
                dump_amass2pytroch(amass_splits['dataset'],
                                   amass_dir,
                                   outpath,
                                   logger=logger,
                                   betas_range=betas_range,
                                   betas_limit=betas_limit,
                                   splits=final_splits,
                                   max_len=max_len)

        # assign the reconstructed amass_splits back after stage I completion
        amass_splits = _amass_splits

    # normal mode - using different datasets as train/vald/test
    # e.g.
    # amass_splits = {
    #       'vald': ['HumanEva'],
    #       'test': ['SSM_synced'],
    #       'train': ['CMU']
    # }
    else:
        for split_name, datasets in amass_splits.items():
            outpath = makepath(os.path.join(stageI_outdir, split_name,
                                            'pose.pt'),
                               isfile=True)
            if os.path.exists(outpath): continue
            if frame_len:
                downsample_amass2pytroch(datasets,
                                         amass_dir,
                                         outpath,
                                         logger=logger,
                                         betas_range=betas_range,
                                         betas_limit=betas_limit,
                                         frame_len=frame_len,
                                         max_len=max_len,
                                         downsample_rate=downsample_rate)
            else:
                dump_amass2pytroch(datasets,
                                   amass_dir,
                                   outpath,
                                   logger=logger,
                                   betas_range=betas_range,
                                   betas_limit=betas_limit,
                                   max_len=max_len)

    logger(
        'Stage II: augment the data and save into h5 files to be used in a cross-framework scenario.'
    )

    class AMASS_ROW(pytables.IsDescription):
        fid = pytables.Int16Col(1)  # 16-bit int
        fname = pytables.Int32Col(1)  # 32-bit int
        gender = pytables.Int16Col(1)  # gender encoded as a 16-bit int
        pose = pytables.Float32Col(52 * 3)  # single-precision float
        dmpl = pytables.Float32Col(8)  # single-precision float
        pose_matrot = pytables.Float32Col(52 * 9)  # single-precision float
        betas = pytables.Float32Col(16)  # single-precision float
        trans = pytables.Float32Col(3)  # single-precision float

    stageII_outdir = makepath(os.path.join(work_dir, 'stage_II'))

    batch_size = 256
    max_num_epochs = 1  # number of augmentation passes over the data

    for split_name in amass_splits.keys():
        h5_outpath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if os.path.exists(h5_outpath): continue

        ds = AMASS_Augment(dataset_dir=os.path.join(stageI_outdir, split_name))
        logger('%s has %d data points!' % (split_name, len(ds)))
        dataloader = DataLoader(ds,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=32,
                                drop_last=False)
        with pytables.open_file(h5_outpath, mode="w") as h5file:
            table = h5file.create_table('/', 'data', AMASS_ROW)

            for epoch_num in range(max_num_epochs):
                for bId, bData in tqdm(enumerate(dataloader)):
                    for i in range(len(bData['trans'])):
                        for k in bData.keys():
                            table.row[k] = c2c(bData[k][i])
                        table.row.append()
                    table.flush()

    logger(
        '\nStage III: dump every data field for all the splits as final pytorch pt files'
    )
    # pt files are preferred because they can be read from multiple threads
    stageIII_outdir = makepath(os.path.join(work_dir, 'stage_III'))

    for split_name in amass_splits.keys():
        h5_filepath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if not os.path.exists(h5_filepath): continue

        with pytables.open_file(h5_filepath, mode="r") as h5file:
            data = h5file.get_node('/data')
            data_dict = {k: [] for k in data.colnames}
            for id in range(len(data)):
                cdata = data[id]
                for k in data_dict.keys():
                    data_dict[k].append(cdata[k])

        for k, v in data_dict.items():
            outfname = makepath(os.path.join(stageIII_outdir, split_name,
                                             '%s.pt' % k),
                                isfile=True)
            if os.path.exists(outfname): continue
            torch.save(torch.from_numpy(np.asarray(v)), outfname)

    logger('Dumped final pytorch dataset at %s' % stageIII_outdir)
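# A minimal usage sketch of the split mode documented above, carving one
# dataset into train/vald/test; the dataset name and directories are
# placeholders mirroring the comment in the function.
amass_splits = {'dataset': 'HumanEva', 'splits': (.85, .05, .1)}
prepare_amass(amass_splits, amass_dir='/path/to/amass_npz_dir', work_dir='/path/to/work_dir')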
Example #24
bm_path = '../body_models/smplh/male/model.npz' # obtain from http://mano.is.tue.mpg.de/downloads

comp_device = torch.device('cuda')
bm = BodyModel(bm_path=bm_path, batch_size=1, num_betas=10).to(comp_device)

npz_data_path = '../github_data/amass_sample.npz'
bdata = np.load(npz_data_path)
print(list(bdata.keys()))

root_orient = torch.Tensor(bdata['poses'][:, :3]).to(comp_device)
pose_body = torch.Tensor(bdata['poses'][:, 3:66]).to(comp_device)
pose_hand = torch.Tensor(bdata['poses'][:, 66:]).to(comp_device)
betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(comp_device)

faces = c2c(bm.f)

from human_body_prior.mesh import MeshViewer
from human_body_prior.mesh.sphere import points_to_spheres
import trimesh
from human_body_prior.tools.omni_tools import colors
from human_body_prior.tools.visualization_tools import imagearray2file
from human_body_prior.tools.omni_tools import apply_mesh_tranfsormations_
from tqdm import tqdm

imw, imh = 1600, 1800
step = 10
T = bdata['poses'].shape[0]//step

mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
images = np.zeros([2, 3, T, imh, imw, 3], dtype=np.float32)
Example #25
    print("load pretrain parameters from %s" % trained_model_fname)

    vposer_pt.eval()

    return vposer_pt


def extract_weights_asnumpy(exp_id, vp_model=False):
    from human_body_prior.tools.omni_tools import makepath
    from human_body_prior.tools.omni_tools import copy2cpu as c2c

    vposer_pt, vposer_ps = load_vposer(exp_id, vp_model=vp_model)

    save_wt_dir = makepath(os.path.join(vposer_ps.work_dir, 'weights_npy'))

    weights = {}
    for var_name, var in vposer_pt.named_parameters():
        weights[var_name] = c2c(var)
    np.savez(os.path.join(save_wt_dir, 'vposerWeights.npz'), **weights)

    print('Dumped weights as numpy arrays to %s' % save_wt_dir)
    return vposer_ps, weights


if __name__ == '__main__':
    from human_body_prior.tools.omni_tools import copy2cpu as c2c
    expr_dir = '/ps/project/humanbodyprior/VPoser/smpl/pytorch/0020_06_amass'
    from human_body_prior.train.vposer_smpl import VPoser
    vposer_pt, ps = load_vposer(expr_dir, vp_model='snapshot')
    pose = c2c(vposer_pt.sample_poses(10))
    print(pose.shape)
def load_file(file_name):
    npz_bdata_path = file_name
    bdata = np.load(npz_bdata_path)

    gender = bdata['gender']

    bm_path = MODELS_FOLDER + "/" + 'male' + '/model.npz'

    num_betas = 10  # number of body shape parameters
    model_type = 'smplh'

    FRAMES = bdata['poses'].shape[0]
    MAX_BATCH_SIZE = 1000
    batch_size = min(MAX_BATCH_SIZE, FRAMES)
    last_batch_size = FRAMES % MAX_BATCH_SIZE or batch_size  # avoid a zero-sized last batch when FRAMES divides evenly
    bm = BodyModel(bm_path=bm_path,
                   num_betas=num_betas,
                   model_type=model_type,
                   batch_size=batch_size).to(COMP_DEVICE)
    bm2 = BodyModel(bm_path=bm_path,
                    num_betas=num_betas,
                    model_type=model_type,
                    batch_size=last_batch_size).to(COMP_DEVICE)
    faces = c2c(bm.f)

    # print('Data keys available:%s'%list(bdata.keys()))
    # print('Vector poses has %d elements for each of %d frames.' % (bdata['poses'].shape[1], bdata['poses'].shape[0]))
    # print('Vector dmpls has %d elements for each of %d frames.' % (bdata['dmpls'].shape[1], bdata['dmpls'].shape[0]))
    # print('Vector trans has %d elements for each of %d frames.' % (bdata['trans'].shape[1], bdata['trans'].shape[0]))
    # print('Vector betas has %d elements constant for the whole sequence.'%bdata['betas'].shape[0])
    # print('The subject of the mocap sequence is %s.'%bdata['gender'])

    FRAME_TIME = 1.0 / bdata['mocap_framerate'].item()

    joint_names = get_joint_names()[:JOINT_COUNT]
    dependencies = get_dependencies()

    root_orient_allframes = torch.Tensor(bdata['poses'][:, :3]).to(
        COMP_DEVICE)  # controls the global root orientation
    pose_body_allframes = torch.Tensor(bdata['poses'][:, 3:66]).to(
        COMP_DEVICE)  # controls the body
    betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).to(COMP_DEVICE)
    trans_allframes = bdata['trans']

    zipped_global_positions = np.empty((FRAMES, JOINT_COUNT, 3))

    frame_range = FRAMES // batch_size + 1
    if FRAMES == batch_size:
        frame_range = 1
    for fId in range(frame_range):

        startIdx = fId * batch_size
        if startIdx == FRAMES:
            continue
        endIdx = min(FRAMES, (fId + 1) * (batch_size))
        root_orient = root_orient_allframes[startIdx:endIdx]
        pose_body = pose_body_allframes[startIdx:endIdx]

        if endIdx - startIdx == batch_size:
            body = bm(root_orient=root_orient,
                      pose_body=pose_body,
                      betas=betas)
        else:
            body = bm2(root_orient=root_orient,
                       pose_body=pose_body,
                       betas=betas)
        joints = (c2c(body.Jtr))[:, :JOINT_COUNT, :]

        zipped_global_positions[startIdx:endIdx, :, :] = joints + trans_allframes[startIdx:endIdx, None, :]
        # if (fId / FRAMES * 100) % 10 == 0:
        #     print("processing smplh file.. " + str(fId / FRAMES * 100) + "%")

    zipped_global_positions = np.array(zipped_global_positions)[:, :, [0, 2, 1]]  # reorder coordinates to x, z, y
    # dependencies_ = c2c(bm.kintree_table[0])

    # rots = get_rot_matrices_from_rodrigues(np.reshape(bdata['poses'], (-1, 3)))
    # rots = np.reshape(rots, (-1, dependencies_.shape[0], 3, 3))
    # default_pose = get_default_pose(bm.v_template, betas, bm.shapedirs, bm.J_regressor)
    return FRAMES, FRAME_TIME, joint_names, dependencies, zipped_global_positions  #, dependencies_, rots, default_pose, trans_allframes
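# A minimal usage sketch; the npz path is a placeholder, and MODELS_FOLDER,
# COMP_DEVICE and JOINT_COUNT are assumed to be module-level constants as
# referenced above.
frames, frame_time, joint_names, deps, positions = load_file('/path/to/sequence.npz')
print(frames, frame_time, positions.shape)  # positions: (FRAMES, JOINT_COUNT, 3)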
num_dmpls = 8  # number of DMPL parameters
batch_size = 1
# path to the body models
# can be downloaded at http://mano.is.tue.mpg.de/
bm_path = os.path.join(os.getcwd(), expr_code, "body_models", "smplh", "male",
                       "model.npz")
# can be downloaded at http://smpl.is.tue.mpg.de/downloads
dmpl_path = os.path.join(os.getcwd(), expr_code, "body_models", "dmpls",
                         "male", "model.npz")

bm = BodyModel(bm_path=bm_path,
               num_betas=num_betas,
               path_dmpl=dmpl_path,
               num_dmpls=num_dmpls,
               batch_size=batch_size)  # .to(comp_device)
faces = c2c(bm.f)

# Choose the device to run the body model on.
#comp_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

for fold in ["train", "test", "vald"]:

    path_split_dump = os.path.join("amass_dump", fold)
    if not os.path.isdir(path_split_dump):
        os.mkdir(path_split_dump)

    path_split_dump_images = os.path.join("amass_dump", fold + "_images")
    if not os.path.isdir(path_split_dump_images):
        os.mkdir(path_split_dump_images)

    split_dir = os.path.join(work_dir, 'stage_III', fold)
def get_rot_matrices_from_rodrigues(rots):
    from smplx.lbs import batch_rodrigues
    return c2c(batch_rodrigues(rot_vecs=torch.Tensor(rots).to(COMP_DEVICE)))
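# A minimal usage sketch: N axis-angle vectors in, Nx3x3 rotation matrices out
# (COMP_DEVICE is assumed to be defined elsewhere in this file).
import numpy as np
rots = get_rot_matrices_from_rodrigues(np.zeros((5, 3)))
print(rots.shape)  # (5, 3, 3) -- identity matrices for zero rotations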
    def r(self):
        from human_body_prior.tools.omni_tools import copy2cpu as c2c
        return c2c(self.forward().v)
Example #30
assert input_root.exists()

id_path_list = []
cnt = 0

model_types = ['male', 'female', 'neutral']

bm_dict, faces_dict = {}, {}
for m in model_types:
    bm_path = str(input_root / 'MANO/smplh' / m / 'model.npz')
    dmpl_path = str(input_root / 'DMPL' / m / 'model.npz')
    bm_dict[m] = BodyModel(bm_path=bm_path,
                           num_betas=NUM_BETAS,
                           num_dmpls=NUM_DMPLS,
                           path_dmpl=dmpl_path).to(comp_device)
    faces_dict[m] = c2c(bm_dict[m].f)


def process(npz_bdata_path: Path):
    global cnt

    print(cnt, npz_bdata_path)
    bdata = np.load(str(npz_bdata_path))
    # print('Data keys available:%s' % list(bdata.keys()))
    # beta means shape
    # num of elements: 156 (pose), 8 (dmpl), 16 (beta)
    # print('Vector poses has %d elements for each of %d frames.' %
    #       (bdata['poses'].shape[1], bdata['poses'].shape[0]))
    # print('Vector dmpls has %d elements for each of %d frames.' %
    #       (bdata['dmpls'].shape[1], bdata['dmpls'].shape[0]))
    # print('Vector trans has %d elements for each of %d frames.' %