Example #1
def meshPlay(folder, every=100, wait=0.05):
    files = glob.glob(folder + '/*')
    files.sort()
    files = files[-1000:]
    view = MeshViewer()
    for i in range(0, len(files), every):
        mesh = Mesh(filename=files[i])
        view.dynamic_meshes = [mesh]
        time.sleep(wait)
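
This excerpt relies on imports defined elsewhere in its module; below is a minimal, hedged reconstruction of what it needs plus a hypothetical call (the folder path is a placeholder, and the import path follows Example #4):

import glob
import time

from psbody.mesh import Mesh, MeshViewer  # same import path as used in Example #4

meshPlay('/path/to/mesh_sequence', every=50, wait=0.1)  # hypothetical folder of mesh files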
Example #2
File: fit_SMPLX.py  Project: MoyGcc/IPNet
def optimize_pose_shape(th_scan_meshes,
                        smpl,
                        iterations,
                        steps_per_iter,
                        th_pose_3d=None,
                        display=None):
    """
    Optimize SMPL.
    :param display: if not None, pass index of the scan in th_scan_meshes to visualize.
    """
    # Optimizer
    optimizer = torch.optim.Adam([smpl.trans, smpl.betas, smpl.pose],
                                 0.02,
                                 betas=(0.9, 0.999))

    # Get loss_weights
    weight_dict = get_loss_weights()

    # Display
    if display is not None:
        assert int(display) < len(th_scan_meshes)
        mv = MeshViewer()

    for it in range(iterations):
        loop = tqdm(range(steps_per_iter))
        loop.set_description('Optimizing SMPL')
        for i in loop:
            optimizer.zero_grad()
            # Get losses for a forward pass
            loss_dict = forward_step(th_scan_meshes, smpl, th_pose_3d)
            # Get total loss for backward pass
            tot_loss = backward_step(loss_dict, weight_dict, it)
            tot_loss.backward()
            optimizer.step()

            l_str = 'Iter: {}'.format(i)
            for k in loss_dict:
                l_str += ', {}: {:0.4f}'.format(
                    k, weight_dict[k](loss_dict[k], it).mean().item())
                loop.set_description(l_str)

            if display is not None:
                verts, _, _, _ = smpl()
                smpl_mesh = Mesh(v=verts[display].cpu().detach().numpy(),
                                 f=smpl.faces.cpu().numpy())
                scan_mesh = Mesh(
                    v=th_scan_meshes[display].vertices.cpu().detach().numpy(),
                    f=th_scan_meshes[display].faces.cpu().numpy(),
                    vc=np.array([0, 1, 0]))
                mv.set_static_meshes([scan_mesh, smpl_mesh])

    print('** Optimised smpl pose and shape **')
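
The loop above evaluates weight_dict[k](loss_dict[k], it), so each entry returned by get_loss_weights() is evidently a callable taking a loss term and the iteration index. A purely illustrative stand-in (the key names and decay schedule are assumptions, not the repository's actual get_loss_weights):

def get_loss_weights_sketch():
    # Hypothetical sketch: each weight is a callable (loss, iteration) -> weighted loss,
    # here decaying every term's contribution as optimization progresses.
    return {
        's2m': lambda loss, it: 1e2 * loss / (1 + it),    # scan-to-model term (name assumed)
        'm2s': lambda loss, it: 1e2 * loss / (1 + it),    # model-to-scan term (name assumed)
        'betas': lambda loss, it: 1e1 * loss / (1 + it),  # shape regularizer (name assumed)
    }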
Example #3
def evaluate(coma, test_loader, dataset, template_mesh, device, visualize,
             output_dir):
    coma.eval()
    total_loss = 0

    for i, data in enumerate(test_loader):
        data = data.to(device)
        with torch.no_grad():
            out, mu, logvar = coma(data)
        loss = loss_function(out, data.y, mu, logvar)
        total_loss += data.num_graphs * loss.item()

        if visualize and i % 100 == 0:
            meshviewer = MeshViewer(shape=(1, 2))

            save_out = out.detach().cpu().numpy()
            save_out = save_out * dataset.std.numpy() + dataset.mean.numpy()
            expected_out = (data.y.detach().cpu().numpy()
                            ) * dataset.std.numpy() + dataset.mean.numpy()
            result_mesh = Mesh(v=save_out, f=template_mesh.f)
            expected_mesh = Mesh(v=expected_out, f=template_mesh.f)
            meshviewer[0][0].set_dynamic_meshes([result_mesh])
            meshviewer[0][1].set_dynamic_meshes([expected_mesh])
            meshviewer[0][0].save_snapshot(
                os.path.join(output_dir, 'file' + str(i) + '.png'),
                blocking=True)

            result_mesh.write_ply('{}/result_{}.ply'.format(output_dir, i))
            expected_mesh.write_ply('{}/expected_{}.ply'.format(output_dir, i))
            print('result mesh and expected mesh are saved as .ply')

    return total_loss / len(dataset)
Example #4
def render_video(mesh_dir, video_fn, overwrite=False):
    from psbody.mesh import Mesh, MeshViewer
    from os.path import join, exists, splitext
    from glob import glob
    import tempfile
    from subprocess import call
    from pickle import load
    import numpy as np
    from tqdm import tqdm

    if exists(video_fn):
        if overwrite:
            print("File {0} exists, removing it and remaking it".format(
                video_fn))
            call(['rm', '-rf', video_fn])
        else:
            print("File {0} exists, not re-rendering".format(video_fn))
            return

    files_seq = sorted(glob(join(mesh_dir, '*.obj')))

    if len(files_seq) == 0:
        print('No files to render in {}'.format(mesh_dir))
        return

    # Load the meshes
    print("Loading meshes from {}..".format(mesh_dir))
    meshes = []
    for fn in files_seq:
        meshes.append(Mesh(filename=fn))

    from shutil import rmtree
    from tempfile import mkdtemp
    # mkdtemp() already creates a fresh, empty temporary directory
    tmp_folder = mkdtemp()

    mv = MeshViewer(window_width=1000, window_height=800)

    print('Rendering extracted meshes (tmp file, auto-removed later)..')
    for k, mesh in enumerate(tqdm(meshes)):
        mv.set_dynamic_meshes([mesh])
        mv.save_snapshot(join(tmp_folder, '{:0>6d}.png'.format(k)),
                         blocking=True)

    cmd = [
        'ffmpeg', '-i', '{0}/%06d.png'.format(tmp_folder), '-vcodec', 'h264',
        '-pix_fmt', 'yuv420p', '-r', '15', '-an', '-b:v', '5000k', video_fn
    ]
    call(cmd)
    rmtree(tmp_folder)
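
A hedged usage sketch; both paths are hypothetical placeholders, and ffmpeg must be available on PATH since the function shells out to it:

render_video('/path/to/extracted_meshes',     # hypothetical directory containing *.obj frames
             '/path/to/output/sequence.mp4',  # hypothetical output video file
             overwrite=True)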
Example #5
def final_fit(
    opt,
    part_mesh,
    v,
    v_offset,
    dist_o,
    dist_i,
    smpl_h_ref,
    rn_m,
    debug_rn,
    dif_mask,
    v_ids_template,
    faces_template,
    v_ids_side,
    faces_side,
    max_y,
    proj_cam,
    ref_joint_list_coup,
):
    if opt.disp_mesh_side:
        mv = MeshViewer()
    else:
        mv = None
    if opt.disp_mesh_whl:
        mv2 = MeshViewer()
    else:
        mv2 = None

    import scipy.sparse as sp
    sparse_solver = lambda A, x: sp.linalg.cg(A, x, maxiter=500)[0]

    tgt_pose = get_pose_prior(init_pose_path=opt.init_pose_path,
                              gar_type=opt.gar_type)

    E = {
        'mask':
        gaussian_pyramid(rn_m * dist_o * opt.ref_wt_dist_o +
                         (1 - rn_m) * dist_i,
                         n_levels=4,
                         normalization='size') * opt.ref_wt_mask
    }

    x0 = [v_offset]
    if opt.ref_wt_coup:
        # x0 = [smpl_h_ref.trans, smpl_h_ref.betas, v_offset]
        E['coupling'] = (v + v_offset -
                         smpl_h_ref[v_ids_template]) * opt.ref_wt_coup

        if opt.ref_wt_shp:
            E['beta_prior'] = ch.linalg.norm(smpl_h_ref.betas) * opt.ref_wt_shp

        if opt.ref_wt_pose:

            E['pose'] = (smpl_h_ref.pose - tgt_pose) * opt.ref_wt_pose

        if ref_joint_list_coup is not None:
            range_joint = []
            for item in ref_joint_list_coup:
                range_joint.append(3 * int(item))
                range_joint.append(3 * int(item) + 1)
                range_joint.append(3 * int(item) + 2)

            x0 = x0 + [smpl_h_ref.pose[range_joint]]

        if opt.ref_use_betas:
            x0 = x0 + [smpl_h_ref.betas]

    if opt.ref_wt_proj:
        error_bd = get_rings_error(proj_cam, max_y)
        E['proj_bd'] = error_bd * opt.ref_wt_proj

    if opt.ref_wt_bd:
        gar_rings = compute_boundaries(v + v_offset, faces_template)
        error = smooth_rings(gar_rings, v + v_offset)
        E['boundary'] = error * opt.ref_wt_bd

    if opt.ref_wt_lap:
        lap_op = np.asarray(laplacian(part_mesh).todense())
        lap_err = ch.dot(lap_op, v + v_offset)
        E['laplacian'] = lap_err * opt.ref_wt_lap

    ch.minimize(E,
                x0,
                method='dogleg',
                options={
                    'e_3': .000001,
                    'sparse_solver': sparse_solver
                },
                callback=get_callback_ref(rend=debug_rn,
                                          mask=dif_mask,
                                          vertices=v + v_offset,
                                          display=opt.display,
                                          v_ids_sides=v_ids_side,
                                          faces_template=faces_template,
                                          faces_side=faces_side,
                                          disp_mesh_side=opt.disp_mesh_side,
                                          disp_mesh_whl=opt.disp_mesh_whl,
                                          save_dir=opt.save_opt_images,
                                          mv=mv,
                                          mv2=mv2,
                                          show=opt.show))

    final_mask = rn_m.r

    mask = dif_mask.copy()
    mask[dif_mask == 0.5] = 1

    final_iou = compute_iou(mask, final_mask)

    return v + v_offset, final_iou
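
Here E is a dictionary of residual terms and ch.minimize (evidently chumpy, given the 'dogleg' method and 'e_3' option) consumes that dictionary directly, stacking the residuals into one least-squares objective. A minimal toy sketch of the same pattern, unrelated to the garment energies above:

import numpy as np
import chumpy as ch

x = ch.zeros(3)                  # free variable to optimize
target = np.array([1.0, 2.0, 3.0])

E_toy = {
    'data': x - target,          # residual vector; chumpy squares and sums it internally
    'reg': 0.1 * x,              # simple quadratic regularizer on x
}
ch.minimize(E_toy, x0=[x], method='dogleg', options={'e_3': 1e-4})
print(x.r)                       # close to the target, slightly shrunk by the regularizer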
Example #6
def init_fit(opt, dist_o, dist_i, dif_mask, rn_m, smpl_h, v_ids_template,
             faces_template, debug_rn, v_ids_side, faces_side, joints_list):

    range_joint = []

    for item in joints_list:
        range_joint.append(3 * int(item))
        range_joint.append(3 * int(item) + 1)
        range_joint.append(3 * int(item) + 2)

    tgt_pose = get_pose_prior(init_pose_path=opt.init_pose_path,
                              gar_type=opt.gar_type)

    # ============================================
    #                 FIRST STAGE
    # ============================================
    from psbody.mesh import MeshViewer

    if opt.disp_mesh_side:
        mv = MeshViewer()
    else:
        mv = None
    if opt.disp_mesh_whl:
        mv2 = MeshViewer()
    else:
        mv2 = None

    if opt.init_first_stage == "Trans":
        x0 = [smpl_h.trans]

    elif opt.init_first_stage == 'Pose':
        x0 = [smpl_h.trans, smpl_h.pose[range_joint]]
        # x0 = [smpl_h.trans]

    elif opt.init_first_stage == 'Shape':
        x0 = [smpl_h.trans, smpl_h.betas]

    elif opt.init_first_stage == 'Both':
        x0 = [smpl_h.trans, smpl_h.betas, smpl_h.pose[range_joint]]

    E = {
        'mask':
        gaussian_pyramid(rn_m * dist_o * opt.init_fst_wt_dist_o +
                         (1 - rn_m) * dist_i,
                         n_levels=4,
                         normalization='size') * opt.init_fst_wt_mask
    }

    if opt.init_fst_wt_betas:
        E['beta_prior'] = ch.linalg.norm(smpl_h.betas) * opt.init_fst_wt_betas

    if opt.init_fst_wt_pose:
        E['pose'] = (smpl_h.pose - tgt_pose) * opt.init_fst_wt_pose

    ch.minimize(E,
                x0,
                method='dogleg',
                options={
                    'e_3': .0001,
                    'disp': True
                },
                callback=get_callback(rend=debug_rn,
                                      mask=dif_mask,
                                      smpl=smpl_h,
                                      v_ids_template=v_ids_template,
                                      v_ids_sides=v_ids_side,
                                      faces_template=faces_template,
                                      faces_side=faces_side,
                                      display=opt.display,
                                      disp_mesh_side=opt.disp_mesh_side,
                                      disp_mesh_whl=opt.disp_mesh_whl,
                                      mv=mv,
                                      mv2=mv2,
                                      save_dir=opt.save_opt_images,
                                      show=opt.show))

    # ===============================================
    #                 SECOND STAGE
    # ===============================================

    if opt.init_second_stage != "None":
        if opt.init_second_stage == 'Pose':
            x0 = [smpl_h.trans, smpl_h.pose[range_joint]]

        elif opt.init_second_stage == 'Shape':
            x0 = [smpl_h.trans, smpl_h.betas]

        elif opt.init_second_stage == 'Both':
            x0 = [smpl_h.trans, smpl_h.betas, smpl_h.pose[range_joint]]

        E = {
            'mask':
            gaussian_pyramid(rn_m * dist_o * opt.init_sec_wt_dist_o +
                             (1 - rn_m) * dist_i,
                             n_levels=4,
                             normalization='size') * opt.init_sec_wt_mask
        }

        if opt.init_sec_wt_betas:
            E['beta_prior'] = ch.linalg.norm(
                smpl_h.betas) * opt.init_sec_wt_betas

        if opt.init_sec_wt_pose:
            E['pose'] = (smpl_h.pose - tgt_pose) * opt.init_sec_wt_pose

        ch.minimize(E,
                    x0,
                    method='dogleg',
                    options={'e_3': .0001},
                    callback=get_callback(rend=debug_rn,
                                          mask=dif_mask,
                                          smpl=smpl_h,
                                          v_ids_template=v_ids_template,
                                          v_ids_sides=v_ids_side,
                                          faces_template=faces_template,
                                          faces_side=faces_side,
                                          display=opt.display,
                                          disp_mesh_side=opt.disp_mesh_side,
                                          disp_mesh_whl=opt.disp_mesh_whl,
                                          mv=mv,
                                          mv2=mv2,
                                          save_dir=opt.save_opt_images,
                                          show=opt.show))

    temp_params = {
        'pose': smpl_h.pose.r,
        'betas': smpl_h.betas.r,
        'trans': smpl_h.trans.r,
        'v_personal': smpl_h.v_personal.r
    }

    part_mesh = Mesh(smpl_h.r[v_ids_template], faces_template)

    return part_mesh, temp_params
Example #7
def optimize_pose_only(th_scan_meshes,
                       smplx,
                       iterations,
                       steps_per_iter,
                       scan_part_labels,
                       smplx_part_labels,
                       search_tree=None,
                       pen_distance=None,
                       tri_filtering_module=None,
                       display=None):
    """
    Initially we want to only optimize the global rotation of SMPLX. Next we optimize full pose.
    We optimize pose based on the 3D keypoints in th_pose_3d.
    :param  th_pose_3d: array containing the 3D keypoints.
    """

    batch_sz = 1  # smplx.pose.shape[0]
    split_smplx = th_batch_SMPLX_split_params(
        batch_sz,
        top_betas=smplx.betas.data[:, :2],
        other_betas=smplx.betas.data[:, 2:],
        global_pose=smplx.global_pose.data,
        body_pose=smplx.body_pose.data,
        left_hand_pose=smplx.left_hand_pose.data,
        right_hand_pose=smplx.right_hand_pose.data,
        expression=smplx.expression.data,
        jaw_pose=smplx.jaw_pose.data,
        leye_pose=smplx.leye_pose.data,
        reye_pose=smplx.reye_pose.data,
        faces=smplx.faces,
        gender=smplx.gender).to(DEVICE)
    # split_smplx.expression.requires_grad = False
    # split_smplx.jaw_pose.requires_grad = False
    optimizer = torch.optim.Adam(
        [split_smplx.trans, split_smplx.top_betas, split_smplx.global_pose],
        0.02,
        betas=(0.9, 0.999))

    # Get loss_weights
    weight_dict = get_loss_weights()

    if display is not None:
        assert int(display) < len(th_scan_meshes)
        # mvs = MeshViewers((1,1))
        mv = MeshViewer(keepalive=True)

    iter_for_global = 1
    for it in range(iter_for_global + iterations):
        loop = tqdm(range(steps_per_iter))
        if it < iter_for_global:
            # Optimize global orientation
            print('Optimizing SMPLX global orientation')
            loop.set_description('Optimizing SMPLX global orientation')
        elif it == iter_for_global:
            # Now optimize full SMPLX pose
            print('Optimizing SMPLX pose only')
            loop.set_description('Optimizing SMPLX pose only')
            optimizer = torch.optim.Adam([
                split_smplx.trans, split_smplx.top_betas,
                split_smplx.global_pose, split_smplx.body_pose,
                split_smplx.left_hand_pose, split_smplx.right_hand_pose
            ],
                                         0.02,
                                         betas=(0.9, 0.999))
        else:
            loop.set_description('Optimizing SMPLX pose only')

        for i in loop:
            optimizer.zero_grad()
            # Get losses for a forward pass
            loss_dict = forward_step(th_scan_meshes, split_smplx,
                                     scan_part_labels, smplx_part_labels,
                                     search_tree, pen_distance,
                                     tri_filtering_module)
            # Get total loss for backward pass
            tot_loss = backward_step(loss_dict, weight_dict, it)
            tot_loss.backward()
            optimizer.step()

            l_str = 'Iter: {}'.format(i)
            for k in loss_dict:
                l_str += ', {}: {:0.4f}'.format(
                    k, weight_dict[k](loss_dict[k], it).mean().item())
                loop.set_description(l_str)

            if display is not None:
                # verts, _, _, _ = split_smplx()
                verts = split_smplx()
                smplx_mesh = Mesh(v=verts[display].cpu().detach().numpy(),
                                  f=smplx.faces.cpu().numpy())
                scan_mesh = Mesh(
                    v=th_scan_meshes[display].vertices.cpu().detach().numpy(),
                    f=th_scan_meshes[display].faces.cpu().numpy(),
                    vc=np.array([0, 1, 0]))
                scan_mesh.set_vertex_colors_from_weights(
                    scan_part_labels[display].cpu().detach().numpy())

                mv.set_dynamic_meshes([smplx_mesh, scan_mesh])

    # Put back pose, shape and trans into original smplx
    smplx.global_pose.data = split_smplx.global_pose.data
    smplx.body_pose.data = split_smplx.body_pose.data
    smplx.left_hand_pose.data = split_smplx.left_hand_pose.data
    smplx.right_hand_pose.data = split_smplx.right_hand_pose.data
    # smplx.jaw_pose.data = split_smplx.jaw_pose.data
    smplx.leye_pose.data = split_smplx.leye_pose.data
    smplx.reye_pose.data = split_smplx.reye_pose.data
    smplx.betas.data = split_smplx.betas.data
    smplx.trans.data = split_smplx.trans.data

    print('** Optimised smplx pose **')
Example #8
def optimize_pose_shape(th_scan_meshes,
                        smplx,
                        iterations,
                        steps_per_iter,
                        scan_part_labels,
                        smplx_part_labels,
                        search_tree=None,
                        pen_distance=None,
                        tri_filtering_module=None,
                        display=None):
    """
    Optimize SMPLX.
    :param display: if not None, pass index of the scan in th_scan_meshes to visualize.
    """
    # smplx.expression.requires_grad = False
    # smplx.jaw_pose.requires_grad = False
    # Optimizer
    optimizer = torch.optim.Adam([
        smplx.trans, smplx.betas, smplx.global_pose, smplx.body_pose,
        smplx.left_hand_pose, smplx.right_hand_pose
    ],
                                 0.02,
                                 betas=(0.9, 0.999))

    # Get loss_weights
    weight_dict = get_loss_weights()

    # Display
    if display is not None:
        assert int(display) < len(th_scan_meshes)
        mv = MeshViewer()

    for it in range(iterations):
        loop = tqdm(range(steps_per_iter))
        loop.set_description('Optimizing SMPLX')
        for i in loop:
            optimizer.zero_grad()
            # Get losses for a forward pass
            loss_dict = forward_step(th_scan_meshes, smplx, scan_part_labels,
                                     smplx_part_labels, search_tree,
                                     pen_distance, tri_filtering_module)
            # Get total loss for backward pass
            tot_loss = backward_step(loss_dict, weight_dict, it)
            tot_loss.backward()
            optimizer.step()

            l_str = 'Iter: {}'.format(i)
            for k in loss_dict:
                l_str += ', {}: {:0.4f}'.format(
                    k, weight_dict[k](loss_dict[k], it).mean().item())
                loop.set_description(l_str)

            if display is not None:
                # verts, _, _, _ = smplx()
                verts = smplx()
                smplx_mesh = Mesh(v=verts[display].cpu().detach().numpy(),
                                  f=smplx.faces.cpu().numpy())
                scan_mesh = Mesh(
                    v=th_scan_meshes[display].vertices.cpu().detach().numpy(),
                    f=th_scan_meshes[display].faces.cpu().numpy(),
                    vc=np.array([0, 1, 0]))
                scan_mesh.set_vertex_colors_from_weights(
                    scan_part_labels[display].cpu().detach().numpy())
                mv.set_static_meshes([scan_mesh, smplx_mesh])

    print('** Optimised smplx pose and shape **')
Example #9
File: fit_SMPLX.py  Project: MoyGcc/IPNet
def optimize_pose_only(th_scan_meshes,
                       smpl,
                       iterations,
                       steps_per_iter,
                       th_pose_3d,
                       prior_weight,
                       display=None):
    """
    Initially we want to only optimize the global rotation of SMPL. Next we optimize full pose.
    We optimize pose based on the 3D keypoints in th_pose_3d.
    :param  th_pose_3d: array containing the 3D keypoints.
    :param prior_weight: weights corresponding to joints depending on visibility of the joint in the 3D scan.
                         eg: hand could be inside pocket.
    """

    batch_sz = smpl.pose.shape[0]
    split_smpl = th_batch_SMPL_split_params(batch_sz,
                                            top_betas=smpl.betas.data[:, :2],
                                            other_betas=smpl.betas.data[:, 2:],
                                            global_pose=smpl.pose.data[:, :3],
                                            other_pose=smpl.pose.data[:, 3:],
                                            faces=smpl.faces,
                                            gender=smpl.gender).cuda()
    optimizer = torch.optim.Adam(
        [split_smpl.trans, split_smpl.top_betas, split_smpl.global_pose],
        0.02,
        betas=(0.9, 0.999))

    # Get loss_weights
    weight_dict = get_loss_weights()

    if display is not None:
        assert int(display) < len(th_scan_meshes)
        # mvs = MeshViewers((1,1))
        mv = MeshViewer(keepalive=True)

    iter_for_global = 1
    for it in range(iter_for_global + iterations):
        loop = tqdm(range(steps_per_iter))
        if it < iter_for_global:
            # Optimize global orientation
            print('Optimizing SMPL global orientation')
            loop.set_description('Optimizing SMPL global orientation')
        elif it == iter_for_global:
            # Now optimize full SMPL pose
            print('Optimizing SMPL pose only')
            loop.set_description('Optimizing SMPL pose only')
            optimizer = torch.optim.Adam([
                split_smpl.trans, split_smpl.top_betas, split_smpl.global_pose,
                split_smpl.other_pose
            ],
                                         0.02,
                                         betas=(0.9, 0.999))
        else:
            loop.set_description('Optimizing SMPL pose only')

        for i in loop:
            optimizer.zero_grad()
            # Get losses for a forward pass
            loss_dict = forward_step_pose_only(split_smpl, th_pose_3d,
                                               prior_weight)
            # Get total loss for backward pass
            tot_loss = backward_step(loss_dict, weight_dict, it)
            tot_loss.backward()
            optimizer.step()

            l_str = 'Iter: {}'.format(i)
            for k in loss_dict:
                l_str += ', {}: {:0.4f}'.format(
                    k, weight_dict[k](loss_dict[k], it).mean().item())
                loop.set_description(l_str)

            if display is not None:
                verts, _, _, _ = split_smpl()
                smpl_mesh = Mesh(v=verts[display].cpu().detach().numpy(),
                                 f=smpl.faces.cpu().numpy())
                scan_mesh = Mesh(
                    v=th_scan_meshes[display].vertices.cpu().detach().numpy(),
                    f=th_scan_meshes[display].faces.cpu().numpy(),
                    vc=np.array([0, 1, 0]))

                mv.set_dynamic_meshes([smpl_mesh, scan_mesh])

                # from matplotlib import cm
                # col = cm.tab20c(np.arange(len(th_pose_3d[display]['pose_keypoints_3d'])) % 20)[:, :3]
                #
                # jts, _, _ = split_smpl.get_landmarks()
                # Js = plot_points(jts[display].detach().cpu().numpy(), cols=col)
                # Js_observed = plot_points(th_pose_3d[display]['pose_keypoints_3d'][:,  :3].numpy(), cols=col)

                # mvs[0][0].set_static_meshes([smpl_mesh, scan_mesh])
                # mvs[0][1].set_static_meshes(Js)
                # mvs[0][2].set_static_meshes(Js_observed)

    # Put back pose, shape and trans into original smpl
    smpl.pose.data = split_smpl.pose.data
    smpl.betas.data = split_smpl.betas.data
    smpl.trans.data = split_smpl.trans.data

    print('** Optimised smpl pose **')
Example #10
    if mesh.v.max() > 1e3:
        mesh.v /= 1e3

    n_vertices = len(mesh.v)

    prediction_vertex_values = np.arange(n_vertices)

    minima = min(prediction_vertex_values)
    maxima = max(prediction_vertex_values)
    norm = matplotlib.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.viridis)

    mapper_list = [mapper.to_rgba(x)[0] for x in prediction_vertex_values]

    mesh.vc = mapper_list
    mv = MeshViewer()
    mv.dynamic_meshes = [mesh]
    time.sleep(100)
    mv.save_snapshot(
        '/home/eman/Documents/PhD/pytorch_geometric-master/examples/reference-mesh.png'
    )

####################################################################

if args.case == 'MPI':
    data_path = '/home/eman/Documents/PhD/body-modeling-master_initial/smpl-viewer/Data-survey-smpl/AllData/'
    dirs = os.listdir(data_path)
    dirs.sort()

    print(dirs)
Example #11
print(len(objFiles))

for i in range(len(objFiles)):
    #Displaying original faces
    #i = 24
    originalMeshFile = Mesh(filename=path + objFiles[i])
    originalMeshFile.texture_filepath = path + bmpFiles[i]

    # Note: these assignments create references to the same Mesh object, not copies;
    # modifying leftVerts.v below also modifies originalMeshFile and rightVerts.
    leftVerts = originalMeshFile
    rightVerts = originalMeshFile

    #print vars(leftVerts)

    name = (objFiles[i])[:-4]

    mv = MeshViewer()

    mv.dynamic_meshes = [originalMeshFile]
    time.sleep(0.3)
    mv.save_snapshot(saving_path + name + '_frontal.png')
    #time.sleep(0.1)
    #mv.

    #Left view
    newleftVerts = (np.dot(rotation_matrix(axis, theta_l),
                           (leftVerts.v).transpose()))
    leftVerts.v = newleftVerts.transpose()
    #mv_l = MeshViewer()
    mv.dynamic_meshes = [leftVerts]
    time.sleep(0.3)
    mv.save_snapshot(saving_path + name + '_left.png')
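
rotation_matrix(axis, theta_l) is not shown in this excerpt; a standard Rodrigues-formula stand-in (an assumption, not necessarily the project's own helper) would be:

import numpy as np

def rotation_matrix(axis, theta):
    # 3x3 matrix rotating by theta radians about the given axis (Rodrigues' formula).
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)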