Example #1
def vis_verts(mean_shape, verts, face, mvs=None, textures=None):
    """
    mean_shape: N x 3
    verts: B x N x 3
    face: numpy F x 3
    textures: B x F x T x T (x T) x 3
    """
    from psbody.mesh.mesh import Mesh
    from psbody.mesh.meshviewer import MeshViewers
    if mvs is None:
        mvs = MeshViewers((2, 3))

    num_row = len(mvs)
    num_col = len(mvs[0])

    mean_shape = convert2np(mean_shape)
    verts = convert2np(verts)

    num_show = min(num_row * num_col, verts.shape[0] + 1)

    # 0th is mean shape:
    mvs[0][0].set_dynamic_meshes([Mesh(mean_shape, face)])

    if textures is not None:
        tex = convert2np(textures)
    for k in np.arange(1, num_show):
        vert_here = verts[k - 1]
        if textures is not None:
            tex_here = tex[k - 1]
            fc = tex_here.reshape(tex_here.shape[0], -1, 3).mean(axis=1)
            mesh = Mesh(vert_here, face, fc=fc)
        else:
            mesh = Mesh(vert_here, face)
        mvs[int(k % num_row)][int(k / num_row)].set_dynamic_meshes([mesh])
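
A minimal usage sketch for vis_verts (not part of the original example): the array shapes follow the docstring, the contents are arbitrary dummy data, and it assumes convert2np accepts plain numpy arrays and that a display is available for MeshViewers.

import numpy as np

# Hypothetical dummy inputs matching the docstring shapes (values are arbitrary).
N, B, F = 642, 5, 1280
mean_shape = np.random.rand(N, 3)             # N x 3 mean shape
verts = np.random.rand(B, N, 3)               # B x N x 3 batch of vertex sets
face = np.random.randint(0, N, size=(F, 3))   # F x 3 triangle indices

# Opens a 2 x 3 MeshViewers grid: pane (0, 0) shows the mean shape,
# the remaining panes show up to five meshes from the batch.
vis_verts(mean_shape, verts, face)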
Example #2
def run_2d_lmk_fitting(model_fname, template_fname, flame_lmk_path, texture_mapping, target_img_path, target_lmk_path, out_path):
    if 'generic' not in model_fname:
        print('You are fitting a gender specific model (i.e. female / male). Please make sure you selected the right gender model. Choose the generic model if gender is unknown.')
    if not os.path.exists(template_fname):
        print('Template mesh (in FLAME topology) not found - %s' % template_fname)
        return
    if not os.path.exists(flame_lmk_path):
        print('FLAME landmark embedding not found - %s ' % flame_lmk_path)
        return
    if not os.path.exists(target_img_path):
        print('Target image not found - %s' % target_img_path)
        return
    if not os.path.exists(target_lmk_path):
        print('Landmarks of target image not found - %s' % target_lmk_path)
        return

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    lmk_face_idx, lmk_b_coords = load_embedding(flame_lmk_path)

    target_img = cv2.imread(target_img_path)
    lmk_2d = np.load(target_lmk_path)

    weights = {}
    # Weight of the landmark distance term
    weights['lmk'] = 1.0
    # Weight of the shape regularizer
    weights['shape'] = 1e-3
    # Weight of the expression regularizer
    weights['expr'] = 1e-3
    # Weight of the neck pose (i.e. neck rotation around the neck) regularizer
    weights['neck_pose'] = 100.0
    # Weight of the jaw pose (i.e. jaw rotation for opening the mouth) regularizer
    weights['jaw_pose'] = 1e-3
    # Weight of the eyeball pose (i.e. eyeball rotations) regularizer
    weights['eyeballs_pose'] = 10.0

    result_mesh, result_scale = fit_lmk2d(target_img, lmk_2d, template_fname, model_fname, lmk_face_idx, lmk_b_coords, weights)

    if sys.version_info >= (3, 0):
        texture_data = np.load(texture_mapping, allow_pickle=True, encoding='latin1').item()
    else:
        texture_data = np.load(texture_mapping, allow_pickle=True).item()
    texture_map = compute_texture_map(target_img, result_mesh, result_scale, texture_data)

    out_mesh_fname = os.path.join(out_path, os.path.splitext(os.path.basename(target_img_path))[0] + '.obj')
    out_img_fname = os.path.join(out_path, os.path.splitext(os.path.basename(target_img_path))[0] + '.png')

    cv2.imwrite(out_img_fname, texture_map)
    result_mesh.set_vertex_colors('white')
    result_mesh.vt = texture_data['vt']
    result_mesh.ft = texture_data['ft']
    result_mesh.set_texture_image(out_img_fname)
    result_mesh.write_obj(out_mesh_fname)
    np.save(os.path.join(out_path, os.path.splitext(os.path.basename(target_img_path))[0] + '_scale.npy'), result_scale)

    mv = MeshViewers(shape=[1,2], keepalive=True)
    mv[0][0].set_static_meshes([Mesh(result_mesh.v, result_mesh.f)])
    mv[0][1].set_static_meshes([result_mesh])
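
A hedged invocation sketch for the fitting pipeline above; every path below is a placeholder for local FLAME model, landmark-embedding, texture-mapping, and target files, not something shipped with this snippet.

# Hypothetical paths -- adjust to your local FLAME assets and target data.
run_2d_lmk_fitting(model_fname='./models/generic_model.pkl',
                   template_fname='./data/template.ply',
                   flame_lmk_path='./data/flame_static_embedding.pkl',
                   texture_mapping='./data/texture_data.npy',
                   target_img_path='./data/target.jpg',
                   target_lmk_path='./data/target_lmks.npy',
                   out_path='./results')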
Example #3
def save_and_display_results(result_mesh, result_scale, out_path, target_img_path):
    out_mesh_fname = os.path.join(out_path, os.path.splitext(os.path.basename(target_img_path))[0] + '.obj')
    write_obj(result_mesh, out_mesh_fname)
    np.save(os.path.join(out_path, os.path.splitext(os.path.basename(target_img_path))[0] + '_scale.npy'), result_scale)

    mv = MeshViewers(shape=[1, 2], keepalive=True)
    mv[0][0].set_static_meshes([Mesh(result_mesh.v, result_mesh.f)])
    mv[0][1].set_static_meshes([result_mesh])
Example #4
    def setUp(self):
        fnames = [
            os.path.join(test_data_folder, i)
            for i in os.listdir(test_data_folder)
            if os.path.splitext(i)[1].lower() == '.ply'
        ]

        self.meshes = [Mesh(filename=fname) for fname in fnames]

        self.mvs = MeshViewers(shape=[2, 2])
        self.mvs[0][0].set_static_meshes([self.meshes[0]])
        self.mvs[0][1].set_static_meshes([self.meshes[1]])
        self.mvs[1][0].set_static_meshes([self.meshes[2]])
        self.mvs[1][1].set_static_meshes(
            [self.meshes[0]])  # only 2 .ply files left in the GitHub version
Example #5
def vis_vert2kp(verts, vert2kp, face, mvs=None):
    """
    verts: N x 3
    vert2kp: K x N

    For each keypoint, visualize its weights on each vertex.
    Base color is white, pick a color for each kp.
    Using the weights, interpolate between base and color.

    """
    from psbody.mesh.mesh import Mesh
    from psbody.mesh.meshviewer import MeshViewer, MeshViewers
    from psbody.mesh.sphere import Sphere

    num_kp = vert2kp.shape[0]
    if mvs is None:
        mvs = MeshViewers((4, 4))
    # mv = MeshViewer()
    # Generate colors
    import pylab
    cm = pylab.get_cmap('gist_rainbow')
    cms = 255 * np.array([cm(1. * i / num_kp)[:3] for i in range(num_kp)])
    base = np.zeros((1, 3)) * 255
    # base = np.ones((1, 3)) * 255

    verts = convert2np(verts)
    vert2kp = convert2np(vert2kp)

    num_row = len(mvs)
    num_col = len(mvs[0])

    colors = []
    for k in range(num_kp):
        # Nx1 for this kp.
        weights = vert2kp[k].reshape(-1, 1)
        # Normalize so the weights are visible.
        weights = weights / weights.max()
        cm = cms[k, None]
        # Simple linear interpolation.
        # cs = np.uint8((1-weights) * base + weights * cm)
        # In [0, 1]
        cs = ((1 - weights) * base + weights * cm) / 255.
        colors.append(cs)

        # sph = [Sphere(center=jc, radius=.03).to_mesh(c/255.) for jc, c in zip(vert,cs)]
        # mvs[int(k/4)][k%4].set_dynamic_meshes(sph)
        mvs[int(k % num_row)][int(k / num_row)].set_dynamic_meshes(
            [Mesh(verts, face, vc=cs)])
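
As with the vis_verts sketch above, a minimal hypothetical call; each row of vert2kp is a per-keypoint weight distribution over the vertices, and a display is assumed for MeshViewers.

import numpy as np

# Hypothetical shapes: N vertices, K keypoints, F faces (values are arbitrary).
N, K, F = 642, 15, 1280
verts = np.random.rand(N, 3)
vert2kp = np.random.rand(K, N)
vert2kp /= vert2kp.sum(axis=1, keepdims=True)   # soft assignments: rows sum to one
face = np.random.randint(0, N, size=(F, 3))

# Opens a 4 x 4 MeshViewers grid with one colored mesh per keypoint.
vis_vert2kp(verts, vert2kp, face)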
Example #6
    def setUp(self):

        fnames = [
            os.path.join(test_data_folder, i)
            for i in os.listdir(test_data_folder)
            if os.path.splitext(i)[1].lower() == '.ply'
        ]

        # We build a cycle to make sure we have enough meshes
        self.meshes = itertools.cycle(Mesh(filename=fname) for fname in fnames)

        self.mvs = MeshViewers(shape=[2, 2])
        self.mvs[0][0].set_static_meshes([next(self.meshes)])
        self.mvs[0][1].set_static_meshes([next(self.meshes)])
        self.mvs[1][0].set_static_meshes([next(self.meshes)])
        self.mvs[1][1].set_static_meshes([next(self.meshes)])
Example #7
# Denormalize
template_mesh.v = np.squeeze(v.detach().numpy())
template_mesh.v *= meshdata.std.numpy()
template_mesh.v += meshdata.mean.numpy()

compressed_sensing_mesh.v = np.squeeze(out.detach().numpy())
compressed_sensing_mesh.v *= meshdata.std.numpy()
compressed_sensing_mesh.v += meshdata.mean.numpy()

compressed_sensing_opt_mesh.v = np.squeeze(out_opt.detach().numpy())
compressed_sensing_opt_mesh.v *= meshdata.std.numpy()
compressed_sensing_opt_mesh.v += meshdata.mean.numpy()

vae_mesh.v = np.squeeze(outVAE.detach().numpy())
vae_mesh.v *= meshdata.std.numpy()
vae_mesh.v += meshdata.mean.numpy()

# Display qualitative results
# Bottom left: Ground Truth
# Bottom right: optimization with single initialization
# Top left: auto-encoded result
# Top right: optimization result with initialization from encoder
mvs = MeshViewers(shape=[2, 2])
mvs[0][0].set_static_meshes([template_mesh])
mvs[0][1].set_static_meshes([compressed_sensing_mesh])
mvs[1][0].set_static_meshes([vae_mesh])
mvs[1][1].set_static_meshes([compressed_sensing_opt_mesh])

# TODO do quantitative experiments
# TODO put quantitative and qualitative experiments in separate functions
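
The four denormalization blocks above repeat the same pattern; a small helper along these lines (a sketch, assuming meshdata.std and meshdata.mean are torch tensors and the network outputs are torch tensors of vertex coordinates) keeps it in one place.

import numpy as np

def denormalize_into(mesh, tensor, meshdata):
    # Undo the dataset normalization: scale by the per-vertex std and add the mean.
    mesh.v = np.squeeze(tensor.detach().numpy()) * meshdata.std.numpy() + meshdata.mean.numpy()
    return mesh

# e.g. denormalize_into(template_mesh, v, meshdata)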
Example #8
def optimize_smal(fposes,
                  ftrans,
                  fbetas,
                  model,
                  cams,
                  segs,
                  imgs,
                  landmarks,
                  landmarks_names,
                  key_vids,
                  symIdx=None,
                  frameId=0,
                  opt_model_dir=None,
                  save_name=None,
                  COMPUTE_OPT=True,
                  img_paths=None,
                  img_offset=None,
                  img_scales=None):

    mesh_v_opt_save_path = join(opt_model_dir,
                                'mesh_v_opt_no_mc_' + str(frameId) + '.ply')
    mesh_v_opt_mc_save_path = join(opt_model_dir,
                                   'mesh_v_opt_' + str(frameId) + '.ply')
    mesh_init_save_path = join(opt_model_dir,
                               'mesh_init_' + str(frameId) + '.ply')
    nViews = len(fposes)
    if not COMPUTE_OPT:
        if not exists(opt_model_dir):
            makedirs(opt_model_dir)
        dv = 0
        compute_texture(nViews, opt_model_dir, dv, model, frameId,
                        mesh_init_save_path, fposes, ftrans, fbetas,
                        '_no_refine', cams, imgs, segs, img_paths, img_offset,
                        img_scales)
        return

    # Write the initial mesh
    np_betas = np.zeros_like(model.betas)
    np_betas[:len(fbetas[0])] = fbetas[0]
    tmp = verts_decorated(v_template=model.v_template,
                          pose=ch.zeros_like(model.pose.r),
                          trans=ch.zeros_like(model.trans),
                          J=model.J_regressor,
                          kintree_table=model.kintree_table,
                          betas=ch.array(np_betas),
                          weights=model.weights,
                          posedirs=model.posedirs,
                          shapedirs=model.shapedirs,
                          bs_type='lrotmin',
                          bs_style='lbs',
                          f=model.f)
    tmp_mesh = Mesh(v=tmp.r, f=tmp.f)

    tmp_path = join(opt_model_dir, 'mesh_init_' + str(frameId) + '.ply')
    tmp_mesh.write_ply(tmp_path)
    del tmp

    assert (nViews == len(cams))
    assert (nViews == len(segs))
    assert (nViews == len(imgs))

    # Define a displacement vector. We set a small non-zero displacement as initialization.
    dv = ch.array(np.random.rand(model.r.shape[0], 3) / 1000.)

    # Cell structure for ARAP
    f = model.f
    _, A3, A = edgesIdx(nV=dv.shape[0], f=f, save_dir='.', name='smal')
    wedge = wedges(A3, dv)

    s = np.zeros_like(dv)
    arap = ARAP(reg_e=MatVecMult(A3.T,
                                 model.ravel() + dv.ravel()).reshape(-1, 3),
                model_e=MatVecMult(A3.T, model.ravel()).reshape(-1, 3),
                w=wedge,
                A=A)

    k_arap = settings['ref_k_arap_per_view'] * nViews
    for weight, part in zip(settings['ref_W_arap_values'],
                            settings['ref_W_arap_parts']):
        k_arap, W_per_vertex = get_arap_part_weights(
            A, k_arap, [part], [weight])  #, animal_name) # was only Head

    W = np.zeros((W_per_vertex.shape[0], 3))
    for i in range(3):
        W[:, i] = W_per_vertex

    k_lap = settings['ref_k_lap'] * nViews * W
    k_sym = settings['ref_k_sym'] * nViews
    k_keyp = settings['ref_k_keyp_weight'] * nViews

    # Load already computed mesh
    if not exists(opt_model_dir):
        makedirs(opt_model_dir)
    shape_model, compute = load_shape_models(nViews, opt_model_dir, dv, model,
                                             frameId, mesh_v_opt_save_path,
                                             fposes, ftrans, fbetas)

    mv = None

    # Remove inside mouth faces
    '''
    if settings['ref_remove_inside_mouth']:
        # Giraffe
        faces_orig = shape_model[0].f.copy()
        im_v = im_up_v + im_down_v
        idx = [np.where(model.f == ix)[0] for ix in im_v]
        idx = np.concatenate(idx).ravel()
        for i in range(nViews):
            shape_model[i].f = np.delete(shape_model[i].f, idx, 0)
    '''

    if compute:
        objs = {}

        FIX_CAM = True
        free_variables = []
        kp_weights = k_keyp * np.ones((landmarks[0].shape[0], 1))
        print('removing shoulders, often badly annotated')
        kp_weights[landmarks_names.index('leftShoulder'), :] *= 0
        kp_weights[landmarks_names.index('rightShoulder'), :] *= 0
        objs_pose = None
        j2d = None
        #k_silh_term = settings['ref_k_silh_term']
        k_m2s = settings['ref_k_m2s']
        k_s2m = settings['ref_k_s2m']

        objs, params_, j2d = set_pose_objs(shape_model,
                                           cams,
                                           landmarks,
                                           key_vids,
                                           kp_weights=kp_weights,
                                           FIX_CAM=FIX_CAM,
                                           ONLY_KEYP=True,
                                           OPT_SHAPE=False)

        if np.any(k_arap) != 0:
            objs['arap'] = k_arap * arap
        if k_sym != 0:
            objs['sym_0'] = k_sym * (ch.abs(dv[:, 0] - dv[symIdx, 0]))
            objs['sym_1'] = k_sym * (ch.abs(dv[:, 1] + dv[symIdx, 1] -
                                            0.00014954))
            objs['sym_2'] = k_sym * (ch.abs(dv[:, 2] - dv[symIdx, 2]))
        if np.any(k_lap) != 0:
            lap_op = np.asarray(
                laplacian(Mesh(v=dv, f=shape_model[0].f)).todense())
            objs['lap'] = k_lap * ch.dot(lap_op, dv)

        mv = None
        mv2 = MeshViewers(shape=(1, nViews))  #None
        vc = np.ones_like(dv)
        dv_r = fit_silhouettes_pyramid_opt(objs,
                                           shape_model,
                                           dv,
                                           segs,
                                           cams,
                                           j2d=j2d,
                                           weights=1.,
                                           mv=mv,
                                           imgs=imgs,
                                           s2m_weights=k_s2m,
                                           m2s_weights=k_m2s,
                                           max_iter=100,
                                           free_variables=free_variables,
                                           vc=vc,
                                           symIdx=symIdx,
                                           mv2=mv2,
                                           objs_pose=objs_pose)

        # Save result image
        for i in range(nViews):
            img_res = render_mesh(Mesh(shape_model[i].r, shape_model[i].f),
                                  imgs[i].shape[1],
                                  imgs[i].shape[0],
                                  cams[i],
                                  img=imgs[i],
                                  world_frame=True)
            img_result = np.hstack((imgs[i], img_res * 255.))
            save_img_path = save_name[i].replace('.pkl', '_v_opt.png')
            cv2.imwrite(save_img_path, img_result)

        shape_model[0].pose[:] = 0
        shape_model[0].trans[:] = 0
        V = shape_model[0].r.copy()
        vm = V[symIdx, :].copy()
        vm[:, 1] = -1 * vm[:, 1]
        V2 = (V + vm) / 2.0

        mesh_out = Mesh(v=V2, f=shape_model[0].f)
        mesh_out.show()
        mesh_out.write_ply(mesh_v_opt_save_path)

        save_dv_data_path = mesh_v_opt_save_path.replace('.ply', '_dv.pkl')
        dv_data = {'betas': shape_model[0].betas.r, 'dv': dv_r}
        pkl.dump(dv_data, open(save_dv_data_path, 'wb'))

    compute_texture(nViews, opt_model_dir, dv, model, frameId,
                    mesh_v_opt_save_path, fposes, ftrans, fbetas, '_non_opt',
                    cams, imgs, segs, img_paths, img_offset, img_scales)

    return
Example #9
    from mycore.io import load_animal_model
    model = load_animal_model(model_name)
    model.v_template[:] = mesh.v
    uv_mesh = Mesh(filename='smal_00781_4_all_template_w_tex_uv_001.obj')

    # show texture
    mesh.ft = uv_mesh.ft
    mesh.vt = uv_mesh.vt
    mesh.set_vertex_colors("white")

    mesh.texture_filepath = texture_filename

    # Read poses
    for frame in frames:
        data = pkl.load(open(join(pose_location, 'frame' + frame + '.pkl'),
                             'rb'),
                        encoding='latin1')
        pose = data['pose']

        model.pose[:] = pose
        mesh.v = model.r.copy()
        v = mesh.v.copy()
        mesh.v[:, 1] = -v[:, 1]
        mesh.v[:, 2] = -v[:, 2]

        mv = MeshViewers(shape=(1, 1))
        mv[0][0].set_background_color(np.ones(3))
        mv[0][0].set_static_meshes([mesh])
        import pdb
        pdb.set_trace()