Example #1
def test_mean_pointcloud_type():
    points = np.array([[1, 2, 3], [1, 1, 1]])
    trilist = np.array([0, 1, 2])
    pcs = [TriMesh(points, trilist), TriMesh(points + 2, trilist)]
    mean_pc = mean_pointcloud(pcs)
    assert isinstance(mean_pc, TriMesh)
    assert_allclose(mean_pc.points, points + 1)
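mean_pointcloud returns the same type as its inputs, which is what the test above exercises with TriMesh. A minimal sketch of the same behaviour with plain PointClouds (assuming menpo's public API):

import numpy as np
from menpo.shape import PointCloud, mean_pointcloud

pcs = [PointCloud(np.zeros([4, 2])), PointCloud(np.ones([4, 2]))]
mean_pc = mean_pointcloud(pcs)
# mean_pc is a PointCloud and every point is [0.5, 0.5]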
Example #2
def calculate_errors_4DMaja_real(path_fits, path_gt, model):
    r"""
    Parameters
    ----------
    path_fits : `str`
        Directory that contains your reconstructed meshes. Mesh filenames
        should match the corresponding ground truth mesh filenames, plus
        the suffix of the mesh type (e.g. <frame_number>.obj,
        <frame_number>.ply).
    path_gt : `str`
        Full path of the ground truth mesh.
    model : `str`
        The model template you are using. Should be one of:
        'LSFM', 'Basel', 'Surrey'.

    Returns
    -------
    errors : `list`
        The error per vertex between the ground truth mesh and a mean mesh
        calculated from your reconstructed meshes.
    """

    path_fits = Path(path_fits)
    path_gt = Path(path_gt)

    # load meshes' filenames
    filenames = [p.name for p in path_fits.glob('*')]
    filenames.sort()

    # load gt_mesh (there is only one: Maja's neutral face)
    gt_mesh = m3io.import_mesh(path_gt, texture_resolver=None)
    gt_mesh.landmarks['ibug49'] = PointCloud(gt_mesh.points[lms_indexes][19:])

    errors = [0]

    # accumulate fits
    acc_points = np.zeros((gt_mesh.n_points, 3))
    for i, filename in enumerate(print_progress(filenames)):

        fit_3d = m3io.import_mesh(path_fits / filename, texture_resolver=None)
        if model == 'Surrey':
            lms = face_ibug_49_to_face_ibug_49(
                PointCloud(fit_3d.points[eos.load_eos_low_res_lm_index()]))
            fit_3d = eos.upsample_eos_low_res_to_fw_no_texture(fit_3d)
            fit_3d.landmarks['ibug49'] = lms
        elif model == 'LSFM' or model == 'Basel':
            fit_3d.landmarks['ibug49'] = PointCloud(
                fit_3d.points[lms_indexes][19:])
        else:
            raise ValueError("Unsupported model template: {}. Should be one of "
                             "'LSFM', 'Basel', 'Surrey'.".format(model))

        acc_points += fit_3d.points

    # create mean_fit_3d
    mean_fit_3d = TriMesh(acc_points / len(filenames), gt_mesh.trilist)
    mean_fit_3d.landmarks['ibug49'] = PointCloud(
        mean_fit_3d.points[lms_indexes][19:])

    # calculate per vertex errors between the neutral gt_mesh and the mean_fit_3d
    gt_mesh, eval_mask = landmark_and_mask_gt_mesh(gt_mesh, distance=1)
    errors[0], _, _ = mask_align_and_calculate_dense_error(
        mean_fit_3d, gt_mesh, eval_mask)

    return errors
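A usage sketch with hypothetical paths (the directory layout and the choice of the 'LSFM' template are assumptions for illustration, not part of the function):

# hypothetical paths - point these at your own fits and ground truth
errors = calculate_errors_4DMaja_real('fits/maja/', 'gt/maja_neutral.obj',
                                      model='LSFM')
print('mean per-vertex error: {:.4f}'.format(np.mean(errors[0])))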
Example #3
def test_trimesh_face_normals():
    points = np.array([[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0],
                       [0.0, 1.0, 0.0]])
    trilist = np.array([[0, 1, 3], [1, 2, 3]])
    expected_normals = np.array(
        [[-np.sqrt(3) / 3, -np.sqrt(3) / 3, np.sqrt(3) / 3],
         [-0, -0, 1]])
    trimesh = TriMesh(points, trilist)
    face_normals = trimesh.tri_normals()
    assert_allclose(face_normals, expected_normals)
Example #4
def test_trimesh_boundary_tri_index():
    points = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0], [0.0, 0.5, 0.0],
                       [-0.5, 0.5, 0.0], [-0.5, -0.5, 0.0], [0.5, -0.5, 0.0],
                       [0.0, -1.0, 0.0]])
    trilist = np.array([[0, 2, 3], [2, 0, 1], [4, 0, 3], [0, 5, 1], [4, 5, 0],
                        [5, 4, 6]])
    trimesh = TriMesh(points, trilist)
    boundary_tri_index = trimesh.boundary_tri_index()
    # The "middle" triangle is [4, 5, 0] which is surrounded on all sides
    # [5, 4, 6] has two edges that have no neighbours
    assert_allclose(boundary_tri_index, [True, True, True, True, False, True])
Example #5
def test_trimesh_face_normals():
    points = np.array([[0.0, 0.0, -1.0],
                       [1.0, 0.0, 0.0],
                       [1.0, 1.0, 0.0],
                       [0.0, 1.0, 0.0]])
    trilist = np.array([[0, 1, 3],
                        [1, 2, 3]])
    expected_normals = np.array([[-np.sqrt(3)/3, -np.sqrt(3)/3, np.sqrt(3)/3],
                                 [-0, -0, 1]])
    trimesh = TriMesh(points, trilist)
    face_normals = trimesh.tri_normals()
    assert_allclose(face_normals, expected_normals)
Example #6
def test_trimesh_copy():
    points = np.ones([10, 3])
    trilist = np.ones([10, 3])
    landmarks = PointCloud(np.ones([3, 3]), copy=False)

    tmesh = TriMesh(points, trilist=trilist, copy=False)
    tmesh.landmarks['test'] = landmarks
    tmesh_copy = tmesh.copy()

    assert (not is_same_array(tmesh_copy.points, tmesh.points))
    assert (not is_same_array(tmesh_copy.trilist, tmesh.trilist))
    assert (not is_same_array(tmesh_copy.landmarks['test'].lms.points,
                              tmesh.landmarks['test'].lms.points))
Example #7
def test_trimesh_vertex_normals():
    points = np.array([[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0],
                       [0.0, 1.0, 0.0]])
    trilist = np.array([[0, 1, 3], [1, 2, 3]])
    # 0 and 2 are the corners of the triangles and so they maintain the
    # face normals. The other two are the re-normalised vertices:
    # normalise(n0 + n2)
    expected_normals = np.array(
        [[-np.sqrt(3) / 3, -np.sqrt(3) / 3, np.sqrt(3) / 3],
         [-0.32505758, -0.32505758, 0.88807383],
         [0, 0, 1],
         [-0.32505758, -0.32505758, 0.88807383]])
    trimesh = TriMesh(points, trilist)
    vertex_normals = trimesh.vertex_normals()
    assert_allclose(vertex_normals, expected_normals)
Example #8
def duplicate_vertices(mesh):
    # generate a new mesh with unique vertices per triangle
    # (i.e. duplicate verts so that each triangle is unique)
    old_to_new = mesh.trilist.ravel()
    new_trilist = np.arange(old_to_new.shape[0]).reshape([-1, 3])
    new_points = mesh.points[old_to_new]
    return TriMesh(new_points, trilist=new_trilist), old_to_new
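A quick sketch of the unwelding behaviour on a two-triangle square; all values follow directly from the function above:

import numpy as np
from menpo.shape import TriMesh

points = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
trilist = np.array([[0, 1, 3], [1, 2, 3]])
flat, old_to_new = duplicate_vertices(TriMesh(points, trilist))
assert flat.n_points == 6  # 2 triangles x 3 vertices, nothing shared
# per-triangle geometry is unchanged
assert np.all(flat.points[flat.trilist] == points[trilist])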
Example #9
def test_trimesh_init_2d_grid():
    tm = TriMesh.init_2d_grid([10, 10])
    assert tm.n_points == 100
    assert tm.n_dims == 2
    # 162 = 9 * 9 * 2
    assert_allclose(tm.trilist.shape, (162, 3))
    assert_allclose(tm.range(), [9, 9])
Example #10
def test_chain_pwa_before_tps():
    a_tm = TriMesh(np.random.random([10, 2]))
    b = PointCloud(np.random.random([10, 2]))
    pwa = PiecewiseAffine(a_tm, b)
    tps = ThinPlateSplines(b, a_tm)
    chain = pwa.compose_before(tps)
    assert (isinstance(chain, TransformChain))
Example #11
    def constrain_to_pointcloud(self, pointcloud, trilist=None):
        r"""
        Restricts this mask to be equal to the convex hull around a point cloud

        Parameters
        ----------
        pointcloud : :map:`PointCloud`
            The pointcloud whose convex hull the mask should be
            constrained to.

        trilist : ``(t, 3)`` `ndarray`, optional
            Triangle list to be used on the points when selecting
            the mask region. If ``None``, Delaunay triangulation is
            performed on the points.

            Default: None
        """
        from menpo.transform.piecewiseaffine import PiecewiseAffine
        from menpo.transform.piecewiseaffine import TriangleContainmentError

        if self.n_dims != 2:
            raise ValueError("can only constrain mask on 2D images.")

        if trilist is not None:
            from menpo.shape import TriMesh
            pointcloud = TriMesh(pointcloud.points, trilist)

        pwa = PiecewiseAffine(pointcloud, pointcloud)
        try:
            pwa.apply(self.indices)
        except TriangleContainmentError as e:
            self.from_vector_inplace(~e.points_outside_source_domain)
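A minimal sketch of calling this method, assuming a menpo BooleanImage (which exposes constrain_to_pointcloud) and a triangular point cloud:

import numpy as np
from menpo.image import BooleanImage
from menpo.shape import PointCloud

mask = BooleanImage.init_blank((60, 60))
triangle = PointCloud(np.array([[10., 10.], [10., 50.], [50., 30.]]))
mask.constrain_to_pointcloud(triangle)
# the mask is now True only for pixels inside the triangle's convex hull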
Example #12
def test_trimesh_creation_copy_warning():
    points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
    trilist = np.array([[0, 1, 3], [1, 2, 3]], order='F')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        TriMesh(points, trilist=trilist, copy=False)
        assert len(w) == 1
Example #13
def face_ibug_68_to_face_ibug_68_trimesh(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh

    n_expected_points = 68
    validate_input(pcloud, n_expected_points)

    tri_list = np.array([[47, 29, 28], [44, 43, 23], [38, 20, 21],
                         [47, 28, 42], [49, 61, 60], [40, 41, 37],
                         [37, 19, 20], [28, 40, 39], [38, 21, 39],
                         [36,  1,  0], [48, 59,  4], [49, 60, 48],
                         [67, 59, 60], [13, 53, 14], [61, 51, 62],
                         [57,  8,  7], [52, 51, 33], [61, 67, 60],
                         [52, 63, 51], [66, 56, 57], [35, 30, 29],
                         [53, 52, 35], [37, 36, 17], [18, 37, 17],
                         [37, 38, 40], [38, 37, 20], [19, 37, 18],
                         [38, 39, 40], [28, 29, 40], [41, 36, 37],
                         [27, 39, 21], [41, 31,  1], [30, 32, 31],
                         [33, 51, 50], [33, 30, 34], [31, 40, 29],
                         [36,  0, 17], [31,  2,  1], [31, 41, 40],
                         [ 1, 36, 41], [31, 49,  2], [ 2, 49,  3],
                         [60, 59, 48], [ 3, 49, 48], [31, 32, 50],
                         [48,  4,  3], [59,  5,  4], [58, 67, 66],
                         [ 5, 59, 58], [58, 59, 67], [ 7,  6, 58],
                         [66, 57, 58], [13, 54, 53], [ 7, 58, 57],
                         [ 6,  5, 58], [50, 61, 49], [62, 67, 61],
                         [31, 50, 49], [32, 33, 50], [30, 33, 32],
                         [34, 52, 33], [35, 52, 34], [53, 63, 52],
                         [62, 63, 65], [62, 51, 63], [66, 65, 56],
                         [63, 53, 64], [62, 66, 67], [62, 65, 66],
                         [57, 56,  9], [65, 63, 64], [ 8, 57,  9],
                         [ 9, 56, 10], [10, 56, 11], [11, 56, 55],
                         [11, 55, 12], [56, 65, 55], [55, 64, 54],
                         [55, 65, 64], [55, 54, 12], [64, 53, 54],
                         [12, 54, 13], [45, 46, 44], [35, 34, 30],
                         [14, 53, 35], [15, 46, 45], [27, 28, 39],
                         [27, 42, 28], [35, 29, 47], [30, 31, 29],
                         [15, 35, 46], [15, 14, 35], [43, 22, 23],
                         [27, 21, 22], [24, 44, 23], [44, 47, 43],
                         [43, 47, 42], [46, 35, 47], [26, 45, 44],
                         [46, 47, 44], [25, 44, 24], [25, 26, 44],
                         [16, 15, 45], [16, 45, 26], [22, 42, 43],
                         [50, 51, 61], [27, 22, 42]])
    new_pcloud = TriMesh(pcloud.points, tri_list)

    mapping = OrderedDict()
    mapping['tri'] = np.arange(new_pcloud.n_points)

    return new_pcloud, mapping
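A usage sketch with a stand-in 68-point detection (random points, purely illustrative):

import numpy as np
from menpo.shape import PointCloud

detection = PointCloud(np.random.random([68, 2]))
trimesh_68, mapping = face_ibug_68_to_face_ibug_68_trimesh(detection)
print(trimesh_68.n_tris, list(mapping.keys()))  # 'tri' is the only label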
Example #14
def eye_ibug_close_17_to_eye_ibug_close_17_trimesh(pcloud):
    r"""
    Apply the IBUG 17-point close eye semantic labels, with trimesh
    connectivity.

    The semantic labels applied are as follows:

      - tri
    """
    from menpo.shape import TriMesh

    n_expected_points = 17
    validate_input(pcloud, n_expected_points)

    tri_list = np.array([[10, 11, 13], [ 3, 13,  2], [ 4, 14,  3],
                         [15,  5, 16], [12, 11,  0], [13, 14, 10],
                         [13, 12,  2], [14, 13,  3], [ 0,  1, 12],
                         [ 2, 12,  1], [13, 11, 12], [ 9, 10, 14],
                         [15,  9, 14], [ 7,  8, 15], [ 5,  6, 16],
                         [15, 14,  4], [ 7, 15, 16], [ 8,  9, 15],
                         [15,  4,  5], [16,  6,  7]])

    new_pcloud = TriMesh(pcloud.points, tri_list, copy=False)

    mapping = OrderedDict()
    mapping['tri'] = np.arange(new_pcloud.n_points)

    return new_pcloud, mapping
Example #16
def getTriMeshfromPly(path):
    data = []
    with open(path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=' ')
        for row in csv_reader:
            data.append(row)

    flag = False
    points = []
    trilist = []
    count = 0
    for row in range(len(data)):
        if (data[row][0] == 'element') and (data[row][1] == 'vertex'):
            numOfVertices = int(data[row][2])
        if flag and count < numOfVertices:
            # vertex line: x y z, quantised to 6 decimal places as before
            points.append([round(float(v), 6) for v in data[row][:3]])
            count += 1
        elif flag and count >= numOfVertices:
            if data[row][0] == '3':
                trilist.append(
                    [int(data[row][1]),
                     int(data[row][2]),
                     int(data[row][3])])
        if (data[row][0] == 'end_header'):
            flag = True
    points_np = np.array(points)
    trilist_np = np.array(trilist)
    return TriMesh(points_np, trilist_np)
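Note this hand-rolled parser only handles ASCII .ply files with the vertex element declared before the faces. A usage sketch (the path is hypothetical):

mesh = getTriMeshfromPly('scan.ply')
print(mesh.n_points, mesh.n_tris)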
Example #17
def chain_tps_after_pwa_test():
    a_tm = TriMesh(np.random.random([10, 2]))
    b = PointCloud(np.random.random([10, 2]))
    pwa = PiecewiseAffine(a_tm, b)
    tps = ThinPlateSplines(b, a_tm)
    chain = tps.compose_after(pwa)
    assert(isinstance(chain, TransformChain))
Example #18
def test_trimesh_init_2d_grid():
    tm = TriMesh.init_2d_grid([10, 10])
    assert tm.n_points == 100
    assert tm.n_dims == 2
    # 162 = 9 * 9 * 2
    assert_allclose(tm.trilist.shape, (162, 3))
    assert_allclose(tm.range(), [9, 9])
Example #19
    def __init__(self, source, target):
        from menpo.shape import TriMesh  # to avoid circular import
        if not isinstance(source, TriMesh):
            source = TriMesh(source.points)
        Alignment.__init__(self, source, target)
        if self.n_dims != 2:
            raise ValueError("source and target must be 2 "
                             "dimensional")
Example #20
def test_trimesh_init_from_depth_image():
    fake_z = np.random.uniform(size=(10, 10))
    tm = TriMesh.init_from_depth_image(Image(fake_z))
    assert tm.n_points == 100
    assert tm.n_dims == 3
    assert_allclose(tm.range()[:2], [9, 9])
    assert tm.points[:, -1].max() <= 1.0
    assert tm.points[:, -1].min() >= 0.0
Example #21
def test_trimesh_vertex_normals():
    points = np.array([[0.0, 0.0, -1.0],
                       [1.0, 0.0, 0.0],
                       [1.0, 1.0, 0.0],
                       [0.0, 1.0, 0.0]])
    trilist = np.array([[0, 1, 3],
                        [1, 2, 3]])
    # 0 and 2 are the corners of the triangles and so they maintain the
    # face normals. The other two are the re-normalised vertices:
    # normalise(n0 + n2)
    expected_normals = np.array([[-np.sqrt(3)/3, -np.sqrt(3)/3, np.sqrt(3)/3],
                                 [-0.32505758,  -0.32505758, 0.88807383],
                                 [0, 0, 1],
                                 [-0.32505758,  -0.32505758, 0.88807383]])
    trimesh = TriMesh(points, trilist)
    vertex_normals = trimesh.vertex_normals()
    assert_allclose(vertex_normals, expected_normals)
Example #22
def test_trimesh_creation():
    points = np.array([[0, 0, 0],
                       [1, 0, 0],
                       [1, 1, 0],
                       [0, 1, 0]])
    trilist = np.array([[0, 1, 3],
                        [1, 2, 3]])
    TriMesh(points, trilist)
Example #24
def test_trimesh_n_tris():
    points = np.array([[0, 0, 0],
                       [1, 0, 0],
                       [1, 1, 0],
                       [0, 1, 0]])
    trilist = np.array([[0, 1, 3],
                        [1, 2, 3]])
    trimesh = TriMesh(points, trilist)
    assert(trimesh.n_tris == 2)
Example #25
def test_trimesh_from_tri_mask():
    points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
    trilist = np.array([[0, 1, 3], [1, 2, 3]])
    mask = np.zeros(2, dtype=bool)
    mask[0] = True
    trimesh = TriMesh(points, trilist=trilist).from_tri_mask(mask)
    assert (trimesh.n_tris == 1)
    assert (trimesh.n_points == 3)
    assert_allclose(trimesh.points, points[trilist[0]])
Example #26
def load_tassos_lsfm_combined_model(path):
    m = loadmat(str(path))
    mean = TriMesh(m['mean'].reshape([-1, 3]), trilist=m['trilist'])
    return {
        'shape_model': PCAModel.init_from_components(
            m['components'].T,  m['eigenvalues'].ravel(),
            mean, 8888, True),
        'n_id_comps': int(m['n_trunc_ids'][0][0]),
        'n_exp_comps': int(m['n_trunc_expressions'][0][0])
    }
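A sketch of drawing an instance from the returned model; the .mat path is hypothetical, while PCAModel.instance is standard menpo API:

import numpy as np

model = load_tassos_lsfm_combined_model('lsfm_combined.mat')  # hypothetical path
sm = model['shape_model']
mean_mesh = sm.instance(np.zeros(sm.n_active_components))  # the mean TriMesh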
Example #27
def test_trimesh_creation_copy_false():
    points = np.array([[0, 0, 0],
                       [1, 0, 0],
                       [1, 1, 0],
                       [0, 1, 0]])
    trilist = np.array([[0, 1, 3],
                        [1, 2, 3]])
    tm = TriMesh(points, trilist, copy=False)
    assert (is_same_array(tm.points, points))
    assert (is_same_array(tm.trilist, trilist))
Example #28
def generative_construct(DB, fitter, trilist, label=None,
                         fit_group='mean', train_group='final',
                         feature=igo, diagonal=200, scales=(0.5, 1.0),
                         n_processes=24, model_class=HolisticAAM,
                         increment_model=None, original_shape_model=None,
                         shape_forgetting_factor=1.0,
                         appearance_forgetting_factor=1.0, max_iters=10):
    # fix appearance, optimise shape
    error = []

    # multi-process fitting
    frs = mp_fit(DB, fitter, group=fit_group, n_processes=n_processes,
                 max_iters=max_iters)

    for fr, img in zip(frs, DB):
        img.landmarks[train_group] = TriMesh(fr.final_shape.points, trilist=trilist)
        error.append(fr.final_error(alignment_error))
        if label:
            img.landmarks[label] = TriMesh(fr.final_shape.points, trilist=trilist)

    del fitter
    if increment_model:
        pdm = copy.deepcopy(increment_model)
        pdm.increment(DB, verbose=True, group=train_group,
                      shape_forgetting_factor=shape_forgetting_factor,
                      appearance_forgetting_factor=appearance_forgetting_factor)
    else:
        pdm = model_class(DB, holistic_features=feature, diagonal=diagonal,
                          scales=scales, verbose=True, group=train_group)

    if original_shape_model:
        pdm.shape_models = original_shape_model

    if increment_model:
        del increment_model

    return pdm, np.mean(error)
Example #29
def from_UV_2_3D(uv, uv_layout='oval', topology='full', plot=False):
    res = uv.shape[0]
    info_dict = import_uv_info(uv, res, uv_layout=uv_layout, topology=topology)

    tmask = info_dict['tmask']
    tc_ps = info_dict['tcoords_pixel_scaled']
    tmask_im = info_dict['tmask_image']
    trilist = info_dict['trilist']

    # uv = interpolation_of_uv_xyz(uv, tmask).as_unmasked()
    idx = tc_ps.points.astype(int).T
    x = uv.pixels[0][idx[0, :], idx[1, :]]
    y = uv.pixels[1][idx[0, :], idx[1, :]]
    z = uv.pixels[2][idx[0, :], idx[1, :]]
    points = np.hstack((x[:, None], y[:, None], z[:, None]))
    if plot:
        TriMesh(points, trilist).view()
    return TriMesh(points, trilist)
Example #30
def test_trimesh_init_from_depth_image_masked():
    fake_z = np.random.uniform(size=(10, 10))
    mask = np.zeros(fake_z.shape, dtype=bool)
    mask[2:6, 2:6] = True
    im = MaskedImage(fake_z, mask=mask)
    tm = TriMesh.init_from_depth_image(im)
    assert tm.n_points == 16
    assert tm.n_dims == 3
    assert_allclose(tm.range()[:2], [3, 3])
    assert tm.points[:, -1].max() <= 1.0
    assert tm.points[:, -1].min() >= 0.0
Example #31
def _construct_shape_type(points, trilist, tcoords, texture,
                          colour_per_vertex):
    r"""
    Construct the correct Shape subclass given the inputs. TexturedTriMesh
    can only be created when tcoords and texture are available, ColouredTriMesh
    can only be created when colour_per_vertex is not None, and TriMesh
    can only be created when trilist is not None. The worst-case fallback
    is PointCloud.

    Parameters
    ----------
    points : ``(N, D)`` `ndarray`
        The N-D points.
    trilist : ``(N, 3)`` `ndarray` or ``None``
        Triangle list or None.
    tcoords : ``(N, 2)`` `ndarray` or ``None``
        Texture coordinates.
    texture : :map:`Image` or ``None``
        Texture.
    colour_per_vertex : ``(N, 1)`` or ``(N, 3)`` `ndarray` or ``None``
        The colour per vertex.

    Returns
    -------
    shape : :map:`PointCloud` or subclass
        The correct shape for the given inputs.
    """
    # Four different outcomes - either we have a textured mesh, a coloured
    # mesh or just a plain mesh or we fall back to a plain pointcloud.
    if trilist is None:
        obj = PointCloud(points, copy=False)
    elif tcoords is not None and texture is not None:
        obj = TexturedTriMesh(points,
                              tcoords,
                              texture,
                              trilist=trilist,
                              copy=False)
    elif colour_per_vertex is not None:
        obj = ColouredTriMesh(points,
                              trilist=trilist,
                              colours=colour_per_vertex,
                              copy=False)
    else:
        # TriMesh fall through
        obj = TriMesh(points, trilist=trilist, copy=False)

    if tcoords is not None and texture is None:
        warnings.warn('tcoords were found, but no texture was recovered, '
                      'reverting to an untextured mesh.')
    if texture is not None and tcoords is None:
        warnings.warn('texture was found, but no tcoords were recovered, '
                      'reverting to an untextured mesh.')

    return obj
Example #33
def _get_mean_model(model):
    """
    Get mean BFM mesh.

    Parameters:
        model (BFM dict model): BFM model

    Returns:
        mean_mesh (menpo.shape.mesh.base.TriMesh): mean mesh model
    """
    return TriMesh(points=np.reshape(model['shapeMU'], (-1, 3)),
                   trilist=model['tri'])
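A usage sketch, assuming a BFM .mat file loaded into a dict carrying the 'shapeMU' and 'tri' keys this helper expects (path and keys are assumptions):

from scipy.io import loadmat

bfm = loadmat('bfm_model.mat')  # hypothetical path
mean_mesh = _get_mean_model(bfm)
print(mean_mesh)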
Example #34
def face_ibug_68_to_face_ibug_51_trimesh(pcloud):
    r"""
    Apply the IBUG 51-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh

    # Apply face_ibug_68_to_face_ibug_51
    new_pcloud = face_ibug_68_to_face_ibug_51(pcloud)

    # This is in terms of the 51 points
    tri_list = np.array([[30, 12, 11], [27, 26, 6], [21, 3, 4], [30, 11, 25],
                         [32, 44, 43], [23, 24, 20], [20, 2, 3], [11, 23, 22],
                         [21, 4, 22], [32, 43, 31], [50, 42, 43], [44, 34, 45],
                         [35, 34, 16], [44, 50, 43], [35, 46, 34], [49, 39, 40],
                         [18, 13, 12], [36, 35, 18], [20, 19, 0], [1, 20, 0],
                         [20, 21, 23], [21, 20, 3], [2, 20, 1], [21, 22, 23],
                         [11, 12, 23], [24, 19, 20], [10, 22, 4], [13, 15, 14],
                         [16, 34, 33], [16, 13, 17], [14, 23, 12], [14, 24, 23],
                         [43, 42, 31], [14, 15, 33], [41, 50, 49], [41, 42, 50],
                         [49, 40, 41], [33, 44, 32], [45, 50, 44], [14, 33, 32],
                         [15, 16, 33], [13, 16, 15], [17, 35, 16], [18, 35, 17],
                         [36, 46, 35], [45, 46, 48], [45, 34, 46], [49, 48, 39],
                         [46, 36, 47], [45, 49, 50], [45, 48, 49], [48, 46, 47],
                         [39, 48, 38], [38, 47, 37], [38, 48, 47], [47, 36, 37],
                         [28, 29, 27], [18, 17, 13], [10, 11, 22],
                         [10, 25, 11], [18, 12, 30], [13, 14, 12], [26, 5, 6],
                         [10, 4, 5], [7, 27, 6], [27, 30, 26], [26, 30, 25],
                         [29, 18, 30], [9, 28, 27], [29, 30, 27], [8, 27, 7],
                         [8, 9, 27], [5, 25, 26], [33, 34, 44], [10, 5, 25]])

    new_pcloud = TriMesh(new_pcloud.points, trilist=tri_list, copy=False)

    mapping = OrderedDict()
    mapping['tri'] = np.arange(new_pcloud.n_points)

    return new_pcloud, mapping
Example #35
    def pseudoinverse(self):
        r"""
        The pseudoinverse of the transform - that is, the transform that
        results from swapping `source` and `target`, or more formally,
        negating the transform's parameters. If the transform has a true
        inverse this is returned instead.

        :type: ``type(self)``
        """
        from menpo.shape import PointCloud, TriMesh  # to avoid circular import
        new_source = TriMesh(self.target.points, self.source.trilist)
        new_target = PointCloud(self.source.points)
        return type(self)(new_source, new_target)
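For a PiecewiseAffine alignment this simply swaps the roles of source and target; a quick sketch:

import numpy as np
from menpo.shape import PointCloud, TriMesh
from menpo.transform import PiecewiseAffine

src = TriMesh(np.random.random([10, 2]))
tgt = PointCloud(np.random.random([10, 2]))
pwa = PiecewiseAffine(src, tgt)
inv = pwa.pseudoinverse()  # a PiecewiseAffine mapping target back to source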
Example #36
def non_rigid_icp(source, target, eps=1e-3, stiffness_values=None,
                  verbose=False, landmarks=None, lm_weight=None):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_values is not None:
        stiffness = stiffness_values
        if verbose:
            print('using user defined stiffness values: {}'.format(stiffness))
    else:
        # these values have been found empirically to perform well for
        # rigidly pre-aligned facial meshes
        stiffness = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default stiffness values: {}'.format(stiffness))

    if lm_weight is not None:
        if verbose:
            print('using user defined lm_weight values: {}'.format(lm_weight))
    else:
        # these values have been found empirically to perform well for
        # rigidly pre-aligned facial meshes
        lm_weight = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default lm_weight values: {}'.format(lm_weight))

    # to store per iteration information
    info = []

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))

    if landmarks is not None:
        if verbose:
            print("'{}' landmarks will be used as a landmark constraint.".format(landmarks))
        source_lm_index = source.distance_to(
            source.landmarks[landmarks].lms).argmin(axis=0)
        target_lms = target.landmarks[landmarks].lms
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    o = np.ones(n)

    for alpha, beta in zip(stiffness, lm_weight):
        alpha_M_kron_G_s = alpha * M_kron_G_s  # get the term for stiffness
        j = 0
        while True:  # iterate until convergence
            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot product of the normals is < 0.9, the point doesn't
            # contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            w_i = np.logical_and(w_i_n, w_i_e)
            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n, w_i_e), w_i_i)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n
            j = j + 1

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            # nullify the masked U values
            U[~w_i] = 0

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)), U]

            if landmarks is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i = D_s.dot(X)
            err = np.linalg.norm(X_prev - X, ord='fro')

            if landmarks is not None:
                src_lms = v_i[source_lm_index]
                # mean Euclidean distance between source and target landmarks
                lm_err = np.sqrt(((src_lms - U_L) ** 2).sum(axis=1)).mean()

            if verbose:
                v_str = ('a: {}, ({}) - total : {:.0%} norms: {:.0%} '
                         'edges: {:.0%}'.format(alpha, j, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmarks is not None:
                    v_str += ' beta: {}, lm_err: {:.5f}'.format(beta, lm_err)

                print(v_str)

            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err
            }
            if landmarks is not None:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
            info.append(info_dict)
            X_prev = X

            if err / np.sqrt(np.size(X_prev)) < eps:
                break

    # final result if we choose closest points
    point_corr, matched_tri_indices = closest_points_on_target(v_i)

    result = {
        'deformed_source': restore.apply(v_i),
        'matched_target': restore.apply(point_corr),
        'matched_tri_indices': matched_tri_indices,
        'info': info
    }

    if landmarks is not None:
        result['source_lm_index'] = source_lm_index

    return result
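A usage sketch; template and scan are placeholder names for two roughly rigidly aligned TriMesh instances:

result = non_rigid_icp(template, scan, verbose=True)
deformed = result['deformed_source']  # the template deformed onto the scan
matched = result['matched_target']    # closest scan point per template vertex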
Example #37
def non_rigid_icp_generator(source, target, eps=1e-3,
                            stiffness_weights=None, data_weights=None,
                            landmark_group=None, landmark_weights=None,
                            v_i_update_func=None, verbose=False):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(source.landmarks[landmark_group],
                                       target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print('using user-defined stiffness_weights')
        validate_weights('stiffness_weights', stiffness_weights,
                         source.n_points, verbose=verbose)
    else:
        # these values have been found empirically to perform well for
        # rigidly pre-aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default '
                  'stiffness_weights: {}'.format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print('using user defined '
                  'landmark_weights: {}'.format(landmark_weights))
    elif landmark_group is not None:
        # these values have been found empirically to perform well for
        # rigidly pre-aligned facial meshes
        landmark_weights = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default '
                  'landmark_weights: {}'.format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights('landmark_weights', landmark_weights, source.n_points,
                     n_iterations=n_iterations, verbose=False)

    if data_weights is not None:
        if verbose:
            print('using user-defined data_weights')
        validate_weights('data_weights', data_weights,
                         source.n_points, n_iterations=n_iterations,
                         verbose=verbose)
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print('Not customising data_weights')

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(zip(stiffness_weights,
                                                 landmark_weights,
                                                 data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError()
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else 'min: {:.2f}, max: {:.2f}'.format(alpha.min(),
                                                            alpha.max()))
            i_str = '{}/{}: stiffness: {}'.format(i, len(stiffness_weights), a_str)
            if landmark_group is not None:
                i_str += '  lm_weight: {}'.format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot product of the normals is < 0.9, the point doesn't
            # contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                          U * w_i[:, None]]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord='fro')
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                # mean Euclidean distance between source and target landmarks
                lm_err = np.sqrt(((src_lms - U_L) ** 2).sum(axis=1)).mean()

            if verbose:
                v_str = (' - {} stop crit: {:.3f}  '
                         'total: {:.0%}  norms: {:.0%}  '
                         'edges: {:.0%}'.format(j, stop_criterion,
                                                prop_w_i, prop_w_i_n,
                                                prop_w_i_e))
                if landmark_group is not None:
                    v_str += '  lm_err: {:.4f}'.format(lm_err)

                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err,
                'mask_normals': w_i_n,
                'mask_edges': w_i_e,
                'mask_all': w_i,
                'nearest_points': restore.apply(U),
                'deformation_per_step': delta_v_i
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            if landmark_group is not None:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
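The generator yields after every inner NICP iteration, so callers can monitor progress or stop early. A minimal driving loop (template and scan are placeholders as above):

instance, info = None, None
for instance, info in non_rigid_icp_generator(template, scan, verbose=True):
    if info['delta'] < 1e-6:  # example of a caller-side early exit
        break
final_mesh = instance  # the last yielded deformed template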
Example #38
def test_2d_trimesh_2d_positive_areas():
    t = TriMesh(np.array([[0, 0], [0, 1],
                          [1, 1], [1, 0]], dtype=float),
                trilist=np.array([[0, 2, 3], [0, 2, 1]]))
    assert np.all(t.tri_areas() > 0)