Example #1
    def non_rigid_icp(self, source, target):
        """
        Non-rigid ICP algorithm, ignoring landmarks.

        Parameters:
            source (menpo.shape.mesh.base.TriMesh): source mesh to be transformed
            target (menpo.shape.mesh.base.TriMesh): target mesh as the base

        Returns:
            transformed_mesh (menpo.shape.mesh.base.TriMesh): transformed source mesh
            training_info (dict): two lists ('loss' and 'regularized_loss') logged while training
        """
        start_time = time.time()

        # one more mesh file processed
        NonRigidIcp._mesh_counter += 1

        n_dims = source.n_dims
        transformed_mesh = source

        M, unique_edge_pairs = self._node_arc_incidence_matrix(source)

        # weight matrix
        G = np.identity(n_dims + 1)

        M_kron_G = sp.kron(M, G)

        # build octree for finding closest points on target.
        target_vtk = trimesh_to_vtk(target)
        closest_points_on_target = VTKClosestPointLocator(target_vtk)

        # log
        training_info = {'loss': [], 'regularized_loss': []}

        for i, (alpha, gamma) in enumerate(zip(self.stiffness_weights, self.data_weights), 1):
            if self.verbose:
                print("Epoch " + str(i) + " with stiffness " + str(alpha))
            transformed_mesh, err_info = self._non_rigid_icp_iter(transformed_mesh, target, closest_points_on_target,
                                                                  M_kron_G, alpha, gamma)
            for k in training_info.keys():
                training_info[k] += err_info[k]

        end_time = time.time()
        mesh_training_time = end_time - start_time
        if NonRigidIcp._num_of_meshes is not None:
            self._expected_remaining_time = str(datetime.timedelta(seconds=mesh_training_time * (NonRigidIcp._num_of_meshes - NonRigidIcp._mesh_counter)))
        else:
            self._expected_remaining_time = str(mesh_training_time) + " x # of mesh files"

        if self.verbose:
            print("average loss: {:.3f}\naverage regularized loss: {:.3f}\nexpected remaining time: {}"
                  .format(np.mean(training_info['loss']),
                          np.mean(training_info['regularized_loss']),
                          self._expected_remaining_time
                          )
                  )

        return transformed_mesh, training_info
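
The recurring pattern across these examples is to convert the target mesh to VTK once and reuse the resulting locator for many closest-point queries. A minimal, self-contained sketch of that pairing, assuming (as the imports in these snippets suggest) that VTKClosestPointLocator lives alongside trimesh_to_vtk in menpo3d.vtkutils:

    import menpo3d.io
    from menpo3d.vtkutils import trimesh_to_vtk, VTKClosestPointLocator

    # the bunny asset ships with menpo3d (see the round-trip test below)
    target = menpo3d.io.import_builtin_asset.bunny_obj()
    locator = VTKClosestPointLocator(trimesh_to_vtk(target))

    # nearest point on the target surface for each query point, plus the
    # index of the triangle that each nearest point lies on
    nearest, tri_indices = locator(target.points)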
Example #2
def calculate_dense_error(fit_3d_aligned, gt_mesh):
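    # for each ground-truth vertex, find the closest point on the aligned
    # 3D fit; the Euclidean distance to it is the per-vertex error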
    fit_vtk = trimesh_to_vtk(fit_3d_aligned)
    closest_points_on_fit = VTKClosestPointLocator(fit_vtk)
    nearest_points, tri_indices = closest_points_on_fit(gt_mesh.points)
    err_per_vertex = np.sqrt(
        np.sum((nearest_points - gt_mesh.points)**2, axis=1))

    # normalize the per-vertex error by the inter-ocular distance
    b, a = gt_mesh.landmarks['eye_corners'].lms.points
    inter_occ_distance = np.sqrt(((a - b)**2).sum())
    print('norm: {}'.format(inter_occ_distance))
    return err_per_vertex / inter_occ_distance
def per_vertex_occlusion_accurate(mesh):
    from menpo3d.vtkutils import trimesh_to_vtk
    import vtk
    tol = mesh.mean_edge_length() / 1000
    min_, max_ = mesh.bounds()
    z_min = min_[-1] - 10
    z_max = max_[-1] + 10

    ray_start = mesh.points.copy()
    ray_end = mesh.points.copy()
    points = mesh.points
    ray_start[:, 2] = z_min
    ray_end[:, 2] = z_max


    vtk_mesh = trimesh_to_vtk(mesh)

    obbTree = vtk.vtkOBBTree()
    obbTree.SetDataSet(vtk_mesh)
    obbTree.BuildLocator()


    vtk_points = vtk.vtkPoints()
    vtk_cellIds = vtk.vtkIdList()
    bad_val = tuple(ray_start[0])
    first_intersects = []
    # fire a ray through every vertex, parallel to the z axis and spanning
    # the whole mesh, and record its first intersection with the surface
    for start, end in zip(ray_start, ray_end):
        obbTree.IntersectWithLine(tuple(start), tuple(end), vtk_points,
                                  vtk_cellIds)
        data = vtk_points.GetData()
        if data.GetNumberOfTuples() > 0:
            first_intersects.append(data.GetTuple3(0))
        else:
            # the ray missed the mesh entirely - store a sentinel point that
            # is guaranteed to be far from the vertex
            first_intersects.append(bad_val)

    visible = np.linalg.norm(points - np.array(first_intersects), axis=1) < tol
    return visible
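
A minimal usage sketch for the helper above - `mesh` is a placeholder for any 3D menpo TriMesh, with the convention (implied by the z-aligned rays) that visibility is judged along the z axis:

    visible = per_vertex_occlusion_accurate(mesh)  # (n_points,) bool mask
    visible_points = mesh.points[visible]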
Example #5
def non_rigid_icp_generator(
    source,
    target,
    eps=1e-3,
    stiffness_weights=None,
    data_weights=None,
    landmark_group=None,
    landmark_weights=None,
    v_i_update_func=None,
    verbose=False,
):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(
            source.landmarks[landmark_group],
            target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range()**2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print("using user-defined stiffness_weights")
        validate_weights("stiffness_weights",
                         stiffness_weights,
                         source.n_points,
                         verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print("using default "
                  "stiffness_weights: {}".format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print("using user defined "
                  "landmark_weights: {}".format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, 0.5, 0, 0, 0, 0, 0]
        if verbose:
            print("using default "
                  "landmark_weights: {}".format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights(
        "landmark_weights",
        landmark_weights,
        source.n_points,
        n_iterations=n_iterations,
        verbose=False,
    )

    if data_weights is not None:
        if verbose:
            print("using user-defined data_weights")
        validate_weights(
            "data_weights",
            data_weights,
            source.n_points,
            n_iterations=n_iterations,
            verbose=verbose,
        )
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print("Not customising data_weights")

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims,
                               axis=1).ravel(), np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(
            zip(stiffness_weights, landmark_weights, data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError("per-vertex stiffness_weights must provide "
                                 "one value per source point")
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else "min: {:.2f}, max: {:.2f}".format(
                         alpha.min(), alpha.max()))
            i_str = "{}/{}: stiffness: {}".format(i, len(stiffness_weights),
                                                  a_str)
            if landmark_group is not None:
                i_str += "  lm_weight: {}".format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # if the dot product of the normals is less than 0.9, the point
            # does not contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [
                np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                U * w_i[:, None],
            ]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord="fro")
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt(((src_lms - U_L)**2).sum(axis=1)).mean()

            if verbose:
                v_str = (" - {} stop crit: {:.3f}  "
                         "total: {:.0%}  norms: {:.0%}  "
                         "edges: {:.0%}".format(j, stop_criterion, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmark_group is not None:
                    v_str += "  lm_err: {:.4f}".format(lm_err)

                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                "alpha": alpha,
                "iteration": j,
                "prop_omitted": prop_w_i,
                "prop_omitted_norms": prop_w_i_n,
                "prop_omitted_edges": prop_w_i_e,
                "delta": err,
                "mask_normals": w_i_n,
                "mask_edges": w_i_e,
                "mask_all": w_i,
                "nearest_points": restore.apply(U),
                "deformation_per_step": delta_v_i,
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            if landmark_group:
                info_dict["beta"] = beta
                info_dict["lm_err"] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(
                    src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
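
Since non_rigid_icp_generator yields once per solver iteration, a caller typically drains the generator and keeps the final instance. A hypothetical driver - `template`, `scan` and the 'face' landmark group are illustrative placeholders, not names from the source:

    deformed, info = None, None
    for deformed, info in non_rigid_icp_generator(template, scan,
                                                  landmark_group='face',
                                                  verbose=True):
        # each yield is one solver iteration: the current deformed source
        # (already restored to the original scale) plus a diagnostics dict
        pass
    final_mesh = deformed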
Example #6
def test_trimesh_to_vtk_and_back_is_same():
    bunny = menpo3d.io.import_builtin_asset.bunny_obj()
    bunny_vtk = trimesh_to_vtk(bunny)
    bunny_back = trimesh_from_vtk(bunny_vtk)
    assert_allclose(bunny.points, bunny_back.points)
    assert np.all(bunny.trilist == bunny_back.trilist)
Example #7
def test_trimesh_to_vtk_fails_on_2d_mesh():
    # trimesh_to_vtk only supports 3D meshes, so a 2D TriMesh must raise
    with raises(ValueError):
        points = np.random.random((5, 2))
        test_mesh = TriMesh(points)
        trimesh_to_vtk(test_mesh)
Example #8
def non_rigid_icp_generator(source, target, eps=1e-3,
                            stiffness_weights=None, data_weights=None,
                            landmark_group=None, landmark_weights=None,
                            v_i_update_func=None, verbose=False):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(source.landmarks[landmark_group],
                                       target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print('using user-defined stiffness_weights')
        validate_weights('stiffness_weights', stiffness_weights,
                         source.n_points, verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default '
                  'stiffness_weights: {}'.format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print('using user defined '
                  'landmark_weights: {}'.format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default '
                  'landmark_weights: {}'.format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights('landmark_weights', landmark_weights, source.n_points,
                     n_iterations=n_iterations, verbose=False)

    if data_weights is not None:
        if verbose:
            print('using user-defined data_weights')
        validate_weights('data_weights', data_weights,
                         source.n_points, n_iterations=n_iterations,
                         verbose=verbose)
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print('Not customising data_weights')

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(zip(stiffness_weights,
                                                 landmark_weights,
                                                 data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError("per-vertex stiffness_weights must provide "
                                 "one value per source point")
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else 'min: {:.2f}, max: {:.2f}'.format(alpha.min(),
                                                            alpha.max()))
            i_str = '{}/{}: stiffness: {}'.format(i, len(stiffness_weights), a_str)
            if landmark_group is not None:
                i_str += '  lm_weight: {}'.format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # if the dot product of the normals is less than 0.9, the point
            # does not contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                          U * w_i[:, None]]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord='fro')
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt(((src_lms - U_L) ** 2).sum(axis=1)).mean()

            if verbose:
                v_str = (' - {} stop crit: {:.3f}  '
                         'total: {:.0%}  norms: {:.0%}  '
                         'edges: {:.0%}'.format(j, stop_criterion,
                                                prop_w_i, prop_w_i_n,
                                                prop_w_i_e))
                if landmark_group is not None:
                    v_str += '  lm_err: {:.4f}'.format(lm_err)

                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err,
                'mask_normals': w_i_n,
                'mask_edges': w_i_e,
                'mask_all': w_i,
                'nearest_points': restore.apply(U),
                'deformation_per_step': delta_v_i
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            if landmark_group:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
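
For reference, the sparse stacks assembled into to_stack_A / to_stack_B in both generator variants pose, in the least-squares sense, an optimal-step non-rigid ICP problem in the spirit of Amberg et al. (2007), where X is the (4n x 3) stack of per-vertex affine transforms:

    \min_{X} \left\| \begin{bmatrix} \alpha \, (M \otimes G) \\ W D \\ \beta D_L \end{bmatrix} X - \begin{bmatrix} \mathbf{0} \\ W U \\ \beta U_L \end{bmatrix} \right\|_F^2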
Example #11
def non_rigid_icp(source, target, eps=1e-3, stiffness_values=None,
                  verbose=False, landmarks=None, lm_weight=None):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_values is not None:
        stiffness = stiffness_values
        if verbose:
            print('using user defined stiffness values: {}'.format(stiffness))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default stiffness values: {}'.format(stiffness))

    if lm_weight is not None:
        if verbose:
            print('using user defined lm_weight values: {}'.format(lm_weight))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        lm_weight = [5, 2, 0.5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default lm_weight values: {}'.format(lm_weight))

    # to store per iteration information
    info = []

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))

    if landmarks is not None:
        if verbose:
            print("'{}' landmarks will be used as a landmark constraint.".format(landmarks))
        source_lm_index = source.distance_to(
            source.landmarks[landmarks].lms).argmin(axis=0)
        target_lms = target.landmarks[landmarks].lms
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    o = np.ones(n)

    for alpha, beta in zip(stiffness, lm_weight):
        alpha_M_kron_G_s = alpha * M_kron_G_s  # get the term for stiffness
        j = 0
        while True:  # iterate until convergence
            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # if the dot product of the normals is less than 0.9, the point
            # does not contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            w_i = np.logical_and(w_i_n, w_i_e)
            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n, w_i_e), w_i_i)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n
            j = j + 1

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            # nullify the masked U values
            U[~w_i] = 0

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)), U]

            if landmarks:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i = D_s.dot(X)
            err = np.linalg.norm(X_prev - X, ord='fro')

            if landmarks is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt(((src_lms - U_L) ** 2).sum(axis=1)).mean()

            if verbose:
                v_str = ('a: {}, ({}) - total : {:.0%} norms: {:.0%} '
                         'edges: {:.0%}'.format(alpha, j, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmarks is not None:
                    v_str += ' beta: {}, lm_err: {:.5f}'.format(beta, lm_err)

                print(v_str)

            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err
            }
            if landmarks:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
            info.append(info_dict)
            X_prev = X

            if err / np.sqrt(np.size(X_prev)) < eps:
                break

    # final correspondences: the closest points on the target to the
    # deformed source
    point_corr = closest_points_on_target(v_i)[0]

    result = {
        'deformed_source': restore.apply(v_i),
        'matched_target': restore.apply(point_corr),
        'matched_tri_indices': tri_indices,
        'info': info
    }

    if landmarks is not None:
        result['source_lm_index'] = source_lm_index

    return result
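
A hypothetical call showing how the returned dict is unpacked - `src`, `tgt` and the 'face' landmark group are placeholders, not names from the source:

    result = non_rigid_icp(src, tgt, landmarks='face', verbose=True)
    deformed_points = result['deformed_source']  # warped source points
    matched = result['matched_target']           # closest target points
    history = result['info']                     # per-iteration diagnostics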
Example #12
def non_rigid_icp(source,
                  target,
                  eps=1e-3,
                  stiffness_values=None,
                  verbose=False,
                  landmarks=None,
                  lm_weight=None):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range()**2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_values is not None:
        stiffness = stiffness_values
        if verbose:
            print('using user defined stiffness values: {}'.format(stiffness))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default stiffness values: {}'.format(stiffness))

    if lm_weight is not None:
        if verbose:
            print('using user defined lm_weight values: {}'.format(lm_weight))
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        lm_weight = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default lm_weight values: {}'.format(lm_weight))

    # to store per iteration information
    info = []

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims,
                               axis=1).ravel(), np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))

    if landmarks is not None:
        if verbose:
            print(
                "'{}' landmarks will be used as a landmark constraint.".format(
                    landmarks))
        source_lm_index = source.distance_to(
            source.landmarks[landmarks].lms).argmin(axis=0)
        target_lms = target.landmarks[landmarks].lms
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    o = np.ones(n)

    for alpha, beta in zip(stiffness, lm_weight):
        alpha_M_kron_G_s = alpha * M_kron_G_s  # get the term for stiffness
        j = 0
        while True:  # iterate until convergence
            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # if the dot product of the normals is less than 0.9, the point
            # does not contribute to the deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            w_i = np.logical_and(w_i_n, w_i_e)
            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n, w_i_e), w_i_i)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n
            j = j + 1

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            # nullify the masked U values
            U[~w_i] = 0

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)), U]

            if landmarks:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i = D_s.dot(X)
            err = np.linalg.norm(X_prev - X, ord='fro')

            if landmarks is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt(((src_lms - U_L)**2).sum(axis=1)).mean()

            if verbose:
                v_str = ('a: {}, ({}) - total : {:.0%} norms: {:.0%} '
                         'edges: {:.0%}'.format(alpha, j, prop_w_i, prop_w_i_n,
                                                prop_w_i_e))
                if landmarks is not None:
                    v_str += ' beta: {}, lm_err: {:.5f}'.format(beta, lm_err)

                print(v_str)

            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err
            }
            if landmarks:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
            info.append(info_dict)
            X_prev = X

            if err / np.sqrt(np.size(X_prev)) < eps:
                break

    # final correspondences: the closest points on the target to the
    # deformed source
    point_corr = closest_points_on_target(v_i)[0]

    result = {
        'deformed_source': restore.apply(v_i),
        'matched_target': restore.apply(point_corr),
        'matched_tri_indices': tri_indices,
        'info': info
    }

    if landmarks is not None:
        result['source_lm_index'] = source_lm_index

    return result
Example #13
def test_trimesh_to_vtk_fails_on_2d_mesh():
    with raises(ValueError):
        points = np.random.random((5, 2))
        test_mesh = TriMesh(points)
        trimesh_to_vtk(test_mesh)