Example #1
def noisy_alignment_similarity_transform(source, target, noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the source
    and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the same
        amount of noise is applied to the scale, rotation and translation
        parameters of the optimal similarity transform. If `list` of
        `float` it must have length 3, where the first, second and third elements
        denote the amount of noise to be applied to the scale, rotation and
        translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source, target,
                                     rotation=allow_alignment_rotation)

    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.asscalar(np.random.randn(1))
        r = noise_percentage[1] * (180 / 3) * np.asscalar(np.random.randn(1))
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        s = noise_percentage[0] * 0.5 * (2 * np.asscalar(np.random.rand(1)) - 1)
        r = noise_percentage[1] * 180 * (2 * np.asscalar(np.random.rand(1)) - 1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))
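
A minimal usage sketch for the function above. It assumes menpo and menpofit are installed and that the snippet's own module-level imports (np, AlignmentSimilarity, Translation, scale_about_centre, rotate_ccw_about_centre) are already in scope; the shapes below are synthetic placeholders.

import numpy as np
from menpo.shape import PointCloud

# two synthetic 2D shapes with 68 points each (illustrative only)
source = PointCloud(np.random.rand(68, 2))
target = PointCloud(np.random.rand(68, 2) * 2.0 + 5.0)

# perturb the optimal source-to-target similarity with ~10% uniform noise
noisy = noisy_alignment_similarity_transform(source, target,
                                             noise_type='uniform',
                                             noise_percentage=0.1)
perturbed_source = noisy.apply(source)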
Example #2
def noisy_align(source, target, noise_std=0.04, rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the
    source and the target by adding white noise to its weights.

    Parameters
    ----------
    source: :class:`menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target: :class:`menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_std: float
        The standard deviation of the white noise

        Default: 0.04
    rotation: boolean
        If False, the second parameter of the Similarity,
        which captures in-plane rotations, is set to 0.

        Default: False

    Returns
    -------
    noisy_transform : :class:`menpo.transform.Similarity`
        The noisy Similarity Transform
    """
    transform = AlignmentSimilarity(source, target, rotation=rotation)
    parameters = transform.as_vector()
    parameter_range = np.hstack((parameters[:2], target.range()))
    noise = (parameter_range * noise_std *
             np.random.randn(transform.n_parameters))
    return Similarity.identity(source.n_dims).from_vector(parameters + noise)
Example #3
def align_shapes(shapes, target_shape, lms_shapes=None, align_target=None):

    if align_target:
        print('Using AlignmentSimilarity')
        lms_target = align_target

        forward_transform = [
            AlignmentSimilarity(ls, lms_target) for ls in lms_shapes
        ]
        aligned_shapes = np.array(
            [t.apply(s) for t, s in zip(forward_transform, shapes)])
        removed_transform = [t.pseudoinverse() for t in forward_transform]
        target_shape = align_target
        _icp = None

    else:
        print('Using ICP')
        # Align Shapes Using ICP
        _icp = SICP(shapes, target_shape)
        aligned_shapes = _icp.aligned_shapes
        # Store Removed Transform
        removed_transform = []
        forward_transform = []
        for a_s, s in zip(aligned_shapes, shapes):
            ast = AlignmentSimilarity(a_s, s)
            removed_transform.append(ast)
            icpt = AlignmentSimilarity(s, a_s)
            forward_transform.append(icpt)

    return aligned_shapes, target_shape, removed_transform, forward_transform, _icp
Example #4
def noisy_align(source, target, noise_std=0.04, rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the
    source and the target by adding white noise to its weights.

    Parameters
    ----------
    source: :class:`menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target: :class:`menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_std: float
        The standard deviation of the white noise

        Default: 0.04
    rotation: boolean
        If False, the second parameter of the Similarity,
        which captures in-plane rotations, is set to 0.

        Default: False

    Returns
    -------
    noisy_transform : :class:`menpo.transform.Similarity`
        The noisy Similarity Transform
    """
    transform = AlignmentSimilarity(source, target, rotation=rotation)
    parameters = transform.as_vector()
    parameter_range = np.hstack((parameters[:2], target.range()))
    noise = (parameter_range * noise_std *
             np.random.randn(transform.n_parameters))
    return Similarity.init_identity(source.n_dims).from_vector(parameters + noise)
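
A brief usage sketch for noisy_align, assuming menpo is installed and the snippet's imports (np, AlignmentSimilarity, Similarity) are in scope:

import numpy as np
from menpo.shape import PointCloud

source = PointCloud(np.random.rand(10, 2))
target = PointCloud(np.random.rand(10, 2) * 3.0)

# similarity transform from source to target, perturbed by 4% white noise
noisy_transform = noisy_align(source, target, noise_std=0.04)
perturbed_shape = noisy_transform.apply(source)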
Example #5
def alignment(mesh):
    if mesh.n_points == 53215:
        template, idxs = load_mean()
    else:
        # meshes with a different topology are not supported here
        raise ValueError('Unexpected mesh with {} points; '
                         'expected 53215.'.format(mesh.n_points))

    alignment = AlignmentSimilarity(PointCloud(mesh.points[idxs]),
                                    PointCloud(template.points[idxs]))
    aligned_mesh = alignment.apply(mesh)
    return aligned_mesh
Example #6
def project_landmarks_to_shape_model(landmarks):
    final = []

    for lms in landmarks:
        lms = PointCloud(lms)
        similarity = AlignmentSimilarity(pca.global_transform.source, lms)
        projected_target = similarity.pseudoinverse().apply(lms)
        target = pca.model.reconstruct(projected_target)
        target = similarity.apply(target)
        final.append(target.points)

    return np.array(final).astype(np.float32)
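
The snippet above relies on a module-level `pca` object (a trained shape model exposing `global_transform` and `model`). A rough, self-contained sketch of the same align-project-reconstruct idea using a plain menpo PCAModel; the training shapes here are random placeholders:

import numpy as np
from menpo.model import PCAModel
from menpo.shape import PointCloud
from menpo.transform import AlignmentSimilarity

# hypothetical, already roughly aligned training shapes
training_shapes = [PointCloud(np.random.rand(68, 2)) for _ in range(50)]
shape_model = PCAModel(training_shapes)

noisy_shape = PointCloud(np.random.rand(68, 2))
# align the noisy shape to the model mean, project it into the PCA basis
# and reconstruct to obtain a model-constrained version of the shape
similarity = AlignmentSimilarity(shape_model.mean(), noisy_shape)
projected = similarity.pseudoinverse().apply(noisy_shape)
constrained = similarity.apply(shape_model.reconstruct(projected))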
Example #7
def test_align_2d_similarity_set_h_matrix_raises_notimplemented_error():
    linear_component = np.array([[2, -6], [6, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    similarity = Similarity(h_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = similarity.apply(source)
    # estimate the transform from source to source
    estimate = AlignmentSimilarity(source, source)
    # setting the h_matrix directly is expected to raise NotImplementedError
    estimate.set_h_matrix(h_matrix)
Example #8
def test_align_2d_similarity_set_h_matrix_raises_notimplemented_error():
    linear_component = np.array([[2, -6], [6, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    similarity = Similarity(h_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = similarity.apply(source)
    # estimate the transform from source to source
    estimate = AlignmentSimilarity(source, source)
    # setting the h_matrix directly is expected to raise NotImplementedError
    estimate.set_h_matrix(h_matrix)
Example #9
def test_align_2d_similarity_set_target():
    linear_component = np.array([[2, -6], [6, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    similarity = Similarity(h_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = similarity.apply(source)
    # estimate the transform from source to source
    estimate = AlignmentSimilarity(source, source, allow_mirror=True)
    # and set the target
    estimate.set_target(target)
    # check the estimate is correct
    assert_allclose(similarity.h_matrix, estimate.h_matrix)
Example #10
def test_align_2d_similarity_set_target():
    linear_component = np.array([[2, -6], [6, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    similarity = Similarity(h_matrix)
    source = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    target = similarity.apply(source)
    # estimate the transform from source to source
    estimate = AlignmentSimilarity(source, source, allow_mirror=True)
    # and set the target
    estimate.set_target(target)
    # check the estimate is correct
    assert_allclose(similarity.h_matrix, estimate.h_matrix)
Example #11
 def __init__(self, sources, **kwargs):
     super(GeneralizedProcrustesAnalysis, self).__init__(sources, **kwargs)
     self.transforms = [
         AlignmentSimilarity(source, self.target) for source in self.sources
     ]
     self.initial_target_scale = self.target.norm()
     self.n_iterations = 1
     self.max_iterations = 100
     self.converged = self._recursive_procrustes()
     print(self)
Example #12
def rasterize_mesh_at_template(mesh, img_shape=(640, 480),
                               pose_angle_deg=0, shaded=False):
    camera = perspective_camera_for_template(img_shape,
                                             pose_angle_deg=pose_angle_deg)
    mesh_aligned = AlignmentSimilarity(mesh, load_template()).apply(mesh)

    if shaded:
        mesh_aligned = lambertian_shading(mesh_aligned)

    return rasterize_mesh(camera.apply(mesh_aligned), img_shape)
Example #13
def active_non_rigid_icp(model, target, eps=1e-3,
                         stiffness_weights=None, data_weights=None,
                         landmark_group=None, landmark_weights=None,
                         model_mean_landmarks=None,
                         generate_instances=False, verbose=False):
    model_mean = model.mean()

    if landmark_group is not None:

        # user better have provided model landmarks!
        if model_mean_landmarks is None:
            raise ValueError(
                'For Active NICP with landmarks the model_mean_landmarks '
                'need to be provided.')

        shape_model = ShapeModel(model)
        source_lms = model_mean_landmarks
        target_lms = target.landmarks[landmark_group]
        model_lms_index = model_mean.distance_to(source_lms).argmin(axis=0)
        shape_model_lms = shape_model.mask_points(model_lms_index)

        # Sim align the target lms to the mean before projecting
        target_lms_aligned = AlignmentSimilarity(target_lms,
                                                 source_lms).apply(target_lms)

        # project to learn the weights for the landmark model
        weights = shape_model_lms.project(target_lms_aligned,
                                          n_components=20)

        # use these weights on the dense shape model to generate an improved
        # instance
        source = model.instance(weights)

        # update the source landmarks (for the alignment below)
        source.landmarks[landmark_group] = PointCloud(source.points[
                                                          model_lms_index])
    else:
        # Start from the mean of the model
        source = model_mean

    # project onto the shape model to restrict the basis
    def project_onto_model(instance):
        return model.reconstruct(instance)

    # call the generator version of NICP, always returning a generator
    generator = non_rigid_icp_generator(source, target, eps=eps,
                                        stiffness_weights=stiffness_weights,
                                        data_weights=data_weights,
                                        landmark_weights=landmark_weights,
                                        landmark_group=landmark_group,
                                        v_i_update_func=project_onto_model,
                                        verbose=verbose)
    # the handler decides whether the user gets details of each iteration
    # returned, or just the final result.
    return non_rigid_icp_generator_handler(generator, generate_instances)
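
A hedged call sketch for active_non_rigid_icp; `dense_shape_model`, `target_scan` and `model_mean_lms` are illustrative placeholders (a PCA model built over TriMesh instances, a scanned mesh carrying an 'ibug68' landmark group, and the landmarks of the model mean, respectively):

fitted_mesh = active_non_rigid_icp(dense_shape_model, target_scan,
                                   landmark_group='ibug68',
                                   model_mean_landmarks=model_mean_lms,
                                   verbose=True)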
Example #14
    def _align_mean_shape_with_bbox(self, bbox):
        # Convert 3D landmarks to 2D by removing the Z axis
        template_shape = PointCloud(self.mm.landmarks.points[:, [1, 0]])

        # Rotation that flips over x axis
        rot_matrix = np.eye(template_shape.n_dims)
        rot_matrix[0, 0] = -1
        template_shape = Rotation(rot_matrix,
                                  skip_checks=True).apply(template_shape)

        # Align the 2D landmarks' bbox with the provided bbox
        return AlignmentSimilarity(template_shape.bounding_box(),
                                   bbox).apply(template_shape)
Example #15
 def __call__(self, img_generator):
     from menpo.transform import AlignmentSimilarity
     results = []
     ref_shape = self.fitter.reference_shape
     for img in img_generator:
         # note that we don't want to crop the image in our preprocessing;
         # the gt on the image we are passed is what will be used for
         # assessment, so we would introduce large errors if its size
         # were modified.
         img = menpo_img_process(img, crop=False)
         bbox = img.landmarks['bbox'].lms
         shape_bb = ref_shape.bounding_box()
         init_shape = AlignmentSimilarity(shape_bb, bbox).apply(ref_shape)
         menpofit_fr = self.fitter.fit(img, init_shape)
         results.append(menpofit_to_result(menpofit_fr))
     return results
Example #16
def align_shape_with_bb(shape, bounding_box):
    r"""
    Returns the Similarity transform that aligns the provided shape with the
    provided bounding box.

    Parameters
    ----------
    shape: :class:`menpo.shape.PointCloud`
        The shape to be aligned.
    bounding_box: (2, 2) ndarray
        The bounding box specified as:

            np.array([[x_min, y_min], [x_max, y_max]])

    Returns
    -------
    transform : :class:`menpo.transform.Similarity`
        The alignment transform.
    """
    shape_box = PointCloud(shape.bounds())
    bounding_box = PointCloud(bounding_box)
    return AlignmentSimilarity(shape_box, bounding_box, rotation=False)
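
A short usage sketch, assuming the snippet's imports (np, PointCloud, AlignmentSimilarity) are in scope:

import numpy as np

# a synthetic 2D shape and a target bounding box
shape = PointCloud(np.random.rand(68, 2) * 100.0)
bounding_box = np.array([[10.0, 20.0], [110.0, 140.0]])

transform = align_shape_with_bb(shape, bounding_box)
initial_shape = transform.apply(shape)  # shape repositioned into the box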
Example #17
def non_rigid_icp_generator(
    source,
    target,
    eps=1e-3,
    stiffness_weights=None,
    data_weights=None,
    landmark_group=None,
    landmark_weights=None,
    v_i_update_func=None,
    verbose=False,
):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(
            source.landmarks[landmark_group],
            target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range()**2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print("using user-defined stiffness_weights")
        validate_weights("stiffness_weights",
                         stiffness_weights,
                         source.n_points,
                         verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print("using default "
                  "stiffness_weights: {}".format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print("using user defined "
                  "landmark_weights: {}".format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, 0.5, 0, 0, 0, 0, 0]
        if verbose:
            print("using default "
                  "landmark_weights: {}".format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights(
        "landmark_weights",
        landmark_weights,
        source.n_points,
        n_iterations=n_iterations,
        verbose=False,
    )

    if data_weights is not None:
        if verbose:
            print("using user-defined data_weights")
        validate_weights(
            "data_weights",
            data_weights,
            source.n_points,
            n_iterations=n_iterations,
            verbose=verbose,
        )
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print("Not customising data_weights")

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims,
                               axis=1).ravel(), np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(
            zip(stiffness_weights, landmark_weights, data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError()
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else "min: {:.2f}, max: {:.2f}".format(
                         alpha.min(), alpha.max()))
            i_str = "{}/{}: stiffness: {}".format(i, len(stiffness_weights),
                                                  a_str)
            if landmark_group is not None:
                i_str += "  lm_weight: {}".format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist, copy=False)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot of the normals is lt 0.9 don't contrib to deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [
                np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                U * w_i[:, None],
            ]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord="fro")
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt((src_lms - U_L)**2).sum(axis=1).mean()

            if verbose:
                v_str = (" - {} stop crit: {:.3f}  "
                         "total: {:.0%}  norms: {:.0%}  "
                         "edges: {:.0%}".format(j, stop_criterion, prop_w_i,
                                                prop_w_i_n, prop_w_i_e))
                if landmark_group is not None:
                    v_str += "  lm_err: {:.4f}".format(lm_err)

                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                "alpha": alpha,
                "iteration": j,
                "prop_omitted": prop_w_i,
                "prop_omitted_norms": prop_w_i_n,
                "prop_omitted_edges": prop_w_i_e,
                "delta": err,
                "mask_normals": w_i_n,
                "mask_edges": w_i_e,
                "mask_all": w_i,
                "nearest_points": restore.apply(U),
                "deformation_per_step": delta_v_i,
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            if landmark_group:
                info_dict["beta"] = beta
                info_dict["lm_err"] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(
                    src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
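
A sketch of consuming the generator above directly; `source_mesh` and `target_mesh` are illustrative placeholders for TriMesh instances that share an 'ibug68' landmark group, and the module's VTK helpers are assumed to be importable:

final_instance = None
for instance, info in non_rigid_icp_generator(source_mesh, target_mesh,
                                              landmark_group='ibug68',
                                              verbose=True):
    final_instance = instance
    # info carries per-iteration diagnostics, e.g. the stopping delta and
    # the proportion of vertices omitted by the normal compatibility test
    print(info['iteration'], info['delta'], info['prop_omitted'])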
Example #18
def _build_shape_desc(sd_path_in,
                      _norm_imgs,
                      target_shape,
                      aligned_shapes,
                      align_t,
                      reference_frame,
                      _icp_transform,
                      _is_mc=False,
                      group=None,
                      target_align_shape=None,
                      _shape_desc=svs_shape,
                      align_group='align',
                      target_group=None):
    sd_path_in = '{}'.format(sd_path_in)
    if not os.path.exists(sd_path_in):
        os.makedirs(sd_path_in)
    # Build Transform Using SVS
    xr, yr = reference_frame.shape

    # Draw Mask
    # mask_shape = mask_pc(align_t.apply(target_shape))
    # mask_image = Image.init_blank((xr, yr))
    # for pts in mask_shape.points:
    #     mask_image.pixels[0, pts[0], pts[1]] = 1
    # mio.export_image(
    #     mask_image,
    #     '{}/ref_mask.png'.format(sd_path_in),
    #     overwrite=True
    # )

    if (not glob.glob(sd_path_in + '/sd_*.gif')):

        target_group = target_group if target_group is not None else [
            range(target_shape.n_points)
        ]
        for j, (a_s, tr, svsLms, groups) in enumerate(
                zip([target_shape] + aligned_shapes.tolist(),
                    [AlignmentSimilarity(target_shape, target_shape)] +
                    _icp_transform, [target_align_shape] +
                    [ni.landmarks[align_group].lms
                     for ni in _norm_imgs], [target_group] + [
                         group_from_labels(ni.landmarks[group])
                         for ni in _norm_imgs
                     ])):
            print_dynamic("  - Shape Descriptor Training {} out of {}".format(
                j,
                len(aligned_shapes) + 1))
            # Align shapes with reference frame
            temp_as = align_t.apply(a_s)
            points = temp_as.points

            # Store SVS Landmarks
            svsLmsPath = '{}/sd_{:04d}_lms.pts'.format(sd_path_in, j)
            svsLms = align_t.apply(tr.apply(svsLms))
            if not os.path.exists(svsLmsPath):
                tempRef = reference_frame.copy()
                tempRef.landmarks['temp'] = svsLms
                mio.export_landmark_file(tempRef.landmarks['temp'], svsLmsPath)

            store_image = normalise_image(_shape_desc(temp_as, xr, yr, groups))

            # Create gif from svs group
            #     convert -delay 10 -loop 0 sd_0001_g*.png test.gif

            for ch in range(store_image.n_channels):
                channel_img = store_image.extract_channels(ch)
                mio.export_image(channel_img,
                                 '{}/sd_{:04d}_g{:02d}.png'.format(
                                     sd_path_in, j, ch),
                                 overwrite=True)

            subprocess.Popen([
                'convert', '-delay', '10', '-loop', '0',
                '{0}/sd_{1:04d}_g*.png'.format(sd_path_in, j),
                '{0}/sd_{1:04d}.gif'.format(sd_path_in, j)
            ])
Example #19
def transform_to_mean_shape(src, mean_shape):
    centered = PointCloud(src.points - src.centre(), copy=False)

    return AlignmentSimilarity(centered, mean_shape)
Example #20
def align_dense_fit_to_gt(fit_3d, gt_mesh):
    return AlignmentSimilarity(fit_3d, gt_mesh).apply(fit_3d)
Example #21
def non_rigid_icp_generator(source, target, eps=1e-3,
                            stiffness_weights=None, data_weights=None,
                            landmark_group=None, landmark_weights=None,
                            v_i_update_func=None, verbose=False):
    r"""
    Deforms the source trimesh to optimally align with the target.
    """
    # If landmarks are provided, we should always start with a simple
    # AlignmentSimilarity between the landmarks to initialize optimally.
    if landmark_group is not None:
        if verbose:
            print("'{}' landmarks will be used as "
                  "a landmark constraint.".format(landmark_group))
            print("performing similarity alignment using landmarks")
        lm_align = AlignmentSimilarity(source.landmarks[landmark_group],
                                       target.landmarks[landmark_group]).as_non_alignment()
        source = lm_align.apply(source)

    # Scale factors completely change the behavior of the algorithm - always
    # rescale the source down to a sensible size (so it fits inside box of
    # diagonal 1) and is centred on the origin. We'll undo this after the fit
    # so the user can use whatever scale they prefer.
    tr = Translation(-1 * source.centre())
    sc = UniformScale(1.0 / np.sqrt(np.sum(source.range() ** 2)), 3)
    prepare = tr.compose_before(sc)

    source = prepare.apply(source)
    target = prepare.apply(target)

    # store how to undo the similarity transform
    restore = prepare.pseudoinverse()

    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points, trilist = source.points, source.trilist
    n = points.shape[0]  # record number of points

    edge_tris = source.boundary_tri_index()

    M_s, unique_edge_pairs = node_arc_incidence_matrix(source)

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build octree for finding closest points on target.
    target_vtk = trimesh_to_vtk(target)
    closest_points_on_target = VTKClosestPointLocator(target_vtk)

    # save out the target normals. We need them for the weight matrix.
    target_tri_normals = target.tri_normals()

    # init transformation
    X_prev = np.tile(np.zeros((n_dims, h_dims)), n).T
    v_i = points

    if stiffness_weights is not None:
        if verbose:
            print('using user-defined stiffness_weights')
        validate_weights('stiffness_weights', stiffness_weights,
                         source.n_points, verbose=verbose)
    else:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        stiffness_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]
        if verbose:
            print('using default '
                  'stiffness_weights: {}'.format(stiffness_weights))

    n_iterations = len(stiffness_weights)

    if landmark_weights is not None:
        if verbose:
            print('using user defined '
                  'landmark_weights: {}'.format(landmark_weights))
    elif landmark_group is not None:
        # these values have been empirically found to perform well for well
        # rigidly aligned facial meshes
        landmark_weights = [5, 2, .5, 0, 0, 0, 0, 0]
        if verbose:
            print('using default '
                  'landmark_weights: {}'.format(landmark_weights))
    else:
        # no landmark_weights provided - no landmark_group in use. We still
        # need a landmark group for the iterator
        landmark_weights = [None] * n_iterations

    # We should definitely have some landmark weights set now - check the
    # number is correct.
    # Note we say verbose=False, as we have done custom reporting above, and
    # per-vertex landmarks are not supported.
    validate_weights('landmark_weights', landmark_weights, source.n_points,
                     n_iterations=n_iterations, verbose=False)

    if data_weights is not None:
        if verbose:
            print('using user-defined data_weights')
        validate_weights('data_weights', data_weights,
                         source.n_points, n_iterations=n_iterations,
                         verbose=verbose)
    else:
        data_weights = [None] * n_iterations
        if verbose:
            print('Not customising data_weights')

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims, axis=1).ravel(),
                     np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(),
                     x[:, n_dims]))
    o = np.ones(n)

    if landmark_group is not None:
        source_lm_index = source.distance_to(
            source.landmarks[landmark_group]).argmin(axis=0)
        target_lms = target.landmarks[landmark_group]
        U_L = target_lms.points
        n_landmarks = target_lms.n_points
        lm_mask = np.in1d(row, source_lm_index)
        col_lm = col[lm_mask]
        # pull out the rows for the lms - but the values are
        # all wrong! need to map them back to the order of the landmarks
        row_lm_to_fix = row[lm_mask]
        source_lm_index_l = list(source_lm_index)
        row_lm = np.array([source_lm_index_l.index(r) for r in row_lm_to_fix])

    for i, (alpha, beta, gamma) in enumerate(zip(stiffness_weights,
                                                 landmark_weights,
                                                 data_weights), 1):
        alpha_is_per_vertex = isinstance(alpha, np.ndarray)
        if alpha_is_per_vertex:
            # stiffness is provided per-vertex
            if alpha.shape[0] != source.n_points:
                raise ValueError()
            alpha_per_edge = alpha[unique_edge_pairs].mean(axis=1)
            alpha_M_s = sp.diags(alpha_per_edge).dot(M_s)
            alpha_M_kron_G_s = sp.kron(alpha_M_s, G)
        else:
            # stiffness is global - just a scalar multiply. Note that here
            # we don't have to recalculate M_kron_G_s
            alpha_M_kron_G_s = alpha * M_kron_G_s

        if verbose:
            a_str = (alpha if not alpha_is_per_vertex
                     else 'min: {:.2f}, max: {:.2f}'.format(alpha.min(),
                                                            alpha.max()))
            i_str = '{}/{}: stiffness: {}'.format(i, len(stiffness_weights), a_str)
            if landmark_group is not None:
                i_str += '  lm_weight: {}'.format(beta)
            print(i_str)

        j = 0
        while True:  # iterate until convergence
            j += 1  # track the iterations for this alpha/landmark weight

            # find nearest neighbour and the normals
            U, tri_indices = closest_points_on_target(v_i)

            # ---- WEIGHTS ----
            # 1.  Edges
            # Are any of the corresponding tris on the edge of the target?
            # Where they are we return a false weight (we *don't* want to
            # include these points in the solve)
            w_i_e = np.in1d(tri_indices, edge_tris, invert=True)

            # 2. Normals
            # Calculate the normals of the current v_i
            v_i_tm = TriMesh(v_i, trilist=trilist)
            v_i_n = v_i_tm.vertex_normals()
            # Extract the corresponding normals from the target
            u_i_n = target_tri_normals[tri_indices]
            # If the dot of the normals is lt 0.9 don't contrib to deformation
            w_i_n = (u_i_n * v_i_n).sum(axis=1) > 0.9

            # 3. Self-intersection
            # This adds approximately 12% to the running cost and doesn't seem
            # to be very critical in helping mesh fitting performance so for
            # now it's removed. Revisit later.
            # # Build an intersector for the current deformed target
            # intersect = build_intersector(to_vtk(v_i_tm))
            # # budge the source points 1% closer to the target
            # source = v_i + ((U - v_i) * 0.5)
            # # if the vector from source to target intersects the deformed
            # # template we don't want to include it in the optimisation.
            # problematic = [i for i, (s, t) in enumerate(zip(source, U))
            #                if len(intersect(s, t)[0]) > 0]
            # print(len(problematic) * 1.0 / n)
            # w_i_i = np.ones(v_i_tm.n_points, dtype=np.bool)
            # w_i_i[problematic] = False

            # Form the overall w_i from the normals, edge case
            # for now disable the edge constraint (it was noisy anyway)
            w_i = w_i_n

            # w_i = np.logical_and(w_i_n, w_i_e).astype(np.float)

            # we could add self intersection at a later date too...
            # w_i = np.logical_and(np.logical_and(w_i_n,
            #                                     w_i_e,
            #                                     w_i_i).astype(np.float)

            prop_w_i = (n - w_i.sum() * 1.0) / n
            prop_w_i_n = (n - w_i_n.sum() * 1.0) / n
            prop_w_i_e = (n - w_i_e.sum() * 1.0) / n

            if gamma is not None:
                w_i = w_i * gamma

            # Build the sparse diagonal weight matrix
            W_s = sp.diags(w_i.astype(float)[None, :], [0])

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            to_stack_A = [alpha_M_kron_G_s, W_s.dot(D_s)]
            to_stack_B = [np.zeros((alpha_M_kron_G_s.shape[0], n_dims)),
                          U * w_i[:, None]]  # nullify nearest points by w_i

            if landmark_group is not None:
                D_L = sp.coo_matrix((data[lm_mask], (row_lm, col_lm)),
                                    shape=(n_landmarks, D_s.shape[1]))
                to_stack_A.append(beta * D_L)
                to_stack_B.append(beta * U_L)

            A_s = sp.vstack(to_stack_A).tocsr()
            B_s = sp.vstack(to_stack_B).tocsr()
            X = spsolve(A_s, B_s)

            # deform template
            v_i_prev = v_i
            v_i = D_s.dot(X)
            delta_v_i = v_i - v_i_prev

            if v_i_update_func:
                # custom logic is provided to update the current template
                # deformation. This is typically used by Active NICP.

                # take the v_i points matrix and convert back to a TriMesh in
                # the original space
                def_template = restore.apply(source.from_vector(v_i.ravel()))

                # perform the update
                updated_def_template = v_i_update_func(def_template)

                # convert back to points in the NICP space
                v_i = prepare.apply(updated_def_template.points)

            err = np.linalg.norm(X_prev - X, ord='fro')
            stop_criterion = err / np.sqrt(np.size(X_prev))

            if landmark_group is not None:
                src_lms = v_i[source_lm_index]
                lm_err = np.sqrt((src_lms - U_L) ** 2).sum(axis=1).mean()

            if verbose:
                v_str = (' - {} stop crit: {:.3f}  '
                         'total: {:.0%}  norms: {:.0%}  '
                         'edges: {:.0%}'.format(j, stop_criterion,
                                                prop_w_i, prop_w_i_n,
                                                prop_w_i_e))
                if landmark_group is not None:
                    v_str += '  lm_err: {:.4f}'.format(lm_err)

                print(v_str)

            X_prev = X

            # track the progress of the algorithm per-iteration
            info_dict = {
                'alpha': alpha,
                'iteration': j,
                'prop_omitted': prop_w_i,
                'prop_omitted_norms': prop_w_i_n,
                'prop_omitted_edges': prop_w_i_e,
                'delta': err,
                'mask_normals': w_i_n,
                'mask_edges': w_i_e,
                'mask_all': w_i,
                'nearest_points': restore.apply(U),
                'deformation_per_step': delta_v_i
            }

            current_instance = source.copy()
            current_instance.points = v_i.copy()

            if landmark_group:
                info_dict['beta'] = beta
                info_dict['lm_err'] = lm_err
                current_instance.landmarks[landmark_group] = PointCloud(src_lms)

            yield restore.apply(current_instance), info_dict

            if stop_criterion < eps:
                break
Example #22
def noisy_alignment_similarity_transform(source,
                                         target,
                                         noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the source
    and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the same
        amount of noise is applied to the scale, rotation and translation
        parameters of the optimal similarity transform. If `list` of
        `float` it must have length 3, where the first, second and third elements
        denote the amount of noise to be applied to the scale, rotation and
        translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source,
                                     target,
                                     rotation=allow_alignment_rotation)

    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.asscalar(np.random.randn(1))
        r = noise_percentage[1] * (180 / 3) * np.asscalar(np.random.randn(1))
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        s = noise_percentage[0] * 0.5 * (2 * np.asscalar(np.random.rand(1)) -
                                         1)
        r = noise_percentage[1] * 180 * (2 * np.asscalar(np.random.rand(1)) -
                                         1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))