Example 1
import torch


def pinverse(inputs: torch.Tensor):
    """Batched Moore-Penrose pseudo-inverse, built on a batched SVD helper."""
    assert inputs.ndim >= 2
    shp = inputs.shape
    # batch_svd is provided by the surrounding project and returns (U, S, V)
    # for a batch of matrices
    U, S, V = batch_svd(inputs.view(-1, shp[-2], shp[-1]))
    # zero the inverse of (near-)singular values, invert the rest
    S_inv = torch.where(S < 1e-5, torch.zeros_like(S), 1 / S)
    pinv = V @ torch.diag_embed(S_inv) @ U.transpose(1, 2)
    # the pseudo-inverse of an (m, n) matrix is (n, m), so swap the trailing dims
    return pinv.view(*shp[:-2], shp[-1], shp[-2])
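The snippet assumes a `batch_svd` helper that returns `(U, S, V)` for a batch of matrices. A minimal stand-in built on `torch.linalg.svd`, plus a quick check against `torch.linalg.pinv`, might look like the following (the stand-in is an assumption, not the project's own implementation):

def batch_svd(x: torch.Tensor):
    # stand-in: torch.linalg.svd returns V transposed, so transpose it back
    U, S, Vh = torch.linalg.svd(x, full_matrices=False)
    return U, S, Vh.transpose(-2, -1)

x = torch.randn(4, 5, 5)
# expect True for well-conditioned inputs
print(torch.allclose(pinverse(x), torch.linalg.pinv(x), atol=1e-4))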
Example 2
def compute_energy(meshes: ARAPMeshes,
                   verts: torch.Tensor,
                   verts_deformed: torch.Tensor,
                   mesh_idx=0,
                   device="cuda"):
    """Compute the energy of a deformation for a deformation, according to

	sum_i w_i * sum_j w_ij || (p'_i - p'_j) - R_i(p_i - p_j) ||^2

	Where i is the vertex index,
	j is the indices of all one-ring-neighbours
	p gives the undeformed vertex locations
	p' gives the deformed vertex rotations
	and R gives the rotation matrix between p and p' that captures as much of the deformation as possible
	(maximising the amount of deformation that is rigid)

	w_i gives the per-cell weight, selected as 1
	w_ij gives the per-edge weight, selected as 0.5 * (cot (alpha_ij) + cot(beta_ij)), where alpha and beta
	give the angles opposite of the mesh edge

	:param meshes: ARAP meshes object
	:param verts_deformed:
	:param verts:

	:return energy: Tensor of strain energy of deformation

	"""

    V = meshes.num_verts_per_mesh()[mesh_idx]

    orn = meshes.one_ring_neighbours[mesh_idx]
    max_neighbours = max(map(len, orn.values()))  # largest number of neighbours

    ii, jj, nn = produce_idxs(V, orn, device)  # flattened tensors for indices

    w = meshes.w_nfmts[mesh_idx]  # cotangent weight matrix, in nfmt index format

    p = verts[mesh_idx]  # initial mesh
    p_prime = verts_deformed[mesh_idx]  # displaced verts

    edge_shape = (V, max_neighbours, 3)
    P = produce_edge_matrix_nfmt(p, edge_shape, ii, jj, nn, device=device)
    P_prime = produce_edge_matrix_nfmt(p_prime, edge_shape, ii, jj, nn, device=device)

    ### Calculate covariance matrix in bulk
    D = torch.diag_embed(w, dim1=1, dim2=2)
    S = torch.bmm(P.permute(0, 2, 1), torch.bmm(D, P_prime))

    ## In the case of no deflection, set S = 0 so that R = I (avoids numerical errors)
    unchanged_verts = torch.unique(torch.where((P == P_prime).all(dim=1))[0])
    S[unchanged_verts] = 0

    U, sig, W = batch_svd(S)
    R = torch.bmm(W, U.permute(0, 2, 1))  # compute rotations

    # Need to flip the column of U corresponding to the smallest singular value
    # for any det(Ri) <= 0
    entries_to_flip = torch.nonzero(torch.det(R) <= 0, as_tuple=False).flatten()
    if len(entries_to_flip) > 0:
        Umod = U.clone()
        # column index of the smallest singular value for each flagged entry
        cols_to_flip = torch.argmin(sig[entries_to_flip], dim=1)
        Umod[entries_to_flip, :, cols_to_flip] *= -1  # flip those columns
        R[entries_to_flip] = torch.bmm(W[entries_to_flip],
                                       Umod[entries_to_flip].permute(0, 2, 1))

    # Compute energy
    rot_rigid = torch.bmm(R, P.permute(0, 2, 1)).permute(0, 2, 1)
    stretch_vec = P_prime - rot_rigid  # stretch vector
    stretch_norm = torch.norm(stretch_vec, dim=2)**2  # squared norm over (x, y, z)
    energy = (w * stretch_norm).sum()

    return energy
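A brief usage sketch, assuming `meshes` is an `ARAPMeshes` object living on the GPU; the random perturbation below is purely illustrative:

verts = meshes.verts_padded()  # (N, V, 3) rest positions
verts_deformed = verts + 0.01 * torch.randn_like(verts)  # small random deformation
energy = compute_energy(meshes, verts, verts_deformed, mesh_idx=0, device="cuda")
print(f"ARAP energy: {energy.item():.4f}")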
Example 3
    def solve(self,
              static_verts,
              handle_verts,
              handle_verts_pos,
              mesh_idx=0,
              n_its=1,
              track_energy=False,
              report=False):
        """
		Solve iterations of the As-Rigid-As-Possible method.

		:param static_verts: list of all vertices which do not move
		:param handle_verts: list of all vertices which are moved as input. Size H
		:param handle_verts_pos: (H x 3) array of target positions of all handle_verts
		:param mesh_idx: index of self for selected mesh.
		:param track_energy: Flag to print energy after every it
		:param report: Flag to use tqdm bar to track iteration progress

		p = initial mesh deformation
		p0 = working guess
"""

        V = self.num_verts_per_mesh()[mesh_idx]
        p = self.verts_padded()[mesh_idx]  # initial mesh

        if "w_padded" not in self.precomputed_params:
            self.precompute_laplacian()

        L = self.precomputed_params["L_padded"][mesh_idx]

        known_handles = {
            i: pos
            for i, pos in zip(handle_verts, handle_verts_pos)
        }
        known_static = {v: p[v] for v in static_verts}
        known = {**known_handles, **known_static}

        # Initial guess using Naive Laplacian editing: least square minimisation of |Lp0 - Lp|, subject to known
        # constraints on the values of p, from static and handles
        p_prime = least_sq_with_known_values(L, torch.mm(L, p), known=known)

        if n_its == 0:  # if only want initial guess, end here
            return p_prime

        ## modify L, L_inv and b_fixed to incorporate boundary conditions
        # indices of all unknown verts
        unknown_verts = [n for n in range(V) if n not in known]

        # factor to be subtracted from b, due to constraints
        b_fixed = torch.zeros((V, 3), device=self.device)
        for k, pos in known.items():
            b_fixed += torch.einsum("i,j->ij", L[:, k], pos)  # [unknown]

        #  Precompute L_reduced_inv if not already done
        if "L_reduced_inv" not in self.precomputed_params:
            self.precompute_reduced_laplacian(static_verts, handle_verts)

        L_reduced_inv = self.precomputed_params["L_reduced_inv"]

        orn = self.one_ring_neighbours[mesh_idx]
        max_neighbours = max(map(len, orn.values()))  # largest number of neighbours

        ii, jj, nn = produce_idxs(V, orn, self.device)  # flattened tensors for indices
        w = self.w_nfmts[mesh_idx]  # cotangent weight matrix, in nfmt index format

        edge_shape = (V, max_neighbours, 3)
        P = produce_edge_matrix_nfmt(p, edge_shape, ii, jj, nn, device=self.device)

        # Iterate through method
        if report:
            progress = tqdm(total=n_its)

        for it in range(n_its):

            P_prime = produce_edge_matrix_nfmt(p_prime, edge_shape, ii, jj, nn,
                                               device=self.device)

            ### Calculate covariance matrix in bulk
            D = torch.diag_embed(w, dim1=1, dim2=2)
            S = torch.bmm(P.permute(0, 2, 1), torch.bmm(D, P_prime))

            ## In the case of no deflection, set S = 0 so that R = I (avoids numerical errors)
            unchanged_verts = torch.unique(torch.where((P == P_prime).all(dim=1))[0])
            S[unchanged_verts] = 0

            U, sig, W = batch_svd(S)
            R = torch.bmm(W, U.permute(0, 2, 1))  # compute rotations

            # Need to flip the column of U corresponding to the smallest singular value
            # for any det(Ri) <= 0
            entries_to_flip = torch.nonzero(torch.det(R) <= 0, as_tuple=False).flatten()
            if len(entries_to_flip) > 0:
                Umod = U.clone()
                # column index of the smallest singular value for each flagged entry
                cols_to_flip = torch.argmin(sig[entries_to_flip], dim=1)
                Umod[entries_to_flip, :, cols_to_flip] *= -1  # flip those columns
                R[entries_to_flip] = torch.bmm(
                    W[entries_to_flip], Umod[entries_to_flip].permute(0, 2, 1))

            ### RHS of minimum energy equation
            Rsum_shape = (V, max_neighbours, 3, 3)
            Rsum = torch.zeros(Rsum_shape).to(self.device)  # Ri + Rj, as in eq (8)
            Rsum[ii, nn] = R[ii] + R[jj]

            ### Rsum has shape (V, max_neighbours, 3, 3). P has shape (V, max_neighbours, 3)
            ### To batch multiply, collapse first 2 dims into a single batch dim
            Rsum_batch, P_batch = Rsum.view(-1, 3, 3), P.view(-1, 3).unsqueeze(-1)

            # RHS of minimum energy equation
            b = 0.5 * (w[..., None] *
                       torch.bmm(Rsum_batch, P_batch).squeeze(-1).reshape(
                           V, max_neighbours, 3)).sum(dim=1)

            b -= b_fixed  # subtract component of LHS not included - constraints

            # predicted positions for only the unknown verts
            p_prime_unknown = torch.mm(L_reduced_inv, b[unknown_verts])

            # build the next iterate of the fit from p_prime_unknown and the constraints
            p_prime = torch.zeros_like(p_prime)
            for index, val in known.items():
                p_prime[index] = val

            # Assign the solved values to the initially unknown verts
            p_prime[unknown_verts] = p_prime_unknown

            # track energy
            if track_energy:
                energy = compute_energy(self, [p], [p_prime], device=self.device)
                print(f"It = {it}, Energy = {energy:.2f}")
            # update tqdm
            if report:
                progress.update()

        return p_prime  # return new vertices
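A sketch of driving `solve` on an `ARAPMeshes` instance `meshes`; the static and handle vertex indices below are hypothetical and only illustrate the expected argument shapes:

static_verts = [0, 1, 2]  # verts pinned in place
handle_verts = [10]  # verts dragged to new positions
offset = torch.tensor([0.0, 0.5, 0.0], device=meshes.device)
handle_verts_pos = meshes.verts_padded()[0][handle_verts] + offset  # (H, 3) targets

deformed_verts = meshes.solve(static_verts, handle_verts, handle_verts_pos,
                              mesh_idx=0, n_its=4, track_energy=True)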
Example 4
def estimate_pointcloud_local_coord_frames(
    pointclouds: Union[torch.Tensor, Pointclouds],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    return_knn_result: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, Optional['KNN']]:
    """
    Faster version of pytorch3d estimate_pointcloud_local_coord_frames

    Estimates the principal directions of curvature (which includes normals)
    of a batch of `pointclouds`.
    Returns:
        curvatures (N,P,3) ascending order
        local_frames (N,P,3,3) corresponding eigenvectors
    """
    points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)

    ba, N, dim = points_padded.shape
    if dim != 3:
        raise ValueError(
            "The pointclouds argument has to be of shape (minibatch, N, 3)")

    if (num_points <= neighborhood_size).any():
        raise ValueError("The neighborhood_size argument has to be" +
                         " >= size of each of the point clouds.")
    # undo global mean for stability
    # TODO: replace with tutil.wmean once landed
    pcl_mean = points_padded.sum(1) / num_points[:, None]
    points_centered = points_padded - pcl_mean[:, None, :]

    # get K nearest neighbor idx for each point in the point cloud
    knn_result = knn_points(
        points_padded,
        points_padded,
        lengths1=num_points,
        lengths2=num_points,
        K=neighborhood_size,
        return_nn=True,
    )
    k_nearest_neighbors = knn_result.knn
    # obtain the mean of the neighborhood
    pt_mean = k_nearest_neighbors.mean(2, keepdim=True)
    # compute the diff of the neighborhood and the mean of the neighborhood
    # N,P,K,3
    central_diff = k_nearest_neighbors - pt_mean
    per_pts_diff = central_diff.view(-1, neighborhood_size, 3)
    # S (NP, 3) and local_coord_frames (NP, 3, 3)
    _, S, local_coord_frames = batch_svd(per_pts_diff)
    curvature = S * S / neighborhood_size
    local_coord_frames = local_coord_frames.view(ba, N, dim, dim)
    curvature = curvature.view(ba, N, dim)

    # flip to ascending order
    curvature = curvature.flip(-1)
    local_coord_frames = local_coord_frames.flip(-1)

    # disambiguate the directions of individual principal vectors
    if disambiguate_directions:
        # disambiguate normal
        n = _disambiguate_vector_directions(points_centered,
                                            k_nearest_neighbors,
                                            local_coord_frames[:, :, :, 0])
        # disambiguate the main curvature
        z = _disambiguate_vector_directions(points_centered,
                                            k_nearest_neighbors,
                                            local_coord_frames[:, :, :, 2])
        # the secondary direction is the cross product of n and z
        y = torch.cross(n, z, dim=2)
        # cat to form the set of principal directions
        local_coord_frames = torch.stack((n, y, z), dim=3)

    if return_knn_result:
        return curvature, local_coord_frames, knn_result
    return curvature, local_coord_frames
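A usage sketch with a random point cloud (sizes chosen arbitrarily); given the flip to ascending order above, the first column of each returned frame is the estimated normal:

pts = torch.randn(2, 2048, 3)  # batch of two clouds with 2048 points each
curvature, frames = estimate_pointcloud_local_coord_frames(pts, neighborhood_size=30)
normals = frames[..., 0]  # (2, 2048, 3) per-point normal estimates
print(curvature.shape, frames.shape)  # (2, 2048, 3) and (2, 2048, 3, 3)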