Example #1
import torch
import torch.nn as nn
from pytorch3d.structures import Meshes


class EdgeLoss(nn.Module):
    def __init__(self, config):
        super(EdgeLoss, self).__init__()
        self.patch_size = config.data_patch_size
        # make_faces (defined elsewhere in the project) triangulates the
        # patch grid; only the connectivity matters here, so the vertex
        # positions can be random.
        faces = torch.tensor(make_faces(self.patch_size, self.patch_size))
        vertices = torch.rand(self.patch_size ** 2, 3)
        meshes = Meshes(verts=[vertices], faces=[faces])
        edges_packed = meshes.edges_packed()  # (E, 2) vertex-index pairs
        self.no_edges = edges_packed.shape[0]
        # Buffers move with the module across devices without being learnable.
        self.register_buffer('v0', edges_packed[:, 0])
        self.register_buffer('v1', edges_packed[:, 1])
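The snippet only builds the edge list; the forward pass is not shown. A minimal sketch of how the buffered endpoints might be used (this edge-length-variance penalty is an illustrative assumption, not the project's actual loss):

    # Hypothetical forward: penalize the variance of edge lengths, using
    # the buffered endpoints v0/v1 to index into the vertex tensor.
    def forward(self, verts):  # verts: (patch_size**2, 3)
        edge_vec = verts[self.v0] - verts[self.v1]  # (E, 3)
        edge_len = edge_vec.norm(dim=1)             # (E,)
        return ((edge_len - edge_len.mean()) ** 2).mean()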
Example #2
import torch
from pytorch3d.ops import norm_laplacian
from pytorch3d.structures import Meshes, utils as struct_utils


def taubin_smoothing(meshes: Meshes,
                     lambd: float = 0.53,
                     mu: float = -0.53,
                     num_iter: int = 10) -> Meshes:
    """
    Taubin smoothing [1] is an iterative smoothing operator for meshes.
    At each iteration
        verts := (1 - λ) * verts + λ * L * verts
        verts := (1 - μ) * verts + μ * L * verts

    This function returns a new mesh with smoothed vertices.
    Args:
        meshes: Meshes input to be smoothed
        lambd, mu: float parameters for Taubin smoothing,
            lambd > 0, mu < 0
        num_iter: number of iterations to execute smoothing
    Returns:
        mesh: Smoothed input Meshes

    [1] Curve and Surface Smoothing without Shrinkage,
        Gabriel Taubin, ICCV 1995
    """
    verts = meshes.verts_packed()  # V x 3
    edges = meshes.edges_packed()  # E x 2

    for _ in range(num_iter):
        L = norm_laplacian(verts, edges)
        total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
        verts = (1 - lambd) * verts + lambd * torch.mm(L, verts) / total_weight

        L = norm_laplacian(verts, edges)
        total_weight = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
        verts = (1 - mu) * verts + mu * torch.mm(L, verts) / total_weight

    verts_list = struct_utils.packed_to_list(
        verts,
        meshes.num_verts_per_mesh().tolist())
    mesh = Meshes(verts=list(verts_list), faces=meshes.faces_list())
    return mesh
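The function can be exercised end to end on a noisy sphere; a minimal sketch, assuming `ico_sphere` from `pytorch3d.utils` as a convenient test mesh:

    import torch
    from pytorch3d.utils import ico_sphere

    mesh = ico_sphere(level=2)
    verts = mesh.verts_packed()
    # Perturb the sphere with Gaussian noise, then smooth it back.
    noisy = Meshes(verts=[verts + 0.02 * torch.randn_like(verts)],
                   faces=mesh.faces_list())
    smoothed = taubin_smoothing(noisy, num_iter=10)
    print(smoothed.verts_packed().shape)  # same (V, 3) shape, reduced noise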
Example #3
    # From pytorch3d's test suite; `get_random_cuda_device` and
    # `assertClose` come from the project's common test utilities.
    def test_norm_laplacian(self):
        V = 32
        F = 64
        device = get_random_cuda_device()
        # random vertices
        verts = torch.rand((V, 3), dtype=torch.float32, device=device)
        # random valid faces (no repeated vertices within a face,
        # e.g. (v0, v0, v1)); randperm guarantees distinct indices per row
        faces = torch.stack([torch.randperm(V) for _ in range(F)],
                            dim=0)[:, :3]
        faces = faces.to(device=device)
        mesh = Meshes(verts=[verts], faces=[faces])
        edges = mesh.edges_packed()

        eps = 1e-12

        L = norm_laplacian(verts, edges, eps=eps)

        Lnaive = torch.zeros((V, V), dtype=torch.float32, device=device)
        for f in range(F):
            f0, f1, f2 = faces[f]
            v0 = verts[f0]
            v1 = verts[f1]
            v2 = verts[f2]

            w12 = 1.0 / ((v1 - v2).norm() + eps)
            w02 = 1.0 / ((v0 - v2).norm() + eps)
            w01 = 1.0 / ((v0 - v1).norm() + eps)

            Lnaive[f0, f1] = w01
            Lnaive[f1, f0] = w01
            Lnaive[f0, f2] = w02
            Lnaive[f2, f0] = w02
            Lnaive[f1, f2] = w12
            Lnaive[f2, f1] = w12

        self.assertClose(L.to_dense(), Lnaive)
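The naive loop above encodes exactly what `norm_laplacian` computes: a sparse V x V matrix whose (i, j) entry is the inverse edge length 1 / (||v_i - v_j|| + eps) for each mesh edge. A standalone spot check, again assuming `ico_sphere` from `pytorch3d.utils` as a test mesh:

    import torch
    from pytorch3d.ops import norm_laplacian
    from pytorch3d.utils import ico_sphere

    mesh = ico_sphere(level=1)
    verts = mesh.verts_packed()       # (V, 3)
    edges = mesh.edges_packed()       # (E, 2)
    L = norm_laplacian(verts, edges)  # sparse (V, V)
    v0, v1 = edges[0]
    expected = 1.0 / (verts[v0] - verts[v1]).norm()
    assert torch.isclose(L.to_dense()[v0, v1], expected, atol=1e-5)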
Example #4
from pytorch3d.structures import Meshes, Pointclouds


# Note: `point_edge_distance` and `edge_point_distance` are autograd
# Functions defined alongside this function in
# pytorch3d/loss/point_mesh_distance.py.
def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds):
    """
    Computes the distance between a pointcloud and a mesh within a batch.
    Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
    sum of two distances, namely `point_edge(mesh, pcl) + edge_point(mesh, pcl)`.

    `point_edge(mesh, pcl)`: Computes the squared distance of each point p in pcl
        to the closest edge segment in mesh and averages across all points in pcl.
    `edge_point(mesh, pcl)`: Computes the squared distance of each edge segment in mesh
        to the closest point in pcl and averages across all edges in mesh.

    The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
    and then averaged across the batch.

    Args:
        meshes: A Meshes data structure containing N meshes
        pcls: A Pointclouds data structure containing N pointclouds

    Returns:
        loss: The `point_edge(mesh, pcl) + edge_point(mesh, pcl)` distance
            between all `(mesh, pcl)` in a batch averaged across the batch.
    """
    if len(meshes) != len(pcls):
        raise ValueError("meshes and pointclouds must be equal sized batches")
    N = len(meshes)

    # packed representation for pointclouds
    points = pcls.points_packed()  # (P, 3)
    points_first_idx = pcls.cloud_to_packed_first_idx()
    max_points = pcls.num_points_per_cloud().max().item()

    # packed representation for edges
    verts_packed = meshes.verts_packed()
    edges_packed = meshes.edges_packed()
    segms = verts_packed[edges_packed]  # (S, 2, 3)
    segms_first_idx = meshes.mesh_to_edges_packed_first_idx()
    max_segms = meshes.num_edges_per_mesh().max().item()

    # point to edge distance: shape (P,)
    point_to_edge = point_edge_distance(points, points_first_idx, segms,
                                        segms_first_idx, max_points)

    # weight each example by the inverse of number of points in the example
    point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i), )
    num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
    weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
    weights_p = 1.0 / weights_p.float()
    point_to_edge = point_to_edge * weights_p
    point_dist = point_to_edge.sum() / N

    # edge to point distance: shape (S,)
    edge_to_point = edge_point_distance(points, points_first_idx, segms,
                                        segms_first_idx, max_segms)

    # weight each example by the inverse of number of edges in the example
    segm_to_mesh_idx = meshes.edges_packed_to_mesh_idx()  # (sum(S_n),)
    num_segms_per_mesh = meshes.num_edges_per_mesh()  # (N,)
    weights_s = num_segms_per_mesh.gather(0, segm_to_mesh_idx)
    weights_s = 1.0 / weights_s.float()
    edge_to_point = edge_to_point * weights_s
    edge_dist = edge_to_point.sum() / N

    return point_dist + edge_dist
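A sketch of calling this as a loss, with gradients flowing back to the point coordinates (the random cloud and `ico_sphere` test mesh are illustrative assumptions):

    import torch
    from pytorch3d.loss import point_mesh_edge_distance
    from pytorch3d.structures import Pointclouds
    from pytorch3d.utils import ico_sphere

    mesh = ico_sphere(level=2)
    points = (torch.rand(1, 500, 3) - 0.5).requires_grad_()
    pcls = Pointclouds(points=points)
    loss = point_mesh_edge_distance(mesh, pcls)
    loss.backward()  # points.grad is now populated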