Example #1
    def nlog_density(self, target, source, log_weights, scales, probas=None):
        """Negative log-likelihood of the proposal generated by the source onto the target."""

        x_i = LazyTensor(target[:, None, :])  # (N,1,D)
        y_j = LazyTensor(self.means[None, :, :])  # (1,M,D)
        s_j = self.covariances_inv  # (M, D, D)
        s_j = LazyTensor(s_j.view(s_j.shape[0], -1)[None, :, :])  # (1, M, D*D)
        D_ij = (x_i - y_j) | s_j.matvecmult(x_i - y_j)  # (N,M,1)

        det_j = LazyTensor(self.log_det_cov_half[None, :, None])

        logK_ij = (-D_ij / 2 - (self.D / 2) * float(np.log(2 * np.pi)) - det_j)

        # N.B.: the mixture uses its own weights; the `log_weights` argument is ignored here.
        logW_j = LazyTensor(self.weights.log()[None, :, None])
        logK_ij = logK_ij + logW_j

        logdensities_i = logK_ij.logsumexp(dim=1).reshape(-1)  # (N,)
        return -logdensities_i
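For reference, here is a self-contained sketch of the same KeOps pattern (a minimal example assuming pykeops and PyTorch are installed; N, M, D and the toy mixture parameters are illustrative, not part of the original class). It builds the Gaussian-mixture negative log-likelihood from LazyTensors and cross-checks it against a dense torch.distributions mixture:

import numpy as np
import torch
from pykeops.torch import LazyTensor

# Toy mixture: M unit-covariance Gaussians in dimension D, evaluated at N points.
N, M, D = 1000, 5, 2
target = torch.randn(N, D)
means = torch.randn(M, D)
covariances_inv = torch.eye(D).repeat(M, 1, 1)  # (M, D, D) precision matrices
log_det_cov_half = torch.zeros(M)  # (1/2) * log det(Sigma_j) = 0 for identities
weights = torch.full((M,), 1.0 / M)  # uniform mixture weights

x_i = LazyTensor(target[:, None, :])  # (N, 1, D)
y_j = LazyTensor(means[None, :, :])  # (1, M, D)
s_j = LazyTensor(covariances_inv.view(M, -1)[None, :, :])  # (1, M, D*D)
D_ij = (x_i - y_j) | s_j.matvecmult(x_i - y_j)  # (N, M, 1) squared Mahalanobis
det_j = LazyTensor(log_det_cov_half[None, :, None])  # (1, M, 1)
logK_ij = (-D_ij / 2 - (D / 2) * float(np.log(2 * np.pi)) - det_j)
logK_ij = logK_ij + LazyTensor(weights.log()[None, :, None])
nll = -logK_ij.logsumexp(dim=1).reshape(-1)  # (N,)

# Cross-check against a dense PyTorch mixture:
mix = torch.distributions.MixtureSameFamily(
    torch.distributions.Categorical(probs=weights),
    torch.distributions.MultivariateNormal(means, precision_matrix=covariances_inv),
)
print(torch.allclose(nll, -mix.log_prob(target), atol=1e-4))  # True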
Example #2
    def nlog_density(self, target, source, log_weights, scales, probas=None):
        """Negative log-likelihood of the proposal generated by the source onto the target."""

        if self.adaptive:

            x_i = LazyTensor(target[:, None, :])  # (N,1,D)
            y_j = LazyTensor(source[None, :, :])  # (1,M,D)
            s_j = self.covariances_inv  # (M, D, D)
            s_j = LazyTensor(s_j.view(s_j.shape[0], -1)[None, :, :])  # (1, M, D*D)
            D_ij = (x_i - y_j) | s_j.matvecmult(x_i - y_j)  # (N,M,1)

            det_j = LazyTensor(self.log_det_cov_half[None, :, None])

            logK_ij = (-D_ij / 2 - (self.D / 2) * float(np.log(2 * np.pi)) - det_j)

        else:
            D_ij = squared_distances(target, source)
            logK_ij = (-D_ij / (2 * scales**2) -
                       (self.D / 2) * float(np.log(2 * np.pi)) -
                       self.D * scales.log())

        if log_weights is None:
            logK_ij = logK_ij - float(np.log(len(source)))
        else:
            logW_j = LazyTensor(log_weights[None, :, None])
            logK_ij = logK_ij + logW_j

        logdensities_i = logK_ij.logsumexp(dim=1).view(-1)  # (N,)

        if probas is None:
            return -logdensities_i
        else:
            return -(logdensities_i.view(-1, len(probas)) +
                     probas.log()[None, :]).logsumexp(dim=1).view(-1)
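The non-adaptive branch above relies on a squared_distances helper that is not shown on this page; since logsumexp is applied to the result, it presumably returns a symbolic KeOps LazyTensor. A minimal sketch under that assumption, matching the usual pykeops tutorial helper:

from pykeops.torch import LazyTensor

def squared_distances(x, y):
    """Symbolic (N, M) matrix of squared Euclidean distances |x_i - y_j|^2."""
    x_i = LazyTensor(x[:, None, :])  # (N, 1, D)
    y_j = LazyTensor(y[None, :, :])  # (1, M, D)
    return ((x_i - y_j)**2).sum(-1)  # (N, M, 1) LazyTensor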
Example #3
    def load_mesh(self,
                  xyz,
                  triangles=None,
                  normals=None,
                  weights=None,
                  batch=None):
        """Loads the geometry of a triangle mesh.

        Input arguments:
        - xyz, a point cloud encoded as an (N, 3) Tensor.
        - triangles, a connectivity matrix encoded as a (3, T) integer Tensor.
        - normals, an optional (N, 3) Tensor of unit normals; estimated from the mesh if None.
        - weights, importance weights for the orientation estimation, encoded as an (N, 1) Tensor.
        - batch, a batch vector, following PyTorch_Geometric's conventions.

        The model attribute `radius` sets the scale used to estimate the local normals.

        The routine updates the model attributes:
        - points, i.e. the point cloud itself,
        - nuv, a local oriented basis in R^3 for every point,
        - ranges, custom KeOps syntax to implement batch processing.
        """

        # 1. Save the vertices for later use in the convolutions ---------------
        self.points = xyz
        self.batch = batch
        self.ranges = diagonal_ranges(batch)  # KeOps support for heterogeneous batch processing
        self.triangles = triangles
        self.normals = normals
        self.weights = weights

        # 2. Estimate the normals and tangent frame ----------------------------
        # Normalize the scale:
        points = xyz / self.radius

        # Normals and local areas:
        if normals is None:
            normals, areas = mesh_normals_areas(points, triangles, 0.5, batch)
        tangent_bases = tangent_vectors(normals)  # Tangent basis (N, 2, 3)

        # 3. Steer the tangent bases according to the gradient of "weights" ----

        # 3.a) Encoding as KeOps LazyTensors:
        # Orientation scores:
        weights_j = LazyTensor(weights.view(1, -1, 1))  # (1, N, 1)
        # Vertices:
        x_i = LazyTensor(points[:, None, :])  # (N, 1, 3)
        x_j = LazyTensor(points[None, :, :])  # (1, N, 3)
        # Normals:
        n_i = LazyTensor(normals[:, None, :])  # (N, 1, 3)
        n_j = LazyTensor(normals[None, :, :])  # (1, N, 3)
        # Tangent basis:
        uv_i = LazyTensor(tangent_bases.view(-1, 1, 6))  # (N, 1, 6)

        # 3.b) Pseudo-geodesic window:
        # Pseudo-geodesic squared distance:
        rho2_ij = ((x_j - x_i)**2).sum(-1) * ((2 - (n_i | n_j))**2)  # (N, N, 1)
        # Gaussian window:
        window_ij = (-rho2_ij).exp()  # (N, N, 1)

        # 3.c) Coordinates in the (u, v) basis - not oriented yet:
        X_ij = uv_i.matvecmult(x_j - x_i)  # (N, N, 2)

        # 3.d) Local average in the tangent plane:
        orientation_weight_ij = window_ij * weights_j  # (N, N, 1)
        orientation_vector_ij = orientation_weight_ij * X_ij  # (N, N, 2)

        # Support for heterogeneous batch processing:
        orientation_vector_ij.ranges = self.ranges  # Block-diagonal sparsity mask

        orientation_vector_i = orientation_vector_ij.sum(dim=1)  # (N, 2)
        orientation_vector_i = orientation_vector_i + 1e-5  # Just in case someone's alone...

        # 3.e) Normalize stuff:
        orientation_vector_i = F.normalize(orientation_vector_i, p=2, dim=-1)  # (N, 2)
        ex_i, ey_i = (
            orientation_vector_i[:, 0][:, None],
            orientation_vector_i[:, 1][:, None],
        )  # (N,1)

        # 3.f) Re-orient the (u,v) basis:
        uv_i = tangent_bases  # (N, 2, 3)
        u_i, v_i = uv_i[:, 0, :], uv_i[:, 1, :]  # (N, 3)
        tangent_bases = torch.cat(
            (ex_i * u_i + ey_i * v_i, -ey_i * u_i + ex_i * v_i),
            dim=1).contiguous()  # (N, 6)

        # 4. Store the local 3D frame as an attribute --------------------------
        self.nuv = torch.cat(
            (normals.view(-1, 1, 3), tangent_bases.view(-1, 2, 3)), dim=1)
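load_mesh assumes a tangent_vectors helper that completes each unit normal into an orthonormal frame. A possible implementation sketch (an assumption, not necessarily the author's code) is the branchless construction of Duff et al., "Building an Orthonormal Basis, Revisited" (2017):

import torch

def tangent_vectors(normals):
    """(u, v) tangent bases orthogonal to the unit `normals`.

    normals -> uv
    (N, 3)  -> (N, 2, 3)
    """
    x, y, z = normals[..., 0], normals[..., 1], normals[..., 2]
    s = 2 * (z >= 0).to(normals.dtype) - 1  # copysign(1, z), with s = +1 when z = 0
    a = -1 / (s + z)
    b = x * y * a
    uv = torch.stack(
        (1 + s * x * x * a, s * b, -s * x, b, s + y * y * a, -y), dim=-1)
    return uv.view(uv.shape[:-1] + (2, 3))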
Example #4
    def forward(self, points, nuv, features, ranges=None):
        """Performs a quasi-geodesic interaction step.

        points, local basis, in features  ->  out features
        (N, 3),   (N, 3, 3),    (N, I)    ->    (N, O)

        This layer computes the interaction step of Eq. (7) in the paper,
        in-between the application of two MLP networks independently on all
        feature vectors.

        Args:
            points (Tensor): (N,3) point coordinates `x_i`.
            nuv (Tensor): (N,3,3) local coordinate systems `[n_i,u_i,v_i]`.
            features (Tensor): (N,I) input feature vectors `f_i`.
            ranges (6-tuple of integer Tensors, optional): low-level format
                to support batch processing, as described in the KeOps documentation.
                In practice, this will be built by a higher-level object
                to encode the relevant "batch vectors" in a way that is convenient
                for the KeOps CUDA engine. Defaults to None.

        Returns:
            (Tensor): (N,O) output feature vectors `f'_i`.
        """

        # 1. Transform the input features: -------------------------------------
        features = self.net_in(features)  # (N, I) -> (N, H)

        # 2. Compute the local "shape contexts": -------------------------------

        # 2.a Normalize the kernel radius:
        points = points / (sqrt(2.0) * self.Radius)  # (N, 3)

        # 2.b Encode the variables as KeOps LazyTensors

        # Vertices:
        x_i = LazyTensor(points[:, None, :])  # (N, 1, 3)
        x_j = LazyTensor(points[None, :, :])  # (1, N, 3)

        # WARNING - Here, we assume that the normals are fixed:
        normals = nuv[:, 0, :].contiguous().detach()  # (N, 3) - remove the .detach() if needed

        # Local bases:
        nuv_i = LazyTensor(nuv.view(-1, 1, 9))  # (N, 1, 9)
        # Normals:
        n_i = nuv_i[:3]  # (N, 1, 3)

        n_j = LazyTensor(normals[None, :, :])  # (1, N, 3)

        # Features:
        f_j = LazyTensor(features[None, :, :])  # (1, N, H)

        # Convolution parameters:
        if self.cheap:
            A, B = self.conv[0].weight, self.conv[0].bias  # (H, 3), (H,)
            AB = torch.cat((A, B[:, None]), dim=1)  # (H, 4)
            ab = LazyTensor(AB.view(1, 1, -1))  # (1, 1, H*4)
        else:
            A_1, B_1 = self.conv[0].weight, self.conv[0].bias  # (C, 3), (C,)
            A_2, B_2 = self.conv[2].weight, self.conv[2].bias  # (H, C), (H,)
            a_1 = LazyTensor(A_1.view(1, 1, -1))  # (1, 1, C*3)
            b_1 = LazyTensor(B_1.view(1, 1, -1))  # (1, 1, C)
            a_2 = LazyTensor(A_2.view(1, 1, -1))  # (1, 1, H*C)
            b_2 = LazyTensor(B_2.view(1, 1, -1))  # (1, 1, H)

        # 2.c Pseudo-geodesic window:
        # Pseudo-geodesic squared distance:
        d2_ij = ((x_j - x_i)**2).sum(-1) * ((2 - (n_i | n_j))**2)  # (N, N, 1)
        # Gaussian window:
        window_ij = (-d2_ij).exp()  # (N, N, 1)

        # 2.d Local MLP:
        # Local coordinates:
        X_ij = nuv_i.matvecmult(x_j - x_i)  # (N, N, 9) "@" (N, N, 3) = (N, N, 3)
        # MLP:
        if self.cheap:
            # (N, N, H*4) @ (N, N, 3+1) = (N, N, H):
            X_ij = ab.matvecmult(X_ij.concat(LazyTensor(1)))
            X_ij = X_ij.relu()  # (N, N, H)
        else:
            X_ij = a_1.matvecmult(X_ij) + b_1  # (N, N, C)
            X_ij = X_ij.relu()  # (N, N, C)
            X_ij = a_2.matvecmult(X_ij) + b_2  # (N, N, H)
            X_ij = X_ij.relu()

        # 2.e Actual computation:
        F_ij = window_ij * X_ij * f_j  # (N, N, H)
        F_ij.ranges = ranges  # Support for batches and/or block-sparsity

        features = F_ij.sum(dim=1)  # (N, H)

        # 3. Transform the output features: ------------------------------------
        features = self.net_out(features)  # (N, H) -> (N, O)

        return features
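Note that steps 2.a and 2.c together implement a Gaussian window of deviation Radius: after dividing the coordinates by sqrt(2) * Radius, exp(-d2_ij) equals exp(-|x_i - x_j|^2 * (2 - <n_i, n_j>)^2 / (2 * Radius^2)). A standalone sketch of this quasi-geodesic window (assuming pykeops is installed; the point cloud and normals are random, purely illustrative):

import torch
import torch.nn.functional as F
from pykeops.torch import LazyTensor

N = 100
points = torch.randn(N, 3)
normals = F.normalize(torch.randn(N, 3), p=2, dim=-1)  # random unit normals

x_i, x_j = LazyTensor(points[:, None, :]), LazyTensor(points[None, :, :])
n_i, n_j = LazyTensor(normals[:, None, :]), LazyTensor(normals[None, :, :])

# The Euclidean Gaussian is damped by (2 - <n_i, n_j>)^2, which cuts
# interactions across regions where the normals disagree:
d2_ij = ((x_j - x_i)**2).sum(-1) * ((2 - (n_i | n_j))**2)  # (N, N, 1)
window_ij = (-d2_ij).exp()  # (N, N, 1)

neighborhood_mass = window_ij.sum(dim=1)  # (N, 1): soft neighbor count per point
print(neighborhood_mass.shape)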
Example #5
def curvatures(vertices,
               triangles=None,
               scales=[1.0],
               batch=None,
               normals=None,
               reg=0.01):
    """Returns a collection of mean (H) and Gauss (K) curvatures at different scales.

    points, faces, scales  ->  (H_1, K_1, ..., H_S, K_S)
    (N, 3), (3, T), (S,)   ->         (N, S*2)

    We rely on a very simple linear regression method, for all vertices:

      1. Estimate normals and surface areas.
      2. Compute a local tangent frame.
      3. In a pseudo-geodesic Gaussian neighborhood at scale s,
         compute the two (2, 2) covariance matrices PPt and PQt
         between the displacement vectors "P = x_i - x_j" and
         the normals "Q = n_i - n_j", projected on the local tangent plane.
      4. Up to the sign, the shape operator S at scale s is then approximated
         as  "S = (reg**2 * I_2 + PPt)^-1 @ PQt".
      5. The mean and Gauss curvatures are the trace and determinant of
         this (2, 2) matrix.

    As of today, this implementation does not weight points by surface areas:
    this could make a sizeable difference if protein surfaces were not
    sub-sampled to ensure uniform sampling density.

    For convergence analysis, see for instance
    "Efficient curvature estimation for oriented point clouds",
    Cao, Li, Sun, Assadi, Zhang, 2019.

    Args:
        vertices (Tensor): (N,3) coordinates of the points or mesh vertices.
        triangles (integer Tensor, optional): (3,T) mesh connectivity. Defaults to None.
        scales (list of floats, optional): list of (S,) smoothing scales. Defaults to [1.].
        batch (integer Tensor, optional): batch vector, as in PyTorch_geometric. Defaults to None.
        normals (Tensor, optional): (N,3) field of "raw" unit normals. Defaults to None.
        reg (float, optional): small amount of Tikhonov/ridge regularization
            in the estimation of the shape operator. Defaults to .01.

    Returns:
        (Tensor): (N, S*2) tensor of mean and Gauss curvatures computed for
            every point at the required scales.
    """
    # Number of points, number of scales:
    N, S = vertices.shape[0], len(scales)
    ranges = diagonal_ranges(batch)

    # Compute the normals at different scales + vertex areas:
    normals_s, _ = mesh_normals_areas(vertices,
                                      triangles=triangles,
                                      normals=normals,
                                      scale=scales,
                                      batch=batch)  # (N, S, 3), (N,)

    # Local tangent bases:
    uv_s = tangent_vectors(normals_s)  # (N, S, 2, 3)

    features = []

    for s, scale in enumerate(scales):
        # Extract the relevant descriptors at the current scale:
        normals = normals_s[:, s, :].contiguous()  #  (N, 3)
        uv = uv_s[:, s, :, :].contiguous()  # (N, 2, 3)

        # Encode as symbolic tensors:
        # Points:
        x_i = LazyTensor(vertices.view(N, 1, 3))
        x_j = LazyTensor(vertices.view(1, N, 3))
        # Normals:
        n_i = LazyTensor(normals.view(N, 1, 3))
        n_j = LazyTensor(normals.view(1, N, 3))
        # Tangent bases:
        uv_i = LazyTensor(uv.view(N, 1, 6))

        # Pseudo-geodesic squared distance:
        d2_ij = ((x_j - x_i)**2).sum(-1) * ((2 - (n_i | n_j))**2)  # (N, N, 1)
        # Gaussian window:
        window_ij = (-d2_ij / (2 * (scale**2))).exp()  # (N, N, 1)

        # Project on the tangent plane:
        P_ij = uv_i.matvecmult(x_j - x_i)  # (N, N, 2)
        Q_ij = uv_i.matvecmult(n_j - n_i)  # (N, N, 2)
        # Concatenate:
        PQ_ij = P_ij.concat(Q_ij)  # (N, N, 2+2)

        # Covariances, with a scale-dependent weight:
        PPt_PQt_ij = P_ij.tensorprod(PQ_ij)  # (N, N, 2*(2+2))
        PPt_PQt_ij = window_ij * PPt_PQt_ij  #  (N, N, 2*(2+2))

        # Reduction - with batch support:
        PPt_PQt_ij.ranges = ranges
        PPt_PQt = PPt_PQt_ij.sum(1)  # (N, 2*(2+2))

        # Reshape to get the two covariance matrices:
        PPt_PQt = PPt_PQt.view(N, 2, 2, 2)
        PPt, PQt = PPt_PQt[:, :, 0, :], PPt_PQt[:, :, 1, :]  # (N, 2, 2), (N, 2, 2)

        # Add a small ridge regression:
        PPt[:, 0, 0] += reg
        PPt[:, 1, 1] += reg

        # (minus) Shape operator, i.e. the differential of the Gauss map:
        # = (PPt^-1 @ PQt) : simple estimation through linear regression.
        # N.B.: on older PyTorch versions, use "torch.solve(PQt, PPt).solution".
        S = torch.linalg.solve(PPt, PQt)
        a, b, c, d = S[:, 0, 0], S[:, 0, 1], S[:, 1, 0], S[:, 1, 1]  # (N,)

        # Mean and Gauss curvatures = trace and determinant of the shape operator:
        mean_curvature = a + d
        gauss_curvature = a * d - b * c
        features += [mean_curvature.clamp(-1, 1), gauss_curvature.clamp(-1, 1)]

    features = torch.stack(features, dim=-1)
    return features
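A hypothetical usage sketch, assuming the helpers above (diagonal_ranges, mesh_normals_areas, tangent_vectors) are importable alongside curvatures and behave as documented. On a centered unit sphere, the outward unit normal at each point is the point itself:

import torch
import torch.nn.functional as F

vertices = F.normalize(torch.randn(2000, 3), p=2, dim=-1)  # points on the unit sphere
feats = curvatures(vertices, scales=[0.5, 1.0], normals=vertices)
print(feats.shape)  # (2000, 4): (H, K) pairs at scales 0.5 and 1.0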