Example #1
def clusterize(α, x, scale=None, labels=None):
    """
    Performs a simple 'voxelgrid' clustering on the input measure,
    putting points into cubic bins of size 'scale' = σ_c.
    The weights are summed, and the centroid position is that of the bin's center of mass.
    Most importantly, the "fine" lists of weights and points are *sorted*
    so that clusters are *contiguous in memory*: this allows us to perform
    kernel truncation efficiently on the GPU.

    If 
        [α_c, α], [x_c, x], [x_ranges] = clusterize(α, x, σ_c),
    then
        α_c[k], x_c[k] correspond to
        α[x_ranges[k,0]:x_ranges[k,1]], x[x_ranges[k,0]:x_ranges[k,1],:]
    """
    if labels is None and scale is None:  # No clustering, single-scale Sinkhorn on the way...
        return [α], [x], []

    else:  # As of today, only two-scale Sinkhorn is implemented:
        # Compute simple (voxel-like) class labels:
        x_lab = grid_cluster(x, scale) if labels is None else labels
        # Compute centroids and weights:
        ranges_x, x_c, α_c = cluster_ranges_centroids(x, x_lab, weights=α)
        # Make clusters contiguous in memory:
        (α, x), x_labels = sort_clusters((α, x), x_lab)

        return [α_c, α], [x_c, x], [ranges_x]
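A minimal usage sketch of the return convention described in the docstring. The point cloud, weights and bin size are illustrative, and clusterize() is assumed to be importable together with its pykeops.torch.cluster dependencies:

import torch

N, D = 10000, 2
x = torch.rand(N, D)      # toy point cloud
α = torch.ones(N) / N     # uniform weights
σ_c = 0.05                # side of the cubic bins

[α_c, α_f], [x_c, x_f], [ranges_x] = clusterize(α, x, scale=σ_c)

# The k-th coarse sample summarizes a contiguous slice of the *sorted* fine cloud:
k = 0
start, end = ranges_x[k]
assert torch.allclose(α_c[k], α_f[start:end].sum())  # weights are summed per bin
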
Example #2
    def _Gauss_block_sparse_pre(self, x: torch.Tensor, y: torch.Tensor, K_ij: LazyTensor):
        '''
        Helper function to preprocess data for block-sparse reduction
        of the Gaussian kernel

        Args:
            x[torch.Tensor], y[torch.Tensor] = point clouds giving rise to the Gaussian kernel K(x,y)
            K_ij[LazyTensor] = symbolic representation of K(x,y)
            self.eps[float] = side length of the square/cubic bins used for clustering
        Returns:
            K_ij[LazyTensor] = symbolic representation of K(x,y) with
                               its block-sparse ranges set
        '''
        if x.shape[1] < 4 or y.shape[1] < 4:
            # labels for low dimensions
            x_labels = grid_cluster(x, self.eps)
            y_labels = grid_cluster(y, self.eps)
            # range and centroid per class
            x_ranges, x_centroids, _ = cluster_ranges_centroids(x, x_labels)
            y_ranges, y_centroids, _ = cluster_ranges_centroids(y, y_labels)
        else:
            # labels for higher dimensions
            x_labels, x_centroids = self._KMeans(x)
            y_labels, y_centroids = self._KMeans(y)
            # compute ranges
            x_ranges = cluster_ranges(x_labels)
            y_ranges = cluster_ranges(y_labels)

        # sort points
        x, x_labels = sort_clusters(x, x_labels)
        y, y_labels = sort_clusters(y, y_labels)
        # Compute a coarse Boolean mask:
        D = torch.sum((x_centroids[:, None, :] - y_centroids[None, :, :]) ** 2, 2)
        keep = D < (self.mask_radius) ** 2
        # mask -> set of integer tensors
        ranges_ij = from_matrix(x_ranges, y_ranges, keep)
        K_ij.ranges = ranges_ij  # block-sparsity pattern

        return K_ij
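For context, a self-contained sketch of the same block-sparse recipe applied to a plain Gaussian LazyTensor, outside of any class. The sizes, bin width and mask radius are illustrative, not taken from the example above:

import torch
from pykeops.torch import LazyTensor
from pykeops.torch.cluster import (grid_cluster, cluster_ranges_centroids,
                                   sort_clusters, from_matrix)

x, y = torch.rand(10000, 2), torch.rand(10000, 2)
eps, mask_radius, sigma = 0.05, 0.1, 0.05

# Cluster, then compute one range and centroid per class:
x_labels, y_labels = grid_cluster(x, eps), grid_cluster(y, eps)
x_ranges, x_centroids, _ = cluster_ranges_centroids(x, x_labels)
y_ranges, y_centroids, _ = cluster_ranges_centroids(y, y_labels)

# Make clusters contiguous in memory:
x, x_labels = sort_clusters(x, x_labels)
y, y_labels = sort_clusters(y, y_labels)

# Coarse Boolean mask between cluster centroids:
D = ((x_centroids[:, None, :] - y_centroids[None, :, :]) ** 2).sum(2)
keep = D < mask_radius ** 2

# Symbolic Gaussian kernel, restricted to the retained blocks:
x_i, y_j = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])
K_ij = (-((x_i - y_j) ** 2).sum(-1) / (2 * sigma ** 2)).exp()
K_ij.ranges = from_matrix(x_ranges, y_ranges, keep)
row_sums = K_ij.sum(dim=1)  # block-sparse reduction
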
Example #3

def subsample(x, batch=None, scale=1.0):
    """Subsamples the point cloud using a grid (cubic) clustering scheme.

    The function returns one average sample per cell, as described in Fig. 3.e)
    of the paper.

    Args:
        x (Tensor): (N,3) point cloud.
        batch (integer Tensor, optional): (N,) batch vector, as in PyTorch_geometric.
            Defaults to None.
        scale (float, optional): side length of the cubic grid cells. Defaults to 1 (Angstrom).

    Returns:
        (M,3) Tensor: sub-sampled point cloud, with M <= N.
        If a batch vector was provided, the (M,) batch vector of the
        sub-sampled points is returned as well.
    """

    if batch is None:  # Single protein case:
        if True:  # Use a fast scatter_add_ implementation
            labels = grid_cluster(x, scale).long()
            C = labels.max() + 1

            # We append a "1" to the input vectors, in order to
            # compute both the numerator and denominator of the "average"
            #  fraction in one pass through the data.
            x_1 = torch.cat((x, torch.ones_like(x[:, :1])), dim=1)
            D = x_1.shape[1]
            points = torch.zeros_like(x_1[:C])
            points.scatter_add_(0, labels[:, None].repeat(1, D), x_1)
            return (points[:, :-1] / points[:, -1:]).contiguous()

        else:  # Older implementation, unreachable: kept for reference only (relies on torch_scatter's scatter and on variables not defined here).
            points = scatter(points * weights[:, None], labels, dim=0)
            weights = scatter(weights, labels, dim=0)
            points = points / weights[:, None]

    else:  # We process proteins using a for loop.
        # This is probably sub-optimal, but I don't really know
        # how to do it more elegantly (this type of computation is
        # not super well supported by PyTorch).
        batch_size = torch.max(batch).item() + 1  # Typically, =32
        points, batches = [], []
        for b in range(batch_size):
            p = subsample(x[batch == b], scale=scale)
            points.append(p)
            batches.append(b * torch.ones_like(batch[:len(p)]))

    return torch.cat(points, dim=0), torch.cat(batches, dim=0)
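A small usage sketch, assuming subsample() and its dependencies are importable; the sizes and the batch split below are illustrative:

import torch

x = torch.randn(5000, 3)               # toy point cloud (single protein)
x_coarse = subsample(x, scale=1.0)     # (M, 3): one centroid per occupied 1A cell

# Batched case, PyTorch_geometric style: two clouds packed in one tensor.
batch = torch.cat((torch.zeros(3000), torch.ones(2000))).long()
points, batches = subsample(x, batch=batch, scale=1.0)
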
Example #4
def clusterize(α, x, scale=None, labels=None):
    """
    Performs a simple 'voxelgrid' clustering on the input measure,
    putting points into cubic bins of size 'scale' = σ_c.
    The weights are summed, and the centroid position is that of the bin's center of mass.
    Most importantly, the "fine" lists of weights and points are *sorted*
    so that clusters are *contiguous in memory*: this allows us to perform
    kernel truncation efficiently on the GPU.

    If
        [α_c, α], [x_c, x], [x_ranges], perm = clusterize(α, x, σ_c),
    then
        α_c[k], x_c[k] correspond to
        α[x_ranges[k,0]:x_ranges[k,1]], x[x_ranges[k,0]:x_ranges[k,1],:]
    and perm is the permutation that sorts the fine point cloud by cluster.
    """
    perm = None  # did we sort the point cloud at some point? Here's the permutation.

    if labels is None and scale is None:  # No clustering, single-scale Sinkhorn on the way...
        return [α], [x], [], None  # perm is None: the point cloud was not sorted

    else:  # As of today, only two-scale Sinkhorn is implemented:
        # Compute simple (voxel-like) class labels:
        x_lab = grid_cluster(x, scale) if labels is None else labels
        # Compute centroids and weights:
        ranges_x, x_c, α_c = cluster_ranges_centroids(x, x_lab, weights=α)
        # Make clusters contiguous in memory:
        x_labels, perm = torch.sort(x_lab.view(-1))
        α, x = α[perm], x[perm]

        # N.B.: the lines above replace a call to 'sort_clusters', which
        #       does not return the permutation; that information is needed
        #       to de-permute the dual potentials if the user requests them.
        # (α, x), x_labels = sort_clusters((α, x), x_lab)

        return [α_c, α], [x_c, x], [ranges_x], perm
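A hedged sketch of how the extra perm output can be used downstream: if f_sorted is a dual potential computed on the sorted point cloud (a hypothetical name, not defined above), the inverse permutation maps it back to the user's original ordering:

# Since the sorted cloud is x[perm], sample i of the original cloud sits at
# position inv_perm[i] of the sorted arrays:
inv_perm = torch.empty_like(perm)
inv_perm[perm] = torch.arange(len(perm), device=perm.device)
f_user = f_sorted[inv_perm]
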
Example #5
# nor too **many** (the :func:`from_matrix() <pykeops.torch.cluster.from_matrix>`
# pre-processor can become a bottleneck when working with >2,000 clusters
# per point cloud).
#
# In this tutorial, we use the :func:`grid_cluster() <pykeops.torch.cluster.grid_cluster>`
# routine which simply groups points into **cubic bins** of arbitrary size:

from pykeops.torch.cluster import grid_cluster

eps = 0.05  # Size of our square bins

if use_cuda:
    torch.cuda.synchronize()
start = time.time()
x_labels = grid_cluster(x, eps)  # class labels
y_labels = grid_cluster(y, eps)  # class labels
if use_cuda:
    torch.cuda.synchronize()
end = time.time()
print("Perform clustering       : {:.4f}s".format(end - start))

###############################################
# Once (integer) cluster labels have been computed,
# we can compute the **centroids** and **memory footprint** of each class:

from pykeops.torch.cluster import cluster_ranges_centroids

# Compute one range and centroid per class:
start = time.time()
x_ranges, x_centroids, _ = cluster_ranges_centroids(x, x_labels)
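###############################################
# (Hedged sketch, not part of the original tutorial.) For N points spread
# over C non-empty bins, the outputs above have the following shapes:
#   x_labels    : (N,)   integer bin label of each point
#   x_ranges    : (C, 2) [start, end) slice of each bin, valid once the points
#                        have been sorted with sort_clusters()
#   x_centroids : (C, D) center of mass of each bin
# The same call is typically repeated for the second point cloud:

y_ranges, y_centroids, _ = cluster_ranges_centroids(y, y_labels)
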
Example #6
def kernel_multiscale(α,
                      x,
                      β,
                      y,
                      blur=.05,
                      kernel=None,
                      name=None,
                      truncate=5,
                      diameter=None,
                      cluster_scale=None,
                      verbose=False,
                      **kwargs):

    if truncate is None or name == "energy":
        return kernel_online(α,
                             x,
                             β,
                             y,
                             blur=blur,
                             kernel=kernel,
                             truncate=truncate,
                             name=name,
                             **kwargs)

    # Renormalize our point cloud so that blur = 1:
    kernel, x, y = kernel_preprocess(kernel, name, x, y, blur)

    # Don't forget to normalize the diameter too!
    if cluster_scale is None:
        D = x.shape[-1]
        if diameter is None:
            diameter = max_diameter(x.view(-1, D), y.view(-1, D))
        else:
            diameter = diameter / blur
        cluster_scale = diameter / (np.sqrt(D) * 2000**(1 / D))

    # Put our points in cubic clusters:
    cell_diameter = cluster_scale * np.sqrt(x.shape[1])
    x_lab = grid_cluster(x, cluster_scale)
    y_lab = grid_cluster(y, cluster_scale)

    # Compute the ranges and centroids of each cluster:
    ranges_x, x_c, α_c = cluster_ranges_centroids(x, x_lab, weights=α)
    ranges_y, y_c, β_c = cluster_ranges_centroids(y, y_lab, weights=β)

    if verbose:
        print("{}x{} clusters, computed at scale = {:2.3f}".format(
            len(x_c), len(y_c), cluster_scale))

    # Sort the clusters, making them contiguous in memory:
    (α, x), x_lab = sort_clusters((α, x), x_lab)
    (β, y), y_lab = sort_clusters((β, y), y_lab)

    with torch.no_grad():  # Compute our block-sparse reduction ranges:
        # Compute pairwise distances between clusters:
        C_xx = squared_distances(x_c, x_c)
        C_yy = squared_distances(y_c, y_c)
        C_xy = squared_distances(x_c, y_c)

        # Compute the boolean masks:
        keep_xx = (C_xx <= (truncate + cell_diameter)**2)
        keep_yy = (C_yy <= (truncate + cell_diameter)**2)
        keep_xy = (C_xy <= (truncate + cell_diameter)**2)

        # Compute the KeOps reduction ranges:
        ranges_xx = from_matrix(ranges_x, ranges_x, keep_xx)
        ranges_yy = from_matrix(ranges_y, ranges_y, keep_yy)
        ranges_xy = from_matrix(ranges_x, ranges_y, keep_xy)

    return kernel_keops(kernel,
                        α,
                        x,
                        β,
                        y,
                        ranges_xx=ranges_xx,
                        ranges_yy=ranges_yy,
                        ranges_xy=ranges_xy)
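A small numeric sketch of the default cluster_scale heuristic above (the dimension and diameter are illustrative): the cell side is chosen so that the bounding cube of the normalized cloud is split into about D**(D/2) * 2000 cells, which keeps the number of non-empty clusters in the ballpark suggested by the ">2,000 clusters" caveat of Example #5:

import numpy as np

D, diameter = 3, 1.0  # illustrative values, after the blur normalization
cluster_scale = diameter / (np.sqrt(D) * 2000 ** (1 / D))
cells_per_axis = diameter / cluster_scale        # = sqrt(D) * 2000**(1/D), about 21.8
print(cluster_scale, cells_per_axis ** D)        # about 0.046, about 10400 cells in the cube
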
Example #7
    # And require grad:
    a_i.requires_grad = True
    x_i.requires_grad = True
    b_j.requires_grad = True

    # Compute the loss + gradients:
    Loss_xy = Loss(a_i, x_i, b_j, y_j)
    [F_i, G_j, dx_i] = grad(Loss_xy, [a_i, b_j, x_i])

    #  The generalized "Brenier map" is (minus) the gradient of the Sinkhorn loss
    # with respect to the Wasserstein metric:
    BrenierMap = -dx_i / (a_i.view(-1, 1) + 1e-7)

    # Compute the coarse measures for display ----------------------------------

    x_lab = grid_cluster(x_i, cluster_scale)
    _, x_c, a_c = cluster_ranges_centroids(x_i, x_lab, weights=a_i)

    y_lab = grid_cluster(y_j, cluster_scale)
    _, y_c, b_c = cluster_ranges_centroids(y_j, y_lab, weights=b_j)

    # Fancy display: -----------------------------------------------------------

    ax = plt.subplot(((Nits - 1) // 3 + 1), 3, i + 1)
    ax.scatter([10], [10])  # shameless hack to prevent a slight change of axis...

    display_potential(ax, G_j, "#E2C5C5")
    display_potential(ax, F_i, "#C8DFF9")

    if blur > cluster_scale:
        display_samples(ax, y_j, b_j, [(0.55, 0.55, 0.95, 0.2)])
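A hedged sketch of how such a Brenier map is typically used at this point of a Lagrangian gradient flow: advect the samples along it before the next iteration (the unit step size is illustrative, not taken from the snippet above):

with torch.no_grad():
    x_i += BrenierMap  # move each sample along minus the Wasserstein gradient
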
Example #8
def kernel_multiscale(α,
                      x,
                      β,
                      y,
                      blur=0.05,
                      kernel=None,
                      name=None,
                      truncate=5,
                      diameter=None,
                      cluster_scale=None,
                      potentials=False,
                      verbose=False,
                      **kwargs):

    if truncate is None or name == "energy":
        return kernel_online(α.unsqueeze(0),
                             x.unsqueeze(0),
                             β.unsqueeze(0),
                             y.unsqueeze(0),
                             blur=blur,
                             kernel=kernel,
                             truncate=truncate,
                             name=name,
                             potentials=potentials,
                             **kwargs)

    # Renormalize our point cloud so that blur = 1:
    # Center the point clouds just in case, to prevent numeric overflows:
    center = (x.mean(-2, keepdim=True) + y.mean(-2, keepdim=True)) / 2
    x, y = x - center, y - center
    x_ = x / blur
    y_ = y / blur

    # Don't forget to normalize the diameter too!
    if cluster_scale is None:
        D = x.shape[-1]
        if diameter is None:
            diameter = max_diameter(x_.view(-1, D), y_.view(-1, D))
        else:
            diameter = diameter / blur
        cluster_scale = diameter / (np.sqrt(D) * 2000**(1 / D))

    # Put our points in cubic clusters:
    cell_diameter = cluster_scale * np.sqrt(x_.shape[-1])
    x_lab = grid_cluster(x_, cluster_scale)
    y_lab = grid_cluster(y_, cluster_scale)

    # Compute the ranges and centroids of each cluster:
    ranges_x, x_c, α_c = cluster_ranges_centroids(x_, x_lab, weights=α)
    ranges_y, y_c, β_c = cluster_ranges_centroids(y_, y_lab, weights=β)

    if verbose:
        print("{}x{} clusters, computed at scale = {:2.3f}".format(
            len(x_c), len(y_c), cluster_scale))

    # Sort the clusters, making them contiguous in memory:
    (α, x), x_lab = sort_clusters((α, x), x_lab)
    (β, y), y_lab = sort_clusters((β, y), y_lab)

    with torch.no_grad():  # Compute our block-sparse reduction ranges:
        # Compute pairwise distances between clusters:
        C_xx = squared_distances(x_c, x_c)
        C_yy = squared_distances(y_c, y_c)
        C_xy = squared_distances(x_c, y_c)

        # Compute the boolean masks:
        keep_xx = C_xx <= (truncate + cell_diameter)**2
        keep_yy = C_yy <= (truncate + cell_diameter)**2
        keep_xy = C_xy <= (truncate + cell_diameter)**2

        # Compute the KeOps reduction ranges:
        ranges_xx = from_matrix(ranges_x, ranges_x, keep_xx)
        ranges_yy = from_matrix(ranges_y, ranges_y, keep_yy)
        ranges_xy = from_matrix(ranges_x, ranges_y, keep_xy)

    return kernel_loss(
        α,
        x,
        β,
        y,
        blur=blur,
        kernel=kernel,
        name=name,
        potentials=potentials,
        use_keops=True,
        ranges_xx=ranges_xx,
        ranges_yy=ranges_yy,
        ranges_xy=ranges_xy,
    )
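For context, a hedged usage sketch: in geomloss, a routine like kernel_multiscale is normally reached through the SamplesLoss front-end rather than called directly. The sizes and parameter values below are illustrative:

import torch
from geomloss import SamplesLoss

# Gaussian kernel loss with the truncated, block-sparse "multiscale" backend:
loss = SamplesLoss(loss="gaussian", blur=0.05, truncate=5, backend="multiscale")

N, M = 100000, 100000
x, y = torch.rand(N, 3), torch.rand(M, 3)
α, β = torch.ones(N) / N, torch.ones(M) / M

L = loss(α, x, β, y)  # scalar tensor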