Example #1
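All three snippets below rely on PyTorch and on a pairwise-distance helper that is not shown on this page. A minimal sketch of the assumed prerequisites, with _distance_matrix taken to return the dense [n, n] matrix of pairwise Euclidean distances between node embeddings (the original helper may differ, e.g. use squared distances):

import torch
import torch.nn.functional as F


def _distance_matrix(x: torch.Tensor) -> torch.Tensor:
    # Assumed helper (not part of the original snippets): pairwise
    # Euclidean (L2) distances between the rows of x, shape [n, n].
    return torch.cdist(x, x, p=2)
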
def weighted_medoid(A: torch.sparse.FloatTensor, x: torch.Tensor,
                    **kwargs) -> torch.Tensor:
    """A weighted Medoid aggregation.

    Parameters
    ----------
    A : torch.sparse.FloatTensor
        Sparse [n, n] tensor of the weighted/normalized adjacency matrix.
    x : torch.Tensor
        Dense [n, d] tensor containing the node attributes/embeddings.

    Returns
    -------
    torch.Tensor
        The new embeddings [n, d].
    """
    N, D = x.shape
    l2 = _distance_matrix(x)
    A_cpu_dense = A.cpu()
    l2_cpu = l2.cpu()
    if A.is_sparse:
        A_cpu_dense = A_cpu_dense.to_dense()
    # distances[i, j, k] = A[i, k] * d(x_j, x_k): the cost of picking node j
    # as the medoid of node i's weighted neighborhood (dense [N, N, N]).
    distances = A_cpu_dense[:, None, :].expand(N, N, N) * l2_cpu
    # Rule out candidates j that are not neighbors of i (A[i, j] == 0).
    distances[A_cpu_dense == 0] = torch.finfo(distances.dtype).max
    distances = distances.sum(-1).to(x.device)
    # Summing the sentinel entries overflows to inf; clamp back to a finite
    # maximum so that argmin stays well-defined.
    distances[~torch.isfinite(distances)] = torch.finfo(distances.dtype).max
    # Scale the medoid's embedding by each row's total edge weight so the
    # output magnitude matches a weighted-sum aggregation.
    row_sum = A_cpu_dense.sum(-1)[:, None].to(x.device)
    return row_sum * x[distances.argmin(-1)]
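A quick usage sketch on a hypothetical 4-node ring graph with a row-normalized adjacency (data invented for illustration; note that the dense [n, n, n] intermediate above limits this implementation to small graphs):

n, d = 4, 2
x = torch.randn(n, d)
# Each node is connected to its two ring neighbors with weight 0.5,
# so every row of A sums to 1.
indices = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 3],
                        [1, 3, 0, 2, 1, 3, 0, 2]])
A = torch.sparse_coo_tensor(indices, torch.full((8,), 0.5), (n, n))
out = weighted_medoid(A, x)
print(out.shape)  # torch.Size([4, 2]); row i is the medoid of i's neighborhood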
Example #2
def soft_weighted_medoid(A: torch.sparse.FloatTensor,
                         x: torch.Tensor,
                         temperature: float = 1.0,
                         **kwargs) -> torch.Tensor:
    """A weighted Medoid aggregation.

    Parameters
    ----------
    A : torch.sparse.FloatTensor
        Sparse [n, n] tensor of the weighted/normalized adjacency matrix.
    x : torch.Tensor
        Dense [n, d] tensor containing the node attributes/embeddings.
    temperature : float, optional
        Temperature for the argmin approximation by softmax, by default 1.0

    Returns
    -------
    torch.Tensor
        The new embeddings [n, d].
    """
    N, D = x.shape
    l2 = _distance_matrix(x)
    A_cpu_dense = A.cpu()
    l2_cpu = l2.cpu()
    if A.is_sparse:
        A_cpu_dense = A_cpu_dense.to_dense()
    # Same neighborhood-weighted distance sums as in the hard medoid above.
    distances = A_cpu_dense[:, None, :].expand(N, N, N) * l2_cpu
    distances[A_cpu_dense == 0] = torch.finfo(distances.dtype).max
    distances = distances.sum(-1).to(x.device)
    distances[~torch.isfinite(distances)] = torch.finfo(distances.dtype).max
    row_sum = A_cpu_dense.sum(-1)[:, None].to(x.device)
    # Differentiable relaxation: a softmax over negative distances replaces
    # the hard argmin; lower temperatures sharpen toward the exact medoid.
    return row_sum * (F.softmax(-distances / temperature, dim=-1) @ x)
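Lowering the temperature sharpens the softmax toward the hard argmin, so the soft aggregation approaches weighted_medoid; a sketch reusing the hypothetical graph above:

out_smooth = soft_weighted_medoid(A, x, temperature=1.0)   # differentiable mixture
out_sharp = soft_weighted_medoid(A, x, temperature=1e-3)   # nearly one-hot softmax
# For generic random x (no near-ties among the distances), the sharp version
# should match the hard medoid up to numerical tolerance.
print(torch.allclose(out_sharp, weighted_medoid(A, x), atol=1e-4))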
Example #3
def weighted_dimwise_median_cpu(A: torch.sparse.FloatTensor, x: torch.Tensor,
                                **kwargs) -> torch.Tensor:
    """A weighted dimension-wise Median aggregation (cpu implementation).

    Parameters
    ----------
    A : torch.sparse.FloatTensor
        Sparse [n, n] tensor of the weighted/normalized adjacency matrix.
    x : torch.Tensor
        Dense [n, d] tensor containing the node attributes/embeddings.

    Returns
    -------
    torch.Tensor
        The new embeddings [n, d].
    """
    N, D = x.shape
    # Sort each dimension independently; index_x[m, d] is the node holding
    # the m-th smallest value in dimension d.
    x_sorted, index_x = torch.sort(x, dim=0)
    matrix_index_for_each_node = torch.arange(
        N, dtype=torch.long)[:, None, None].expand(N, N, D)
    A_cpu_dense = A.cpu()
    if A.is_sparse:
        A_cpu_dense = A_cpu_dense.to_dense()
    # cum_sorted_weights[i, m, d]: cumulative edge weight that node i assigns
    # to the m smallest values of dimension d.
    cum_sorted_weights = A_cpu_dense[matrix_index_for_each_node,
                                     index_x].cumsum(1)
    # Weights are non-negative, so the running maximum is the row's total.
    weight_sum_per_node = cum_sorted_weights.max(1)[0]
    # The weighted median sits at the first sorted position whose cumulative
    # weight reaches half of the total.
    median_element = (cum_sorted_weights <
                      (weight_sum_per_node / 2)[:, None].expand(
                          N, N, D)).sum(1).to(A.device)

    # Map the median's rank back to the original node index and gather its
    # value, dimension by dimension.
    matrix_reverse_index = torch.arange(D, dtype=torch.long)[None, :].expand(
        N, D).to(A.device)
    x_selected = x[index_x[median_element, matrix_reverse_index],
                   matrix_reverse_index]
    # Scale by the per-row weight sum, as in the medoid aggregations.
    return weight_sum_per_node.to(A.device) * x_selected
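Usage matches the other aggregations, but the output is generally not any single neighbor's embedding: each dimension independently picks its own weighted median, sketched with the same hypothetical graph as above:

out = weighted_dimwise_median_cpu(A, x)
# out[i, d] is x[j, d] for the neighbor j that holds the weighted median of
# dimension d within i's neighborhood, scaled by row i's total edge weight.
print(out.shape)  # torch.Size([4, 2])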