def _reduce(x: torch.Tensor):
    # The pickle dispatch table cannot distinguish torch.sparse.FloatTensor
    # from torch.Tensor, so sparse tensors are detected here instead.
    if isinstance(x, (torch.sparse.FloatTensor, torch.sparse.LongTensor)):
        int_type = _get_int_type(torch.max(x._indices()).item())
        return _sparse_tensor_constructor, (x._indices().to(int_type),
                                            x._values(), x.size())
    else:
        # Fall back to the regular dense-tensor pickling.
        return torch.Tensor.__reduce_ex__(x, pickle.HIGHEST_PROTOCOL)
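A minimal sketch of how such a reducer can be wired into pickling via copyreg. `_get_int_type` and `_sparse_tensor_constructor` are project helpers not shown above, so hypothetical stand-ins are defined here:

import copyreg
import pickle

import torch

# Hypothetical stand-ins for the project helpers used by _reduce:
def _get_int_type(max_value: int):
    # Smallest integer dtype that can hold the largest index.
    return torch.int32 if max_value <= torch.iinfo(torch.int32).max else torch.int64

def _sparse_tensor_constructor(indices, values, size):
    # sparse_coo_tensor requires int64 indices, so cast back.
    return torch.sparse_coo_tensor(indices.long(), values, size)

# Register for torch.Tensor: sparse tensors report type(x) == torch.Tensor,
# which is why _reduce dispatches on isinstance internally. Note this
# affects all tensor pickling in the process.
copyreg.pickle(torch.Tensor, _reduce)

A = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]),
                            torch.tensor([1.0, 2.0]), (2, 2))
A_roundtrip = pickle.loads(pickle.dumps(A))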
Example #2
def weighted_dimwise_median(A: torch.sparse.FloatTensor, x: torch.Tensor,
                            **kwargs) -> torch.Tensor:
    """A weighted dimension-wise Median aggregation.

    Parameters
    ----------
    A : torch.sparse.FloatTensor
        Sparse [n, n] tensor of the weighted/normalized adjacency matrix
    x : torch.Tensor
        Dense [n, d] tensor containing the node attributes/embeddings

    Returns
    -------
    torch.Tensor
        The new embeddings [n, d]
    """
    if not A.is_cuda:
        return weighted_dimwise_median_cpu(A, x, **kwargs)

    assert A.is_sparse
    N, D = x.shape

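    # The project CUDA kernel returns, for each row and feature dimension,
    # the index of the neighbor holding the weighted median value.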
    median_idx = custom_cuda_kernels.dimmedian_idx(x, A)
    col_idx = torch.arange(D, device=A.device).view(1, -1).expand(N, D)
    x_selected = x[median_idx, col_idx]

    a_row_sum = torch_scatter.scatter_sum(A._values(), A._indices()[0],
                                          dim=-1).view(-1, 1).expand(N, D)
    return a_row_sum * x_selected
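For intuition only, a dense pure-PyTorch sketch of the same aggregation (suitable for small inputs and for checking the kernel's semantics; the names here are illustrative, not part of the project):

import torch

def weighted_dimwise_median_dense(A: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    # For each row i and feature j, take the weighted median of x[:, j]
    # under the weights A[i, :] (assumed non-negative), then rescale by the
    # weight sum of row i, matching a_row_sum * x_selected above.
    n, d = x.shape
    sort_val, sort_idx = x.sort(dim=0)                     # [n, d]
    cum_w = A[:, sort_idx].cumsum(dim=1)                   # [n, n, d]
    row_sum = A.sum(dim=1)                                 # [n]
    half = row_sum.view(-1, 1, 1) / 2
    # First sorted position where the cumulative weight reaches half.
    med_pos = (cum_w < half).sum(dim=1).clamp(max=n - 1)   # [n, d]
    x_selected = sort_val[med_pos, torch.arange(d)]        # [n, d]
    return row_sum.view(-1, 1) * x_selected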
Example #3
def expand_adjacency_tensor(
        adj: torch.sparse.FloatTensor) -> torch.sparse.FloatTensor:
    row, col = adj._indices()
    num_interactions = len(row) * 2

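    # Incidence-style expansion: edge i occupies two rows in the result,
    # row 2*i for its source node and row 2*i + 1 for its target node.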
    row_new = torch.cat([
        torch.arange(0, num_interactions, 2,
                     dtype=torch.long, device=settings.device),
        torch.arange(1, max(1, num_interactions), 2,
                     dtype=torch.long, device=settings.device),
    ])
    col_new = torch.cat([row, col])
    data_new = torch.ones(num_interactions,
                          dtype=torch.float,
                          device=settings.device)
    # number of interactions x sequence length
    size_new = (num_interactions, adj.shape[0])
    adj_new = torch.sparse_coo_tensor(torch.stack([row_new, col_new]),
                                      data_new,
                                      size=size_new)
    return adj_new
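A small usage sketch; `settings.device` comes from the surrounding project, so point it at your own device constant:

import torch

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
adj = torch.sparse_coo_tensor(edge_index, torch.ones(3), (3, 3))
expanded = expand_adjacency_tensor(adj)
print(expanded.shape)  # torch.Size([6, 3]): two rows per edge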
Example #4
def add_eye_sparse_tensor(x: torch.sparse.FloatTensor,
                          bandwidth: int) -> torch.sparse.FloatTensor:
    """Add a band of ones around the diagonal of a square sparse tensor. (Not used!)"""
    if not bandwidth:
        return x
    indices_list = [x._indices()]
    values_list = [x._values()]
    # Add diagonal
    indices_list.append(
        torch.arange(0, x.shape[1], dtype=torch.long,
                     device=x.device).repeat(2, 1))
    values_list.append(
        torch.ones(x.shape[1], dtype=torch.float, device=x.device))
    # Add off-diagonals
    for k in range(1, bandwidth):
        # Indices of the k-th upper diagonal; flipping the rows gives the
        # matching lower diagonal.
        indices_upper = torch.stack([
            torch.arange(x.shape[1] - k, dtype=torch.long, device=x.device),
            k + torch.arange(x.shape[1] - k, dtype=torch.long,
                             device=x.device),
        ])
        indices_list.extend([indices_upper, indices_upper.flip(0)])
        # Values
        values = torch.ones(x.shape[1] - k, dtype=torch.float,
                            device=x.device)
        values_list.extend([values, values])
    # Index tensors are [2, nnz], so concatenate along dim=1; note that
    # x.size is a method and must be called.
    out = torch.sparse_coo_tensor(torch.cat(indices_list, dim=1),
                                  torch.cat(values_list),
                                  size=x.size())
    return out
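A quick check of the banding behaviour (overlapping entries are summed once the result is coalesced):

import torch

x = torch.sparse_coo_tensor(torch.tensor([[0, 2], [1, 0]]),
                            torch.tensor([5.0, 7.0]), (3, 3))
banded = add_eye_sparse_tensor(x, bandwidth=2)
print(banded.coalesce().to_dense())
# Ones on the main diagonal and the k = 1 off-diagonals are added on top
# of the existing entries (5.0 at [0, 1] becomes 6.0).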
Example #5
def _sparse_masked_select_abs(self,
                              sparse_tensor: torch.sparse.FloatTensor,
                              thr):
    indices = sparse_tensor._indices()
    values = sparse_tensor._values()
    prune_mask = torch.abs(values) >= thr
    return torch.sparse_coo_tensor(
        indices=indices.masked_select(prune_mask).reshape(2, -1),
        values=values.masked_select(prune_mask),
        size=[self.n_outputs, self.n_inputs]).coalesce()
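Usage sketch: the method reads n_outputs/n_inputs from its owning object, so a SimpleNamespace stands in for the real layer here:

import torch
from types import SimpleNamespace

layer = SimpleNamespace(n_outputs=3, n_inputs=3)
w = torch.sparse_coo_tensor(torch.tensor([[0, 1, 2], [0, 1, 2]]),
                            torch.tensor([0.05, -0.9, 0.3]), (3, 3))
pruned = _sparse_masked_select_abs(layer, w, thr=0.1)
print(pruned.to_dense())  # the 0.05 entry is pruned; -0.9 and 0.3 survive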
Example #6
def _sparse_top_k(A: torch.sparse.FloatTensor,
                  k: int,
                  return_sparse: bool = True):
    n = A.shape[0]

    if A.is_cuda:
        topk_values, topk_idx = custom_cuda_kernels.topk(
            A._indices(), A._values(), n, k)
        if not return_sparse:
            return topk_values, topk_idx.long()

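        # The CUDA kernel pads rows with fewer than k entries with index -1.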
        mask = topk_idx != -1
        row_idx = torch.arange(n, device=A.device).view(-1, 1).expand(n, k)
        return torch.sparse.FloatTensor(
            torch.stack((row_idx[mask], topk_idx[mask].long())),
            topk_values[mask])

    n_edges_per_row = torch_scatter.scatter_sum(torch.ones_like(A._values()),
                                                A._indices()[0],
                                                dim=0)
    k_per_row = torch.clamp(n_edges_per_row, max=k).long()

    new_idx, value_idx, unroll_idx = _select_k_idx_cpu(
        A._indices()[0].cpu().numpy(),
        A._indices()[1].cpu().numpy(),
        A._values().cpu().numpy(),
        k_per_row.cpu().numpy(),
        n,
        method='top')

    new_idx = torch.from_numpy(np.hstack(new_idx)).to(A.device)
    value_idx = torch.from_numpy(np.hstack(value_idx)).to(A.device)

    if return_sparse:
        return torch.sparse.FloatTensor(new_idx, A._values()[value_idx])
    else:
        unroll_idx = np.hstack(unroll_idx)
        values = torch.zeros((n, k), device=A.device)
        indices = -torch.ones((n, k), device=A.device, dtype=torch.long)
        values[new_idx[0], unroll_idx] = A._values()[value_idx]
        indices[new_idx[0], unroll_idx] = new_idx[1]
        return values, indices
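The CUDA kernels and _select_k_idx_cpu are project internals not shown here. For reference, a dense sketch of the return_sparse=False convention (it assumes non-negative weights, since a dense matrix cannot distinguish stored zeros from missing entries):

import torch

def sparse_top_k_dense_reference(A_dense: torch.Tensor, k: int):
    values, indices = torch.topk(A_dense, k, dim=1)
    # Mirror the padding convention above: missing entries get index -1.
    indices = indices.masked_fill(values == 0, -1)
    return values, indices

A_dense = torch.tensor([[0.0, 0.5, 0.2],
                        [0.9, 0.0, 0.0],
                        [0.0, 0.0, 0.0]])
vals, idx = sparse_top_k_dense_reference(A_dense, k=2)
# vals: [[0.5, 0.2], [0.9, 0.0], [0.0, 0.0]]
# idx:  [[1, 2], [0, -1], [-1, -1]]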
Example #7
def remove_eye_sparse_tensor(x: torch.sparse.FloatTensor,
                             bandwidth: int) -> torch.sparse.FloatTensor:
    """Set diagonal (and offdiagonal) elements to zero.

    Args:
        x: Input array.
        bandwidth: Width of the diagonal 0 band.
    """
    if not bandwidth:
        return x
    indices = x._indices()
    values = x._values()
    keep_mask = (indices[0, :] - indices[1, :]).abs() > bandwidth
    # Pass the size explicitly so the shape is preserved even when entries
    # in the boundary rows/columns are removed.
    out = torch.sparse_coo_tensor(indices[:, keep_mask], values[keep_mask],
                                  size=x.size())
    return out
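A quick check with bandwidth=1, which drops the main diagonal and the first off-diagonals:

import torch

dense = torch.tensor([[1.0, 2.0, 8.0],
                      [3.0, 4.0, 5.0],
                      [9.0, 6.0, 7.0]])
out = remove_eye_sparse_tensor(dense.to_sparse(), bandwidth=1)
print(out.to_dense())
# Only entries with |row - col| > 1 survive: the 8.0 at [0, 2] and the
# 9.0 at [2, 0].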