Example #1
    def wrapper(actual: Tensor, expected: Tensor,
                **kwargs: Any) -> Optional[_TestingErrorMeta]:
        if not actual.is_sparse:
            return check_tensors(actual, expected, **kwargs)

        if actual._nnz() != expected._nnz():
            return _TestingErrorMeta(
                AssertionError,
                f"The number of specified values does not match: {actual._nnz()} != {expected._nnz()}"
            )

        kwargs_equal = dict(kwargs, rtol=0, atol=0)
        error_meta = check_tensors(actual._indices(), expected._indices(),
                                   **kwargs_equal)
        if error_meta:
            return error_meta.amend_msg(
                postfix="\n\nThe failure occurred for the indices.")

        error_meta = check_tensors(actual._values(), expected._values(),
                                   **kwargs)
        if error_meta:
            return error_meta.amend_msg(
                postfix="\n\nThe failure occurred for the values.")

        return None
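
A minimal usage sketch of the same comparison logic, assuming only torch (check_tensors and _TestingErrorMeta come from PyTorch's internal testing helpers and are not shown in this listing):

import torch

a = torch.tensor([[0.0, 1.0], [2.0, 0.0]]).to_sparse().coalesce()
b = torch.tensor([[0.0, 1.0], [2.0, 0.0]]).to_sparse().coalesce()

# mirror the wrapper's three checks: nnz count, exact indices, values with tolerance
assert a._nnz() == b._nnz()
assert torch.equal(a._indices(), b._indices())
torch.testing.assert_close(a._values(), b._values())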
Example #2
def kronecker_torch(t1: Tensor, t2: Tensor) -> Tensor:
    r"""
    Compute the Kronecker product of :math:`\mathbf{T}_1` and :math:`\mathbf{T}_2`.
    This function is implemented with the torch API and is not efficient for sparse {0, 1} matrices.

    :param t1: input tensor 1
    :param t2: input tensor 2
    :return: kronecker product of :math:`\mathbf{T}_1` and :math:`\mathbf{T}_2`
    """
    batch_num = t1.shape[0]
    t1dim1, t1dim2 = t1.shape[1], t1.shape[2]
    t2dim1, t2dim2 = t2.shape[1], t2.shape[2]
    if t1.is_sparse and t2.is_sparse:
        # note: the sparse path builds a single 2-D Kronecker product from
        # COO indices/values and does not use the batch dimension
        tt_idx = torch.stack((t1._indices()[0, :] * t2dim1,
                              t1._indices()[1, :] * t2dim2))
        tt_idx = torch.repeat_interleave(
            tt_idx, t2._nnz(), dim=1) + t2._indices().repeat(1, t1._nnz())
        tt_val = torch.repeat_interleave(t1._values(), t2._nnz(),
                                         dim=0) * t2._values().repeat(t1._nnz())
        tt = torch.sparse.FloatTensor(
            tt_idx, tt_val, torch.Size((t1dim1 * t2dim1, t1dim2 * t2dim2)))
    else:
        t1 = t1.reshape(batch_num, -1, 1)
        t2 = t2.reshape(batch_num, 1, -1)
        tt = torch.bmm(t1, t2)
        tt = tt.reshape(batch_num, t1dim1, t1dim2, t2dim1, t2dim2)
        tt = tt.permute([0, 1, 3, 2, 4])
        tt = tt.reshape(batch_num, t1dim1 * t2dim1, t1dim2 * t2dim2)
    return tt
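
A usage sketch for the dense path (shapes are illustrative; assumes the excerpt's Tensor import is in scope):

import torch

t1 = torch.randn(4, 2, 3)  # batch of 4 matrices, each 2x3
t2 = torch.randn(4, 5, 7)  # batch of 4 matrices, each 5x7
tt = kronecker_torch(t1, t2)
assert tt.shape == (4, 2 * 5, 3 * 7)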
Example #3
def is_sparse(x: torch.Tensor) -> bool:
    """
    :param x: the tensor to test
    :return: True if x is a sparse tensor, else False
    """
    try:
        x._indices()
    except RuntimeError:
        return False
    return True
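
A quick sanity check of this probe (torch.Tensor also exposes an equivalent is_sparse attribute, used in the other examples here):

import torch

dense = torch.zeros(3, 3)
assert not is_sparse(dense)          # _indices() raises RuntimeError on strided tensors
assert is_sparse(dense.to_sparse())  # succeeds on a sparse COO tensor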
Example #4
def filter_which(x: Tensor, **kwargs):
    # ind_0 : gt 0
    # val: lt 0.01
    # ind_1  : eq 2
    # ind_1: eq [2,4,56]
    if not x.is_sparse:
        return filter_which(dense_to_sparse(x), **kwargs)

    def cmpn(op, tensor, val):
        if not isinstance(val, Iterable):
            return op(tensor, val)
        mask = None
        multiple_op = isinstance(op, Iterable)
        for i, item in enumerate(val):
            curr = op[i](tensor, item) if multiple_op else op(tensor, item)
            mask = curr if mask is None else torch.logical_and(mask, curr)
        return mask

    ind, val = x._indices(), x._values()
    mask = None
    for k, v in kwargs.items():
        cmd = k.split('_')
        op, v = v
        if cmd[0] == 'ind':
            dim = int(cmd[1])
            curr = cmpn(op, ind[dim], v)
        elif cmd[0] == 'val':
            curr = cmpn(op, val, v)
        else:
            continue
        mask = curr if mask is None else torch.logical_and(mask, curr)
    # print('total', mask.numel(), 'total remain', mask.sum())
    return ind2sparse(ind[:, mask], x.size(), values=val[mask])
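
The helpers ind2sparse and dense_to_sparse are not part of this listing; a minimal stand-in for ind2sparse plus a hypothetical call might look like this:

import torch
from collections.abc import Iterable  # needed by filter_which's cmpn helper

def ind2sparse(indices, size, values=None):
    # assumed behavior: build a coalesced sparse COO tensor from indices/values
    if values is None:
        values = torch.ones(indices.size(1))
    return torch.sparse_coo_tensor(indices, values, size).coalesce()

x = torch.rand(5, 5).to_sparse()
# keep entries whose row index is greater than 0 and whose value is below 0.5
filtered = filter_which(x, ind_0=(torch.gt, 0), val=(torch.lt, 0.5))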
Example #5
def get_truncated_svd(adjacency_matrix: torch.Tensor, rank: int = 50):
    """Truncated SVD preprocessing as proposed in Negin Entezari, Saba A. Al - Sayouri, Amirali Darvishzadeh, and
    Evangelos E. Papalexakis. All you need is Low(rank):  Defending against adversarial attacks on graphs.

    Attention: the result will not be sparse!

    Parameters
    ----------
    adjacency_matrix : torch.Tensor
        Sparse [n,n] adjacency matrix.
    rank : int, optional
        Rank of the truncated SVD, by default 50.

    Returns
    -------
    torch.Tensor
        Preprocessed adjacency matrix.
    """
    row, col = adjacency_matrix._indices().cpu()
    values = adjacency_matrix._values().cpu()
    N = adjacency_matrix.shape[0]

    low_rank_adj = sp.coo_matrix((values.numpy(), (row.numpy(), col.numpy())), (N, N))
    low_rank_adj = truncatedSVD(low_rank_adj, rank)
    low_rank_adj = torch.from_numpy(low_rank_adj).to(adjacency_matrix.device, adjacency_matrix.dtype)

    return svd_norm_adj(low_rank_adj).to_sparse()
Example #6
def get_jaccard(adjacency_matrix: torch.Tensor, features: torch.Tensor, threshold: float = 0.01):
    """Jaccard similarity edge filtering as proposed in Huijun Wu, Chen Wang, Yuriy Tyshetskiy, Andrew Docherty, Kai Lu,
    and Liming Zhu.  Adversarial examples for graph data: Deep insights into attack and defense.

    Parameters
    ----------
    adjacency_matrix : torch.Tensor
        Sparse [n,n] adjacency matrix.
    features : torch.Tensor
        Dense [n,d] feature matrix.
    threshold : float, optional
        Similarity threshold for filtering, by default 0.01.

    Returns
    -------
    torch.Tensor
        Preprocessed adjacency matrix.
    """
    row, col = adjacency_matrix._indices().cpu()
    values = adjacency_matrix._values().cpu()
    N = adjacency_matrix.shape[0]

    if features.is_sparse:
        features = features.to_dense()

    modified_adj = sp.coo_matrix((values.numpy(), (row.numpy(), col.numpy())), (N, N))
    modified_adj = drop_dissimilar_edges(features.cpu().numpy(), modified_adj, threshold=threshold)
    modified_adj = torch.sparse.FloatTensor(*from_scipy_sparse_matrix(modified_adj)).to(adjacency_matrix.device)
    return modified_adj
Example #7
def resize_sparse(x: Tensor, new_size, ind_shift):
    xi, xv = x._indices(), x._values()
    for i, shift in enumerate(ind_shift):
        if shift == 0:
            continue
        xi[i] += shift
    return ind2sparse(xi, new_size, values=xv)
Example #8
def sparse_minmax(a: Tensor, eps=1e-8, in_place=True) -> Tensor:
    a_x = a._values()
    a_x = minmax(a_x, eps=eps)
    if in_place:
        a._values().copy_(a_x)
        return a

    ret = ind2sparse(a._indices(), a.size(), values=a_x)
    return ret.coalesce()
Example #9
def torch_to_scisp(t: torch.Tensor) -> sp.coo_matrix:
    if t.is_sparse:
        indice = t._indices().numpy()
        values = t._values().numpy()
        m = sp.coo_matrix((values, (indice[0, :], indice[1, :])),
                          shape=t.shape)
        return m
    else:
        raise TypeError(f'{type(t)} is not torch.sparse')
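
Usage sketch (the function assumes scipy.sparse imported as sp and a CPU tensor):

import torch

t = torch.tensor([[0.0, 2.0], [3.0, 0.0]]).to_sparse().coalesce()
m = torch_to_scisp(t)
print(m.toarray())  # [[0. 2.], [3. 0.]]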
Example #10
    def from_sparse(ctx, tensor: torch.Tensor) -> "MaskedTensor":
        """Build from a torch sparse tensor.

        This function is not differentiable.
        """
        return ctx(indices=tensor._indices(),
                   values=tensor._values(),
                   shape=tensor.shape,
                   dtype=tensor.dtype,
                   device=tensor.device)
Example #11
    def wrapper(
        actual: Tensor,
        expected: Tensor,
        msg: Optional[Union[str, Callable[[Tensor, Tensor, Diagnostics],
                                          str]]] = None,
        **kwargs: Any,
    ) -> Optional[_TestingErrorMeta]:
        if not actual.is_sparse:
            return check_tensors(actual, expected, msg=msg, **kwargs)

        if actual._nnz() != expected._nnz():
            return _TestingErrorMeta(
                AssertionError,
                (f"The number of specified values in sparse COO tensors does not match: "
                 f"{actual._nnz()} != {expected._nnz()}"),
            )

        kwargs_equal = dict(kwargs, rtol=0, atol=0)
        error_meta = check_tensors(
            actual._indices(),
            expected._indices(),
            msg=msg or functools.partial(_make_mismatch_msg,
                                         identifier="Sparse COO indices"),
            **kwargs_equal,
        )
        if error_meta:
            return error_meta

        error_meta = check_tensors(
            actual._values(),
            expected._values(),
            msg=msg or functools.partial(_make_mismatch_msg,
                                         identifier="Sparse COO values"),
            **kwargs,
        )
        if error_meta:
            return error_meta

        return None
Example #12
    def from_torch_sparse_coo_tensor(self, mat: torch.Tensor,
                                     has_value: bool = True):
        mat = mat.coalesce()
        index = mat._indices()
        row, col = index[0], index[1]

        value: Optional[torch.Tensor] = None
        if has_value:
            value = mat.values()

        return SparseTensor(row=row, rowptr=None, col=col, value=value,
                            sparse_sizes=(mat.size(0), mat.size(1)),
                            is_sorted=True)
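
A usage sketch, assuming the torch_sparse package, where this method lives as a classmethod on SparseTensor:

import torch
from torch_sparse import SparseTensor

mat = torch.rand(4, 4).to_sparse()
st = SparseTensor.from_torch_sparse_coo_tensor(mat)
assert st.sizes() == [4, 4]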
Example #13
        def get_tensor_stat(
                tensor: torch.Tensor) -> List[Tuple[str, Tuple[int, ...], int, int]]:
            """Get the stat of a single tensor

            Returns:
                - stat: a list of (tensor_name, tensor_size,
            tensor_numel, tensor_memory) tuples
            """
            assert isinstance(tensor, torch.Tensor)

            name = self._get_tensor_name(tensor)
            if tensor.is_sparse:
                indices_stat = get_tensor_stat(tensor._indices())
                values_stat = get_tensor_stat(tensor._values())
                return indices_stat + values_stat

            numel = tensor.numel()
            element_size = tensor.element_size()
            fact_numel = tensor.storage().size()
            fact_memory_size = fact_numel * element_size
            # since PyTorch allocates at least 512 bytes for any tensor, round
            # up to a multiple of 512
            memory_size = math.ceil(fact_memory_size / PYTORCH_MIN_ALLOCATE) \
                    * PYTORCH_MIN_ALLOCATE

            # tensor.storage should be the actual object related to memory
            # allocation
            data_ptr = tensor.storage().data_ptr()
            if data_ptr in visited_data:
                name = '{}(->{})'.format(
                    name,
                    visited_data[data_ptr],
                )
                # don't count the memory for reusing same underlying storage
                memory_size = 0
            else:
                visited_data[data_ptr] = name

            size = tuple(tensor.size())
            # torch scalar has empty size
            if not size:
                size = (1, )

            return [(name, size, numel, memory_size)]
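
A small sketch of the rounding rule above; PYTORCH_MIN_ALLOCATE is assumed to be 512 bytes, the allocator's minimum block size:

import math

PYTORCH_MIN_ALLOCATE = 512
fact_memory_size = 10 * 4  # e.g. ten float32 elements occupy 40 bytes
memory_size = math.ceil(fact_memory_size / PYTORCH_MIN_ALLOCATE) * PYTORCH_MIN_ALLOCATE
assert memory_size == 512  # rounded up to one 512-byte block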
Example #14
    def _densify_chunk_of_labels(
        self, labels: torch.Tensor, chunk_start: int, chunk_end: int
    ) -> torch.Tensor:
        """Creates a dense chunk of a sparse label tensor.

        A chunk here is a range of entity values with 'chunk_start' being the lower
        bound and 'chunk_end' the upper bound.

        The resulting tensor contains the labels for the sp chunk and the po chunk.

        :param labels: sparse tensor containing the labels corresponding to the batch
        for sp and po

        :param chunk_start: int start index of the chunk

        :param chunk_end: int end index of the chunk

        :return: batch_size x chunk_size*2 dense tensor with labels for the sp chunk and
        the po chunk.

        """
        num_entities = self.dataset.num_entities()
        indices = labels._indices()
        mask_sp = (chunk_start <= indices[1, :]) & (indices[1, :] < chunk_end)
        mask_po = ((chunk_start + num_entities) <= indices[1, :]) & (
            indices[1, :] < (chunk_end + num_entities)
        )
        indices_sp_chunk = indices[:, mask_sp]
        indices_sp_chunk[1, :] = indices_sp_chunk[1, :] - chunk_start
        indices_po_chunk = indices[:, mask_po]
        indices_po_chunk[1, :] = (
            indices_po_chunk[1, :] - num_entities - chunk_start * 2 + chunk_end
        )
        indices_chunk = torch.cat((indices_sp_chunk, indices_po_chunk), dim=1)
        dense_labels = torch.sparse.LongTensor(
            indices_chunk,
            # since all sparse label tensors have the same value we could also
            # create a new tensor here without indexing with:
            # torch.full([indices_chunk.shape[1]], float("inf"), device=self.device)
            labels._values()[mask_sp | mask_po],
            torch.Size([labels.size()[0], (chunk_end - chunk_start) * 2]),
        ).to_dense()
        return dense_labels
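
A sanity check of the po remapping arithmetic above, with made-up sizes: a po column j in [chunk_start + num_entities, chunk_end + num_entities) should land in [chunk_size, 2 * chunk_size), where chunk_size = chunk_end - chunk_start.

num_entities, chunk_start, chunk_end = 100, 10, 20
chunk_size = chunk_end - chunk_start
j = chunk_start + num_entities  # first po column of the chunk
assert j - num_entities - chunk_start * 2 + chunk_end == chunk_size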
Example #15
def prepped_input(
        input: torch.Tensor,
        maybe_perturbed_input: Optional[torch.Tensor]) -> torch.Tensor:
    # Prepares the inputs to be passed into the function while including the new modified input.
    if input.layout == torch._mkldnn:  # type: ignore # no attr _mkldnn
        # Convert back to mkldnn
        if maybe_perturbed_input is not None:
            return maybe_perturbed_input.to_mkldnn()
        else:
            return input
    elif input.layout == torch.sparse_coo:
        # Modifications to entry are reflected in input so we could've just returned `input` here
        # but there is an issue where calling .coalesce on a tensor moves it off the graph when the
        # tensor is already coalesced, so analytical would always return 0 wrt to that input if it
        # is previously used to compute forward pass. To get around this, we need to do an extra clone here.
        # TODO: get rid of this extra clone once https://github.com/pytorch/pytorch/pull/52874 is landed
        # Make this new tensor require grad again in case the function has hooks
        return torch.sparse_coo_tensor(input._indices(), input._values(),
                                       input.size()).requires_grad_(True)
    else:
        # We cannot use entry (input.data) if we want gradgrad to work because
        # fn (in the gradgrad case) needs to compute grad wrt input
        return input
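
A sketch of the sparse branch in isolation; the rebuilt tensor is a new leaf that requires grad:

import torch

x = torch.rand(3, 3).to_sparse()
y = prepped_input(x, None)
assert y.layout == torch.sparse_coo and y.requires_grad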
Example #16
 def forward(
     self,
     n_feat: torch.Tensor,
     adj: torch.Tensor
 ):
     node_broadcast = self.broadcast(n_feat)
     (self_features,
      begin_features_sum,
      end_features_sum,
      begin_features_max,
      end_features_max) = torch.split(node_broadcast,
                                      self.num_out_feat,
                                      dim=-1)
     edge_info = adj._indices()
     begin_ids, end_ids = edge_info[0, :], edge_info[1, :]
     edge_features_max = SelectAdd(end_ids,
                                   begin_ids)(begin_features_max,
                                              end_features_max)
     edge_features_sum = SelectAdd(end_ids,
                                   begin_ids)(begin_features_sum,
                                              end_features_sum)
     edge_gathered_sum = self.gather(edge_features_sum)
     edge_gathered_sum = torch_scatter.scatter_add(edge_gathered_sum,
                                                   begin_ids,
                                                   dim=0)
     min_val = edge_features_max.min()
     edge_gathered_max = edge_features_max - min_val
     edge_gathered_max = torch_scatter.scatter_max(edge_gathered_max,
                                                   begin_ids,
                                                   dim=0)[0]
     edge_gathered_max = edge_gathered_max + min_val
     edge_gathered = torch.cat([edge_gathered_max,
                                edge_gathered_sum],
                               dim=-1)
     node_update = self.update(edge_gathered)
     outputs = self_features + node_update
     return outputs
Example #17
def scatter_op(tensor: Tensor, op="sum", dim=-1, dim_size=None):
    tensor = tensor.coalesce()
    return scatter(tensor._values(),
                   tensor._indices()[dim],
                   reduce=op,
                   dim_size=dim_size)
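
Usage sketch, assuming scatter here is torch_scatter.scatter:

import torch

x = torch.tensor([[1.0, 2.0], [0.0, 3.0]]).to_sparse()
row_sums = scatter_op(x, op="sum", dim=0, dim_size=x.size(0))
# row_sums -> tensor([3., 3.]): values are grouped by their row index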
Example #18
def rebuild_with_indices(sp: Tensor):
    sp = sp.coalesce()
    return ind2sparse(sp._indices(), sp.size(0), sp.size(1)).coalesce()
Example #19
def sparse_softmax(x: Tensor, dim=0):
    v = softmax(x._values(), x._indices()[dim])
    return ind2sparse(x._indices(), x.size(), values=v)
Example #20
def spmm_sd(s: Tensor, d: Tensor) -> Tensor:
    s = s.coalesce()
    i, v, s, t = s._indices(), s._values(), s.size(0), s.size(1)
    return torch_sparse.spmm(i, v, s, t, d)
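
Usage sketch, assuming the torch_sparse package provides spmm:

import torch

s = torch.rand(4, 5).to_sparse()
d = torch.rand(5, 3)
out = spmm_sd(s, d)  # equivalent to s.to_dense() @ d
assert out.shape == (4, 3)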
Example #21
def split_sp(sp: Tensor):
    return sp._indices(), sp._values(), sp.size()
Example #22
 def from_sparse(cls, x: torch.Tensor):
     spatial_shape = x.shape[1:-1]
     batch_size = x.shape[0]
     indices_th = x._indices().permute(1, 0).contiguous().int()
     features_th = x._values()
     return cls(features_th, indices_th, spatial_shape, batch_size)
Example #23
def sparse_dense_element_wise_op(sparse: Tensor, dense: Tensor, op=torch.mul):
    sparse = sparse.coalesce()
    assert sparse.dim() == 2
    ind, val = sparse._indices(), sparse._values()
    val = op(val, dense[ind[0], ind[1]])
    return ind2sparse(ind, sparse.size(), values=val)
Example #24
def sparse_min(tensor: Tensor, dim=-1):
    tensor = tensor.coalesce()
    return scatter_min(tensor._values(),
                       tensor._indices()[dim],
                       dim_size=tensor.size(dim))
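
Usage sketch, assuming scatter_min is torch_scatter.scatter_min; it returns a (values, argmin) pair and only sees the explicitly stored values:

import torch

x = torch.tensor([[1.0, 5.0], [0.0, -2.0]]).to_sparse()
min_vals, argmin = sparse_min(x, dim=0)  # per-row minimum over stored values: [1., -2.]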
Example #25
def _sparse_coo_scatter_reduction_helper(op,
                                         mask_input: Tensor,
                                         dims: Tuple[int, ...],
                                         keepdim: bool,
                                         dtype: Optional[DType] = None) -> Tensor:
    reduce = op.__name__
    valid_reductions = ['sum', 'prod', 'amax', 'amin']
    if reduce not in valid_reductions:
        raise ValueError(f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead")

    output_dtype = dtype
    values, indices = mask_input._values(), mask_input._indices()
    input_dims = mask_input.dim()
    num_sparse_dims = mask_input.sparse_dim()
    reduced_sparse_dims = []
    retained_sparse_dims = []
    reduced_dense_dims = []

    # promote dtype if specified
    if values.dtype != output_dtype:
        values = values.to(output_dtype)

    if keepdim:
        output_shape = tuple(1 if i in dims else si for (i, si) in enumerate(mask_input.shape))
    else:
        output_shape = tuple(si for (i, si) in enumerate(mask_input.shape) if i not in dims)

    for d in dims:
        if (d >= input_dims):
            continue

        if d < num_sparse_dims:
            reduced_sparse_dims.append(d)
        else:
            reduced_dense_dims.append(d + 1 - num_sparse_dims)

    # Reduce dense dimensions
    if len(reduced_dense_dims) > 0:
        if reduce == "sum":
            new_values = values
            new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim))
        else:
            # FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities
            return NotImplemented
    else:
        new_values = values.clone()

    # Reduce sparse dimensions
    if len(reduced_sparse_dims) == num_sparse_dims:
        if reduce in {'amax', 'amin'} and new_values.size(0) == 0:
            # IndexError: amax(): Expected reduction dim 0 to have non-zero size.
            # sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not
            # See https://github.com/pytorch/pytorch/issues/61901
            new_values = _reduction_identity(reduce, new_values)
        else:
            new_values = op(new_values, dim=0)
        if (keepdim):
            for _ in range(num_sparse_dims):
                new_values = new_values.unsqueeze(0)
        return new_values.to(dtype=output_dtype).to_sparse()
    else:
        new_indices = indices.clone()
        if keepdim:
            # zero out reduced sparse dimensions if keepdim = True
            # ensures that the call to torch.unique folds duplicated indices together while preserving the dimension
            new_indices[reduced_sparse_dims, :] = 0
        else:
            # remove reduced sparse dimensions if keepdim = False
            if (len(reduced_sparse_dims) > 0):
                retained_sparse_dims = [i for i in range(num_sparse_dims) if i not in set(reduced_sparse_dims)]
                new_indices = new_indices.index_select(0, torch.tensor(retained_sparse_dims).to(mask_input.device))

    # Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices
    if (new_indices.numel() > 0):
        # lexsort indices and get index tensor for scatter reduction
        new_indices, inverse_indices = torch.unique(new_indices, return_inverse=True, dim=1)
        out_shape = list(new_values.shape)
        out_shape[0] = new_indices.shape[1]
        for _ in range(new_values.ndim - 1):
            inverse_indices = inverse_indices.unsqueeze(-1)
        scatter_indices = inverse_indices.expand(new_values.shape)
        # FIXME: temporary workaround for issue with bfloat16/float16 remove when acctype is implemented for scatter_reduce
        if output_dtype in {torch.bfloat16, torch.float16}:
            new_values = new_values.to(torch.float)
            out = new_values.new_empty(out_shape)
            new_values = out.scatter_reduce_(0, scatter_indices, new_values, reduce=reduce, include_self=False)
            new_values = new_values.to(dtype=output_dtype)
        else:
            out = new_values.new_empty(out_shape)
            new_values = out.scatter_reduce_(0, scatter_indices, new_values, reduce=reduce, include_self=False)

    return torch.sparse_coo_tensor(new_indices, new_values, output_shape, dtype=output_dtype, device=mask_input.device)
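
A usage sketch, assuming the excerpt's own imports (Tensor, Optional, DType, _reduction_identity from torch.masked) are in scope: summing a 2-D sparse COO tensor over its first dimension (dtype is passed explicitly, since the excerpt promotes values to it):

import torch

x = torch.tensor([[1.0, 0.0], [3.0, 4.0]]).to_sparse()
out = _sparse_coo_scatter_reduction_helper(torch.sum, x, dims=(0,),
                                           keepdim=False, dtype=torch.float32)
assert torch.equal(out.to_dense(), torch.tensor([4.0, 4.0]))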