Example #1
import torch
from scipy.sparse import coo_matrix


def torch_coo_to_scipy_coo(m: torch.sparse.FloatTensor) -> coo_matrix:
    """Convert a :class:`torch.sparse.FloatTensor` to a
    :class:`scipy.sparse.coo_matrix`."""
    # SciPy expects NumPy arrays for the values and the (row, col) indices.
    data = m.values().numpy()
    indices = m.indices().numpy()
    return coo_matrix((data, (indices[0], indices[1])), tuple(m.size()))
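A minimal usage sketch (the small sparse tensor below is an illustrative assumption, not part of the original snippet):

# Hypothetical usage of torch_coo_to_scipy_coo
t = torch.sparse_coo_tensor(
    indices=torch.tensor([[0, 1], [2, 0]]),
    values=torch.tensor([3.0, 4.0]),
    size=(2, 3),
).coalesce()
s = torch_coo_to_scipy_coo(t)
print(s.toarray())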
Example #2
import pickle

import torch


def _reduce(x: torch.sparse.FloatTensor):
    # The dispatch table cannot distinguish torch.sparse.FloatTensor from torch.Tensor,
    # so sparse tensors are detected explicitly here.
    if isinstance(x, torch.sparse.FloatTensor) or isinstance(
            x, torch.sparse.LongTensor):
        # Store the indices with the narrowest integer type that fits the largest index.
        int_type = _get_int_type(torch.max(x._indices()).item())
        return _sparse_tensor_constructor, (x._indices().to(int_type),
                                            x._values(), x.size())
    else:
        # Fall back to the default dense-tensor pickling protocol.
        return torch.Tensor.__reduce_ex__(x, pickle.HIGHEST_PROTOCOL)
Example #3
    def __init__(self,
                 weights: torch.sparse.FloatTensor,
                 biases=None,
                 activation=None):
        super(SparseFCLayer, self).__init__()
        if not weights.is_sparse:
            raise ValueError("Left weights must be sparse")
        elif not weights.is_coalesced():
            raise ValueError("Left weights must be coalesced")

        # Dimensions are reversed: weights is stored as (n_outputs, n_inputs)
        self.n_inputs = weights.size(1)
        self.n_outputs = weights.size(0)
        self._activation = activation

        self._weights = Parameter(weights)
        if biases is None:
            self._biases = Parameter(torch.Tensor(self.n_outputs, 1))
            torch.nn.init.zeros_(self._biases)
        else:
            self._biases = Parameter(biases)
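A minimal construction sketch, assuming torch and the surrounding class definition are in scope (the 2x3 weight values below are illustrative, not from the original):

# Hypothetical usage: a coalesced sparse weight matrix with n_outputs=2, n_inputs=3.
w = torch.sparse_coo_tensor(
    indices=torch.tensor([[0, 1], [2, 0]]),
    values=torch.tensor([0.5, -1.0]),
    size=(2, 3),
).coalesce()
layer = SparseFCLayer(w, activation=torch.relu)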
Example #4
    def forward(self, h: torch.sparse.FloatTensor,
                adj: torch.sparse.FloatTensor):
        """
        Forward pass the sprase GAT layer.
        Inputs:
            h.shape == (num_nodes, in_features)
            adjacency.shape == (num_nodes, num_nodes)
        Outputs:
            out.shape == out_features
        """
        num_nodes = h.size()[0]

        # h here is effectively (W h) in transposed form; using the transpose does not
        # change the end result.
        # After the projection below, h.shape == (num_nodes, out_features)
        h = torch.mm(h, self.weight)
        assert not torch.isnan(h).any()

        # edge.shape == (2, E), where E is the number of non-zero entries in adj
        edge = torch.nonzero(adj, as_tuple=False).t()
        # edge_h.shape == (2 * out_features, E): concatenate Wh_i and Wh_j for each edge (i, j)
        edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
        """ Self-attention on the nodes - Shared attention mechanism """
        # edge_e.shape == (E) - This is the numerator in equation 3
        edge_e = torch.exp(self.leakyrelu(
            self.alpha.mm(edge_h).squeeze()))  # I just removed the minus here
        assert not torch.isnan(edge_e).any()
        # e_rowsum.shape == (num_nodes x 1) - This is the denominator in equation 3
        e_rowsum = self.special_spmm(edge, edge_e,
                                     torch.Size([num_nodes, num_nodes]),
                                     torch.ones(size=(num_nodes, 1)))

        # h_prime.shape == (num_nodes x out)
        # Aggregate Wh using the attention coefficients
        edge_e = self.dropout(edge_e)
        h_prime = self.special_spmm(edge, edge_e,
                                    torch.Size([num_nodes, num_nodes]), h)
        assert not torch.isnan(h_prime).any()

        # h_prime.shape == (num_nodes x out)
        # Divide by normalising factor
        h_prime = h_prime.div(e_rowsum)

        assert not torch.isnan(h_prime).any()
        return h_prime
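The calls to self.special_spmm above compute a sparse-attention-matrix times dense-matrix product, and "equation 3" in the comments presumably refers to the softmax-normalized attention coefficients of the GAT paper (Velickovic et al., 2018). A minimal stand-in that matches how special_spmm is called here (the repository's actual implementation is likely a custom autograd function; this body is an assumption):

import torch


def special_spmm(indices, values, shape, dense):
    # Assumed stand-in: sparse (N, N) attention matrix times dense (N, F) features.
    attention = torch.sparse_coo_tensor(indices, values, shape)
    return torch.sparse.mm(attention, dense)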