Example #1
    def transition_matrix(self, edge_index, edge_weight, num_nodes,
                          normalization):
        r"""Calculate the approximate, sparse diffusion on a given sparse
        matrix.

        Args:
            edge_index (LongTensor): The edge indices.
            edge_weight (Tensor): One-dimensional edge weights.
            num_nodes (int): Number of nodes.
            normalization (str): Normalization scheme. Options:
                1. `"sym"`: Symmetric normalization:
                :math:`\mathbf{T} = \mathbf{D}^{-1/2} \mathbf{A}
                \mathbf{D}^{-1/2}`

                2. `"col"`: Column-wise normalization:
                :math:`\mathbf{T} = \mathbf{A} \mathbf{D}^{-1}`

                3. `"row"`: Row-wise normalization:
                :math:`\mathbf{T} = \mathbf{D}^{-1} \mathbf{A}`

                4. `None`: No normalization.

        :rtype: (:class:`LongTensor`, :class:`Tensor`)
        """
        diag_idx = torch.arange(0, num_nodes, dtype=torch.long,
                                device=edge_index.device)
        diag_idx = diag_idx.unsqueeze(0).repeat(2, 1)

        if normalization == 'sym':
            _, col = edge_index
            D_vec = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
            D_vec_invsqrt = 1 / torch.sqrt(D_vec)
            edge_index, edge_weight = spspmm(diag_idx, D_vec_invsqrt,
                                             edge_index, edge_weight,
                                             num_nodes, num_nodes, num_nodes)
            edge_index, edge_weight = spspmm(edge_index, edge_weight, diag_idx,
                                             D_vec_invsqrt, num_nodes,
                                             num_nodes, num_nodes)
        elif normalization == 'col':
            _, col = edge_index
            D_vec = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
            D_vec_inv = 1 / D_vec
            edge_index, edge_weight = spspmm(edge_index, edge_weight, diag_idx,
                                             D_vec_inv, num_nodes, num_nodes,
                                             num_nodes)
        elif normalization == 'row':
            row, _ = edge_index
            D_vec = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
            D_vec_inv = 1 / D_vec
            edge_index, edge_weight = spspmm(diag_idx, D_vec_inv, edge_index,
                                             edge_weight, num_nodes, num_nodes,
                                             num_nodes)
        elif normalization is None:
            pass
        else:
            raise ValueError(
                'Transition matrix normalization {} unknown.'.format(
                    normalization))

        return edge_index, edge_weight
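A minimal standalone sketch of the "sym" branch above, assuming only torch and torch_sparse: the diagonal matrix D^{-1/2} is encoded with the [[0..n-1], [0..n-1]] index pattern and applied from both sides by two spspmm calls.

import torch
from torch_sparse import spspmm

# Toy 3-node graph with two directed edges and unit weights.
edge_index = torch.tensor([[0, 1], [1, 2]])
edge_weight = torch.ones(2)
num_nodes = 3

# Sparse encoding of the diagonal matrix D^{-1/2}.
diag_idx = torch.arange(num_nodes).unsqueeze(0).repeat(2, 1)
deg = torch.zeros(num_nodes).scatter_add_(0, edge_index[1], edge_weight)
d_invsqrt = deg.pow(-0.5)
d_invsqrt[torch.isinf(d_invsqrt)] = 0  # guard nodes with zero in-degree

# T = D^{-1/2} A D^{-1/2}, realized as two sparse-sparse products.
index, value = spspmm(diag_idx, d_invsqrt, edge_index, edge_weight,
                      num_nodes, num_nodes, num_nodes)
index, value = spspmm(index, value, diag_idx, d_invsqrt,
                      num_nodes, num_nodes, num_nodes)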
Example #2
    def forward(self, phi_indices, phi_values, phi_inverse_indices,
                phi_inverse_values, features):
        """
        Forward propagation pass.
        :param phi_indices: Sparse wavelet matrix index pairs.
        :param phi_values: Sparse wavelet matrix values.
        :param phi_inverse_indices: Inverse wavelet matrix index pairs.
        :param phi_inverse_values: Inverse wavelet matrix values.
        :param features: Feature matrix.
        :return localized_features: Localized, filtered feature matrix.
        """
        rescaled_phi_indices, rescaled_phi_values = spspmm(
            phi_indices, phi_values, self.diagonal_weight_indices,
            self.diagonal_weight_filter.view(-1), self.ncount, self.ncount,
            self.ncount)

        phi_product_indices, phi_product_values = spspmm(
            rescaled_phi_indices, rescaled_phi_values, phi_inverse_indices,
            phi_inverse_values, self.ncount, self.ncount, self.ncount)

        filtered_features = torch.mm(features, self.weight_matrix)

        localized_features = spmm(phi_product_indices, phi_product_values,
                                  self.ncount, filtered_features)

        return localized_features
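The two spspmm calls above compose Phi @ diag(g) @ Phi^{-1}. A hedged dense sanity check of that composition on random toy matrices (all names below are illustrative, not from the original module):

import torch
from torch_sparse import spspmm

n = 3
phi = torch.rand(n, n)
phi_inv = torch.inverse(phi)
g = torch.rand(n)

diag_idx = torch.arange(n).unsqueeze(0).repeat(2, 1)
phi_sp = phi.to_sparse().coalesce()
inv_sp = phi_inv.to_sparse().coalesce()

# Phi @ diag(g), then (Phi @ diag(g)) @ Phi^{-1}.
i1, v1 = spspmm(phi_sp.indices(), phi_sp.values(), diag_idx, g, n, n, n)
i2, v2 = spspmm(i1, v1, inv_sp.indices(), inv_sp.values(), n, n, n)

dense = torch.sparse_coo_tensor(i2, v2, (n, n)).to_dense()
assert torch.allclose(dense, phi @ torch.diag(g) @ phi_inv, atol=1e-4)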
Example #3
    def panentropy_sparse(self, edge_index, num_nodes, AFTERDROP,
                          edge_mask_list):

        edge_value = torch.ones(edge_index.size(1), device=edge_index.device)
        edge_index, edge_value = coalesce(edge_index, edge_value, num_nodes,
                                          num_nodes)

        # iteratively add weighted matrix power
        pan_index, pan_value = eye(num_nodes, device=edge_index.device)
        indextmp = pan_index.clone().to(edge_index.device)
        valuetmp = pan_value.clone().to(edge_index.device)

        pan_value = self.panconv_filter_weight[0] * pan_value

        for i in range(self.filter_size - 1):
            if AFTERDROP:
                indextmp, valuetmp = spspmm(indextmp, valuetmp, edge_index,
                                            edge_value * edge_mask_list[i],
                                            num_nodes, num_nodes, num_nodes)
            else:
                indextmp, valuetmp = spspmm(indextmp, valuetmp, edge_index,
                                            edge_value, num_nodes, num_nodes,
                                            num_nodes)
            valuetmp = valuetmp * self.panconv_filter_weight[i + 1]
            indextmp, valuetmp = coalesce(indextmp, valuetmp, num_nodes,
                                          num_nodes)
            pan_index = torch.cat((pan_index, indextmp), 1)
            pan_value = torch.cat((pan_value, valuetmp))

        return coalesce(pan_index, pan_value, num_nodes, num_nodes, op='add')
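The torch.cat/coalesce pattern above relies on coalesce(op='add') summing values that share a (row, col) position; a small demonstration:

import torch
from torch_sparse import coalesce

# Duplicate (0, 1) entries are summed, which is how the concatenated
# weighted matrix powers are merged into one sparse matrix.
index = torch.tensor([[0, 0, 1], [1, 1, 0]])
value = torch.tensor([0.5, 0.25, 1.0])
index, value = coalesce(index, value, m=2, n=2, op='add')
# index -> [[0, 1], [1, 0]], value -> [0.75, 1.0]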
Example #4
def test_spspmm(dtype, device):
    indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
    valueA = tensor([1, 2, 3, 4, 5], dtype, device)
    sizeA = torch.Size([3, 3])
    indexB = torch.tensor([[0, 2], [1, 0]], device=device)
    valueB = tensor([2, 4], dtype, device)
    sizeB = torch.Size([3, 2])

    indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
    assert indexC.tolist() == [[0, 1, 2], [0, 1, 1]]
    assert valueC.tolist() == [8, 6, 8]

    A = torch.sparse_coo_tensor(indexA, valueA, sizeA, device=device)
    A = A.to_dense().requires_grad_()
    B = torch.sparse_coo_tensor(indexB, valueB, sizeB, device=device)
    B = B.to_dense().requires_grad_()
    torch.matmul(A, B).sum().backward()

    valueA = valueA.requires_grad_()
    valueB = valueB.requires_grad_()
    indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
    valueC.sum().backward()

    assert valueA.grad.tolist() == A.grad[indexA[0], indexA[1]].tolist()
    assert valueB.grad.tolist() == B.grad[indexB[0], indexB[1]].tolist()
Example #5
def power_adj(adj, dim, p):
    # adj is a 2 x E index tensor; use unit values as the implicit weights.
    val = torch.ones(adj.shape[1], device=adj.device)
    ic, vc = spspmm(adj, val, adj, val, dim, dim, dim)
    if p > 2:
        for _ in range(p - 2):
            ic, vc = spspmm(ic, vc, adj, val, dim, dim, dim)
    return ic
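A hedged usage sketch, assuming spspmm is imported from torch_sparse as the snippet requires and that adj indexes a dim x dim adjacency:

import torch

# Undirected path 0-1-2, both edge directions listed.
adj = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
two_hop_index = power_adj(adj, dim=3, p=2)  # index pairs of A^2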
Example #6
    def forward(self, phi_indices, phi_values, phi_inverse_indices,
                phi_inverse_values, feature_indices, feature_values, dropout):
        """
        Forward propagation pass.
        :param phi_indices: Sparse wavelet matrix index pairs.
        :param phi_values: Sparse wavelet matrix values.
        :param phi_inverse_indices: Inverse wavelet matrix index pairs.
        :param phi_inverse_values: Inverse wavelet matrix values.
        :param feature_indices: Feature matrix index pairs.
        :param feature_values: Feature matrix values.
        :param dropout: Dropout rate.
        :return dropout_features: Localized, filtered feature matrix after
            ReLU and dropout.
        """
        rescaled_phi_indices, rescaled_phi_values = spspmm(
            phi_indices, phi_values, self.diagonal_weight_indices,
            self.diagonal_weight_filter.view(-1), self.ncount, self.ncount,
            self.ncount)

        phi_product_indices, phi_product_values = spspmm(
            rescaled_phi_indices, rescaled_phi_values, phi_inverse_indices,
            phi_inverse_values, self.ncount, self.ncount, self.ncount)

        filtered_features = spmm(feature_indices, feature_values, self.ncount,
                                 self.weight_matrix)

        localized_features = spmm(phi_product_indices, phi_product_values,
                                  self.ncount, filtered_features)

        dropout_features = torch.nn.functional.dropout(
            torch.nn.functional.relu(localized_features),
            training=self.training,
            p=dropout)
        return dropout_features
Example #7
def scale_elements(adj_matrix, adj_part, node_count, row_vtx, col_vtx):
    # `normalization` is a module-level flag in the original source.
    if not normalization:
        return adj_part

    # Scale each edge (u, v) by 1 / (sqrt(deg(u)) * sqrt(deg(v)))
    # indices = adj_part._indices()
    # values = adj_part._values()

    # deg_map = dict()
    # for i in range(adj_part._nnz()):
    #     u = indices[0][i] + row_vtx
    #     v = indices[1][i] + col_vtx

    #     if u.item() in deg_map:
    #         degu = deg_map[u.item()]
    #     else:
    #         degu = (adj_matrix[0] == u).sum().item()
    #         deg_map[u.item()] = degu

    #     if v.item() in deg_map:
    #         degv = deg_map[v.item()]
    #     else:
    #         degv = (adj_matrix[0] == v).sum().item()
    #         deg_map[v.item()] = degv

    #     values[i] = values[i] / (math.sqrt(degu) * math.sqrt(degv))
    
    adj_part = adj_part.coalesce()
    deg = torch.histc(adj_matrix[0].double(), bins=node_count)
    deg = deg.pow(-0.5)

    row_len = adj_part.size(0)
    col_len = adj_part.size(1)

    dleft = torch.sparse_coo_tensor([np.arange(0, row_len).tolist(),
                                     np.arange(0, row_len).tolist()],
                                    deg[row_vtx:(row_vtx + row_len)].float(),
                                    size=(row_len, row_len),
                                    requires_grad=False,
                                    device=torch.device("cpu"))

    dright = torch.sparse_coo_tensor([np.arange(0, col_len).tolist(),
                                      np.arange(0, col_len).tolist()],
                                     deg[col_vtx:(col_vtx + col_len)].float(),
                                     size=(col_len, col_len),
                                     requires_grad=False,
                                     device=torch.device("cpu"))

    # adj_part = torch.sparse.mm(torch.sparse.mm(dleft, adj_part), dright)
    ad_ind, ad_val = torch_sparse.spspmm(adj_part._indices(),
                                         adj_part._values(),
                                         dright._indices(), dright._values(),
                                         adj_part.size(0), adj_part.size(1),
                                         dright.size(1))

    adj_part_ind, adj_part_val = torch_sparse.spspmm(dleft._indices(),
                                                     dleft._values(),
                                                     ad_ind, ad_val,
                                                     dleft.size(0),
                                                     dleft.size(1),
                                                     adj_part.size(1))

    adj_part = torch.sparse_coo_tensor(adj_part_ind, adj_part_val,
                                       size=(adj_part.size(0),
                                             adj_part.size(1)),
                                       requires_grad=False,
                                       device=torch.device("cpu"))

    return adj_part
Example #8
def StAS(index_A, value_A, index_S, value_S, device, N, kN):
    r"""StAS: a function which returns new edge weights for the pooled graph using the formula S^{T}AS"""

    index_A, value_A = coalesce(index_A, value_A, m=N, n=N)
    index_S, value_S = coalesce(index_S, value_S, m=N, n=kN)
    index_B, value_B = spspmm(index_A, value_A, index_S, value_S, N, N, kN)

    index_St, value_St = transpose(index_S, value_S, N, kN)
    index_B, value_B = coalesce(index_B, value_B, m=N, n=kN)
    index_E, value_E = spspmm(index_St.cpu(), value_St.cpu(), index_B.cpu(), value_B.cpu(), kN, N, kN)

    return index_E.to(device), value_E.to(device)
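A toy invocation of StAS, assuming coalesce, transpose, and spspmm come from torch_sparse as in the snippet: four nodes pooled into two clusters.

import torch

# A: edges inside {0, 1} and inside {2, 3}; S assigns 0,1 -> cluster 0
# and 2,3 -> cluster 1.
index_A = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])
value_A = torch.ones(4)
index_S = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
value_S = torch.ones(4)

index_E, value_E = StAS(index_A, value_A, index_S, value_S,
                        torch.device('cpu'), N=4, kN=2)
# E = S^T A S is diagonal here: each cluster keeps its internal edge mass.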
Example #9
    def forward(self, phi_indices, phi_values, phi_inverse_indices,
                phi_inverse_values, features):

        rescaled_phi_indices, rescaled_phi_values = spspmm(
            phi_indices, phi_values, self.diagonal_weight_indices,
            self.diagonal_weight_filter.view(-1), self.ncount, self.ncount,
            self.ncount)
        phi_product_indices, phi_product_values = spspmm(
            rescaled_phi_indices, rescaled_phi_values, phi_inverse_indices,
            phi_inverse_values, self.ncount, self.ncount, self.ncount)
        filtered_features = torch.mm(features, self.weight_matrix)
        localized_features = spmm(phi_product_indices, phi_product_values,
                                  self.ncount, filtered_features)
        return localized_features
Example #10
def k_hop_edges(edge_index, num_nodes, K):
    # edge_index, _ = remove_self_loops(edge_index, None)
    n = num_nodes
    edge_index, _ = coalesce(edge_index, None, n, n)
    value = edge_index.new_ones((edge_index.size(1), ), dtype=torch.float)

    edges = []
    new_edge_index = edge_index.clone()
    new_edge_value = value.clone()
    useful_edge_index, edge_weight = add_self_loops(new_edge_index,
                                                    None,
                                                    fill_value=-1.,
                                                    num_nodes=num_nodes)
    edges.append(useful_edge_index)
    for i in range(K - 1):
        new_edge_index, new_edge_value = spspmm(new_edge_index, new_edge_value,
                                                edge_index, value, n, n, n)
        new_edge_index, new_edge_value = coalesce(new_edge_index,
                                                  new_edge_value, n, n)
        useful_edge_index, edge_weight = add_self_loops(new_edge_index,
                                                        None,
                                                        fill_value=-1.,
                                                        num_nodes=num_nodes)
        edges.append(useful_edge_index)

    # edge_index, _ = add_self_loops(edge_index, None,fill_value=-1.,num_nodes=num_nodes)
    return edges
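A hedged usage sketch, assuming coalesce, spspmm, and add_self_loops are imported as the snippet requires:

import torch

# Undirected path 0-1-2.
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
edges = k_hop_edges(edge_index, num_nodes=3, K=2)
# edges[0]: 1-hop edges, edges[1]: 2-hop edges, each with the
# self-loops appended by add_self_loops.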
Example #11
    def forward(self, x, edge_index, edge_attr):
        N, dim = x.shape
        # x = self.dropout(x)

        # adj_mat_ind, adj_mat_val = add_self_loops(edge_index, num_nodes=N)[0], edge_attr.squeeze()
        adj_mat_ind = add_remaining_self_loops(edge_index, num_nodes=N)[0]
        adj_mat_val = torch.ones(adj_mat_ind.shape[1]).to(x.device)

        h = torch.mm(x, self.weight)
        h = F.dropout(h, p=self.dropout, training=self.training)
        for _ in range(self.nhop - 1):
            adj_mat_ind, adj_mat_val = spspmm(adj_mat_ind, adj_mat_val,
                                              adj_mat_ind, adj_mat_val, N, N,
                                              N, True)

        adj_mat_ind, adj_mat_val = self.attention(h, adj_mat_ind, adj_mat_val)

        # MATRIX_MUL
        # laplacian matrix normalization
        adj_mat_val = self.normalization(adj_mat_ind, adj_mat_val, N)

        val_h = h
        # N, dim = val_h.shape

        # MATRIX_MUL
        # val_h = spmm(adj_mat_ind, F.dropout(adj_mat_val, p=self.node_dropout, training=self.training), N, N, val_h)
        val_h = spmm(adj_mat_ind, adj_mat_val, N, N, val_h)

        val_h[val_h != val_h] = 0  # zero out NaN entries (NaN != NaN)
        val_h = val_h + self.bias
        val_h = self.adaptive_enc(val_h)
        val_h = F.dropout(val_h, p=self.dropout, training=self.training)
        # val_h = self.activation(val_h)
        return val_h
Example #12
 def augment_adj(self, edge_index, edge_weight, num_nodes):
     edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,
                                               num_nodes)
     edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                      edge_weight, num_nodes, num_nodes,
                                      num_nodes)
     return edge_index, edge_weight
Example #13
    def forward(self, x, edge_index, edge_attr):
        row, col = edge_index
        deg = degree(col, x.size(0), dtype=x.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        edge_attr_t = deg_inv_sqrt[row] * edge_attr * deg_inv_sqrt[col]

        N = x.size(0)
        adj = torch.sparse_coo_tensor(edge_index, edge_attr_t, size=(N, N))

        theta = self.alpha * (1 - self.alpha)
        result = [theta * adj]

        for i in range(1, self.steps - 1):
            theta = theta * (1 - self.alpha)
            adj_ind, adj_val = spspmm(edge_index, edge_attr_t,
                                      result[i - 1]._indices(),
                                      result[i - 1]._values(), N, N, N, True)
            result.append(
                torch.sparse_coo_tensor(adj_ind, adj_val, size=(N, N)))

        identity = torch.sparse_coo_tensor([range(N)] * 2,
                                           torch.ones(N),
                                           size=(N, N)).to(x.device)
        result.append(self.alpha * identity)

        def fn(x, y):
            return x.add(y)

        res = reduce(fn, result)
        return res._indices(), res._values()
Example #14
    def __call__(self, data):
        edge_index, edge_attr = data.edge_index, data.edge_attr
        n = data.num_nodes

        fill = 1e16
        value = edge_index.new_full((edge_index.size(1), ),
                                    fill,
                                    dtype=torch.float)

        index, value = spspmm(edge_index, value, edge_index, value, n, n, n,
                              True)

        edge_index = torch.cat([edge_index, index], dim=1)
        if edge_attr is None:
            data.edge_index, _ = coalesce(edge_index, None, n, n)
        else:
            value = value.view(-1, *[1 for _ in range(edge_attr.dim() - 1)])
            value = value.expand(-1, *list(edge_attr.size())[1:])
            edge_attr = torch.cat([edge_attr, value], dim=0)
            data.edge_index, edge_attr = coalesce(edge_index,
                                                  edge_attr,
                                                  n,
                                                  n,
                                                  op='min',
                                                  fill_value=fill)
            edge_attr[edge_attr >= fill] = 0
            data.edge_attr = edge_attr

        return data
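A hedged usage sketch; the transform class is given the hypothetical name TwoHop here, since the snippet only shows its __call__:

import torch
from torch_geometric.data import Data

data = Data(edge_index=torch.tensor([[0, 1], [1, 2]]), num_nodes=3)
data = TwoHop()(data)  # edge_index now also links nodes two hops apart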
Example #15
def spmmsp(sp1: torch.Tensor, sp2: torch.Tensor) -> torch.Tensor:
    # Inputs must be coalesced sparse COO tensors, since .indices() and
    # .values() raise on uncoalesced tensors.
    assert sp1.size(-1) == sp2.size(0) and sp1.is_sparse and sp2.is_sparse
    m = sp1.size(0)
    k = sp2.size(0)
    n = sp2.size(-1)
    indices, values = spspmm(sp1.indices(), sp1.values(), sp2.indices(),
                             sp2.values(), m, k, n)
    return torch.sparse_coo_tensor(indices, values, torch.Size([m, n]))
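A small usage check; note the inputs are coalesced first so that .indices() and .values() are legal:

import torch

# 2x2 permutation matrix; squaring it yields the identity pattern.
i = torch.tensor([[0, 1], [1, 0]])
v = torch.ones(2)
sp = torch.sparse_coo_tensor(i, v, (2, 2)).coalesce()
out = spmmsp(sp, sp)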
Example #16
 def augment_adj(self, edge_index, edge_weight, num_nodes):
     edge_index, edge_weight = coalesce(edge_index, edge_weight, num_nodes, num_nodes)
     edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,
                                               num_nodes)
     edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                      edge_weight, num_nodes, num_nodes,
                                      num_nodes)
     return edge_index, edge_weight
Example #17
def test_spspmm(dtype, device):
    indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
    valueA = tensor([1, 2, 3, 4, 5], dtype, device)
    indexB = torch.tensor([[0, 2], [1, 0]], device=device)
    valueB = tensor([2, 4], dtype, device)

    indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
    assert indexC.tolist() == [[0, 1, 2], [0, 1, 1]]
    assert valueC.tolist() == [8, 6, 8]
Example #18
 def augment_adj(self, edge_index, edge_weight, num_nodes):
     edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                              num_nodes=num_nodes)
     edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,
                                               num_nodes)
     edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                      edge_weight, num_nodes, num_nodes,
                                      num_nodes)
     edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)
     return edge_index, edge_weight
Example #19
    def forward(self, phi_indices, phi_values, phi_inverse_indices,
                phi_inverse_values, feature_indices, feature_values, dropout):

        rescaled_phi_indices, rescaled_phi_values = spspmm(
            phi_indices, phi_values, self.diagonal_weight_indices,
            self.diagonal_weight_filter.view(-1), self.ncount, self.ncount,
            self.ncount)

        phi_product_indices, phi_product_values = spspmm(
            rescaled_phi_indices, rescaled_phi_values, phi_inverse_indices,
            phi_inverse_values, self.ncount, self.ncount, self.ncount)
        filtered_features = spmm(feature_indices, feature_values, self.ncount,
                                 self.weight_matrix)
        localized_features = spmm(phi_product_indices, phi_product_values,
                                  self.ncount, filtered_features)
        dropout_features = torch.nn.functional.dropout(
            torch.nn.functional.relu(localized_features),
            training=self.training,
            p=dropout)
        return dropout_features
Example #20
def test_spmm_spspmm(dtype, device):
    row = torch.tensor([0, 0, 1, 2, 2], device=device)
    col = torch.tensor([0, 2, 1, 0, 1], device=device)
    index = torch.stack([row, col], dim=0)
    value = tensor([1, 2, 4, 1, 3], dtype, device)
    x = tensor([[1, 4], [2, 5], [3, 6]], dtype, device)

    value = value.requires_grad_(True)

    out_index, out_value = spspmm(index, value, index, value, 3, 3, 3)
    out = spmm(out_index, out_value, 3, x)
    assert out.size() == (3, 2)
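A hedged extra assertion (not part of the original test) that could follow inside test_spmm_spspmm, cross-checking the sparse chain against its dense equivalent:

    # dense cross-check of spspmm followed by spmm
    dense = torch.sparse_coo_tensor(index, value.detach(), (3, 3)).to_dense()
    assert torch.allclose(out, dense @ dense @ x)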
Example #21
    def forward(self, x, edge_index, edge_attr):
        device = x.device
        N, dim = x.shape

        row, col = edge_index
        deg = degree(col, x.size(0), dtype=x.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        edge_attr_t = deg_inv_sqrt[row] * edge_attr * deg_inv_sqrt[col]

        p_val = F.relu(self.p(x))
        q_val = F.relu(self.q(x))

        # add dropout
        p_val = self.dropout(p_val)
        q_val = self.dropout(q_val)
        # --------------------

        diag_ind = torch.LongTensor([range(N)] * 2).to(device)
        _, p_adj_mat_val = spspmm(edge_index, edge_attr_t, diag_ind,
                                  p_val.view(-1), N, N, N, True)
        _, q_adj_mat_val = spspmm(diag_ind, q_val.view(-1), edge_index,
                                  edge_attr, N, N, N, True)
        return edge_index, p_adj_mat_val + q_adj_mat_val
Example #22
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
    """ Calculates the n-th degree adjacency matrix.
        Repeatedly multiplies adj_mat by itself and records the newly
        reachable pairs. Default is dense; sparse mods are applied when
        needed.
        Inputs:
        * adj_mat: (N, N) adjacency tensor
        * n: int. degree of the output adjacency
        * sparse: bool. whether to use the torch-sparse module
        Outputs:
        * new_adj_mat: (N, N) n-th degree adjacency tensor
        * attr_mat: (N, N) degree of connectivity (1 for neighs,
          2 for neighs^2, ...)
    """
    adj_mat = adj_mat.float()
    attr_mat = torch.zeros_like(adj_mat)
    new_adj_mat = adj_mat.clone()

    for i in range(n):
        if i == 0:
            attr_mat += adj_mat
            continue

        if i == 1 and sparse:
            idxs = adj_mat.nonzero().t()
            vals = adj_mat[idxs[0], idxs[1]]
            new_idxs = idxs.clone()
            new_vals = vals.clone()
            m, k, n = 3 * [
                adj_mat.shape[0]
            ]  # (m, n) * (n, k) , but adj_mats are squared: m=n=k

        if sparse:
            new_idxs, new_vals = torch_sparse.spspmm(new_idxs,
                                                     new_vals,
                                                     idxs,
                                                     vals,
                                                     m=m,
                                                     k=k,
                                                     n=n)
            new_vals = new_vals.bool().float()
            new_adj_mat = torch.zeros_like(attr_mat)
            new_adj_mat[new_idxs[0], new_idxs[1]] = new_vals
            # sparse to dense is slower
            # torch.sparse.FloatTensor(idxs, vals).to_dense()
        else:
            new_adj_mat = (new_adj_mat @ adj_mat).bool().float()

        # masked_fill_ (in-place), so the update is not discarded
        attr_mat.masked_fill_((new_adj_mat - attr_mat.bool().float()).bool(),
                              i + 1)

    return new_adj_mat, attr_mat
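A dense-mode usage sketch on a toy graph:

import torch

# Path graph 0-1-2-3.
adj = torch.zeros(4, 4)
for u, v in [(0, 1), (1, 2), (2, 3)]:
    adj[u, v] = adj[v, u] = 1.

new_adj, attr = nth_deg_adjacency(adj, n=2, sparse=False)
# new_adj: 0/1 pattern of A^2; attr: per-pair connectivity degree tags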
Example #23
def spspmm(a, b, separate=False):
    n = a.size(0)
    m = a.size(1)
    assert m == b.size(0)
    k = b.size(1)
    # get_iv is an external helper in the original module that returns the
    # (indices, values) pairs of both sparse inputs.
    ai, av, bi, bv = get_iv([a, b])
    print('n,m,k=', n, m, k)
    print('ai, av, bi, bv=', [x.numel() for x in (ai, av, bi, bv)])
    i, v = torch_sparse.spspmm(ai, av, bi, bv, n, m, k)
    if separate:
        nonzero_mask = v != 0.
        return i[:, nonzero_mask], v[nonzero_mask], [n, k]

    return torch.sparse_coo_tensor(i, v, [n, k])
Example #24
def spspmm(a, b, separate=False):
    n = a.size(0)
    m = a.size(1)
    assert m == b.size(0)
    k = b.size(1)
    a = a.coalesce()
    b = b.coalesce()
    ai, av = a._indices(), a._values()
    bi, bv = b._indices(), b._values()
    del a, b
    i, v = torch_sparse.spspmm(ai, av, bi, bv, n, m, k)
    if separate:
        nonzero_mask = v != 0.
        return i[:, nonzero_mask], v[nonzero_mask], [n, k]
    return torch.sparse_coo_tensor(i, v, [n, k])
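A small usage sketch of this wrapper (uncoalesced inputs are fine, since it coalesces internally):

import torch

i = torch.tensor([[0, 1], [1, 0]])
v = torch.ones(2)
a = torch.sparse_coo_tensor(i, v, (2, 2))

c = spspmm(a, a)                             # sparse 2x2 identity pattern
ci, cv, shape = spspmm(a, a, separate=True)  # (indices, values, shape)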
Example #25
    def forward(self, x, edge_index, edge_attr):
        device = x.device
        N, dim = x.shape
        diag_val = self.p(x)
        diag_val = torch.sigmoid(diag_val)  # F.sigmoid is deprecated
        diag_val = self.dropout(diag_val)  # assign: dropout is not in-place

        row, col = edge_index
        deg = degree(col, x.size(0), dtype=x.dtype)
        deg_inv = deg.pow(-1)
        edge_attr_t = deg_inv[row] * edge_attr

        diag_ind = torch.LongTensor([range(N)] * 2).to(device)
        _, adj_mat_val = spspmm(edge_index, edge_attr_t, diag_ind,
                                diag_val.view(-1), N, N, N, True)
        return edge_index, adj_mat_val
Example #26
    def forward(self, graph, x):
        edge_index = graph.edge_index
        N, dim = x.shape

        # nl_adj_mat_ind, nl_adj_mat_val = add_self_loops(edge_index, num_nodes=N)[0], edge_attr.squeeze()
        nl_adj_mat_ind = add_remaining_self_loops(edge_index, num_nodes=N)[0]
        nl_adj_mat_ind = torch.stack(nl_adj_mat_ind)
        nl_adj_mat_val = torch.ones(nl_adj_mat_ind.shape[1]).to(x.device)

        for _ in range(self.nhop - 1):
            nl_adj_mat_ind, nl_adj_mat_val = spspmm(nl_adj_mat_ind,
                                                    nl_adj_mat_val,
                                                    nl_adj_mat_ind,
                                                    nl_adj_mat_val, N, N, N,
                                                    True)

        result = []
        for i in range(self.subheads):
            h = torch.mm(x, self.weight[i])

            adj_mat_ind, adj_mat_val = nl_adj_mat_ind, nl_adj_mat_val
            h = F.dropout(h, p=self.dropout, training=self.training)

            adj_mat_ind, adj_mat_val = self.attention(h, adj_mat_ind,
                                                      adj_mat_val)
            # laplacian matrix normalization
            adj_mat_val = self.normalization(adj_mat_ind, adj_mat_val, N)

            val_h = h

            with graph.local_graph():
                graph.edge_index = adj_mat_ind
                graph.edge_weight = adj_mat_val
                for _ in range(i + 1):
                    val_h = spmm(graph, val_h)
                    # val_h = spmm(adj_mat_ind, F.dropout(adj_mat_val, p=self.node_dropout, training=self.training), N, N, val_h)

                # val_h = val_h / norm
                val_h[val_h != val_h] = 0
                val_h = val_h + self.bias[i]
                val_h = self.adaptive_enc[i](val_h)
                val_h = self.activation(val_h)
                val_h = F.dropout(val_h,
                                  p=self.dropout,
                                  training=self.training)
                result.append(val_h)
        h_res = torch.cat(result, dim=1)
        return h_res
Example #27
    def top_k(self, graph, x: torch.Tensor, scores: torch.Tensor) -> Tuple[Graph, torch.Tensor]:
        org_n_nodes = x.shape[0]
        num = int(self.pooling_rate * x.shape[0])
        values, indices = torch.topk(scores, max(2, num))

        if self.aug_adj:
            edge_index = graph.edge_index.cpu()
            edge_attr = torch.ones(edge_index.shape[1])
            edge_index, _ = spspmm(edge_index, edge_attr, edge_index,
                                   edge_attr, org_n_nodes, org_n_nodes,
                                   org_n_nodes)
            edge_index = edge_index.to(x.device)
            batch = Graph(x=x, edge_index=edge_index)
        else:
            batch = graph
        new_batch = batch.subgraph(indices)

        new_batch.row_norm()
        return new_batch, indices
Example #28
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
    """ Calculates the n-th degree adjacency matrix.
        Repeatedly multiplies adj_mat by itself and records the newly
        reachable pairs. Default is dense; sparse mods are applied when
        needed.
        Inputs:
        * adj_mat: (N, N) adjacency tensor
        * n: int. degree of the output adjacency
        * sparse: bool. whether to use the torch-sparse module
        Outputs:
        * adj_mat: (N, N) n-th degree adjacency tensor
        * attr_mat: (N, N) degree of connectivity (1 for neighs,
          2 for neighs^2, ...)
    """
    adj_mat = adj_mat.float()
    attr_mat = torch.zeros_like(adj_mat)

    for i in range(n):
        if i == 0:
            attr_mat += adj_mat
            continue

        if i == 1 and sparse:
            # create sparse adj tensor
            adj_mat = torch.sparse.FloatTensor(adj_mat.nonzero().t(),
                                               adj_mat[adj_mat != 0]).to(
                                                   adj_mat.device).coalesce()
            idxs, vals = adj_mat.indices(), adj_mat.values()
            m, k, n = 3 * [
                adj_mat.shape[0]
            ]  # (m, n) * (n, k) , but adj_mats are squared: m=n=k

        if sparse:
            idxs, vals = torch_sparse.spspmm(idxs,
                                             vals,
                                             idxs,
                                             vals,
                                             m=m,
                                             k=k,
                                             n=n)
            adj_mat = torch.zeros_like(attr_mat)
            adj_mat[idxs[0], idxs[1]] = vals.bool().float()
        else:
            adj_mat = (adj_mat @ adj_mat).bool().float()

        attr_mat[(adj_mat - attr_mat.bool().float()).bool()] += i + 1

    return adj_mat, attr_mat
Example #29
 def forward(self, A, H_=None):
     if self.first:
         result_A = self.conv1(A)
         result_B = self.conv2(A)
         W = [(F.softmax(self.conv1.weight, dim=1)).detach(),
              (F.softmax(self.conv2.weight, dim=1)).detach()]
     else:
         result_A = H_
         result_B = self.conv1(A)
         W = [(F.softmax(self.conv1.weight, dim=1)).detach()]
     H = []
     for i in range(len(result_A)):
         a_edge, a_value = result_A[i]
         b_edge, b_value = result_B[i]

         edges, values = torch_sparse.spspmm(a_edge, a_value, b_edge, b_value,
                                             self.num_nodes, self.num_nodes,
                                             self.num_nodes)
         H.append((edges, values))
     return H, W
Example #30
def sgc_precompute(features, adj, degree):
    t = perf_counter()
    adj_index = adj.coalesce().indices()
    adj_value = adj.coalesce().values()
    features_index = features.coalesce().indices()
    features_value = features.coalesce().values()
    m = adj.shape[0]
    n = adj.shape[1]
    k = features.shape[1]

    for _ in range(degree):
        # features = torch.spmm(adj, features)
        features_index, features_value = torch_sparse.spspmm(
            adj_index, adj_value, features_index, features_value, m, n, k)
    precompute_time = perf_counter() - t
    return torch.sparse.FloatTensor(features_index, features_value,
                                    torch.Size(features.shape)), precompute_time
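A hedged usage sketch, assuming the snippet's own imports (torch_sparse and from time import perf_counter) are in scope:

import torch

# Two-node graph with one undirected edge; identity features.
adj = torch.sparse_coo_tensor(torch.tensor([[0, 1], [1, 0]]),
                              torch.ones(2), (2, 2))
feats = torch.eye(2).to_sparse()
out, elapsed = sgc_precompute(feats, adj, degree=2)
# out holds A @ A @ X as a sparse tensor; elapsed is the wall-clock time.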