Example #1
import torch
from torch_sparse import to_scipy, from_scipy


def test_convert_scipy():
    index = torch.tensor([[0, 0, 1, 2, 2], [0, 2, 1, 0, 1]])
    value = torch.Tensor([1, 2, 4, 1, 3])
    N = 3

    out = from_scipy(to_scipy(index, value, N, N))
    assert out[0].tolist() == index.tolist()
    assert out[1].tolist() == value.tolist()
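A minimal round-trip sketch (reusing the to_scipy helper imported above; the printed output assumes it returns a scipy.sparse matrix built from the same row/col/value triplets):

import torch
from torch_sparse import to_scipy

index = torch.tensor([[0, 0, 1, 2, 2], [0, 2, 1, 0, 1]])
value = torch.Tensor([1, 2, 4, 1, 3])
mat = to_scipy(index, value, 3, 3)
print(mat.todense())
# [[1. 0. 2.]
#  [0. 4. 0.]
#  [1. 3. 0.]]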
Example #2
import torch_sparse
from torch_sparse import to_scipy, from_scipy


def mm(indexA, valueA, indexB, valueB, m, k, n):
    assert valueA.dtype == valueB.dtype

    if indexA.is_cuda:
        return torch_sparse.spspmm_cuda.spspmm(indexA, valueA, indexB, valueB,
                                               m, k, n)

    A = to_scipy(indexA, valueA, m, k)
    B = to_scipy(indexB, valueB, k, n)
    C = A.dot(B).tocoo().tocsr().tocoo()  # Force coalesce.
    indexC, valueC = from_scipy(C)
    return indexC, valueC
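A hypothetical CPU usage sketch, multiplying a 2 x 3 sparse matrix by a 3 x 2 one (values chosen arbitrarily):

import torch

indexA = torch.tensor([[0, 0, 1], [0, 2, 1]])  # A = [[1, 0, 2], [0, 3, 0]]
valueA = torch.tensor([1., 2., 3.])
indexB = torch.tensor([[0, 1, 2], [0, 1, 1]])  # B = [[4, 0], [0, 5], [0, 6]]
valueB = torch.tensor([4., 5., 6.])
indexC, valueC = mm(indexA, valueA, indexB, valueB, 2, 3, 2)
# C = A @ B = [[4, 12], [0, 15]]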
Example #3
def create_sparse_H(H: torch.Tensor):
    # H: 2 x nnz incidence index (row 0: node ids, row 1: hyperedge ids)
    # returns: N x N sparse normalised adjacency, N = number of nodes

    import numpy as np
    import scipy.sparse as sp
    import torch
    import torch_sparse
    # M = H[1].max() + 1
    H = sp.csr_matrix((torch.ones_like(H[0]), H.tolist()))  # V x E incidence matrix
    N, M = H.shape
    Dv = sp.spdiags(np.power(H.sum(1).reshape(-1), -0.5), 0, N, N)  # D_v^{-1/2}
    De = sp.spdiags(np.power(H.sum(0).reshape(-1), -1.), 0, M, M)  # D_e^{-1}
    Hv = Dv * H * De * H.transpose() * Dv  # V x V, for HGNN

    (row, col), value = torch_sparse.from_scipy(Hv)
    # Hv = Hv.tocoo()
    # row, col, value = Hv.row, Hv.col, Hv.data
    H = torch.sparse.FloatTensor(torch.stack((row, col)), value,
                                 (N, N)).float()  # V x V
    return H
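A hypothetical usage sketch: four nodes and two hyperedges, with H[0] holding node ids and H[1] holding hyperedge ids:

import torch

H_index = torch.tensor([[0, 1, 2, 2, 3],
                        [0, 0, 0, 1, 1]])  # e0 = {0, 1, 2}, e1 = {2, 3}
A = create_sparse_H(H_index)  # sparse 4 x 4 normalised adjacency
print(A.to_dense())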
Example #4
import torch
from torch_sparse import to_scipy, from_scipy, coalesce


def transpose(index, value, m, n):
    """Transposes dimensions 0 and 1 of a sparse tensor.

    Args:
        index (:class:`LongTensor`): The index tensor of sparse matrix.
        value (:class:`Tensor`): The value tensor of sparse matrix.
        m (int): The first dimension of sparse matrix.
        n (int): The second dimension of sparse matrix.

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """

    if value.dim() == 1 and not value.is_cuda:
        mat = to_scipy(index, value, m, n).tocsc()
        (col, row), value = from_scipy(mat)
        index = torch.stack([row, col], dim=0)
        return index, value

    row, col = index
    index = torch.stack([col, row], dim=0)
    index, value = coalesce(index, value, n, m)
    return index, value
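A hypothetical usage sketch, transposing a 2 x 3 sparse matrix on CPU (which takes the scipy branch):

import torch

index = torch.tensor([[0, 0, 1], [0, 2, 1]])  # entries (0, 0), (0, 2), (1, 1)
value = torch.tensor([1., 2., 3.])
t_index, t_value = transpose(index, value, 2, 3)
# transposed 3 x 2 entries: (0, 0)=1, (1, 1)=3, (2, 0)=2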
Example #5
import torch
from torch_sparse import to_scipy, from_scipy, coalesce


def transpose(index, value, m, n, coalesced=True):
    """Transposes dimensions 0 and 1 of a sparse tensor.

    Args:
        index (:class:`LongTensor`): The index tensor of sparse matrix.
        value (:class:`Tensor`): The value tensor of sparse matrix.
        m (int): The first dimension of corresponding dense matrix.
        n (int): The second dimension of corresponding dense matrix.
        coalesced (bool, optional): If set to :obj:`False`, will not coalesce
            the output. (default: :obj:`True`)

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """

    if value.dim() == 1 and not value.is_cuda:
        mat = to_scipy(index, value, m, n).tocsc()
        (col, row), value = from_scipy(mat)
        index = torch.stack([row, col], dim=0)
        return index, value

    row, col = index
    index = torch.stack([col, row], dim=0)
    if coalesced:
        index, value = coalesce(index, value, n, m)
    return index, value
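A hypothetical usage sketch with multi-dimensional values (a 2-vector per entry), which takes the pure-PyTorch branch; coalesced=False skips the re-sort:

import torch

index = torch.tensor([[0, 1], [2, 0]])
value = torch.tensor([[1., 1.], [2., 2.]])
t_index, t_value = transpose(index, value, 2, 3, coalesced=False)
# t_index == [[2, 0], [0, 1]]; values are returned in the original order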
Example #6
import torch
import torch.optim as optim
import scipy.sparse as sp
import torch_sparse

# UniGNN, UniGCNII and HyperGCN are the project's model classes, assumed to be
# in scope.


def initialise(X, Y, G, args, unseen=None):
    """
    initialises model, optimiser, normalises graph, and features
    
    arguments:
    X, Y, G: the entire dataset (with graph, features, labels)
    args: arguments
    unseen: if not None, remove these nodes from hypergraphs

    returns:
    a tuple with model details (UniGNN, optimiser)    
    """

    G = G.copy()

    if unseen is not None:
        unseen = set(unseen)
        # remove unseen nodes
        for e, vs in G.items():
            G[e] = list(set(vs) - unseen)

    if args.add_self_loop:
        Vs = set(range(X.shape[0]))

        # only add self-loops to nodes that originally have none
        # TODO: maybe we should remove repeated self-loops?
        for edge, nodes in G.items():
            if len(nodes) == 1 and nodes[0] in Vs:
                Vs.remove(nodes[0])

        for v in Vs:
            G[f'self-loop-{v}'] = [v]

    N, M = X.shape[0], len(G)
    indptr, indices, data = [0], [], []
    for e, vs in G.items():
        indices += vs
        data += [1] * len(vs)
        indptr.append(len(indices))
    H = sp.csc_matrix((data, indices, indptr), shape=(N, M),
                      dtype=int).tocsr()  # V x E
    degV = torch.from_numpy(H.sum(1)).view(-1, 1).float()  # node degrees
    degE2 = torch.from_numpy(H.sum(0)).view(-1, 1).float()  # hyperedge degrees

    (row, col), value = torch_sparse.from_scipy(H)
    V, E = row, col
    from torch_scatter import scatter
    assert args.first_aggregate in ('mean', 'sum'), \
        'use `mean` or `sum` for first-stage aggregation'
    degE = scatter(degV[V], E, dim=0, reduce=args.first_aggregate)
    degE = degE.pow(-0.5)
    degV = degV.pow(-0.5)
    # without self-loops, some nodes may not belong to any edge
    degV[degV.isinf()] = 1

    V, E = V.cuda(), E.cuda()
    args.degV = degV.cuda()
    args.degE = degE.cuda()
    args.degE2 = degE2.pow(-1.).cuda()

    nfeat, nclass = X.shape[1], len(Y.unique())
    nlayer = args.nlayer
    nhid = args.nhid
    nhead = args.nhead

    # UniGNN and optimiser
    if args.model_name == 'UniGCNII':
        model = UniGCNII(args, nfeat, nhid, nclass, nlayer, nhead, V, E)
        optimiser = torch.optim.Adam([
            dict(params=model.reg_params, weight_decay=0.01),
            dict(params=model.non_reg_params, weight_decay=5e-4),
        ], lr=0.01)
    elif args.model_name == 'HyperGCN':
        args.fast = True
        dataset = args.dataset_dict
        model = HyperGCN(args, nfeat, nhid, nclass, nlayer, dataset['n'],
                         dataset['hypergraph'], dataset['features'])
        optimiser = optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
    else:
        model = UniGNN(args, nfeat, nhid, nclass, nlayer, nhead, V, E)
        p1, p2, p3 = [], [], []  # different learning rates for different layers
        for n, p in model.named_parameters():
            print(n, p.shape)
            if '.lam' in n:
                p2.append(p)
            elif '.mul' in n:
                p3.append(p)
            else:
                p1.append(p)
        optimiser = optim.Adam([
            {'params': p1, 'weight_decay': 5e-4, 'lr': 0.01},
            {'params': p2, 'weight_decay': args.wd_sw, 'lr': args.lr_sw},
            {'params': p3, 'weight_decay': args.wd_mul, 'lr': args.lr_mul},
        ])
        # optimiser = optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    model.cuda()

    return model, optimiser
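The V x E incidence construction above can be checked in isolation; a minimal sketch with a hypothetical 4-node, 2-edge hypergraph:

import scipy.sparse as sp

G = {'e0': [0, 1], 'e1': [1, 2, 3]}  # hyperedge -> member nodes
indptr, indices, data = [0], [], []
for e, vs in G.items():
    indices += vs
    data += [1] * len(vs)
    indptr.append(len(indices))
H = sp.csc_matrix((data, indices, indptr), shape=(4, 2), dtype=int).tocsr()
print(H.todense())
# [[1 0]
#  [1 1]
#  [0 1]
#  [0 1]]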
Example #7
import torch
from torch_sparse import to_scipy, from_scipy, coalesce


def transpose(index, value, m, n, coalesced=True):
    """Transposes dimensions 0 and 1 of a sparse tensor.

    Args:
        index (:class:`LongTensor`): The index tensor of sparse matrix.
        value (:class:`Tensor`): The value tensor of sparse matrix.
        m (int): The first dimension of corresponding dense matrix.
        n (int): The second dimension of corresponding dense matrix.
        coalesced (bool, optional): If set to :obj:`False`, will not coalesce
            the output. (default: :obj:`True`)

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """

    if value.dim() == 1 and not value.is_cuda:
        mat = to_scipy(index, value, m, n).tocsc()
        (col, row), value = from_scipy(mat)
        index = torch.stack([row, col], dim=0)
        return index, value

    row, col = index
    index = torch.stack([col, row], dim=0)
    if coalesced:
        index, value = coalesce(index, value, n, m)
    return index, value