Example #1
def test_gin_conv():
    x1 = torch.randn(4, 16)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32))
    conv = GINConv(nn, train_eps=True)
    assert conv.__repr__() == (
        'GINConv(nn=Sequential(\n'
        '  (0): Linear(in_features=16, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x1, edge_index)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index).tolist() == out.tolist()
    assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    out1 = conv((x1, x2), edge_index)
    out2 = conv((x1, None), edge_index, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    t = '(OptPairTensor, Tensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index).tolist() == out1.tolist()
    assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist()
    assert jit((x1, None), edge_index, size=(4, 2)).tolist() == out2.tolist()

    t = '(OptPairTensor, SparseTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
    assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example #2
def test_cluster_gcn_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = ClusterGCNConv(16, 32, diag_lambda=1.)
    assert conv.__repr__() == 'ClusterGCNConv(16, 32, diag_lambda=1.0)'
    out = conv(x, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-5)

    t = '(Tensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-5)
Example #3
def test_appnp():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = APPNP(K=10, alpha=0.1)
    assert conv.__repr__() == 'APPNP(K=10, alpha=0.1)'
    out = conv(x, edge_index)
    assert out.size() == (4, 16)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #4
def to_torch_sparse(mat):
    if isinstance(mat, torch.Tensor):
        if not mat.is_sparse:
            stm = SparseTensor.from_dense(mat)
        else:
            stm = SparseTensor.from_torch_sparse_coo_tensor(mat)

    elif isinstance(mat, np.ndarray):
        stm = SparseTensor.from_dense(torch.as_tensor(mat))

    elif isinstance(mat, spmatrix):
        stm = SparseTensor.from_scipy(mat)

    elif isinstance(mat, SparseTensor):
        stm = mat
    else:
        raise TypeError(f"{type(mat)} is not supported")

    return stm
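
A minimal usage sketch for the helper above; the inputs are illustrative and
assume torch, numpy, scipy, and torch_sparse are available:

import numpy as np
import torch
from scipy.sparse import csr_matrix
from torch_sparse import SparseTensor

dense = torch.eye(3)             # dense torch.Tensor
coo = dense.to_sparse()          # sparse COO torch.Tensor
arr = np.eye(3)                  # numpy array
sp = csr_matrix(np.eye(3))       # scipy sparse matrix

for mat in [dense, coo, arr, sp]:
    stm = to_torch_sparse(mat)
    assert isinstance(stm, SparseTensor)
    assert stm.sizes() == [3, 3]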
Example #5
    def __call__(self, data):
        assert data.edge_index is not None
        row, col = data.edge_index
        adj_t = SparseTensor(row=col,
                             col=row,
                             sparse_sizes=(data.num_nodes, data.num_nodes))

        # Symmetric normalization: D^{-1/2} * A * D^{-1/2}
        deg = adj_t.sum(dim=1).to(torch.float)
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        adj_t = deg_inv_sqrt.view(-1, 1) * adj_t * deg_inv_sqrt.view(1, -1)

        assert data.x is not None
        xs = [data.x]
        for i in range(1, self.K + 1):
            xs += [adj_t @ xs[-1]]
            data[f'x{i}'] = xs[-1]

        return data
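
A brief usage sketch, assuming the __call__ above belongs to a SIGN-style
pre-transform class; the name SIGNTransform and its K argument are
assumptions for illustration:

import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
data = Data(x=torch.randn(3, 8), edge_index=edge_index)

transform = SIGNTransform(K=3)  # hypothetical class wrapping __call__ above
data = transform(data)
# data.x1 ... data.x3 now hold the 1- to 3-hop propagated features:
assert data.x1.size() == (3, 8) and data.x3.size() == (3, 8)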
Example #6
def test_arma_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = ARMAConv(16, 32, num_stacks=8, num_layers=4)
    assert conv.__repr__() == 'ARMAConv(16, 32, num_stacks=8, num_layers=4)'
    out = conv(x, edge_index)
    assert out.size() == (4, 32)
    assert conv(x, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #7
def test_agnn_conv(requires_grad):
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = AGNNConv(requires_grad=requires_grad)
    assert conv.__repr__() == 'AGNNConv()'
    out = conv(x, edge_index)
    assert out.size() == (4, 16)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #8
    def forward(self, data):
        N = data.graph['num_nodes']
        edge_index = data.graph['edge_index']
        if isinstance(edge_index, torch.Tensor):
            row, col = edge_index
            A = SparseTensor(row=row, col=col,
                             sparse_sizes=(N, N)).to_torch_sparse_coo_tensor()
        elif isinstance(edge_index, SparseTensor):
            A = edge_index.to_torch_sparse_coo_tensor()
        else:
            raise TypeError(f"Unsupported edge_index type: {type(edge_index)}")
        logits = self.W(A)
        return logits
Example #9
    def __init__(self,
                 data,
                 batch_size: int,
                 num_steps: int = 1,
                 sample_coverage: int = 0,
                 save_dir: Optional[str] = None,
                 log: bool = True,
                 **kwargs):

        assert data.edge_index is not None
        assert 'node_norm' not in data
        assert 'edge_norm' not in data

        self.num_steps = num_steps
        self.__batch_size__ = batch_size
        self.sample_coverage = sample_coverage
        self.log = log

        self.N = N = data.num_nodes
        self.E = data.num_edges

        self.adj_t = SparseTensor(row=data.edge_index[0],
                                  col=data.edge_index[1],
                                  value=torch.arange(
                                      self.E, device=data.edge_index.device),
                                  sparse_sizes=(N, N)).t()

        self.data = copy.copy(data)
        self.data.edge_index = None

        # `self` doubles as the dataset here: the sampler implements
        # `__len__` and `__getitem__`, so it is passed to the parent
        # `DataLoader` as its own data source.
        super(GraphSAINTSampler, self).__init__(self,
                                                batch_size=1,
                                                collate_fn=self.__collate__,
                                                **kwargs)

        if self.sample_coverage > 0:
            path = osp.join(save_dir or '', self.__filename__)
            if save_dir is not None and osp.exists(path):  # pragma: no cover
                self.node_norm, self.edge_norm = torch.load(path)
            else:
                self.node_norm, self.edge_norm = self.__compute_norm__()
                if save_dir is not None:  # pragma: no cover
                    torch.save((self.node_norm, self.edge_norm), path)
Example #10
def test_shadow_k_hop_sampler():
    row = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 4, 5, 5])
    col = torch.tensor([1, 2, 3, 0, 2, 0, 1, 4, 5, 0, 2, 5, 2, 4])
    edge_index = torch.stack([row, col], dim=0)
    edge_weight = torch.arange(row.size(0))
    x = torch.randn(6, 16)
    y = torch.randint(3, (6, ), dtype=torch.long)
    data = Data(edge_index=edge_index, edge_weight=edge_weight, x=x, y=y)

    train_mask = torch.tensor([1, 1, 0, 0, 0, 0], dtype=torch.bool)
    loader = ShaDowKHopSampler(data,
                               depth=1,
                               num_neighbors=3,
                               node_idx=train_mask,
                               batch_size=2)
    assert len(loader) == 1

    batch1 = next(iter(loader))
    assert len(batch1) == 7

    assert batch1.batch.tolist() == [0, 0, 0, 0, 1, 1, 1]
    assert batch1.ptr.tolist() == [0, 4, 7]
    assert batch1.root_n_id.tolist() == [0, 5]
    assert batch1.x.tolist() == x[torch.tensor([0, 1, 2, 3, 0, 1, 2])].tolist()
    assert batch1.y.tolist() == y[train_mask].tolist()
    row, col = batch1.edge_index
    assert row.tolist() == [0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6]
    assert col.tolist() == [1, 2, 3, 0, 2, 0, 1, 0, 5, 6, 4, 6, 4, 5]
    e_id = torch.tensor([0, 1, 2, 3, 4, 5, 6, 9, 0, 1, 3, 4, 5, 6])
    assert batch1.edge_weight.tolist() == edge_weight[e_id].tolist()

    adj_t = SparseTensor(row=edge_index[0],
                         col=edge_index[1],
                         value=edge_weight).t()
    data = Data(adj_t=adj_t, x=x, y=y)

    loader = ShaDowKHopSampler(data,
                               depth=1,
                               num_neighbors=3,
                               node_idx=train_mask,
                               batch_size=2)
    assert len(loader) == 1

    batch2 = next(iter(loader))
    assert len(batch2) == 6

    assert batch1.batch.tolist() == batch2.batch.tolist()
    assert batch1.ptr.tolist() == batch2.ptr.tolist()
    assert batch1.root_n_id.tolist() == batch2.root_n_id.tolist()
    assert batch1.x.tolist() == batch2.x.tolist()
    assert batch1.y.tolist() == batch2.y.tolist()
    row, col, value = batch2.adj_t.t().coo()
    assert batch1.edge_index[0].tolist() == row.tolist()
    assert batch1.edge_index[1].tolist() == col.tolist()
    assert batch1.edge_weight.tolist() == value.tolist()
Example #11
def test_point_net_conv():
    x1 = torch.randn(4, 16)
    pos1 = torch.randn(4, 3)
    pos2 = torch.randn(2, 3)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    local_nn = Seq(Lin(16 + 3, 32), ReLU(), Lin(32, 32))
    global_nn = Seq(Lin(32, 32))
    conv = PointNetConv(local_nn, global_nn)
    assert conv.__repr__() == (
        'PointNetConv(local_nn=Sequential(\n'
        '  (0): Linear(in_features=19, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), global_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x1, pos1, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, pos1, adj.t()), out, atol=1e-6)

    if is_full_test():
        t = '(OptTensor, Tensor, Tensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, pos1, edge_index).tolist() == out.tolist()

        t = '(OptTensor, Tensor, SparseTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, pos1, adj.t()), out, atol=1e-6)

    adj = adj.sparse_resize((4, 2))
    out = conv(x1, (pos1, pos2), edge_index)
    assert out.size() == (2, 32)
    assert conv((x1, None), (pos1, pos2), edge_index).tolist() == out.tolist()
    assert torch.allclose(conv(x1, (pos1, pos2), adj.t()), out, atol=1e-6)
    assert torch.allclose(conv((x1, None), (pos1, pos2), adj.t()),
                          out,
                          atol=1e-6)

    if is_full_test():
        t = '(PairOptTensor, PairTensor, Tensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, None), (pos1, pos2),
                   edge_index).tolist() == out.tolist()

        t = '(PairOptTensor, PairTensor, SparseTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, None), (pos1, pos2), adj.t()),
                              out,
                              atol=1e-6)
Example #12
def test_sage_conv(project):
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = SAGEConv(8, 32, project=project)
    assert str(conv) == 'SAGEConv(8, 32, aggr=mean)'
    out = conv(x1, edge_index)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    if is_full_test():
        t = '(Tensor, Tensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, edge_index).tolist() == out.tolist()
        assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist()

        t = '(Tensor, SparseTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = SAGEConv((8, 16), 32, project=project)
    assert str(conv) == 'SAGEConv((8, 16), 32, aggr=mean)'
    out1 = conv((x1, x2), edge_index)
    out2 = conv((x1, None), edge_index, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    if is_full_test():
        t = '(OptPairTensor, Tensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), edge_index).tolist() == out1.tolist()
        assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist()
        assert jit((x1, None), edge_index,
                   size=(4, 2)).tolist() == out2.tolist()

        t = '(OptPairTensor, SparseTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
        assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example #13
    def forward(self, x, edge_index, edge_attr=None, batch=None):
        if batch is None:
            batch = edge_index.new_zeros(x.size(0))
        num_node = x.size(0)

        k = F.relu(self.lin_2(x))

        A = SparseTensor.from_edge_index(edge_index=edge_index,
                                         edge_attr=edge_attr,
                                         sparse_sizes=(num_node, num_node))
        A_wave = fill_diag(A, 1)  # set diagonal entries to 1 (add self-loops)

        s = A_wave @ k

        score = s.squeeze()
        perm = topk(score, self.ratio, batch)

        A = self.norm(A)

        K_neighbor = A * k.T
        x_neighbor = K_neighbor @ x

        # Normalize aggregated neighbor features by node degree:
        deg = A.sum(dim=1)
        deg_inv = deg.pow_(-1)
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)
        x_neighbor = x_neighbor * deg_inv.view(-1, 1)
        x_self = x * k

        x = x_neighbor * (
            1 - self.args.combine_ratio) + x_self * self.args.combine_ratio

        x = x[perm]
        batch = batch[perm]
        edge_index, edge_attr = filter_adj(edge_index,
                                           edge_attr,
                                           perm,
                                           num_nodes=s.size(0))

        return x, edge_index, edge_attr, batch, perm
Example #14
def test_point_transformer_conv():
    x1 = torch.rand(4, 16)
    x2 = torch.randn(2, 8)
    pos1 = torch.rand(4, 3)
    pos2 = torch.randn(2, 3)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = PointTransformerConv(in_channels=16, out_channels=32)
    assert str(conv) == 'PointTransformerConv(16, 32)'

    out = conv(x1, pos1, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, pos1, adj.t()), out, atol=1e-6)

    if is_full_test():
        t = '(Tensor, Tensor, Tensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, pos1, edge_index).tolist() == out.tolist()

        t = '(Tensor, Tensor, SparseTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, pos1, adj.t()), out, atol=1e-6)

    pos_nn = Sequential(Linear(3, 16), ReLU(), Linear(16, 32))
    attn_nn = Sequential(Linear(32, 32), ReLU(), Linear(32, 32))
    conv = PointTransformerConv(16, 32, pos_nn, attn_nn)

    out = conv(x1, pos1, edge_index)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, pos1, adj.t()), out, atol=1e-6)

    conv = PointTransformerConv((16, 8), 32)
    adj = adj.sparse_resize((4, 2))

    out = conv((x1, x2), (pos1, pos2), edge_index)
    assert out.size() == (2, 32)
    assert torch.allclose(conv((x1, x2), (pos1, pos2), adj.t()),
                          out,
                          atol=1e-6)

    if is_full_test():
        t = '(PairTensor, PairTensor, Tensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), (pos1, pos2), edge_index).tolist() == out.tolist()

        t = '(PairTensor, PairTensor, SparseTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, x2), (pos1, pos2), adj.t()),
                              out,
                              atol=1e-6)
Example #15
def test_message_passing_with_edge_index():
    edge_index = torch.tensor([
        [0, 0, 0, 1, 1, 1],
        [0, 1, 2, 0, 1, 2],
    ])
    adj = SparseTensor(row=edge_index[0], col=edge_index[1])
    adj_t = adj.t()

    x = torch.Tensor([[1], [2]])
    out = MessagePassing(flow='source_to_target').propagate(
        edge_index, x=x, size=(2, 3))
    assert out.tolist() == [[3], [3], [3]]
    out = MessagePassing().propagate(adj_t, x=x)
    assert out.tolist() == [[3], [3], [3]]

    x = torch.Tensor([[1], [2], [3]])
    out = MessagePassing(flow='source_to_target').propagate(edge_index, x=x)
    assert out.tolist() == [[3], [3], [3]]

    x = torch.Tensor([[1], [2], [3]])
    out = MessagePassing(flow='target_to_source').propagate(
        edge_index, x=x, size=(2, 3))
    assert out.tolist() == [[6], [6]]
    out = MessagePassing().propagate(adj, x=x)
    assert out.tolist() == [[6], [6]]

    x = torch.Tensor([[1], [2], [3]])
    out = MessagePassing(flow='target_to_source').propagate(edge_index, x=x)
    assert out.tolist() == [[6], [6], [0]]

    x = (torch.Tensor([[1], [2]]), torch.Tensor([[1], [2], [3]]))
    out = MessagePassing(flow='source_to_target').propagate(edge_index, x=x)
    assert out.tolist() == [[3], [3], [3]]
    out = MessagePassing().propagate(adj_t, x=x)
    assert out.tolist() == [[3], [3], [3]]

    x = (torch.Tensor([[1], [2]]), torch.Tensor([[1], [2], [3]]))
    out = MessagePassing(flow='target_to_source').propagate(edge_index, x=x)
    assert out.tolist() == [[6], [6]]
    x = x[::-1]
    out = MessagePassing().propagate(adj, x=x)
    assert out.tolist() == [[6], [6]]
Example #16
def test_spline_conv():
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    conv = SplineConv(8, 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv(8, 32, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, value).tolist() == out.tolist()
    assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = SplineConv((8, 16), 32, dim=3, kernel_size=5)
    assert conv.__repr__() == 'SplineConv((8, 16), 32, dim=3)'
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
    assert jit((x1, x2), edge_index, value,
               size=(4, 2)).tolist() == out1.tolist()
    assert jit((x1, None), edge_index, value,
               size=(4, 2)).tolist() == out2.tolist()

    t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
    assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example #17
def test_getitem(dtype, device):
    mat = torch.randn(50, 40, dtype=dtype, device=device)
    mat = SparseTensor.from_dense(mat)

    idx1 = torch.randint(0, 50, (10, ), dtype=torch.long, device=device)
    idx2 = torch.randint(0, 40, (10, ), dtype=torch.long, device=device)

    assert mat[:10, :10].sizes() == [10, 10]
    assert mat[..., :10].sizes() == [50, 10]
    assert mat[idx1, idx2].sizes() == [10, 10]
    assert mat[idx1.tolist()].sizes() == [10, 40]
Example #18
    def __init__(self, data, size, num_hops,
                 batch_size=1, shuffle=False, drop_last=False, bipartite=True,
                 add_self_loops=False, flow='source_to_target',
                 use_negative_sampling=False, neg_sample_ratio=None):

        self.N = N = data.num_nodes
        self.E = data.num_edges
        self.adj = SparseTensor(
            row=data.edge_index[0], col=data.edge_index[1],
            value=torch.arange(self.E, device=data.edge_index.device),
            sparse_sizes=(N, N))

        if use_negative_sampling:
            assert neg_sample_ratio is not None
            assert neg_sample_ratio > 0.0

        self.use_negative_sampling = use_negative_sampling
        self.num_proc = mp.cpu_count()
        self.neg_sample_ratio = neg_sample_ratio
        super().__init__(data, size, num_hops, batch_size, shuffle,
                         drop_last, bipartite, add_self_loops, flow)
Example #19
def svd(data, name, embedding_dim=64):
    try:
        result = load_embedding(name, 'svd')
        return result
    except FileNotFoundError:
        print(f'cache/feature/svd_{name}.pt not found! Regenerating it now')

    if data.setting == 'inductive':
        N = data.num_train_nodes
        row, col = data.train_edge_index
    else:
        N = data.num_nodes
        row, col = data.edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
    adj = adj.to_scipy(layout='csc')
    ut, s, vt = sparsesvd(adj, embedding_dim)

    result = torch.tensor(np.dot(ut.T, np.diag(s)), dtype=torch.float)
    save_embedding(result, name, 'svd')
    return result
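
A small shape-check sketch for the SVD step above (requires the sparsesvd
package; the matrix is illustrative):

import numpy as np
import torch
from scipy.sparse import csc_matrix
from sparsesvd import sparsesvd

N, dim = 10, 4
adj = csc_matrix(np.eye(N))            # full-rank toy adjacency
ut, s, vt = sparsesvd(adj, dim)        # ut: (dim, N), s: (dim,), vt: (dim, N)
emb = torch.tensor(np.dot(ut.T, np.diag(s)), dtype=torch.float)
assert emb.size() == (N, dim)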
Example #20
def cheby_op(x: torch.Tensor,
             L: SparseTensor,
             coeff: torch.Tensor,
             lam_max: float = 2.0):
    r"""Chebyshev approximation of graph filtering

    Parameters
    ----------
    x:          Tensor
        The input graph signal. Its shape can be either :obj:`(N,)`, :obj:`(N,Ci)` or
        :obj:`(Co,N,Ci)`, wherein :obj:`N`, :obj:`Ci` and :obj:`Co` are the numbers of
        nodes, input channels, and output channels respectively.
    L:          SparseTensor
        The :obj:`(N,N)` Laplacian matrix.
    coeff:      Tensor
        The :obj:`(Co,Ci,K+1)` Chebyshev coefficients for :obj:`Ci*Co` kernels, wherein
        :obj:`K` is the order of approximation.
    lam_max:    float, optional
        The maximal graph frequency, i.e., :math:`\lambda_{max}`

    Returns
    -------
    Tensor
        The filtered signals of shape :obj:`(Co,N,Ci)`
    """

    Co, Ci, K = coeff.shape
    N = L.size(-1)

    if x.dim() == 1:
        assert x.size() == (N, )
        x = x[None, ..., None]  # (N,) --> 1 x N x 1
    elif x.dim() == 2:  # N x Ci --> Co x N x Ci
        assert x.size() == (N, Ci)
        x = x.unsqueeze(0)
    elif x.dim() == 3:  # Co x N x Ci
        assert x.size() in [(Co, N, Ci), (1, N, Ci)]
    else:
        raise RuntimeError(
            "The input signal has mismatched dimensions: {}".format(x.size()))

    K = K - 1  # coeff stores K+1 coefficients, i.e., an order-K approximation
    c = coeff.unsqueeze(1)  # Co x Ci x K --> Co x 1 x Ci x K
    L_norm = normalize_laplace(L, lam_max)
    twf_old = x
    twf_cur = L_norm @ x  # Co x N x Ci
    result = 0.5 * c[..., 0] * twf_old + c[..., 1] * twf_cur
    for k in range(2, K + 1):
        twf_new = 2 * (L_norm @ twf_cur) - twf_old
        result = result + c[..., k] * twf_new
        twf_old = twf_cur
        twf_cur = twf_new

    return result
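
A minimal call sketch for cheby_op; the graph below is illustrative, and
normalize_laplace is assumed to be importable from the same module:

import torch
from torch_sparse import SparseTensor

N, Ci, Co, K = 6, 4, 2, 3
row = torch.tensor([0, 1, 1, 2, 3, 4])
col = torch.tensor([1, 0, 2, 1, 4, 3])
L = SparseTensor(row=row, col=col, sparse_sizes=(N, N))

x = torch.randn(N, Ci)              # one signal per input channel
coeff = torch.randn(Co, Ci, K + 1)  # K+1 Chebyshev coefficients per kernel
out = cheby_op(x, L, coeff)
assert out.size() == (Co, N, Ci)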
Example #21
def test_gcn_encoder():
    gcn_encoder = GCNEncoder(in_channel=8, out_channel=32).eval()
    assert gcn_encoder.conv1.__repr__() == "GCNConv(8, 8)"
    assert gcn_encoder.conv2.__repr__() == "GCNConv(8, 16)"
    assert gcn_encoder.conv3.__repr__() == "GCNConv(16, 32)"
    N1, N2 = 4, 5
    x = torch.randn(N1 + N2, 8)
    batch = torch.tensor([0 for _ in range(N1)] + [1 for _ in range(N2)])
    edge_index = torch.tensor(
        [[0, 0, 0, 1, 2, 3, 3, 5, 7, 8], [1, 2, 3, 0, 0, 0, 2, 6, 6, 0]],
        dtype=torch.long)
    row, col = edge_index
    from torch_sparse import SparseTensor

    adj = SparseTensor(row=row, col=col, value=None, sparse_sizes=(9, 9))

    out1 = gcn_encoder(x, edge_index, batch)
    assert out1.size() == (2, 32)
    # Check that edge_index and SparseTensor inputs produce consistent results:
    assert torch.allclose(gcn_encoder(x, adj.t(), batch), out1, atol=1e-6)
Example #22
    def __init__(self, data, num_parts, recursive=False, save_dir=None):
        assert data.edge_index is not None

        recursive_str = '_recursive' if recursive else ''
        filename = f'partition_{num_parts}{recursive_str}.pt'

        path = osp.join(save_dir or '', filename)
        if save_dir is not None and osp.exists(path):
            adj, partptr, perm = torch.load(path)
        else:
            (row, col), edge_attr = data.edge_index, data.edge_attr
            adj = SparseTensor(row=row, col=col, value=edge_attr)
            adj, partptr, perm = adj.partition(num_parts, recursive)

            if save_dir is not None:
                torch.save((adj, partptr, perm), path)

        self.data = self.permute_data(data, perm, adj)
        self.partptr = partptr
        self.perm = perm
Example #24
def test_lists_of_SparseTensors():
    e1 = torch.tensor([[4, 1, 3, 2, 2, 3], [1, 3, 2, 3, 3, 2]])
    e2 = torch.tensor([[0, 1, 4, 7, 2, 9], [7, 2, 2, 1, 4, 7]])
    e3 = torch.tensor([[3, 5, 1, 2, 3, 3], [5, 0, 2, 1, 3, 7]])
    e4 = torch.tensor([[0, 1, 9, 2, 0, 3], [1, 1, 2, 1, 3, 2]])
    adj1 = SparseTensor.from_edge_index(e1, sparse_sizes=(11, 11))
    adj2 = SparseTensor.from_edge_index(e2, sparse_sizes=(22, 22))
    adj3 = SparseTensor.from_edge_index(e3, sparse_sizes=(12, 12))
    adj4 = SparseTensor.from_edge_index(e4, sparse_sizes=(15, 15))

    d1 = Data(adj_test=[adj1, adj2])
    d2 = Data(adj_test=[adj3, adj4])

    data_list = [d1, d2]
    dataset = MyTestDataset3(data_list)
    assert len(dataset) == 2
    assert dataset[0].adj_test[0].sparse_sizes() == (11, 11)
    assert dataset[0].adj_test[1].sparse_sizes() == (22, 22)
    assert dataset[1].adj_test[0].sparse_sizes() == (12, 12)
    assert dataset[1].adj_test[1].sparse_sizes() == (15, 15)
Example #25
def test_pdn_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    edge_attr = torch.randn(6, 8)
    adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4))

    conv = PDNConv(16, 32, edge_dim=8, hidden_channels=128)
    assert str(conv) == "PDNConv(16, 32)"
    out = conv(x, edge_index, edge_attr)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, edge_index, edge_attr), out)

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #26
def get_sparse_buffer(module, name):
    row = getattr(module, "{}_row".format(name))
    col = getattr(module, "{}_col".format(name))
    val = getattr(module, "{}_val".format(name))
    siz = getattr(module, "{}_size".format(name))
    return SparseTensor(
        row=row,
        col=col,
        value=val,
        sparse_sizes=siz.tolist(),
    )
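
A hedged counterpart sketch showing how such buffers could be registered in
the first place; the helper name set_sparse_buffer is an assumption, not an
existing torch_sparse API:

import torch
from torch_sparse import SparseTensor

def set_sparse_buffer(module, name, mat: SparseTensor):
    # Decompose the SparseTensor into plain tensors so it survives
    # state_dict saving/loading as ordinary registered buffers.
    row, col, val = mat.coo()
    module.register_buffer("{}_row".format(name), row)
    module.register_buffer("{}_col".format(name), col)
    module.register_buffer("{}_val".format(name), val)
    module.register_buffer("{}_size".format(name),
                           torch.tensor(mat.sparse_sizes()))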
Example #27
class RandomWalk():
    def __init__(self, edge_index, walk_length, context_size,
                 walks_per_node=1, p=1, q=1, num_negative_samples=1,
                 num_nodes=None, sparse=False):

        if random_walk is None:
            raise ImportError('`Node2Vec` requires `torch-cluster`.')
        
        N = maybe_num_nodes(edge_index, num_nodes)
        row, col = edge_index
        self.adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
        self.adj = self.adj.to('cpu')

        assert walk_length >= context_size

        self.walk_length = walk_length - 1
        self.context_size = context_size
        self.walks_per_node = walks_per_node
        self.p = p
        self.q = q
        self.num_negative_samples = num_negative_samples
        
    def loader(self, **kwargs):
        return DataLoader(range(self.adj.sparse_size(0)),
                          collate_fn=self.sample, **kwargs)

    def sample(self, batch):
        if not isinstance(batch, torch.Tensor):
            batch = torch.tensor(batch)
        batch = batch.repeat(self.walks_per_node)
        rowptr, col, _ = self.adj.csr()
        rw = random_walk(rowptr, col, batch, self.walk_length, self.p, self.q)
        if not isinstance(rw, torch.Tensor):
            rw = rw[0]
        # Slide a window over each walk and emit a (source, context) node
        # pair for every offset inside the context window:
        walks = []
        num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
        for j in range(num_walks_per_rw):
            for i in range(1, self.context_size):
                walks.append(rw[:, [j, j + i]])
        return torch.cat(walks, dim=0)
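
A short usage sketch for the class above (random_walk requires torch-cluster;
the toy graph is illustrative):

import torch

edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]])
walker = RandomWalk(edge_index, walk_length=4, context_size=2)
loader = walker.loader(batch_size=2, shuffle=True)
pairs = next(iter(loader))  # rows of (source, context) node pairs
assert pairs.size(1) == 2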
Example #28
    def process(self):
        with open(self.raw_paths[0], 'r') as f:
            data = [x.split('\t') for x in f.read().split('\n')[1:-1]]

            rows, cols = [], []
            for n_id, col, _ in data:
                col = [int(x) for x in col.split(',')]
                rows += [int(n_id)] * len(col)
                cols += col
            x = SparseTensor(row=torch.tensor(rows), col=torch.tensor(cols))
            x = x.to_dense()

            y = torch.empty(len(data), dtype=torch.long)
            for n_id, _, label in data:
                y[int(n_id)] = int(label)

        with open(self.raw_paths[1], 'r') as f:
            data = f.read().split('\n')[1:-1]
            data = [[int(v) for v in r.split('\t')] for r in data]
            edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
            edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))

        train_masks, val_masks, test_masks = [], [], []
        for f in self.raw_paths[2:]:
            tmp = np.load(f)
            train_masks += [torch.from_numpy(tmp['train_mask']).to(torch.bool)]
            val_masks += [torch.from_numpy(tmp['val_mask']).to(torch.bool)]
            test_masks += [torch.from_numpy(tmp['test_mask']).to(torch.bool)]
        train_mask = torch.stack(train_masks, dim=1)
        val_mask = torch.stack(val_masks, dim=1)
        test_mask = torch.stack(test_masks, dim=1)

        data = Data(x=x,
                    edge_index=edge_index,
                    y=y,
                    train_mask=train_mask,
                    val_mask=val_mask,
                    test_mask=test_mask)
        data = data if self.pre_transform is None else self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])
Example #29
def edge_tensor_type_to_adj_type(
    attr: EdgeAttr,
    tensor_tuple: EdgeTensorType,
) -> Adj:
    r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor."""
    src, dst = tensor_tuple

    if attr.layout == EdgeLayout.COO:  # COO: (row, col)
        assert src.dim() == 1 and dst.dim() == 1 and src.numel() == dst.numel()

        if src.numel() == 0:
            return torch.empty((2, 0), dtype=torch.long, device=src.device)

        if (src.storage().data_ptr() == dst.storage().data_ptr()
                and src.storage_offset() < dst.storage_offset()):
            # Do not copy if the tensor tuple is constructed from the same
            # storage (instead, return a view):
            out = torch.empty(0, dtype=src.dtype)
            out.set_(src.storage(),
                     storage_offset=src.storage_offset(),
                     size=(src.size()[0] + dst.size()[0], ))
            return out.view(2, -1)

        return torch.stack([src, dst], dim=0)

    elif attr.layout == EdgeLayout.CSR:  # CSR: (rowptr, col)
        return SparseTensor(rowptr=src,
                            col=dst,
                            is_sorted=True,
                            sparse_sizes=attr.size)

    elif attr.layout == EdgeLayout.CSC:  # CSC: (row, colptr)
        # CSC is a transposed adjacency matrix, so rowptr is the compressed
        # column and col is the uncompressed row.
        sparse_sizes = None if attr.size is None else (attr.size[1],
                                                       attr.size[0])
        return SparseTensor(rowptr=dst,
                            col=src,
                            is_sorted=True,
                            sparse_sizes=sparse_sizes)
    raise ValueError(f"Bad edge layout (got '{attr.layout}')")
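
A hedged round-trip sketch for the COO branch, assuming EdgeAttr and
EdgeLayout are importable from torch_geometric.data.graph_store (import
paths and signatures vary across PyG versions):

import torch
from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout

row = torch.tensor([0, 0, 1, 2])
col = torch.tensor([1, 2, 2, 0])
attr = EdgeAttr(edge_type=None, layout=EdgeLayout.COO, size=(3, 3))
adj = edge_tensor_type_to_adj_type(attr, (row, col))
assert adj.size() == (2, 4)  # stacked edge_index view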
Example #30
    def prune(self, loss, ratio=0, method='ada', glob=False):
        if not glob:
            if method == 'ada':
                # Drop the edges whose endpoints have the most similar loss:
                diff_loss = torch.abs(loss[self.train_edge_index[0]] -
                                      loss[self.train_edge_index[1]])
                _, mask = torch.topk(diff_loss, int(len(diff_loss) * ratio),
                                     largest=False)
            elif method == 'random':
                newE = self.train_edge_index.size(1)
                mask = torch.randperm(newE)[:int(newE * ratio)]
            elif method == 'naive':
                # Cap the out-degree of every node at a fixed threshold:
                degrees = scatter(torch.ones(self.train_edge_index.size(1)),
                                  self.train_edge_index[0])
                thold = (degrees.max() * ratio).long()
                prunemask = (degrees > thold).nonzero()
                taway = []
                for pru in prunemask:
                    p_eid = (self.train_edge_index[0] ==
                             pru.squeeze()).nonzero().squeeze()
                    p = p_eid[torch.randperm(p_eid.size(0))][thold:]
                    taway.append(p)
                nonmask = torch.cat(taway)
                mask = torch.ones(self.train_edge_index.size(1),
                                  dtype=torch.bool)
                mask[nonmask] = False
            else:
                raise ValueError(f"Unknown pruning method '{method}'")

            self.train_e_idx = self.train_e_idx[mask]
            self.train_edge_index = self.train_edge_index[:, mask]
            self.edge_index = self.edge_index[
                :, torch.cat([self.train_e_idx, self.rest_e_idx])]

            self.train_e_idx = torch.arange(self.train_e_idx.size(0))
            self.rest_e_idx = torch.arange(
                self.train_e_idx.size(0),
                self.train_e_idx.size(0) + self.rest_e_idx.size(0))
            self.E = self.edge_index.size(1)
            self.adj = SparseTensor(
                row=self.edge_index[0], col=self.edge_index[1],
                value=torch.arange(self.E, device=self.edge_index.device),
                sparse_sizes=(self.N, self.N))