Example #1
import torch
from torch_sparse import SparseTensor

from torch_geometric.nn import SAGEConv
from torch_geometric.testing import is_full_test


def test_sage_conv():
    # Exercises the plain, bipartite, SparseTensor and TorchScript paths of SAGEConv.
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = SAGEConv(8, 32)
    assert conv.__repr__() == 'SAGEConv(8, 32)'
    out = conv(x1, edge_index)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    if is_full_test():
        t = '(Tensor, Tensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, edge_index).tolist() == out.tolist()
        assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist()

        t = '(Tensor, SparseTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, adj.t()).tolist() == out.tolist()

    # Bipartite case: source nodes carry 8 features, target nodes 16.
    adj = adj.sparse_resize((4, 2))
    conv = SAGEConv((8, 16), 32)
    assert conv.__repr__() == 'SAGEConv((8, 16), 32)'
    out1 = conv((x1, x2), edge_index)
    out2 = conv((x1, None), edge_index, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    if is_full_test():
        t = '(OptPairTensor, Tensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), edge_index).tolist() == out1.tolist()
        assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist()
        assert jit((x1, None), edge_index,
                   size=(4, 2)).tolist() == out2.tolist()

        t = '(OptPairTensor, SparseTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
        assert jit((x1, None), adj.t()).tolist() == out2.tolist()
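
For context beyond the assertions, the same constructor shapes fit a routine two-layer GraphSAGE encoder. The sketch below is illustrative only (the module name and hidden size are assumptions, not part of the test) and mirrors the 8-in / 32-out configuration exercised above.

import torch
import torch.nn.functional as F

from torch_geometric.nn import SAGEConv


class TwoLayerSAGE(torch.nn.Module):
    # Hypothetical encoder mirroring the (8 -> 32) shapes from the test above.
    def __init__(self, in_channels=8, hidden_channels=32, out_channels=32):
        super().__init__()
        self.conv1 = SAGEConv(in_channels, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, out_channels)

    def forward(self, x, edge_index):
        x = F.relu(self.conv1(x, edge_index))
        return self.conv2(x, edge_index)


x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
out = TwoLayerSAGE()(x, edge_index)
assert out.size() == (4, 32)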
Example #2

import torch

from torch_geometric.nn import SAGEConv


def test_sage_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = SAGEConv(in_channels, out_channels)
    assert conv.__repr__() == 'SAGEConv(16, 32)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
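
Beyond the shape assertion, a common follow-up smoke check is that a backward pass actually reaches the layer's parameters. A minimal, self-contained sketch mirroring the setup above (the sum() loss is an arbitrary scalar chosen only for illustration):

import torch
from torch_geometric.nn import SAGEConv

edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
x = torch.randn(4, 16)

conv = SAGEConv(16, 32)
out = conv(x, edge_index)
out.sum().backward()  # arbitrary scalar loss for the smoke test
assert all(p.grad is not None for p in conv.parameters())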
Example #3
import torch

from torch_geometric.nn import SAGEConv


def test_sage_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = SAGEConv(in_channels, out_channels)
    assert conv.__repr__() == 'SAGEConv(16, 32)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, out_channels)
    assert conv((x, x), edge_index).tolist() == out.tolist()

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index).tolist() == out.tolist()
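
Unlike Example #1, this variant passes example inputs to jittable() instead of a type string. Once scripted, the usual payoff is serialization without the Python class definition; the sketch below is a hedged illustration of that round-trip via torch.jit.save / torch.jit.load, rebuilding the same setup (the file name is an arbitrary choice):

import os
import tempfile

import torch
from torch_geometric.nn import SAGEConv

edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
x = torch.randn(4, 16)

conv = SAGEConv(16, 32)
out = conv(x, edge_index)
jit_conv = torch.jit.script(conv.jittable(x=x, edge_index=edge_index))

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, 'sage_conv.pt')
    torch.jit.save(jit_conv, path)  # persist the scripted layer
    loaded = torch.jit.load(path)   # reload independently of the Python class
    assert torch.allclose(loaded(x, edge_index), out)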
Example #4

import numpy as np
import torch

from torch_geometric.nn import SAGEConv


def test_sage_conv():
    row = np.array([0, 1, 1, 2])
    col = np.array([1, 0, 2, 1])
    data = np.array([1, 3, 5])

    in_channels, out_channels = (1, 1)
    edge_index = torch.tensor([row, col])
    num_nodes = edge_index.max().item() + 1

    x = torch.tensor([data], dtype=torch.float).permute(1, 0)

    assert x.size() == (num_nodes, in_channels)

    conv = SAGEConv(in_channels, out_channels, normalize=False, bias=False)
    with torch.no_grad():  # in-place edits on a Parameter must bypass autograd
        conv.weight[0][0] = 1.0  # force the single weight to 1.0 for testing convenience

    assert conv.__repr__() == f'SAGEConv({in_channels}, {out_channels})'
    output = conv(x, edge_index)

    assert output[0][0] == 2  # (1+3)/2
    assert output[1][0] == 3  # (1+3+5)/3
    assert output[2][0] == 4  # (3+5)/2
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
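
The expected values in the comments come from mean aggregation over each node's neighborhood with a self-loop included and the single weight forced to 1.0. The sketch below recomputes them by hand from the edge list above, independent of any SAGEConv implementation, purely as an arithmetic cross-check:

import torch

# Node features from the test: nodes 0, 1, 2 carry the values 1, 3, 5.
x = torch.tensor([1.0, 3.0, 5.0])

# Neighborhoods read off edge_index [[0, 1, 1, 2], [1, 0, 2, 1]], plus a self-loop per node.
neighbors = {0: [0, 1], 1: [0, 1, 2], 2: [1, 2]}

means = [x[torch.tensor(neighbors[i])].mean().item() for i in range(3)]
assert means == [2.0, 3.0, 4.0]  # (1+3)/2, (1+3+5)/3, (3+5)/2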