import torch
from torch_sparse import SparseTensor

from torch_geometric.nn import GCNConv
from torch_geometric.testing import is_full_test


def test_gcn_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    value = torch.rand(row.size(0))
    adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))
    adj1 = adj2.set_value(None)

    conv = GCNConv(16, 32)
    assert str(conv) == 'GCNConv(16, 32)'
    out1 = conv(x, edge_index)
    assert out1.size() == (4, 32)
    assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6)
    out2 = conv(x, edge_index, value)
    assert out2.size() == (4, 32)
    assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6)

    if is_full_test():
        t = '(Tensor, Tensor, OptTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x, edge_index).tolist() == out1.tolist()
        assert jit(x, edge_index, value).tolist() == out2.tolist()

        t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6)
        assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6)

    # Enabling caching: the first call computes and stores the normalized
    # adjacency, and every subsequent call must reuse it unchanged.
    conv.cached = True
    conv(x, edge_index)
    assert conv(x, edge_index).tolist() == out1.tolist()
    conv(x, adj1.t())
    assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6)
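
Note that every SparseTensor call above passes `adj.t()`: PyTorch Geometric's message passing expects the transposed adjacency when a SparseTensor is used. The same matrices can also be built straight from `edge_index`; a small equivalent sketch, assuming the installed torch_sparse provides `SparseTensor.from_edge_index`:

adj2 = SparseTensor.from_edge_index(edge_index, value, sparse_sizes=(4, 4))
adj1 = adj2.set_value(None)  # Same matrices as the manual row/col construction.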
Example #2
import torch

from torch_geometric.nn import GCNConv


def test_gcn_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    edge_weight = torch.rand(edge_index.size(1))
    x = torch.randn((num_nodes, in_channels))

    conv = GCNConv(in_channels, out_channels)
    assert str(conv) == 'GCNConv(16, 32)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, out_channels)
    assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index).tolist() == out.tolist()

    conv = GCNConv(in_channels, out_channels, cached=True)
    out = conv(x, edge_index)  # First call computes and caches the normalization.
    out = conv(x, edge_index)  # Second call reuses the cache.
    assert out.size() == (num_nodes, out_channels)
    assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    jit_conv(x, edge_index)
    assert jit_conv(x, edge_index).tolist() == out.tolist()
Example #3
import torch

from torch_geometric.nn import GCNConv


def test_gcn_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    edge_weight = torch.rand(edge_index.size(1))
    x = torch.randn((num_nodes, in_channels))

    conv = GCNConv(in_channels, out_channels)
    assert str(conv) == 'GCNConv(16, 32)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
    assert conv(x, edge_index, edge_weight).size() == (num_nodes, out_channels)

    # A torch.sparse feature matrix is accepted as input as well.
    x = torch.sparse_coo_tensor(torch.tensor([[0, 0], [0, 1]]),
                                torch.tensor([1.0, 1.0]),
                                torch.Size([num_nodes, in_channels]))
    assert conv(x, edge_index).size() == (num_nodes, out_channels)

    conv = GCNConv(in_channels, out_channels, cached=True)
    assert conv(x, edge_index).size() == (num_nodes, out_channels)  # Fills the cache.
    assert conv(x, edge_index).size() == (num_nodes, out_channels)  # Reuses the cache.
Example #4
import torch
import torch.nn as nn

from torch_geometric.nn import GCNConv
from torch_geometric.utils import dropout_adj


class JKNet(nn.Module):
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 n_layers,
                 dropout=0.5,
                 edge_dropout=0.5):
        super(JKNet, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.n_layers = n_layers
        self.dropout = dropout
        self.edge_dropout = edge_dropout

        self.conv_layers = self._make_layer()
        self.last_conv = GCNConv((n_layers - 1) * hidden_dim, output_dim)

    def _make_layer(self):
        layers = []
        for i in range(self.n_layers - 1):
            input_dim = self.input_dim if i == 0 else self.hidden_dim
            output_dim = self.hidden_dim
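            # `GCNBlock` is a helper from the surrounding codebase (not shown
            # in this excerpt); a plausible sketch is given after the class.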
            layers.append(GCNBlock(input_dim, output_dim, self.dropout))

        return nn.ModuleList(layers)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        # DropEdge: randomly remove edges during training as regularization.
        dropedge_index = edge_index

        if self.edge_dropout:
            dropedge_index, _ = dropout_adj(edge_index,
                                            p=self.edge_dropout,
                                            force_undirected=True,
                                            training=self.training)

        outputs = []

        for layer in self.conv_layers:
            x = layer(x, dropedge_index)
            outputs.append(x)

        # Jumping Knowledge aggregation: concatenate every layer's output.
        x = torch.cat(outputs, dim=1)
        x = self.last_conv(x, dropedge_index)

        return x

    def __str__(self):
        if self.edge_dropout:
            return f'JKNet-{self.n_layers}+DropEdge'
        else:
            return f'JKNet-{self.n_layers}'

    def __repr__(self):
        output = 'conv layers: ' + repr(self.conv_layers)
        output += '\n'
        output += 'last conv: ' + repr(self.last_conv)

        return output
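
The `GCNBlock` used in `_make_layer` is not included in this excerpt. Below is a minimal sketch of what it plausibly looks like, assuming a GCNConv followed by ReLU and feature dropout; the exact activation and ordering in the original source are unknown.

import torch.nn as nn
import torch.nn.functional as F

from torch_geometric.nn import GCNConv


class GCNBlock(nn.Module):
    # Hypothetical helper, not from the source: GCNConv -> ReLU -> dropout.
    def __init__(self, in_channels, out_channels, dropout=0.5):
        super().__init__()
        self.conv = GCNConv(in_channels, out_channels)
        self.dropout = dropout

    def forward(self, x, edge_index):
        x = F.relu(self.conv(x, edge_index))
        return F.dropout(x, p=self.dropout, training=self.training)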