Example #1
# Imports assumed by the test snippets on this page (from DGL's test suite):
import dgl
import scipy as sp
import torch as th
import backend as F  # DGL's test-backend shim
import dgl.nn.pytorch as nn

def test_nn_conv():
    ctx = F.ctx()
    # legacy readonly construction from a scipy matrix (DGL < 0.5)
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((100, 5))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

    # older releases let dgl.graph ingest scipy matrices; current API is dgl.from_scipy
    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((100, 5))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10

    # dgl.bipartite was removed in DGL 0.5; see the heterograph sketch after Example #3
    g = dgl.bipartite(sp.sparse.random(50, 100, density=0.1))
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv((5, 2), 10, edge_func, 'mean')
    feat = F.randn((50, 5))
    feat_dst = F.randn((100, 2))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, (feat, feat_dst), efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10
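
For reference, the same shape check can be reproduced against the current public DGL API without the test harness. This is a minimal sketch assuming only dgl and torch are installed; dgl.rand_graph and dgl.nn.pytorch.NNConv are the public entry points:

import dgl
import torch as th
from dgl.nn.pytorch import NNConv

g = dgl.rand_graph(100, 1000)            # 100 nodes, 1000 uniformly random edges
edge_func = th.nn.Linear(4, 5 * 10)      # maps 4-d edge features to a 5x10 kernel
nnconv = NNConv(5, 10, edge_func, 'mean')
feat = th.randn(g.number_of_nodes(), 5)
efeat = th.randn(g.number_of_edges(), 4)
h = nnconv(g, feat, efeat)
assert h.shape == (100, 10)              # [num_nodes, out_feats]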
Example #2
def test_nn_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv(5, 10, edge_func, 'mean')
    feat = F.randn((g.number_of_nodes(), 5))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, feat, efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10
Example #3
def test_nn_conv_bi(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    #g = dgl.bipartite(sp.sparse.random(50, 100, density=0.1))
    edge_func = th.nn.Linear(4, 5 * 10)
    nnconv = nn.NNConv((5, 2), 10, edge_func, 'mean')
    feat = F.randn((g.number_of_src_nodes(), 5))
    feat_dst = F.randn((g.number_of_dst_nodes(), 2))
    efeat = F.randn((g.number_of_edges(), 4))
    nnconv = nnconv.to(ctx)
    h = nnconv(g, (feat, feat_dst), efeat)
    # currently we only do shape check
    assert h.shape[-1] == 10
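
Since dgl.bipartite was removed in DGL 0.5, the bipartite case is now built with dgl.heterograph. A minimal sketch; the node- and edge-type names and the edge count below are illustrative assumptions:

import dgl
import torch as th
from dgl.nn.pytorch import NNConv

u = th.randint(0, 50, (500,))             # 500 random src->dst edges
v = th.randint(0, 100, (500,))
g = dgl.heterograph({('src', 'to', 'dst'): (u, v)},
                    num_nodes_dict={'src': 50, 'dst': 100})
nnconv = NNConv((5, 2), 10, th.nn.Linear(4, 5 * 10), 'mean')
feat_src = th.randn(g.number_of_src_nodes(), 5)
feat_dst = th.randn(g.number_of_dst_nodes(), 2)
efeat = th.randn(g.number_of_edges(), 4)
h = nnconv(g, (feat_src, feat_dst), efeat)
assert h.shape == (100, 10)               # one row per destination node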
Example #4
# In this snippet `nn` is torch.nn and `conv` is presumably dgl.nn.pytorch,
# which exposes both the convolution and the pooling modules used below.
class Classifier(nn.Module):
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, edge_dim,
                 aggregate, residual, readout, activation_func, dropout, grid,
                 device):
        super(Classifier, self).__init__()
        self.device = device
        self.activation = activation_func
        self.readout = readout
        self.layers = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        self.grid = grid

        self.layers.append(
            conv.NNConv(in_dim, hidden_dim,
                        nn.Linear(edge_dim, in_dim * hidden_dim), aggregate,
                        residual))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # hidden layers
        for _ in range(hidden_layers):
            self.layers.append(
                conv.NNConv(hidden_dim, hidden_dim,
                            nn.Linear(edge_dim, hidden_dim * hidden_dim),
                            aggregate, residual))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # dropout layer
        self.dropout = nn.Dropout(p=dropout)

        # readout layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim,
                                                    hidden_dim * 2))
        elif self.readout == 'sort':
            self.readout_fcn = conv.SortPooling(100)
        elif self.readout == 'set':
            self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
        else:
            self.readout_fcn = SppPooling(hidden_dim, self.grid)

        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(hidden_dim, n_classes),
            )
        elif self.readout == 'sort':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * 100, n_classes),
            )
        else:
            var = hidden_dim
            if self.readout == 'gap' or self.readout == 'set':
                var *= 2
            self.classify = nn.Linear(var, n_classes)
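
A minimal instantiation sketch for this constructor. Every hyperparameter value and both import aliases are assumptions for illustration; readout='mean' keeps the example self-contained by avoiding the custom SppPooling fallback:

import torch as th
import torch.nn as nn
import dgl.nn.pytorch as conv   # resolves both conv.NNConv and conv.AvgPooling

model = Classifier(in_dim=16, hidden_dim=64, n_classes=10, hidden_layers=2,
                   edge_dim=4, aggregate='mean', residual=False,
                   readout='mean', activation_func=th.relu, dropout=0.5,
                   grid=4, device='cpu')  # grid is only used by the 'spp' readout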