Example #1
# Assumed imports (backend is DGL's test-suite helper module):
import dgl
import dgl.nn.pytorch as nn
import scipy as sp
import scipy.sparse  # makes sp.sparse resolvable
import torch as th
import backend as F

def test_gin_conv(aggregator_type):
    # aggregator_type is one of 'mean', 'max' or 'sum'.
    ctx = F.ctx()
    # Random homogeneous graph from a scipy sparse adjacency matrix
    # (legacy pre-0.5 DGL API, which accepted sparse matrices directly).
    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = F.randn((100, 5))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (100, 12)

    # Unidirectional bipartite graph (100 source, 200 destination nodes):
    # features are a (src, dst) pair and the output is destination-sized.
    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = (F.randn((100, 5)), F.randn((200, 5)))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (200, 12)
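
This snippet targets the pre-0.5 DGL API. A minimal sketch of the same two checks against DGL 0.5+, where dgl.from_scipy builds a graph from a sparse matrix and dgl.heterograph with a single edge type replaces dgl.bipartite (the 'user'/'item' type names here are placeholders):

import dgl
import dgl.nn.pytorch as nn
import scipy.sparse as spsp
import torch as th

# Homogeneous case.
g = dgl.from_scipy(spsp.random(100, 100, density=0.1))
gin = nn.GINConv(th.nn.Linear(5, 12), 'mean')
h = gin(g, th.randn(100, 5))
assert h.shape == (100, 12)

# Bipartite case.
adj = spsp.random(100, 200, density=0.1).tocoo()
g = dgl.heterograph(
    {('user', 'rates', 'item'): (th.tensor(adj.row, dtype=th.int64),
                                 th.tensor(adj.col, dtype=th.int64))},
    num_nodes_dict={'user': 100, 'item': 200})
h = gin(g, (th.randn(100, 5), th.randn(200, 5)))
assert h.shape == (200, 12)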
Example #2
    def __init__(self,
                 in_feats,
                 out_feats,
                 num_gfc_layers=2,
                 num_stats_in=1,
                 num_stats_out=1,
                 activation=None):
        super(GcapsConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._num_stats_in = num_stats_in
        self._num_stats_out = num_stats_out
        self._num_gfc_layers = num_gfc_layers
        self._activation_func = activation

        # Parameter-free GIN aggregation: with apply_func=None the layer
        # returns (1 + eps) * h_v plus the sum of neighbour features,
        # leaving all learnable transformations to the stacks below.
        self._gin = conv.GINConv(None, 'sum')
        # One stack of fully-connected layers per output statistic; the
        # first layer consumes all input statistics concatenated together.
        self._stat_layers = nn.ModuleList()
        for _ in range(self._num_stats_out):
            gfc_layers = nn.ModuleList()
            curr_input_dim = self._in_feats * self._num_stats_in
            for _ in range(self._num_gfc_layers):
                gfc_layers.append(nn.Linear(curr_input_dim, self._out_feats))
                curr_input_dim = self._out_feats

            self._stat_layers.append(gfc_layers)
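
The constructor only assembles the pieces; the forward pass is not shown. A hypothetical sketch of how they could combine (this is not the original GcapsConv code; it assumes import torch as th):

    def forward(self, g, feat):
        # Aggregate neighbourhood features with the parameter-free GIN
        # layer, then push the result through each per-statistic stack.
        h = self._gin(g, feat)              # (N, in_feats * num_stats_in)
        outputs = []
        for gfc_layers in self._stat_layers:
            out = h
            for layer in gfc_layers:
                out = layer(out)
                if self._activation_func is not None:
                    out = self._activation_func(out)
            outputs.append(out)
        # One (N, out_feats) block per output statistic.
        return th.stack(outputs, dim=1)     # (N, num_stats_out, out_feats)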
Example #3
def test_gin_conv(g, idtype, aggregator_type):
    # g, idtype and aggregator_type are injected by pytest parametrization;
    # the graph is cast to the requested index dtype and moved to the
    # test device before running the layer.
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = F.randn((g.number_of_nodes(), 5))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (g.number_of_nodes(), 12)
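
A sketch of the kind of pytest harness that supplies g, idtype and aggregator_type (the fixture and parameter lists are illustrative, not DGL's exact test setup):

import dgl
import pytest
import torch as th

@pytest.fixture
def g():
    # A small random homogeneous graph for the test to run on.
    src = th.randint(0, 100, (300,))
    dst = th.randint(0, 100, (300,))
    return dgl.graph((src, dst), num_nodes=100)

@pytest.mark.parametrize('idtype', [th.int32, th.int64])
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
    ...  # body as in the example above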
Example #4
def test_gin_conv(g, idtype, aggregator_type):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    # The module must be picklable before being moved to the device ...
    th.save(gin, tmp_buffer)
    feat = F.randn((g.number_of_src_nodes(), 5))
    gin = gin.to(ctx)
    h = gin(g, feat)

    # ... and after running a forward pass (test pickle).
    th.save(gin, tmp_buffer)

    assert h.shape == (g.number_of_dst_nodes(), 12)

    # With apply_func=None the layer only aggregates; it must still be
    # picklable and runnable.
    gin = nn.GINConv(None, aggregator_type)
    th.save(gin, tmp_buffer)
    gin = gin.to(ctx)
    h = gin(g, feat)
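
tmp_buffer is defined outside the snippet; in a test module it is typically an in-memory buffer. A sketch of the serialization round-trip being exercised (the buffer setup here is an assumption):

import io
import dgl.nn.pytorch as nn
import torch as th

tmp_buffer = io.BytesIO()            # in-memory stand-in for a file
gin = nn.GINConv(th.nn.Linear(5, 12), 'mean')
th.save(gin, tmp_buffer)             # pickles the whole module
tmp_buffer.seek(0)                   # rewind before reading back
gin_restored = th.load(tmp_buffer)   # torch >= 2.6 needs weights_only=False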
Example #5
def test_gin_conv():
    # Exercise every aggregator in one test instead of relying on
    # pytest parametrization.
    for aggregator_type in ['mean', 'max', 'sum']:
        ctx = F.ctx()
        # Legacy DGL 0.4-style construction: a read-only DGLGraph built
        # directly from a scipy sparse adjacency matrix.
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1),
                         readonly=True)
        gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
        feat = F.randn((100, 5))
        gin = gin.to(ctx)
        h = gin(g, feat)
        assert h.shape[-1] == 12
Example #6
    def __init__(self):
        super(StochasticNetwork, self).__init__()
        # Build two graph-conv layers of the architecture selected by
        # config.NETWORK (any other value would leave self.layers unset).
        if config.NETWORK == 'SAGE':
            self.layers = [
                dglnn.SAGEConv(config.IN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT),
                dglnn.SAGEConv(config.HIDDEN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT)
            ]
        elif config.NETWORK == 'GAT':
            # The second layer's input is num_heads * hidden because the
            # first layer's multi-head outputs are concatenated.
            self.layers = [
                dglnn.GATConv(config.IN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              attn_drop=config.ATTN_DROPOUT,
                              num_heads=config.ATTN_HEADS),
                dglnn.GATConv(config.ATTN_HEADS * config.HIDDEN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              num_heads=1)
            ]
        elif config.NETWORK == 'GIN':
            # MLPs serve as the learnable apply functions of the GIN layers.
            self.mlp1 = MLP(1, config.IN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.mlp2 = MLP(1, config.HIDDEN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.layers = [
                dglnn.GINConv(apply_func=self.mlp1, aggregator_type='mean'),
                dglnn.GINConv(apply_func=self.mlp2, aggregator_type='mean'),
            ]

        # Register the layers so their parameters are tracked, then add a
        # linear head for the two output classes.
        self.layers = torch.nn.ModuleList(self.layers)
        self.final = nn.Linear(config.HIDDEN_FEATURES, 2)
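
Only the constructor is shown. A hypothetical forward pass for this kind of stochastic (neighbour-sampled) model, with one block per layer as produced by a DGL minibatch dataloader, could look like this (not the original class's code):

    def forward(self, blocks, x):
        h = x
        for layer, block in zip(self.layers, blocks):
            h = layer(block, h)
            if h.dim() == 3:       # GATConv returns (N, num_heads, feats)
                h = h.flatten(1)   # concatenate the attention heads
            h = torch.relu(h)
        return self.final(h)       # logits for the two classes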
Example #7
def test_gin_conv(aggregator_type):
    ctx = F.ctx()
    # Homogeneous graph built from a scipy sparse adjacency matrix.
    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = F.randn((100, 5))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (100, 12)

    # Unidirectional bipartite graph: features are a (src, dst) pair and
    # the output is destination-sized.
    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = (F.randn((100, 5)), F.randn((200, 5)))
    gin = gin.to(ctx)
    h = gin(g, feat)
    assert h.shape == (200, 12)

    # Minibatch-style test: take every node with at least one in-edge as
    # a seed, convert its one-hop neighbourhood into a block (message-flow
    # graph), and run GINConv on the block.
    g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
    seed_nodes = th.unique(g.edges()[1])
    block = dgl.to_block(g, seed_nodes)
    gin = nn.GINConv(th.nn.Linear(5, 12), aggregator_type)
    feat = F.randn((block.number_of_src_nodes(), 5))
    gin = gin.to(ctx)
    h = gin(block, feat)
    assert h.shape == (block.number_of_dst_nodes(), 12)
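
The final assertion holds because a block is a unidirectional bipartite graph: inputs live on the source side (the seeds' in-neighbours plus, by default, the seeds themselves) and outputs on the destination side (the seeds). A quick check, reusing g and seed_nodes from above:

block = dgl.to_block(g, seed_nodes)
assert block.number_of_dst_nodes() == len(seed_nodes)
# include_dst_in_src defaults to True, so every seed also appears on
# the source side.
assert block.number_of_src_nodes() >= block.number_of_dst_nodes()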