Example #1
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
              dropout):
     super().__init__()
     self.n_layers = n_layers
     self.n_hidden = n_hidden
     self.n_classes = n_classes
     self.layers = nn.ModuleList()
     self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
     self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
     self.dropout = nn.Dropout(dropout)
     self.activation = activation
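The snippet above only defines the constructor; as a minimal sketch (not part of the original example), a matching full-graph forward pass would run the node features through each SAGEConv, applying the stored activation and dropout between all but the last layer:

 def forward(self, graph, x):
     # Full-graph message passing: activation and dropout are applied
     # after every SAGEConv except the final classification layer.
     h = x
     for i, layer in enumerate(self.layers):
         h = layer(graph, h)
         if i != len(self.layers) - 1:
             h = self.activation(h)
             h = self.dropout(h)
     return h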
Example #2
    def __init__(self,
                 in_feats,
                 out_feats,
                 n_hidden=None,
                 activation=F.elu,
                 dropout=0.2,
                 n=False):
        super().__init__()
        if n_hidden is None:
            n_hidden = out_feats

        self.d1 = DenseUnit(in_feats,
                            n_hidden,
                            activation=activation,
                            dropout=dropout,
                            n=False)
        self.gc1 = dglnn.GraphConv  # note: this stores the GraphConv class itself (it is never instantiated or used below)

        self.l1 = dglnn.SAGEConv(n_hidden,
                                 n_hidden,
                                 'mean',
                                 activation=activation)
        self.d2 = DenseUnit(n_hidden,
                            out_feats,
                            activation=activation,
                            dropout=dropout,
                            n=n)
Example #3
def test_sage_conv(idtype, g, aggre_type):
    g = g.astype(idtype).to(F.ctx())
    sage = nn.SAGEConv(5, 10, aggre_type)
    feat = F.randn((g.number_of_nodes(), 5))
    sage = sage.to(F.ctx())
    h = sage(g, feat)
    assert h.shape[-1] == 10
Example #4
    def __init__(self, in_dim, hidden_dim, n_classes,hidden_layers,aggregate,readout,
                 activation,dropout,device,grid=8):
        super(Classifier, self).__init__()
        self.device  = device
        self.readout = readout
        self.layers  = nn.ModuleList()
        self.grid    = grid
        
        # input layer
        self.layers.append(conv.SAGEConv(in_dim,hidden_dim,aggregate,feat_drop=0.0,
                                    activation=activation))

        # hidden layers
        for k in range(0,hidden_layers):
            self.layers.append(conv.SAGEConv(hidden_dim,hidden_dim,aggregate,feat_drop=dropout,
                                        activation=activation))

        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(nn.Linear(hidden_dim,1),nn.Linear(hidden_dim,hidden_dim*2))
        else:
            self.readout_fcn = SppPooling(hidden_dim,self.grid)

        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim*2),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(2*hidden_dim, 2*hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(2*hidden_dim, n_classes),
            )
        else:
            var=hidden_dim
            if self.readout=='gap':
                var*=2
                
            self.classify = nn.Linear(var, n_classes)
Example #5
def test_sage_conv(idtype, g, aggre_type):
    g = g.astype(idtype).to(F.ctx())
    sage = nn.SAGEConv(5, 10, aggre_type)
    feat = F.randn((g.number_of_src_nodes(), 5))
    sage = sage.to(F.ctx())
    # test pickle
    th.save(sage, tmp_buffer)
    h = sage(g, feat)
    assert h.shape[-1] == 10
Example #6
def test_sage_conv_bi(idtype, g, aggre_type):
    g = g.astype(idtype).to(F.ctx())
    dst_dim = 5 if aggre_type != 'gcn' else 10
    sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
    feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
    sage = sage.to(F.ctx())
    h = sage(g, feat)
    assert h.shape[-1] == 2
    assert h.shape[0] == g.number_of_dst_nodes()
Example #7
def test_sage_conv2(idtype):
    # TODO: add test for blocks
    # Test the case for graphs without edges
    g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), 2, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, (F.copy_to(feat[0], F.ctx()), F.copy_to(feat[1], F.ctx())))
    assert h.shape[-1] == 2
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), 2, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 3
Example #8
def test_sage_conv2(idtype):
    # TODO: add test for blocks
    # Test the case for graphs without edges
    g = dgl.bipartite([], num_nodes=(5, 3))
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    sage = nn.SAGEConv((3, 3), 2, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, feat)
    assert h.shape[-1] == 2
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), 2, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 3
Example #9
 def __init__(self, in_dim, h_dim, n_layers, activation, dropout, rel_names, label_entity):
     super().__init__()
     self.h_dim = h_dim
     self.in_dim = in_dim
     self.layers = nn.ModuleList()
     # self.batch_norms = nn.ModuleList()
     #i2h
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.SAGEConv(in_dim, h_dim, 'mean', activation=activation) for rel in rel_names}))
     # self.batch_norms.append(nn.BatchNorm1d(h_dim))
     #h2h
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.HeteroGraphConv(
             {rel: dglnn.SAGEConv(h_dim, h_dim, 'mean', feat_drop=dropout, activation=activation) for rel in rel_names}))
         # self.batch_norms.append(nn.BatchNorm1d(h_dim))
     #h2o
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.SAGEConv(h_dim, 1, 'mean', feat_drop=dropout) for rel in rel_names}))
     self.label_entity = label_entity
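As with Example #1, only the constructor is shown. A hedged sketch of a compatible forward pass (an assumption, not part of the source) would pass a per-node-type feature dict through each HeteroGraphConv and read out the scores for the labelled entity type:

 def forward(self, graph, inputs):
     # inputs: dict mapping node type -> feature tensor.
     # The activations are already configured inside the SAGEConv modules,
     # so each HeteroGraphConv layer is applied directly.
     h = inputs
     for layer in self.layers:
         h = layer(graph, h)
     # the last layer produces a single score per node of the labelled type
     return h[self.label_entity]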
Example #10
def test_sage_conv():
    for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1),
                         readonly=True)
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 10

        g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
        dst_dim = 5 if aggre_type != 'gcn' else 10
        sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
        feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 200
Example #11
def test_sage_conv():
    for aggre_type in ['mean', 'pool', 'gcn', 'lstm']:
        ctx = F.ctx()
        g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1),
                         readonly=True)
        sage = nn.SAGEConv(5, 10, aggre_type)
        feat = F.randn((100, 5))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 10
Example #12
 def __init__(self,
              in_dim,
              hidden_dim,
              out_dim,
              aggregator='mean',
              activation='sigmoid'):
     super(GraphSage, self).__init__()
     self.sageconv1 = dglnn.SAGEConv(in_feats=in_dim,
                                     out_feats=hidden_dim,
                                     aggregator_type=aggregator,
                                     feat_drop=0.2,
                                     bias=True)
     self.sageconv2 = dglnn.SAGEConv(in_feats=hidden_dim,
                                     out_feats=hidden_dim,
                                     aggregator_type=aggregator,
                                     feat_drop=0.2,
                                     bias=True)
     self.linear = nn.Linear(hidden_dim, out_dim, bias=False)
     self.activation = activation
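A plausible forward method to pair with this constructor (assumed for illustration, including the hidden-layer nonlinearity and the string-keyed output activation; `torch` is taken to be imported):

 def forward(self, graph, features):
     # Two SAGEConv hops, a linear readout, then the configured output activation.
     h = torch.relu(self.sageconv1(graph, features))   # hidden nonlinearity is an assumption
     h = torch.relu(self.sageconv2(graph, h))
     h = self.linear(h)
     if self.activation == 'sigmoid':                   # self.activation is stored as a string
         h = torch.sigmoid(h)
     return h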
Example #13
    def __init__(self,
                 num_nodes,
                 in_feats,
                 n_hidden,
                 n_layers,
                 activation,
                 dropout):
        super().__init__()

        self.embedding = nn.Embedding(num_nodes, in_feats)

        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.layers = nn.ModuleList()
        self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
        for i in range(1, n_layers):
            self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))

        self.dropout = nn.Dropout(dropout)
        self.activation = activation
Example #14
    def __init__(self,
                 layer_type,
                 block_type,
                 activation,
                 normalization=None,
                 **core_layer_hyperparms):
        super(GNNBasicBlock, self).__init__()
        self.layer_type = layer_type
        self.block_type = block_type

        if self.layer_type in ['gcn', 'gcn_res']:
            self.core_layer_type = 'gcn'
            self.core_layer = dglnn.GraphConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                bias=core_layer_hyperparms['bias'])
        elif self.layer_type in ['gat', 'gat_res']:
            self.core_layer_type = 'gat'
            self.core_layer = dglnn.GATConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=int(core_layer_hyperparms['out_channels'] /
                              core_layer_hyperparms['num_heads']),
                num_heads=core_layer_hyperparms['num_heads'],
                feat_drop=core_layer_hyperparms['feat_drop'],
                attn_drop=core_layer_hyperparms['attn_drop'])
        elif self.layer_type in ['sage', 'sage_res']:
            self.core_layer_type = 'sage'
            self.core_layer = dglnn.SAGEConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                aggregator_type='mean',
                bias=core_layer_hyperparms['bias'])

        else:
            raise NotImplementedError

        acti_type, acti_hyperparam = activation
        if acti_type == 'relu':
            self.activation = nn.ReLU(inplace=acti_hyperparam)
        elif acti_type == 'lkrelu':
            self.activation = nn.LeakyReLU(negative_slope=acti_hyperparam)
        elif acti_type == 'elu':
            self.activation = nn.ELU(inplace=acti_hyperparam)
        elif acti_type == 'no':
            self.activation = None
        else:
            raise NotImplementedError

        if 'n' in block_type.split('_'):
            self.node_norm = get_normalization(
                norm_type=normalization,
                num_channels=core_layer_hyperparms['out_channels'])
        self.block_type_str = self.get_block_type_str()
Example #15
def test_sage_conv(aggre_type):
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    sage = nn.SAGEConv(5, 10, aggre_type)
    feat = F.randn((100, 5))
    sage = sage.to(ctx)
    h = sage(g, feat)
    assert h.shape[-1] == 10

    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    sage = nn.SAGEConv(5, 10, aggre_type)
    feat = F.randn((100, 5))
    sage = sage.to(ctx)
    h = sage(g, feat)
    assert h.shape[-1] == 10

    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    dst_dim = 5 if aggre_type != 'gcn' else 10
    sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
    feat = (F.randn((100, 10)), F.randn((200, dst_dim)))
    sage = sage.to(ctx)
    h = sage(g, feat)
    assert h.shape[-1] == 2
    assert h.shape[0] == 200

    # Test the case for graphs without edges
    g = dgl.bipartite([], num_nodes=(5, 3))
    sage = nn.SAGEConv((3, 3), 2, 'gcn')
    feat = (F.randn((5, 3)), F.randn((3, 3)))
    sage = sage.to(ctx)
    h = sage(g, feat)
    assert h.shape[-1] == 2
    assert h.shape[0] == 3
    for aggre_type in ['mean', 'pool', 'lstm']:
        sage = nn.SAGEConv((3, 1), 2, aggre_type)
        feat = (F.randn((5, 3)), F.randn((3, 1)))
        sage = sage.to(ctx)
        h = sage(g, feat)
        assert h.shape[-1] == 2
        assert h.shape[0] == 3
Example #16
def test_dense_sage_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, 2, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, 2)
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.fc_neigh.bias.data
    feat = F.randn((100, 5))
    sage = sage.to(ctx)
    dense_sage = dense_sage.to(ctx)
    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage)
Example #17
    def __init__(self):
        super(StochasticNetwork, self).__init__()
        if config.NETWORK == 'SAGE':
            self.layers = [
                dglnn.SAGEConv(config.IN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT),
                dglnn.SAGEConv(config.HIDDEN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT)
            ]
        elif config.NETWORK == 'GAT':
            self.layers = [
                dglnn.GATConv(config.IN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              attn_drop=config.ATTN_DROPOUT,
                              num_heads=config.ATTN_HEADS),
                dglnn.GATConv(config.ATTN_HEADS * config.HIDDEN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              num_heads=1)
            ]
        elif config.NETWORK == 'GIN':
            self.mlp1 = MLP(1, config.IN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.mlp2 = MLP(1, config.HIDDEN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.layers = [
                dglnn.GINConv(apply_func=self.mlp1, aggregator_type='mean'),
                dglnn.GINConv(apply_func=self.mlp2, aggregator_type='mean'),
            ]

        self.layers = torch.nn.ModuleList(self.layers)
        self.final = nn.Linear(config.HIDDEN_FEATURES, 2)
Example #18
def test_dense_sage_conv(g):
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    sage = nn.SAGEConv(5, 2, 'gcn')
    dense_sage = nn.DenseSAGEConv(5, 2)
    dense_sage.fc.weight.data = sage.fc_neigh.weight.data
    dense_sage.fc.bias.data = sage.fc_neigh.bias.data
    if len(g.ntypes) == 2:
        feat = (F.randn(
            (g.number_of_src_nodes(), 5)), F.randn(
                (g.number_of_dst_nodes(), 5)))
    else:
        feat = F.randn((g.number_of_nodes(), 5))
    sage = sage.to(ctx)
    dense_sage = dense_sage.to(ctx)
    out_sage = sage(g, feat)
    out_dense_sage = dense_sage(adj, feat)
    assert F.allclose(out_sage, out_dense_sage), g
Example #19
class Multi_level(nn.Module):
    def __init__(self):
        super(Multi_level, self).__init__()
        self.micro_layer = None
        self.macro_layer = None

    def forward(self):
        return


import dgl.nn.pytorch as dglnn
conv = dglnn.HeteroGraphConv({
    'follows' : dglnn.GraphConv(...),
    'plays' : dglnn.GraphConv(...),
    'sells' : dglnn.SAGEConv(...)},
    aggregate='sum')
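# Hedged usage sketch (the graph and feature names below are placeholders, not
# from the source): a HeteroGraphConv built like the one above takes a
# heterogeneous graph plus a dict of per-node-type features and returns a dict
# keyed by destination node type, e.g.
#   h = conv(hetero_graph, {'user': user_feats, 'store': store_feats})
#   # h contains entries for 'user' and 'game'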


from openhgnn.models.micro_layer.LSTM_conv import LSTMConv
class HGConvLayer(nn.Module):
    def __init__(self, graph: dgl.DGLHeteroGraph, input_dim: int, hidden_dim: int, n_heads: int = 4,
                 dropout: float = 0.2, residual: bool = True):
        """
        :param graph: a heterogeneous graph
        :param input_dim: int, input dimension
        :param hidden_dim: int, hidden dimension
        :param n_heads: int, number of attention heads
        :param dropout: float, dropout rate
        :param residual: boolean, residual connections or not
        """
Example #20
def test_hetero_conv(agg, idtype):
    g = dgl.heterograph(
        {
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])
        },
        idtype=idtype,
        device=F.ctx())
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)
        }, agg)
    conv = conv.to(F.ctx())
    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    h = conv(g, {'user': uf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(g, {'store': sf})
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with pair input
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.SAGEConv(2, 3, 'mean'),
            'plays': nn.SAGEConv((2, 4), 4, 'mean'),
            'sells': nn.SAGEConv(3, 4, 'mean')
        }, agg)
    conv = conv.to(F.ctx())

    h = conv(g, ({'user': uf}, {'user': uf, 'game': gf}))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    # pair input requires both src and dst type features to be provided
    h = conv(g, ({'user': uf}, {'game': gf}))
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2

        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))

    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3
    }, agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows': (1, ), 'plays': (1, )}
    mod_kwargs = {'sells': {'arg2': 'abc'}}
    h = conv(g, {
        'user': uf,
        'store': sf
    },
             mod_args=mod_args,
             mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1