def test_agnn_conv():
    ctx = F.ctx()
    # homogeneous graph: output keeps the input feature size
    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (100, 5)

    # bipartite graph: features come as a (src, dst) pair; output is dst-sized
    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    agnn = nn.AGNNConv(1)
    feat = (F.randn((100, 5)), F.randn((200, 5)))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (200, 5)
def test_agnn_conv():
    ctx = F.ctx()
    # legacy read-only DGLGraph built from a random scipy sparse matrix
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape[-1] == 5
def test_agnn_conv(g, idtype):
    # g and idtype are supplied by pytest parametrization (decorators not shown)
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    agnn = nn.AGNNConv(1)
    feat = F.randn((g.number_of_nodes(), 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (g.number_of_nodes(), 5)
def __init__(self, in_dim, n_classes, hidden_layers, init_beta, learn_beta,
             readout, activation_func, dropout, grid, device):
    # note: activation_func is accepted but not used in this constructor
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid

    # input layer
    self.layers.append(conv.AGNNConv(init_beta, learn_beta))
    self.batch_norms.append(nn.BatchNorm1d(in_dim))

    # hidden layers (AGNNConv keeps the feature size, so every norm is in_dim)
    for k in range(0, hidden_layers):
        self.layers.append(conv.AGNNConv(init_beta, learn_beta))
        self.batch_norms.append(nn.BatchNorm1d(in_dim))

    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # graph-level readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(in_dim, 1), nn.Linear(in_dim, in_dim * 2))
    else:
        self.readout_fcn = SppPooling(in_dim, self.grid)

    # classifier head
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(in_dim * self.grid * self.grid, in_dim * 2),
            nn.ReLU(inplace=True),
            nn.Linear(2 * in_dim, n_classes),
        )
    else:
        var = in_dim
        if self.readout == 'gap':
            var *= 2  # GlobalAttentionPooling doubles the feature size here
        self.classify = nn.Linear(var, n_classes)
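# The forward pass is not part of the excerpt above; the sketch below is a
# guess at how the declared pieces would fit together (graph `g`, node
# features `h`). The layer/norm/dropout ordering and SppPooling's call
# signature are assumptions, not the author's confirmed method.
def forward(self, g, h):
    for agnn, bn in zip(self.layers, self.batch_norms):
        h = agnn(g, h)           # AGNNConv: attention-weighted propagation
        h = bn(h)
        h = self.dropout(h)
    hg = self.readout_fcn(g, h)  # collapse node features to one graph vector
    return self.classify(hg)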
def test_agnn_conv():
    ctx = F.ctx()
    # homogeneous graph: output keeps the input feature size
    g = dgl.graph(sp.sparse.random(100, 100, density=0.1))
    agnn = nn.AGNNConv(1)
    feat = F.randn((100, 5))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (100, 5)

    # bipartite graph: features come as a (src, dst) pair; output is dst-sized
    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    agnn = nn.AGNNConv(1)
    feat = (F.randn((100, 5)), F.randn((200, 5)))
    agnn = agnn.to(ctx)
    h = agnn(g, feat)
    assert h.shape == (200, 5)

    # block (message flow graph): features live on src nodes, output on dst nodes
    g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
    seed_nodes = th.unique(g.edges()[1])
    block = dgl.to_block(g, seed_nodes)
    agnn = nn.AGNNConv(1)
    feat = F.randn((block.number_of_src_nodes(), 5))
    agnn = agnn.to(ctx)
    h = agnn(block, feat)
    assert h.shape == (block.number_of_dst_nodes(), 5)
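# A standalone, runnable adaptation of the homograph check above, assuming
# DGL >= 0.5 with the PyTorch backend: dgl.from_scipy and plain torch tensors
# stand in for the test-harness helpers (`backend as F`, `dgl.graph(spmat)`)
# used in the snippets.
import dgl
import dgl.nn.pytorch as dglnn
import scipy.sparse as sp
import torch as th

g = dgl.from_scipy(sp.random(100, 100, density=0.1))
g = dgl.add_self_loop(g)      # avoid zero-in-degree errors in newer DGL
agnn = dglnn.AGNNConv(init_beta=1.0)
feat = th.randn(100, 5)
h = agnn(g, feat)             # AGNNConv preserves the feature dimension
assert h.shape == (100, 5)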