Example No. 1
def test_topk_nodes():
    # test#1: basic
    g0 = dgl.DGLGraph(nx.path_graph(14))

    feat0 = F.randn((g0.number_of_nodes(), 10))
    g0.ndata['x'] = feat0
    # to test the case where k > number of nodes.
    dgl.topk_nodes(g0, 'x', 20, idx=-1)
    # test correctness
    val, indices = dgl.topk_nodes(g0, 'x', 5, idx=-1)
    ground_truth = F.reshape(
        F.argsort(F.slice_axis(feat0, -1, 9, 10), 0, True)[:5], (5,))
    assert F.allclose(ground_truth, indices)
    g0.ndata.pop('x')

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(12))
    feat1 = F.randn((g1.number_of_nodes(), 10))

    bg = dgl.batch([g0, g1])
    bg.ndata['x'] = F.cat([feat0, feat1], 0)
    # to test the case where k > number of nodes.
    dgl.topk_nodes(bg, 'x', 16, idx=1)
    # test correctness
    val, indices = dgl.topk_nodes(bg, 'x', 6, descending=False, idx=0)
    ground_truth_0 = F.reshape(
        F.argsort(F.slice_axis(feat0, -1, 0, 1), 0, False)[:6], (6,))
    ground_truth_1 = F.reshape(
        F.argsort(F.slice_axis(feat1, -1, 0, 1), 0, False)[:6], (6,))
    ground_truth = F.stack([ground_truth_0, ground_truth_1], 0)
    assert F.allclose(ground_truth, indices)

    # test idx=None
    val, indices = dgl.topk_nodes(bg, 'x', 6, descending=True)
    assert F.allclose(val, F.stack([F.topk(feat0, 6, 0), F.topk(feat1, 6, 0)], 0))
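The assertions above use the older DGL API, where the column to rank by is passed as idx (later releases rename it to sortby, as in Example No. 4 below). A minimal sketch of the same call on a batched graph, with illustrative graph sizes and a hypothetical feature name 'score':

import dgl
import networkx as nx
import torch

# Two small path graphs batched together; 'score' is an illustrative node feature.
g_a = dgl.DGLGraph(nx.path_graph(5))
g_b = dgl.DGLGraph(nx.path_graph(7))
bg = dgl.batch([g_a, g_b])
bg.ndata['score'] = torch.randn(bg.number_of_nodes(), 4)

# Top-3 nodes per graph, ranked by the last feature column (idx=-1).
# val:     (2, 3, 4) -- the selected nodes' feature rows for each graph
# indices: (2, 3)    -- node ids local to each graph in the batch
val, indices = dgl.topk_nodes(bg, 'score', 3, descending=True, idx=-1)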
Example No. 2
def forward(self, dgl_data):
    # Select the top-4 nodes and top-4 edges per graph, ranked by feature 'h'.
    topknodes, _ = dgl.topk_nodes(dgl_data, 'h', 4)
    topkedges, _ = dgl.topk_edges(dgl_data, 'h', 4)
    # Mean- and max-pool over the k dimension, then concatenate into one readout vector.
    meannode = torch.mean(topknodes, 1)
    maxnode, _ = torch.max(topknodes, 1)
    meanedge = torch.mean(topkedges, 1)
    maxedge, _ = torch.max(topkedges, 1)
    dgl_feat = torch.cat([meannode, maxnode, meanedge, maxedge], -1)
    dgl_predict = self.activate(self.weight(dgl_feat))
    return dgl_predict
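The constructor of the module above is not shown; self.weight and self.activate only appear in forward(). A minimal sketch of a matching __init__, assuming node and edge 'h' features both of width hidden_dim (so the concatenated mean/max readout has width 4 * hidden_dim); the class name is hypothetical:

import torch.nn as nn

class TopKReadout(nn.Module):  # hypothetical name; only forward() is shown above
    def __init__(self, hidden_dim, num_classes):
        super().__init__()
        # mean+max over top-4 nodes and mean+max over top-4 edges are concatenated
        # in forward(), giving a feature vector of width 4 * hidden_dim.
        self.weight = nn.Linear(4 * hidden_dim, num_classes)
        self.activate = nn.Sigmoid()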
Example No. 3

def forward(self, gr):
    output_gr = []

    with gr.local_scope():
        graph_list = dgl.unbatch(gr)
        batch_itr = 0

        for g in graph_list:
            batch_itr += 1
            # Number of nodes to keep in this graph.
            k_val = int(g.number_of_nodes() * self.frac)

            X = g.ndata['energy']

            # ----- y = X . p / ||p|| -----
            if self.p.shape[1] == 1:
                y = (X * self.p) / torch.sqrt(torch.sum(self.p ** 2)).item()
            else:
                y = torch.mm(X, self.p) / torch.sqrt(torch.sum(self.p ** 2)).item()

            g.ndata['y'] = y

            # ----- idx = rank(y, k) -----
            pooled_node_features, selected_nodes = dgl.topk_nodes(
                g, 'y', k=k_val, descending=True, idx=0)

            # ----- reduced representation -----
            sg = g.subgraph(selected_nodes[0].tolist())
            sg.copy_from_parent()

            X_bar = sg.ndata['energy']
            y_bar = nn.Sigmoid()(pooled_node_features)

            X_bar = torch.reshape(X_bar, (X_bar.shape[0], self.out_feat))

            # Gate the kept node features with the (sigmoid-squashed) scores.
            mod_en = X_bar * y_bar[0]
            sg.ndata['energy'] = mod_en

            # Bookkeeping: parent-graph size, subgraph size and the kept node ids.
            sg.ndata['parent_node'] = sg.parent.number_of_nodes() * torch.ones([sg.number_of_nodes()], dtype=torch.int)
            sg.ndata['own_node'] = sg.number_of_nodes() * torch.ones([sg.number_of_nodes()], dtype=torch.int64)
            sg.ndata['selected_node'] = selected_nodes[0]

            output_gr.append(sg)

    return dgl.batch(output_gr)
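Stripped of the DGL bookkeeping, the loop above is the familiar top-k (gPool-style) pooling rule: score each node by the projection y = X·p/||p||, keep the k highest-scoring nodes, and gate their features with the sigmoid of the scores. self.p, self.frac and self.out_feat belong to a constructor the snippet does not show. An illustrative standalone version of the core step (shapes are assumptions):

import torch

def topk_pool(X, p, frac=0.5):
    # X: (N, F) node features; p: (F, 1) learnable projection -- illustrative shapes.
    k = int(X.shape[0] * frac)
    y = (X @ p) / torch.norm(p)                  # (N, 1) projection scores
    scores, idx = torch.topk(y.squeeze(-1), k)   # k highest-scoring nodes
    X_kept = X[idx]                              # reduced node set
    return X_kept * torch.sigmoid(scores).unsqueeze(-1), idx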
Example No. 4
def test_topk(g, idtype, descending):
    g = g.astype(idtype).to(F.ctx())
    g.ndata['x'] = F.randn((g.number_of_nodes(), 3))

    # Test.1: to test the case where k > number of nodes.
    dgl.topk_nodes(g, 'x', 100, sortby=-1)

    # Test.2: test correctness
    min_nnodes = F.asnumpy(g.batch_num_nodes()).min()
    if min_nnodes <= 1:
        return
    k = min_nnodes - 1
    val, indices = dgl.topk_nodes(g, 'x', k, descending=descending, sortby=-1)
    print(k)
    print(g.ndata['x'])
    print('val', val)
    print('indices', indices)
    subg = dgl.unbatch(g)
    subval, subidx = [], []
    for sg in subg:
        subx = F.asnumpy(sg.ndata['x'])
        ai = np.argsort(subx[:, -1:].flatten())
        if descending:
            ai = np.ascontiguousarray(ai[::-1])
        subx = np.expand_dims(subx[ai[:k]], 0)
        subval.append(F.tensor(subx))
        subidx.append(F.tensor(np.expand_dims(ai[:k], 0)))
    print(F.cat(subval, dim=0))
    assert F.allclose(val, F.cat(subval, dim=0))
    assert F.allclose(indices, F.cat(subidx, dim=0))

    # Test.3: sortby=None
    dgl.topk_nodes(g, 'x', k, sortby=None)

    g.edata['x'] = F.randn((g.number_of_edges(), 3))

    # Test.4: topk edges where k > number of edges.
    dgl.topk_edges(g, 'x', 100, sortby=-1)

    # Test.5: topk edges test correctness
    min_nedges = F.asnumpy(g.batch_num_edges()).min()
    if min_nedges <= 1:
        return
    k = min_nedges - 1
    val, indices = dgl.topk_edges(g, 'x', k, descending=descending, sortby=-1)
    print(k)
    print(g.edata['x'])
    print('val', val)
    print('indices', indices)
    subg = dgl.unbatch(g)
    subval, subidx = [], []
    for sg in subg:
        subx = F.asnumpy(sg.edata['x'])
        ai = np.argsort(subx[:, -1:].flatten())
        if descending:
            ai = np.ascontiguousarray(ai[::-1])
        subx = np.expand_dims(subx[ai[:k]], 0)
        subval.append(F.tensor(subx))
        subidx.append(F.tensor(np.expand_dims(ai[:k], 0)))
    print(F.cat(subval, dim=0))
    assert F.allclose(val, F.cat(subval, dim=0))
    assert F.allclose(indices, F.cat(subidx, dim=0))
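This test targets the newer API, in which the idx argument from Example No. 1 has been renamed to sortby; g, idtype and descending are presumably supplied by pytest parametrization in the original test file. A minimal sketch of the same call outside the test harness, with illustrative graph sizes:

import dgl
import torch

# Two small directed path graphs, batched; 3-dim node features as in the test.
g1 = dgl.graph(([0, 1, 2], [1, 2, 3]))        # 4 nodes
g2 = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 4]))  # 5 nodes
bg = dgl.batch([g1, g2])
bg.ndata['x'] = torch.randn(bg.number_of_nodes(), 3)

# Rank nodes within each graph by the last feature column.
# val:     (2, 3, 3) -- batch_size x k x feature_dim
# indices: (2, 3)    -- per-graph ids of the selected nodes
val, indices = dgl.topk_nodes(bg, 'x', 3, descending=True, sortby=-1)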