Example no. 1
    def forward(self, graph_list, node_feat, edge_feat):
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        node_degs = [
            torch.Tensor(graph_list[i].degs) + 1
            for i in range(len(graph_list))
        ]
        node_degs = torch.cat(node_degs).unsqueeze(1)

        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)

        if isinstance(node_feat, torch.cuda.FloatTensor):
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
            node_degs = node_degs.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        node_degs = Variable(node_degs)

        h = self.sortpooling_embedding(node_feat, edge_feat, n2n_sp, e2n_sp,
                                       subg_sp, graph_sizes, node_degs)
        return h
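The isinstance(node_feat, torch.cuda.FloatTensor) check and the Variable wrappers are pre-0.4 PyTorch idioms; since 0.4, tensors carry autograd state themselves. A minimal device-agnostic sketch of the same preparation step, assuming the matrices returned by S2VLIB.PrepareMeanField support .to() (the helper name prepare_inputs is hypothetical):

import torch

def prepare_inputs(node_feat, node_degs, sparse_mats):
    # Follow the device of the input features instead of checking the
    # tensor type; no Variable wrapping is needed on PyTorch >= 0.4.
    dv = node_feat.device
    return node_feat, node_degs.to(dv), [m.to(dv) for m in sparse_mats]

Example no. 2 below applies the same .to(device) pattern inline.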
Example no. 2
    def forward(self, graph_list, node_feat, edge_feat):
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        # node_degs = [torch.Tensor(graph_list[i].degs) + 1 for i in range(len(graph_list))]
        # node_degs = torch.cat(node_degs).unsqueeze(1)

        dv = node_feat.device

        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        n2n_sp, e2n_sp, subg_sp = n2n_sp.to(dv), e2n_sp.to(dv), subg_sp.to(dv)
        # node_degs = node_degs.to(dv)

        node_degs = 0  # degree normalization disabled in this variant; kept as a placeholder

        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        # node_degs = Variable(node_degs)

        h = self.node_level_embedding(node_feat, edge_feat, n2n_sp, e2n_sp,
                                      node_degs)

        h = self.graph_level_embedding(h, subg_sp, graph_sizes)

        # regularization term, averaged over the number of graphs in the batch
        reg_term = self.get_reg(self.reg)

        return h, (reg_term / subg_sp.size()[0])
Example no. 3
    def forward(self, graph_list, node_feat):
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        # graph_sizes is a list holding the number of vertices of each graph

        node_degs = [
            torch.Tensor(graph_list[i].degs) + 1
            for i in range(len(graph_list))
        ]
        # degree of each vertex, plus 1 because the original graphs have no self-loops
        node_degs = torch.cat(node_degs).unsqueeze(1)
        # concatenation of the degree vectors of all graphs in the batch

        non_zero, n2n_sp, _, __ = S2VLIB.PrepareMeanField(graph_list)
        # n2n_sp matrix,[total_num_nodes, total_num_nodes]
        # non_zero holds the indices of existing edges, shape 2 * E
        # total_num_nodes is the vertex count summed over all graphs in the batch

        if isinstance(node_feat, torch.cuda.FloatTensor):
            n2n_sp = n2n_sp.cuda()
            node_degs = node_degs.cuda()

        node_feat = Variable(node_feat)

        n2n_sp = Variable(n2n_sp)  # adjacency matrix
        node_degs = Variable(node_degs)  # D^-1

        h = self.sortpooling_embedding(non_zero, node_feat, n2n_sp,
                                       graph_sizes, node_degs)

        return h
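The comments mark n2n_sp as the adjacency matrix and node_degs as D^-1 with self-loops. A minimal sketch of the propagation step this implies (mean_aggregate is a hypothetical helper, not part of the repository; it assumes n2n_sp is a torch sparse tensor):

import torch

def mean_aggregate(node_feat, n2n_sp, node_degs):
    # Sum neighbor features through the sparse adjacency, add the
    # node's own features (the implicit self-loop), then normalize
    # by degree + 1, i.e. h' = D^{-1} (A + I) h.
    pooled = torch.sparse.mm(n2n_sp, node_feat) + node_feat
    return pooled / node_degs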
Example no. 4
    def forward(self,
                graph_list,
                node_feat,
                edge_feat,
                pool_global=True,
                n2n_grad=False,
                e2n_grad=False):
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        if type(node_feat) is torch.cuda.FloatTensor:
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp, requires_grad=n2n_grad)
        e2n_sp = Variable(e2n_sp, requires_grad=e2n_grad)
        subg_sp = Variable(subg_sp)

        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp,
                            pool_global)

        if n2n_grad or e2n_grad:
            sp_dict = {'n2n': n2n_sp, 'e2n': e2n_sp}
            return h, sp_dict
        else:
            return h
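The n2n_grad and e2n_grad flags expose the sparse operators as differentiable inputs. A hypothetical usage sketch for reading gradients with respect to the adjacency (the objective is a placeholder, and actual support for gradients through sparse tensors depends on the PyTorch version):

# model is an instance of the module above
h, sp_dict = model(graph_list, node_feat, edge_feat, n2n_grad=True)
loss = h.sum()  # placeholder objective
loss.backward()
adj_grad = sp_dict['n2n'].grad  # gradient w.r.t. the n2n operator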
Example no. 5
    def forward(self, graphs, nodeFeats, edgeFeats):
        graphSizes = [graphs[i].num_nodes for i in range(len(graphs))]
        nodeDegs = [torch.Tensor(graphs[i].degs) + 1
                    for i in range(len(graphs))]
        nodeDegs = torch.cat(nodeDegs).unsqueeze(1)
        n2nSp, e2nSp, _ = S2VLIB.PrepareMeanField(graphs)
        if isinstance(nodeFeats, torch.cuda.FloatTensor):
            n2nSp = n2nSp.cuda()
            e2nSp = e2nSp.cuda()
            nodeDegs = nodeDegs.cuda()

        nodeFeats = Variable(nodeFeats)
        if edgeFeats is not None:
            edgeFeats = Variable(edgeFeats)

        n2nSp = Variable(n2nSp)
        e2nSp = Variable(e2nSp)
        nodeDegs = Variable(nodeDegs)

        convGraphs = self.graphConvLayers(nodeFeats, edgeFeats,
                                          n2nSp, e2nSp, graphSizes, nodeDegs)
        if self.poolingType == 'adaptive':
            return self.adptivePoolLayer(convGraphs, nodeFeats, graphSizes)
        else:
            spGraphs = self.sortPoolLayer(convGraphs, nodeFeats, graphSizes)
            if self.endingLayers == 'weight_vertices':
                return self.weightVerticesLayers(spGraphs, len(graphSizes))
            else:
                return self.conv1dLayers(spGraphs, len(graphSizes))
Example no. 6
    def forward(self, graph_list, istraining=True): 
        node_feat = S2VLIB.ConcatNodeFeats(graph_list)        
        sp_list = S2VLIB.PrepareMeanField(graph_list)
        version = get_torch_version()
        if not istraining:
            if version >= 0.4:
                torch.set_grad_enabled(False)
            else:
                node_feat = Variable(node_feat.data, volatile=True)
        
        h = self.mean_field(node_feat, sp_list)

        if not istraining: # recover
            if version >= 0.4:
                torch.set_grad_enabled(True)

        return h
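On PyTorch >= 0.4 the same effect is usually written with the torch.no_grad() context manager, which restores the previous grad mode automatically even if the forward pass raises; a minimal sketch:

import torch

def run_inference(model, graph_list):
    # Replaces the manual set_grad_enabled(False)/set_grad_enabled(True)
    # pair above: gradient tracking is disabled only inside the block.
    with torch.no_grad():
        return model(graph_list)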
Example no. 7
    def forward(self, graph_list, node_feat, edge_feat):
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        if type(node_feat) is torch.cuda.FloatTensor:
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp)
        
        return h
Example no. 8
    def forward(self, graph_list, node_feat, edge_feat):
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        node_degs = [
            torch.Tensor(graph_list[i].degs) + 1
            for i in range(len(graph_list))
        ]
        node_degs = torch.cat(node_degs).unsqueeze(1)

        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        n2n_sp2 = n2n_sp.to_dense()
        n2n_sp2 = torch.mm(n2n_sp2, n2n_sp2)
        for idx in range(sum(graph_sizes)):
            n2n_sp2[idx, idx] = 0
        n2n_sp2[n2n_sp2 != 0] = 1
        """Get 2-hop degree, 0-hop """
        n_graphs = len(graph_sizes)
        list_degs_2 = []
        start_idx = 0
        for g_idx in range(n_graphs):
            A = n2n_sp2[start_idx:start_idx + graph_sizes[g_idx],
                        start_idx:start_idx + graph_sizes[g_idx]]
            degs = torch.sum(A, dim=1)
            start_idx += graph_sizes[g_idx]
            list_degs_2.append(degs)
        node_degs2 = [list_degs_2[i] + 1 for i in range(len(graph_list))]
        node_degs2 = torch.cat(node_degs2).unsqueeze(1)

        if isinstance(node_feat, torch.cuda.FloatTensor):
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
            node_degs = node_degs.cuda()
            n2n_sp2 = n2n_sp2.cuda()
            node_degs2 = node_degs2.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        node_degs = Variable(node_degs)
        node_degs2 = Variable(node_degs2)

        h = self.sortpooling_embedding(node_feat, edge_feat, n2n_sp, n2n_sp2,
                                       e2n_sp, subg_sp, graph_sizes, node_degs,
                                       node_degs2)

        return h
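Densifying the batch adjacency with to_dense() costs O(N^2) memory in the total node count. A sketch of the same 2-hop construction kept sparse throughout (two_hop_adjacency is a hypothetical helper; it assumes n2n_sp is a torch sparse COO tensor):

import torch

def two_hop_adjacency(n2n_sp):
    # Square the sparse adjacency, drop the diagonal (0-hop paths),
    # and binarize the surviving entries.
    a2 = torch.sparse.mm(n2n_sp, n2n_sp).coalesce()
    idx = a2.indices()
    keep = idx[0] != idx[1]
    idx = idx[:, keep]
    return torch.sparse_coo_tensor(idx,
                                   torch.ones(idx.size(1), device=idx.device),
                                   a2.shape)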
Example no. 9
    def forward(self, graph_list, node_feat, edge_feat):
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        if is_cuda_float(node_feat):
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp)

        return h
Example no. 10
    def forward(self, graph_list, node_feat, edge_feat): 
        n2e_sp, e2e_sp, e2n_sp, subg_sp = S2VLIB.PrepareLoopyBP(graph_list)
        if type(node_feat) is torch.cuda.FloatTensor:
            n2e_sp = n2e_sp.cuda()
            e2e_sp = e2e_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        edge_feat = Variable(edge_feat)
        n2e_sp = Variable(n2e_sp)
        e2e_sp = Variable(e2e_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        h = self.loopy_bp(node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp, subg_sp)
        
        return h
Example no. 11
    def forward(self, graph_list, node_feat, edge_feat):
        n2e_sp, e2e_sp, e2n_sp, subg_sp = S2VLIB.PrepareLoopyBP(graph_list)
        if is_cuda_float(node_feat):
            n2e_sp = n2e_sp.cuda()
            e2e_sp = e2e_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2e_sp = Variable(n2e_sp)
        e2e_sp = Variable(e2e_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        h = self.loopy_bp(node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp,
                          subg_sp)

        return h
Example no. 12
    def forward(self, graph_list, node_feat, edge_feat):
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        # if type(node_feat) is torch.cuda.FloatTensor:
        if torch.cuda.is_available():
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        # print("nf", node_feat)
        # print("ef", edge_feat)
        # print("n2n", n2n_sp)
        # print("e2n", e2n_sp)
        # print("subg", subg_sp)
        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp)

        return h
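Checking torch.cuda.is_available() instead of the input's type moves the sparse operators to the GPU even when node_feat lives on the CPU, which makes the subsequent sparse-dense products fail on mixed devices. A sketch that follows the input's device instead (move_like is a hypothetical helper; it assumes the sparse tensors support .to()):

def move_like(node_feat, *mats):
    # Place every operator on the same device as the node features.
    return [m.to(node_feat.device) for m in mats]

n2n_sp, e2n_sp, subg_sp = move_like(node_feat, n2n_sp, e2n_sp, subg_sp)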