def process_sparse(graph_list, node_feat, edge_feat):
    """Prepare batched sparse operators and degree tensors for a graph batch.

    Args:
        graph_list: sequence of graph objects exposing ``num_nodes`` and
            ``degs`` (per-node degrees).
        node_feat: node feature tensor for the whole batch.
        edge_feat: edge feature tensor for the whole batch, or ``None``.

    Returns:
        Tuple ``(graph_sizes, n2n_sp, e2n_sp, subg_sp, node_degs)`` where
        ``graph_sizes`` is a list of node counts and the ``*_sp`` values are
        the sparse matrices produced by ``GNNLIB.PrepareSparseMatrices``.
    """
    graph_sizes = [g.num_nodes for g in graph_list]
    # +1 avoids zero degrees when these are later used as divisors.
    node_degs = torch.cat(
        [torch.Tensor(g.degs) + 1 for g in graph_list]).unsqueeze(1)
    n2n_sp, e2n_sp, subg_sp = GNNLIB.PrepareSparseMatrices(graph_list)

    # Move to the GPU only when CUDA is usable and the input already lives
    # there (same guard as the sibling forward() in this file); the previous
    # unconditional .cuda() calls crashed on CPU-only machines.
    if torch.cuda.is_available() and isinstance(node_feat,
                                                torch.cuda.FloatTensor):
        n2n_sp = n2n_sp.cuda()
        e2n_sp = e2n_sp.cuda()
        subg_sp = subg_sp.cuda()
        node_degs = node_degs.cuda()

    node_feat = Variable(node_feat)
    if edge_feat is not None:
        edge_feat = Variable(edge_feat)
        if torch.cuda.is_available() and isinstance(node_feat,
                                                    torch.cuda.FloatTensor):
            edge_feat = edge_feat.cuda()
    n2n_sp = Variable(n2n_sp)
    e2n_sp = Variable(e2n_sp)
    subg_sp = Variable(subg_sp)
    node_degs = Variable(node_degs)

    if edge_feat is not None:
        # Pool edge features onto their incident nodes and append them as
        # extra node-feature columns.
        # NOTE(review): this enlarged node_feat is NOT part of the return
        # value, so the concatenation has no effect on callers — looks like
        # a latent bug; confirm the intended interface before changing it.
        e2npool_input = gnn_spmm(e2n_sp, edge_feat)
        node_feat = torch.cat([node_feat, e2npool_input], 1)
    return graph_sizes, n2n_sp, e2n_sp, subg_sp, node_degs
    def forward(self, graph_list, node_feat, edge_feat):
        """Embed a batch of graphs via sort-pooling.

        Builds the batched sparse operators with GNNLIB, moves them to the
        GPU when the input features already live there, then delegates to
        ``self.sortpooling_embedding``.
        """
        num_graphs = len(graph_list)
        graph_sizes = [graph_list[idx].num_nodes for idx in range(num_graphs)]

        # Per-node degrees (+1 to avoid zeros), stacked as a column vector.
        deg_parts = []
        for idx in range(num_graphs):
            deg_parts.append(torch.Tensor(graph_list[idx].degs) + 1)
        node_degs = torch.cat(deg_parts).unsqueeze(1)

        n2n_sp, e2n_sp, subg_sp = GNNLIB.PrepareSparseMatrices(graph_list)

        gpu_input = torch.cuda.is_available() and isinstance(
            node_feat, torch.cuda.FloatTensor)
        if gpu_input:
            n2n_sp, e2n_sp = n2n_sp.cuda(), e2n_sp.cuda()
            subg_sp, node_degs = subg_sp.cuda(), node_degs.cuda()

        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
            # Re-check against the wrapped node_feat, exactly as before.
            if torch.cuda.is_available() and isinstance(
                    node_feat, torch.cuda.FloatTensor):
                edge_feat = edge_feat.cuda()

        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        node_degs = Variable(node_degs)

        return self.sortpooling_embedding(node_feat, edge_feat, n2n_sp,
                                          e2n_sp, subg_sp, graph_sizes,
                                          node_degs)
# Beispiel #3
# 0
# (scrape artifact: example separator from the original source listing,
#  commented out so it no longer breaks the module syntax)
    def forward(self, graph_list, node_feat, edge_feat):
        """Run attention GCN + attention pooling over a batch of graphs.

        The sparse batch operators are built by GNNLIB and moved to the
        device of ``node_feat`` before being wrapped as Variables.
        """
        graph_sizes = [g.num_nodes for g in graph_list]

        device = node_feat.device

        n2n_sp, e2n_sp, subg_sp = GNNLIB.PrepareSparseMatrices(graph_list)
        n2n_sp = n2n_sp.to(device)
        e2n_sp = e2n_sp.to(device)
        subg_sp = subg_sp.to(device)

        # Placeholder — presumably attention_gcn ignores degrees in this
        # variant; confirm before relying on it.
        node_degs = 0

        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        hidden = self.attention_gcn(node_feat, edge_feat, n2n_sp, e2n_sp,
                                    node_degs)
        return self.attention_pooling(hidden, subg_sp, graph_sizes)