Example #1
import torch.nn as nn
import torch.nn.functional as F

from layers import GraphConvolution  # assumed import path; GraphConvolution is defined elsewhere in this repo


class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)   # input -> hidden
        self.gc2 = GraphConvolution(nhid, nhid)    # hidden -> hidden
        self.gc3 = GraphConvolution(nhid, nclass)  # hidden -> class scores
        self.dropout = dropout

    def forward(self, x, adj, eval=False):
        # Note: `eval` shadows the Python builtin; the name is kept from the original API.
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        if eval:
            # Return the hidden node embeddings without the classifier layer.
            return x
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            x = self.gc3(x, adj)
            return F.log_softmax(x, dim=1)

    def functional_forward(self, x, adj, weights, eval=False):
        # Same computation as forward(), but each layer reads its parameters from
        # the external `weights` dict (fast weights, as in gradient-based
        # meta-learning) instead of its own registered parameters.
        x = F.relu(self.gc1.functional_forward(x, adj, id=1, weights=weights))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2.functional_forward(x, adj, id=2, weights=weights)
        if eval:
            return x
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            x = self.gc3.functional_forward(x, adj, id=3, weights=weights)
            return F.log_softmax(x, dim=1)
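
A minimal usage sketch for this node-classification variant. The toy shapes, the identity-matrix adjacency placeholder, and the NLLLoss pairing are illustrative assumptions, not taken from the original; the adjacency format actually expected depends on the GraphConvolution implementation.

import torch
import torch.nn as nn

# Hypothetical toy setup: 5 nodes, 8 input features, 3 classes.
features = torch.randn(5, 8)
adj = torch.eye(5)  # placeholder; real format depends on GraphConvolution
labels = torch.tensor([0, 1, 2, 1, 0])

model = GCN(nfeat=8, nhid=16, nclass=3, dropout=0.5)
log_probs = model(features, adj)               # log-probabilities per node
loss = nn.NLLLoss()(log_probs, labels)         # log_softmax output pairs with NLLLoss
embeddings = model(features, adj, eval=True)   # hidden embeddings only, no classifier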

Example #2

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # deprecated since PyTorch 0.4 (now a no-op); kept to match the original

from layers import GraphConvolution  # assumed import path
from util import process_sparse      # assumed import path for the sparse batching helper


class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.mlp = nn.Linear(nhid, nclass)  # graph-level classifier head
        self.nhid = nhid
        self.dropout = dropout

    def forward(self, graph_list, node_feat, edge_feat, eval=False):
        # Build the sparse batching operators for the list of graphs.
        graph_sizes, n2n_sp, e2n_sp, subg_sp, node_degs = process_sparse(graph_list, node_feat, edge_feat)

        x = F.relu(self.gc1(node_feat, n2n_sp, node_degs), inplace=True)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, n2n_sp, node_degs)
        # Buffer for one pooled embedding per graph.
        # Note: .cuda() assumes a GPU; Variable is a no-op in modern PyTorch.
        batch_graphs_repr = Variable(torch.zeros(len(graph_sizes), self.nhid).cuda())
        accum_count = 0
        if eval:
            # Mean-pool each graph's node embeddings and return them without classification.
            for i in range(subg_sp.size()[0]):
                batch_graphs_repr[i, :] = torch.mean(x[accum_count: accum_count + graph_sizes[i]], dim=0, keepdim=True)
                accum_count += graph_sizes[i]
            return batch_graphs_repr
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            for i in range(subg_sp.size()[0]):
                batch_graphs_repr[i, :] = torch.mean(x[accum_count: accum_count + graph_sizes[i]], dim=0, keepdim=True)
                accum_count += graph_sizes[i]
            batch_graphs_repr = self.mlp(batch_graphs_repr)
            return F.log_softmax(batch_graphs_repr, dim=1)

    def functional_forward(self, graph_list, node_feat, edge_feat, weights, eval=False):
        # Same computation as forward(), but parameters are read from the external
        # `weights` dict (fast weights, as in gradient-based meta-learning).
        graph_sizes, n2n_sp, e2n_sp, subg_sp, node_degs = process_sparse(graph_list, node_feat, edge_feat)

        x = F.relu(self.gc1.functional_forward(node_feat, n2n_sp, node_degs, id=1, weights=weights), inplace=True)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2.functional_forward(x, n2n_sp, node_degs, id=2, weights=weights)
        batch_graphs_repr = Variable(torch.zeros(len(graph_sizes), self.nhid).cuda())
        accum_count = 0
        if eval:
            for i in range(subg_sp.size()[0]):
                batch_graphs_repr[i, :] = torch.mean(x[accum_count: accum_count + graph_sizes[i]], dim=0, keepdim=True)
                accum_count += graph_sizes[i]
            return batch_graphs_repr
        else:
            x = F.dropout(x, self.dropout, training=self.training)
            for i in range(subg_sp.size()[0]):
                batch_graphs_repr[i, :] = torch.mean(x[accum_count: accum_count + graph_sizes[i]], dim=0, keepdim=True)
                accum_count += graph_sizes[i]
            # Apply the classifier functionally: x @ W.T + b, using the fast weights.
            batch_graphs_repr = torch.mm(batch_graphs_repr, weights['mlp.weight'].t()) + weights['mlp.bias']
            return F.log_softmax(batch_graphs_repr, dim=1)
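
A minimal sketch of how functional_forward is typically driven in gradient-based meta-learning (e.g. MAML): the module's own parameters seed a fast-weights dict, one inner-loop SGD step updates the dict without touching the module, and the adapted weights are passed back in for the query pass. The inner-loop step, the 0.01 learning rate, and the inputs graph_list, node_feat, edge_feat, labels are assumptions for illustration, not taken from the original; the key names must match whatever GraphConvolution.functional_forward looks up internally.

from collections import OrderedDict

import torch
import torch.nn.functional as F

# The code above hard-codes .cuda(), so the model must live on the GPU.
model = GCN(nfeat=8, nhid=16, nclass=3, dropout=0.5).cuda()

# Fast weights start as the module's own parameters, so functional_forward
# with this dict reproduces the plain forward pass.
fast_weights = OrderedDict(model.named_parameters())

# Hypothetical inner-loop step on support data (graph_list, node_feat,
# edge_feat, labels are placeholders for task-specific inputs).
log_probs = model.functional_forward(graph_list, node_feat, edge_feat, weights=fast_weights)
loss = F.nll_loss(log_probs, labels)
grads = torch.autograd.grad(loss, list(fast_weights.values()), create_graph=True)
fast_weights = OrderedDict(
    (name, param - 0.01 * grad)  # assumed inner-loop learning rate
    for (name, param), grad in zip(fast_weights.items(), grads)
)

# Query-set evaluation uses the adapted fast weights.
query_log_probs = model.functional_forward(graph_list, node_feat, edge_feat, weights=fast_weights)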