Example No. 1
    def __init__(self, nfeat, nhid, dropout):
        super(GCN2, self).__init__()

        self.gc1 = DenseGCNConv(nfeat, nhid)
        self.gc2 = DenseGCNConv(nhid, nhid)
        self.dropout = dropout
        self.classifier = nn.Linear(nhid * 2, 2)
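Only the constructor is shown here. DenseGCNConv operates on dense, batched inputs: a node-feature tensor of shape [batch_size, num_nodes, in_channels], a dense adjacency of shape [batch_size, num_nodes, num_nodes], and an optional node mask, as the test functions further down in this listing confirm. A minimal standalone usage sketch (the layer sizes and the mean readout are illustrative, not taken from GCN2):

# Illustrative only: standalone usage of DenseGCNConv with batched dense inputs.
import torch
import torch.nn.functional as F
from torch_geometric.nn import DenseGCNConv

gc1 = DenseGCNConv(32, 64)
gc2 = DenseGCNConv(64, 64)

x = torch.randn(8, 20, 32)    # [batch_size, num_nodes, in_channels]
adj = torch.rand(8, 20, 20)   # [batch_size, num_nodes, num_nodes]

h = F.relu(gc1(x, adj))       # -> [8, 20, 64]
h = F.relu(gc2(h, adj))       # -> [8, 20, 64]
graph_emb = h.mean(dim=1)     # simple mean readout over nodes -> [8, 64]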
Example No. 2
    def __init__(self,
                 num_features,
                 n_classes,
                 num_hidden,
                 num_hidden_layers,
                 dropout,
                 activation,
                 improved=True,
                 bias=True):
        super(PDenseGCN, self).__init__()
        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation
        # input layer
        self.conv_input = DenseGCNConv(num_features,
                                       num_hidden,
                                       improved=improved,
                                       bias=bias)
        # hidden layers
        self.layers = nn.ModuleList()
        for _ in range(num_hidden_layers):
            self.layers.append(
                DenseGCNConv(num_hidden,
                             num_hidden,
                             improved=improved,
                             bias=bias))
        # output layer
        self.conv_output = DenseGCNConv(num_hidden,
                                        n_classes,
                                        improved=improved,
                                        bias=bias)
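The example shows only the constructor; the forward pass is not included in the snippet. A plausible sketch of how the input, hidden, and output convolutions could be chained with the stored activation and dropout (an assumption for illustration, not the repository's actual method):

    # Hypothetical forward for a stack like PDenseGCN above; treat this purely
    # as an illustration of how the constructed layers would be wired together.
    def forward(self, x, adj, mask=None):
        h = self.activation(self.conv_input(x, adj, mask))
        h = self.dropout(h)
        for layer in self.layers:
            h = self.activation(layer(h, adj, mask))
            h = self.dropout(h)
        return self.conv_output(h, adj, mask)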
Example No. 3
    def build_graph(self):

        # entry GCN
        self.entry_conv_first = DenseGCNConv(
            in_channels=self.in_feature,
            out_channels=self.hidden_feature,
        )
        self.entry_conv_block = DenseGCNConv(
            in_channels=self.hidden_feature,
            out_channels=self.hidden_feature,
        )
        self.entry_conv_last = DenseGCNConv(
            in_channels=self.hidden_feature,
            out_channels=self.out_feature,
        )

        self.gcn_hpool_layer = GcnHpoolSubmodel(
            self.out_feature + self.hidden_feature * 2, self.h_hidden_feature,
            self.h_out_feature, self.in_node, self.hidden_node, self.out_node
            #self._hparams
        )

        self.pred_model = torch.nn.Sequential(
            torch.nn.Linear(
                2 * self.hidden_feature + 2 * self.h_hidden_feature +
                self.h_out_feature + self.out_feature, self.h_hidden_feature),
            #torch.nn.Linear( self.out_feature, self.h_hidden_feature),
            torch.nn.ReLU(),
            torch.nn.Linear(self.h_hidden_feature, self.h_out_feature))
Example No. 4
class CoarsenBlock(torch.nn.Module):
    def __init__(self, in_channels, assign_ratio):
        super(CoarsenBlock, self).__init__()

        self.gcn_att = DenseGCNConv(in_channels, 1, bias=True)

        # self.att = torch.nn.Linear(in_channels,
        #                            hidden)
        self.assign_ratio = assign_ratio

    def normalize_batch_adj(
        self, adj
    ):  # adj shape: batch_size * num_node * num_node, D^{-1/2} (A+I) D^{-1/2}
        dim = adj.size()[1]
        A = adj + torch.eye(dim, device=adj.device)
        deg_inv_sqrt = A.sum(dim=-1).clamp(min=1).pow(-0.5)

        newA = deg_inv_sqrt.unsqueeze(-1) * A * deg_inv_sqrt.unsqueeze(-2)
        newA = (adj.sum(-1) > 0).float().unsqueeze(-1).to(adj.device) * newA
        return newA

    def reset_parameters(self):
        self.gcn_att.reset_parameters()

    def forward(self, x, adj, batch_num_nodes):
        # alpha_vec = F.softmax(self.att(x).sum(-1), -1)
        alpha_vec = torch.sigmoid(torch.pow(self.gcn_att(x, adj),
                                            2)).squeeze()  # b*n*1 --> b*n

        norm_adj = self.normalize_batch_adj(adj)
        batch_size = x.size()[0]
        cut_batch_num_nodes = batch_num_nodes
        cut_value = torch.zeros_like(alpha_vec[:, 0])
        for j in range(batch_size):
            if cut_batch_num_nodes[j] > 1:
                cut_batch_num_nodes[j] = torch.ceil(
                    cut_batch_num_nodes[j].float() * self.assign_ratio) + 1
                # cut_value[j], _ = (-alpha_vec[j]).kthvalue(cut_batch_num_nodes[j], dim=-1)
                temptopk, topk_ind = alpha_vec[j].topk(cut_batch_num_nodes[j],
                                                       dim=-1)
                cut_value[j] = temptopk[-1]

            else:
                cut_value[j] = 0
        # cut_alpha_vec = torch.mul( ((alpha_vec - torch.unsqueeze(cut_value, -1))>=0).float(), alpha_vec)  # b * n
        cut_alpha_vec = F.relu(alpha_vec - torch.unsqueeze(cut_value, -1))

        S = torch.mul(norm_adj, cut_alpha_vec.unsqueeze(
            1))  # repeat rows of cut_alpha_vec, #b * n * n
        # temp_rowsum = torch.sum(S, -1).unsqueeze(-1).pow(-1)
        # # temp_rowsum[temp_rowsum > 0] = 1.0 / temp_rowsum[temp_rowsum > 0]
        # S = torch.mul(S, temp_rowsum)  # row-wise normalization
        S = F.normalize(S, p=1, dim=-1)

        embedding_tensor = torch.matmul(torch.transpose(
            S, 1, 2), x)  # equals to torch.einsum('bij,bjk->bik',...)
        new_adj = torch.matmul(torch.matmul(torch.transpose(S, 1, 2), adj),
                               S)  # batched matrix multiply

        return embedding_tensor, new_adj, S
Example No. 5
class Coarsening(torch.nn.Module):
    def __init__(self, dataset, hidden, ratio=0.25):  # only one coarsening layer is used
        super(Coarsening, self).__init__()

        # self.embed_block1 = GNNBlock(dataset.num_features, hidden, hidden)
        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)

        self.jump = JumpingKnowledge(mode='cat')

        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.coarse_block1.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data, epsilon=0.01, opt_epochs=100):
        x, adj, mask = data.x, data.adj, data.mask
        batch_num_nodes = data.mask.sum(-1)
        x1 = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        # xs = [x1.mean(dim=1)]
        coarse_x, new_adj, S = self.coarse_block1(x1, adj, batch_num_nodes)
        xs = [coarse_x.mean(dim=1)]
        x2 = torch.tanh(self.embed_block2(coarse_x, new_adj, mask, add_loop=True))
        xs.append(x2.mean(dim=1))


        opt_loss = 0.0
        for i in range(len(x)):
            x3 = self.get_nonzero_rows(x[i])
            x4 = self.get_nonzero_rows(x2[i])
            # if x3.size()[0]==0 or x4.size()[0]==0:
            #     continue
            # opt_loss += sinkhorn_loss_default(x3, x4, epsilon, niter=opt_epochs).float()
            opt_loss += sinkhorn_loss_default(x3, x2[i], epsilon, niter=opt_epochs)
        return xs, new_adj, S, opt_loss

    def predict(self, xs):
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def get_nonzero_rows(self, M):  # M is a matrix
        # row_ind = M.sum(-1).nonzero().squeeze()
        # nonzero() is buggy in PyTorch 1.2.0, so we use a sort-based workaround instead.
        MM, MM_ind = M.sum(-1).sort()
        N = (M.sum(-1)>0).sum()
        return M[MM_ind[:N]]


    def __repr__(self):
        return self.__class__.__name__
Example No. 6
    def __init__(self, in_channels, assign_ratio):
        super(CoarsenBlock, self).__init__()

        self.gcn_att = DenseGCNConv(in_channels, 1, bias=True)

        # self.att = torch.nn.Linear(in_channels,
        #                            hidden)
        self.assign_ratio = assign_ratio
Example No. 7
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(GNNBlock, self).__init__()

        self.conv1 = DenseGCNConv(in_channels, hidden_channels)
        self.conv2 = DenseGCNConv(hidden_channels, out_channels)

        self.lin = torch.nn.Linear(hidden_channels + out_channels,
                                   out_channels)
Example No. 8
    def __init__(self, nfeat, nhid, dropout):
        super(GCN1, self).__init__()

        self.gc1 = DenseGCNConv(nfeat, nhid)
        self.gc2 = DenseGCNConv(nhid, nhid)
        self.dropout = dropout
        self.classifier = nn.Linear(nhid * 2, 2)
        self.attention = nn.Linear(nhid, 8)
        self.node_classifier = nn.Linear(nhid, 2)
Example No. 9
    def __init__(self, hparams):
        super(GVAE, self).__init__()

        self.hparams = hparams

        if hparams.pool_type == 'stat':
            # since stat moments x4
            self.prepool_dim = hparams.hidden_dim // 4
        else:
            self.prepool_dim = hparams.hidden_dim

        # encoding layers
        if hparams.gnn_type == 'gcn':
            self.gcn_1 = DenseGCNConv(hparams.input_dim, hparams.hidden_dim)
            self.gcn_2 = DenseGCNConv(hparams.hidden_dim, hparams.hidden_dim)

            self.gcn_31 = DenseGCNConv(hparams.hidden_dim, self.prepool_dim)
            self.gcn_32 = DenseGCNConv(hparams.hidden_dim, self.prepool_dim)

        elif hparams.gnn_type == 'sage':
            self.gcn_1 = gnn_modules.DenseSAGEConv(hparams.input_dim,
                                                   hparams.hidden_dim)
            self.gcn_2 = gnn_modules.DenseSAGEConv(hparams.hidden_dim,
                                                   hparams.hidden_dim)

            self.gcn_31 = gnn_modules.DenseSAGEConv(hparams.hidden_dim,
                                                    self.prepool_dim)
            self.gcn_32 = gnn_modules.DenseSAGEConv(hparams.hidden_dim,
                                                    self.prepool_dim)

        self.fc2 = nn.Linear(hparams.hidden_dim, hparams.bottle_dim)

        # decoding layers
        self.fc3 = nn.Linear(hparams.bottle_dim,
                             hparams.node_dim * hparams.input_dim)
        self.fc4 = nn.Linear(hparams.node_dim * hparams.input_dim,
                             hparams.node_dim * hparams.input_dim)

        # energy prediction
        self.regfc1 = nn.Linear(hparams.bottle_dim, 20)
        self.regfc2 = nn.Linear(20, 1)

        # diff pool
        if hparams.pool_type == 'diff':
            self.gcn_diff = DenseGCNConv(hparams.hidden_dim, 1)

        if hparams.n_gpus > 0:
            self.dev_type = 'cuda'

        if hparams.n_gpus == 0:
            self.dev_type = 'cpu'

        self.eps = 1e-5
Example No. 10
    def __init__(self, dataset, hidden, ratio=0.25):  # only one coarsening layer is used
        super(Coarsening, self).__init__()

        # self.embed_block1 = GNNBlock(dataset.num_features, hidden, hidden)
        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)

        self.jump = JumpingKnowledge(mode='cat')

        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Example No. 11
    def __init__(self, dataset, hidden, num_layers=2, ratio=0.5):
        super(MultiLayerCoarsening, self).__init__()

        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
        # self.embed_block2 = GNNBlock(hidden, hidden, dataset.num_features)

        self.num_layers = num_layers

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Example No. 12
    def __init__(self, nfeat, nhid, dropout):
        super(GCN, self).__init__()

        self.gc1 = DenseGCNConv(nfeat, nhid)
        self.gc2 = DenseGCNConv(nhid, nhid)
        self.gc3 = DenseGCNConv(nfeat, nhid)
        self.gc4 = DenseGCNConv(nhid, nhid)
        self.dropout = dropout
        self.number_attention = 1
        self.classifier = nn.Linear(nhid * self.number_attention, 2)
        # self.classifier2 = nn.Linear(nhid*self.number_attention, 2)

        self.node_classifier = nn.Linear(nhid, 2)
        self.attention = nn.Linear(nhid, self.number_attention)
Example No. 13
class Block(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(Block, self).__init__()

        self.conv1 = DenseGCNConv(in_channels, hidden_channels)
        self.conv2 = DenseGCNConv(hidden_channels, hidden_channels)
        self.lin = nn.Linear(hidden_channels + hidden_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.lin.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        out = self.lin(torch.cat((x1, x2), -1))

        return out
Example No. 14
class GNNBlock(torch.nn.Module):  # 2-layer GCN block
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(GNNBlock, self).__init__()

        self.conv1 = DenseGCNConv(in_channels, hidden_channels)
        self.conv2 = DenseGCNConv(hidden_channels, out_channels)

        self.lin = torch.nn.Linear(hidden_channels + out_channels,
                                   out_channels)
        # self.lin1 = torch.nn.Linear(hidden_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.lin.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        return self.lin(torch.cat([x1, x2], dim=-1))
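Because GNNBlock's forward is included above, usage is direct: the block maps a [batch_size, num_nodes, in_channels] feature tensor and a dense adjacency to [batch_size, num_nodes, out_channels]. A short sketch with arbitrary sizes:

# Usage sketch for the GNNBlock defined above (sizes are arbitrary).
import torch

block = GNNBlock(in_channels=16, hidden_channels=32, out_channels=8)
x = torch.randn(4, 10, 16)                      # [batch_size, num_nodes, in_channels]
adj = torch.randint(0, 2, (4, 10, 10)).float()  # dense 0/1 adjacency
out = block(x, adj)                             # -> [4, 10, 8]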
Example No. 15
    def __init__(self, in_feature, hidden_feature, out_feature, in_node,
                 hidden_node, out_node):
        super(GcnHpoolSubmodel, self).__init__()

        #self._hparams = hparams_lib.copy_hparams(hparams)
        #self.build_graph(in_feature, hidden_feature, out_feature, in_node, hidden_node, out_node)
        self.reset_parameters()

        #self._device = torch.device(self._hparams.device)
        self.pool_tensor = None

        # # embedding blocks
        #
        # self.embed_conv_first = GCNConv(
        #     in_channels=in_feature,
        #     out_channels=hidden_feature,
        # )
        # self.embed_conv_block = GCNConv(
        #     in_channels=hidden_feature,
        #     out_channels=hidden_feature,
        # )
        # self.embed_conv_last = GCNConv(
        #     in_channels=hidden_feature,
        #     out_channels=out_feature,
        # )
        # embedding blocks

        self.embed_conv_first = DenseGCNConv(
            in_channels=in_feature,
            out_channels=hidden_feature,
        )
        self.embed_conv_block = DenseGCNConv(
            in_channels=hidden_feature,
            out_channels=hidden_feature,
        )
        self.embed_conv_last = DenseGCNConv(
            in_channels=hidden_feature,
            out_channels=out_feature,
        )

        # pooling blocks

        self.pool_conv_first = DenseGCNConv(
            in_channels=in_node,
            out_channels=hidden_node,
        )

        self.pool_conv_block = DenseGCNConv(
            in_channels=hidden_node,
            out_channels=hidden_node,
        )

        self.pool_conv_last = DenseGCNConv(
            in_channels=hidden_node,
            out_channels=out_node,
        )

        self.pool_linear = torch.nn.Linear(hidden_node * 2 + out_node,
                                           out_node)
Example No. 16
def test_dense_gcn_conv():
    channels = 16
    sparse_conv = GCNConv(channels, channels)
    dense_conv = DenseGCNConv(channels, channels)
    assert dense_conv.__repr__() == 'DenseGCNConv(16, 16)'

    # Ensure same weights and bias.
    dense_conv.weight = sparse_conv.weight
    dense_conv.bias = sparse_conv.bias

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)
    assert sparse_out.size() == (5, channels)

    x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels)
    adj = torch.Tensor([
        [
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 0],
        ],
        [
            [0, 1, 0],
            [1, 0, 0],
            [0, 0, 0],
        ],
    ])
    mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)

    dense_out = dense_conv(x, adj, mask)
    assert dense_out.size() == (2, 3, channels)

    assert dense_out[1, 2].abs().sum().item() == 0
    dense_out = dense_out.view(6, channels)[:-1]
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
Example No. 17
def test_dense_gcn_conv_with_broadcasting():
    batch_size, num_nodes, channels = 8, 3, 16
    conv = DenseGCNConv(channels, channels)

    x = torch.randn(batch_size, num_nodes, channels)
    adj = torch.Tensor([
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ])

    assert conv(x, adj).size() == (batch_size, num_nodes, channels)
    mask = torch.tensor([1, 1, 1], dtype=torch.uint8)
    assert conv(x, adj, mask).size() == (batch_size, num_nodes, channels)
Example No. 18
    def __init__(self,
                 input_dim=3,
                 hidden_dim=16,
                 embedding_dim=32,
                 output_dim_id=len(class_to_id),
                 output_dim_p4=4,
                 dropout_rate=0.5,
                 convlayer="sgconv",
                 space_dim=2,
                 nearest=3):
        super(PFNet5, self).__init__()
        self.input_dim = input_dim
        act = nn.LeakyReLU
        self.inp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, embedding_dim),
            act(),
        )
        self.conv = DenseGCNConv(embedding_dim, embedding_dim)

        self.nn1 = nn.Sequential(
            nn.Linear(embedding_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, output_dim_id),
        )
        self.nn2 = nn.Sequential(
            nn.Linear(embedding_dim + output_dim_id, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, output_dim_p4),
        )
Example No. 19
def test_gcn():
    x_len, x_dim = 100, 1000
    x = np.random.randn(x_len, x_dim)
    adj = sps.rand(x_len, x_len, density=0.1)
    edge_index = np.array(adj.nonzero())

    gcn = SparseGCN(x_dim, x_dim)
    print(x[7].mean(), np.linalg.norm(x))

    start = time.time()
    out = gcn.forward(x, edge_index)
    print(time.time() - start)
    print(out[7].mean(), np.linalg.norm(out), sps.linalg.norm(gcn.w))

    gcn1 = DenseGCNConv(x_dim, x_dim, improved=True, bias=False)
    adj = adj > 0
    out = gcn1(torch.tensor(x, dtype=torch.float),
               torch.tensor(adj.toarray(), dtype=torch.float))
    print(out[0, 7].mean(), out.norm(), gcn1.weight.norm())
Example No. 20
    def __init__(self,
                 num_nodes,
                 input_dim,
                 output_dim,
                 lstm_hidden_size,
                 lstm_num_layers,
                 batch_size,
                 gnn_hidden_size,
                 lstm_dropout=0,
                 **kwargs):
        super(GNNLSTM, self).__init__()

        self.num_nodes = num_nodes
        self.input_dim = input_dim
        self.lstm_hidden_size = lstm_hidden_size
        self.lstm_num_layers = lstm_num_layers
        self.gnn_hidden_size = gnn_hidden_size
        self.lstm_dropout = lstm_dropout
        self.output_dim = output_dim
        self.batch_size = batch_size

        if 'target_node' in kwargs.keys():
            self.target_node = kwargs['target_node']
        else:
            self.target_node = None

        # LSTM layers definition
        self.graph_lstm = GraphLSTM(num_nodes=num_nodes,
                                    input_dim=input_dim,
                                    hidden_size=lstm_hidden_size,
                                    num_layers=lstm_num_layers,
                                    batch_size=batch_size,
                                    dropout=lstm_dropout)

        # GNN layers definition
        self.gcn = DenseGCNConv(in_channels=lstm_num_layers * lstm_hidden_size,
                                out_channels=gnn_hidden_size)

        # MLP definition
        self.mlp = nn.Sequential(nn.Linear(gnn_hidden_size, 512), nn.ReLU(),
                                 nn.Linear(512, 256), nn.ReLU(),
                                 nn.Linear(256, 64), nn.ReLU(),
                                 nn.Linear(64, output_dim))
Example No. 21
    def __init__(self, in_dim, out_dim, act, p):
        super(GCN, self).__init__()
        self.act = act
        self.drop = nn.Dropout(p=p) if p > 0.0 else nn.Identity()
        self.gcn = DenseGCNConv(in_dim, out_dim, improved=True)
Example No. 22
class MultiLayerCoarsening(torch.nn.Module):
    def __init__(self, dataset, hidden, num_layers=2, ratio=0.5):
        super(MultiLayerCoarsening, self).__init__()

        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
        # self.embed_block2 = GNNBlock(hidden, hidden, dataset.num_features)

        self.num_layers = num_layers

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(hidden * num_layers, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.coarse_block1.reset_parameters()

        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data, epsilon=0.01, opt_epochs=100):
        x, adj, mask = data.x, data.adj, data.mask
        batch_num_nodes = data.mask.sum(-1)

        new_adjs = [adj]
        Ss = []

        x1 = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        # xs = [x1.mean(dim=1)]
        xs = []
        coarse_x, new_adj, S = self.coarse_block1(x1, adj, batch_num_nodes)
        new_adjs.append(new_adj)
        Ss.append(S)
        # x2 = F.relu(self.embed_block1(coarse_x, new_adj, mask, add_loop=True))
        xs.append(coarse_x.mean(dim=1))

        x2 = self.embed_block2(coarse_x, new_adj, mask,
                               add_loop=True)  # do not apply ReLU here, otherwise x2 could be all zeros.
        # xs.append(x2.mean(dim=1))

        for i in range(self.num_layers-1):
            x1 = F.relu(self.embed_block1(F.relu(x2), new_adj, mask, add_loop=True))
            coarse_x, new_adj, S = self.coarse_block1(x1, new_adj, batch_num_nodes)
            new_adjs.append(new_adj)
            Ss.append(S)
            xs.append(coarse_x.mean(dim=1))
            x2 = self.embed_block2(coarse_x, new_adj, mask, add_loop=True)  # do not apply ReLU here, otherwise x2 could be all zeros.
        # xs.append(x2.mean(dim=1))
        opt_loss = 0.0
        for i in range(len(x)):
            x3 = self.get_nonzero_rows(x[i])
            x4 = self.get_nonzero_rows(x2[i])
            if x3.size()[0]==0:
                continue
            if x4.size()[0]==0:
                opt_loss += sinkhorn_loss_default(x3, x2[i], epsilon, niter=opt_epochs).float()
                continue
            opt_loss += sinkhorn_loss_default(x3, x4, epsilon, niter=opt_epochs).float()

        return xs, new_adjs, Ss, opt_loss

    def get_nonzero_rows(self, M):  # M is a matrix
        # row_ind = M.sum(-1).nonzero().squeeze()
        # nonzero() is buggy in PyTorch 1.2.0, so we use a sort-based workaround instead.
        MM, MM_ind = torch.abs(M.sum(-1)).sort()
        N = (torch.abs(M.sum(-1))>0).sum()
        return M[MM_ind[:N]]

    def predict(self, xs):
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)


    def test(self, train_z, train_y, test_z, test_y, solver='lbfgs',
             multi_class='auto', *args, **kwargs):
        r"""Evaluates latent space quality via a logistic regression downstream
        task."""
        clf = LogisticRegression(solver=solver, multi_class=multi_class, *args,
                                 **kwargs).fit(train_z.detach().cpu().numpy(),
                                               train_y.detach().cpu().numpy())
        return clf.score(test_z.detach().cpu().numpy(),
                         test_y.detach().cpu().numpy())

    def __repr__(self):
        return self.__class__.__name__
Example No. 23
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = DenseGCNConv(dataset.num_node_features, 16)
        self.conv2 = DenseGCNConv(16, dataset.num_classes)
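The snippet above shows only the constructor of a minimal two-layer dense GCN. A plausible forward pass, assuming the usual import of torch.nn.functional as F (this method is hypothetical and not part of the original example), would be:

    # Hypothetical forward for the Net above; follows the standard two-layer
    # GCN pattern (ReLU, dropout, log-softmax) with a dense adjacency matrix.
    def forward(self, x, adj):
        x = F.relu(self.conv1(x, adj))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, adj)
        return F.log_softmax(x, dim=-1)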
Example No. 24
class MultiLayerCoarsening(torch.nn.Module):
    def __init__(self, dataset, hidden, num_layers=2, ratio=0.5):
        super(MultiLayerCoarsening, self).__init__()

        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
        # self.embed_block2 = GNNBlock(hidden, hidden, dataset.num_features)

        self.num_layers = num_layers

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.coarse_block1.reset_parameters()
        self.embed_block2.reset_parameters()

        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data, epsilon=0.01, opt_epochs=100):
        x, adj, mask = data.x, data.adj, data.mask
        batch_num_nodes = data.mask.sum(-1)

        new_adjs = [adj]
        Ss = []

        x1 = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x1.mean(dim=1)]
        new_adj = adj
        coarse_x = x1
        # coarse_x, new_adj, S = self.coarse_block1(x1, adj, batch_num_nodes)
        # new_adjs.append(new_adj)
        # Ss.append(S)

        for i in range(self.num_layers):
            coarse_x, new_adj, S = self.coarse_block1(coarse_x, new_adj,
                                                      batch_num_nodes)
            new_adjs.append(new_adj)
            Ss.append(S)
        x2 = self.embed_block2(
            coarse_x, new_adj, mask, add_loop=True
        )  # do not apply ReLU here, otherwise x2 could be all zeros.
        xs.append(x2.mean(dim=1))
        opt_loss = 0.0
        for i in range(len(x)):
            x3 = self.get_nonzero_rows(x[i])
            x4 = self.get_nonzero_rows(x2[i])
            if x3.size()[0] == 0:
                continue
            if x4.size()[0] == 0:
                # opt_loss += sinkhorn_loss_default(x3, x2[i], epsilon, niter=opt_epochs).float()
                continue
            opt_loss += sinkhorn_loss_default(x3,
                                              x4,
                                              epsilon,
                                              niter=opt_epochs).float()

        return xs, new_adjs, Ss, opt_loss

    def get_nonzero_rows(self, M):  # M is a matrix
        # row_ind = M.sum(-1).nonzero().squeeze()
        # nonzero() is buggy in PyTorch 1.2.0, so we use a sort-based workaround instead.
        MM, MM_ind = torch.abs(M.sum(-1)).sort()
        N = (torch.abs(M.sum(-1)) > 0).sum()
        return M[MM_ind[:N]]

    def predict(self, xs):
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__