Code example #1
    def forward(self, x, adj, mask=None):
        #som1 = MiniSom(5,5,3, sigma=0.3, learning_rate=0.5)
        #data1 = x.reshape(-1,3)
        #data1 = data1.cpu().numpy()
        #som1.train_batch(data1,10)
        x_ = x
        x = self.gnn1_embed(x, adj, mask)
        #qnt1 = som1.quantization(data1)
        #qnt1 = torch.from_numpy(qnt1).float().to(device)
        #qnt1 = qnt1.reshape(-1,100,3)
        s = self.gnn1_pool(x_, adj, mask)
        #s = self.gnn1_pool(qnt1, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        som2 = MiniSom(5, 5, 32, sigma=0.3, learning_rate=0.5)
        data2 = x.reshape(-1, 32)
        data2 = data2.cpu().detach().numpy()
        som2.train_batch(data2, 10)
        x = self.gnn2_embed(x, adj)
        qnt2 = som2.quantization(data2)

        qnt2 = torch.from_numpy(qnt2).float().to(device)
        qnt2 = qnt2.reshape(-1, 25, 64 * 3)
        s = self.gnn2_pool(qnt2, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2
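
Code example #1 above (and examples #5 and #27 further down) interleaves a self-organizing map (minisom) with DiffPool: the dense node features are vector-quantized by the SOM and the quantized features feed the pooling network that produces the assignment matrix, while the embedding network still sees the raw features. The sketch below is a minimal, self-contained version of that quantize-then-pool pattern; the 5x5 SOM, the tensor sizes, and the plain Linear layer standing in for gnn1_pool are illustrative assumptions, not code from the repository above.

import torch
from minisom import MiniSom
from torch_geometric.nn import dense_diff_pool

batch_size, num_nodes, channels, num_clusters = 4, 25, 32, 5
x = torch.randn(batch_size, num_nodes, channels)
adj = torch.rand(batch_size, num_nodes, num_nodes)

# Fit a small SOM on the flattened node features and replace every feature
# vector by its closest SOM codebook vector (vector quantization).
som = MiniSom(5, 5, channels, sigma=0.3, learning_rate=0.5)
data = x.reshape(-1, channels).numpy()
som.train_batch(data, 10)
quantized = torch.from_numpy(som.quantization(data)).float()
quantized = quantized.reshape(batch_size, num_nodes, channels)

# The assignment matrix is computed from the quantized features (a Linear layer
# stands in for the pooling GNN), while dense_diff_pool coarsens the raw features.
pool_stub = torch.nn.Linear(channels, num_clusters)
s = pool_stub(quantized)
x_pooled, adj_pooled, link_loss, ent_loss = dense_diff_pool(x, adj, s)
print(x_pooled.shape, adj_pooled.shape)  # torch.Size([4, 5, 32]) torch.Size([4, 5, 5])
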
Code example #2
File: gnn2.py Project: vthost/DAGNN
    def forward(self, batched_data, mask=None):

        x, edge_index, edge_attr, node_depth, batch = (
            batched_data.x, batched_data.edge_index, batched_data.edge_attr,
            batched_data.node_depth, batched_data.batch)

        x = self.node_encoder(x, node_depth.view(-1,))

        x, mask = to_dense_batch(x, batch=batch)
        adj = to_dense_adj(edge_index, batch=batch)

        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)
        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)
        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        # x = self.lin2(x)
        # return self.activation(x)  #, l1 + l2, e1 + e2

        pred_list = []
        for i in range(self.max_seq_len):
            pred_list.append(self.graph_pred_linear_list[i](x))

        return pred_list
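
Code example #2 (and #30 below) starts from a sparse mini-batch (x, edge_index, batch) and converts it into the dense tensors dense_diff_pool expects using to_dense_batch and to_dense_adj. Below is a minimal sketch of just that conversion on two toy graphs; the features, edges, and the random assignment matrix are made up for illustration.

import torch
from torch_geometric.nn import dense_diff_pool
from torch_geometric.utils import to_dense_adj, to_dense_batch

x = torch.randn(5, 8)                                  # 5 nodes across two graphs, 8 features
edge_index = torch.tensor([[0, 1, 3, 4],
                           [1, 2, 4, 3]])
batch = torch.tensor([0, 0, 0, 1, 1])                  # graph id of every node

x_dense, mask = to_dense_batch(x, batch=batch)         # [2, 3, 8] features, [2, 3] node mask
adj = to_dense_adj(edge_index, batch=batch)            # [2, 3, 3] dense adjacency

s = torch.randn(x_dense.size(0), x_dense.size(1), 2)   # assign the padded nodes to 2 clusters
x_p, adj_p, link_loss, ent_loss = dense_diff_pool(x_dense, adj, s, mask)
print(x_p.shape, adj_p.shape)                          # torch.Size([2, 2, 8]) torch.Size([2, 2, 2])
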
Code example #3
    def forward(self, data):
        # x:[batch_size,num_nodes,in_channels]
        x, adj, mask = data.x, data.adj, data.mask

        # x:[batch_size, num_nodes, c_num_nodes]
        s = self.pool_block1(x, adj, mask, add_loop=True)
        # s:[batch_size, num_nodes, hidden]
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        # x:[batch_size, c_num_nodes, hidden]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)
        # adj: [batch_size,c_num_nodes, c_num_nodes]
        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            # s: [batch_size,c_num_nodes, cc_num_nodes]
            s = pool_block(x, adj)
            # x: [batch_size,c_num_nodes,hidden]
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks) - 1:
                # x: [batch_size,cc_num_nodes, hidden]
                x, adj, _, _ = dense_diff_pool(x, adj, s)
                # adj: [batch_size,cc_num_nodes,cc_num_nodes]
        x = self.jump(xs)  # x: [batch_size,len(self.embed_blocks)+1)*hidden]
        x = F.relu(self.lin1(x))  # x: [batch_size,hidden]
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)  # x: [batch_size,dataset.num_classes]
        return F.log_softmax(x, dim=-1)
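
Code example #3 (like #5, #6, #18, #19 and #24 below) alternates embedding blocks, pooling blocks, and per-level mean readouts, then merges the readouts with JumpingKnowledge. The following is a compact, self-contained sketch of a single embed/pool step of that loop; the DenseSAGEConv layers and the sizes (16 input channels, 30 nodes, 10 clusters) are illustrative assumptions rather than the repositories' actual blocks.

import torch
import torch.nn.functional as F
from torch_geometric.nn import DenseSAGEConv, JumpingKnowledge, dense_diff_pool

hidden, num_nodes, c_num_nodes = 64, 30, 10
x = torch.randn(2, num_nodes, 16)
adj = torch.rand(2, num_nodes, num_nodes)

embed_block1 = DenseSAGEConv(16, hidden)       # stand-in for the embed block
pool_block1 = DenseSAGEConv(16, c_num_nodes)   # stand-in for the pool block

s = pool_block1(x, adj)                        # [batch, num_nodes, c_num_nodes]
x = F.relu(embed_block1(x, adj))               # [batch, num_nodes, hidden]
xs = [x.mean(dim=1)]                           # readout before pooling
x, adj, _, _ = dense_diff_pool(x, adj, s)      # coarsen 30 nodes into 10 clusters
xs.append(x.mean(dim=1))                       # readout after pooling

jump = JumpingKnowledge(mode='cat')
out = jump(xs)                                 # [batch, 2 * hidden]
print(out.shape)
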
Code example #4
    def forward(self, x_input, adj, mask=None):
        #print('forward diff')
        s = self.gnn1_pool(x_input, adj=adj, mask=mask)
        s = s.view(-1, self.num_features, s.size()[-1])
        x = self.gnn1_embed(x_input, adj=adj, mask=mask)
        x = x.view(-1, self.num_features, x.size()[-1])
        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        # =============================================================================
        #         print('x')
        #         print(x.size())
        #         print('end x.size')
        #         print('adj')
        #         print(adj.size())
        #         print('adj end')
        #         print('s size')
        #         print(s.size())
        #         print('s size end')
        # =============================================================================
        s = self.gnn2_pool(x, adj=adj)
        s = s.view(-1, adj.size()[1], s.size()[-1])
        x = self.gnn2_embed(x, adj=adj)
        x = x.view(-1, x.size()[1], x.size()[-1])
        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj=adj)
        x = x.view(-1, int(0.2 * self.out_clusters), self.out_channels)
        #x = x.mean(dim=-1)#1*1 convolution
        x = self.activ(self.lin1times1(x))
        x = x.view(x.size()[0:2])

        x = self.activ(self.lin1(x))

        return x, l1 + l2, e1 + e2
Code example #5
    def forward(self, data):
        #print(data)
        x, adj, mask = data.x, data.adj, data.mask
        
        somnum = ceil(sqrt(self.num_nodes / 1.5))
        som = MiniSom(somnum, somnum, self.dimnum, sigma=0.3, learning_rate=0.5)
        tempdata = x.reshape(-1, self.dimnum)
        tempdata = tempdata.cpu().numpy()
        som.train_batch(tempdata, 15)
        qnt = som.quantization(tempdata)
        qnt = torch.from_numpy(qnt).float().to(device)
        qnt = qnt.reshape(adj.size()[0], -1, self.dimnum)
        #print(qnt.size())
        #print(adj.size()) 
        s = self.pool_block1(qnt, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            s = pool_block(x, adj)
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks) - 1:
                x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
Code example #6
File: diff_pool.py Project: snubeaver/pytorch_geo
    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask
        link_losses = 0.
        ent_losses = 0.
        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [
            torch.sum(x, 1) / (mask.sum(-1, keepdim=True).to(x.dtype) + 1e-10)
        ]
        x, adj, link_loss, ent_loss = dense_diff_pool(x, adj, s, mask)
        link_losses += link_loss
        ent_losses += ent_loss
        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            s = pool_block(x, adj)
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks):
                x, adj, link_loss, ent_loss = dense_diff_pool(x, adj, s)
                link_losses += link_loss
                ent_losses += ent_loss
        x = F.relu(self.embed_final(x, adj, add_loop=True))
        xs.append(x.mean(dim=1))
        x = self.jump(xs)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)

        return F.log_softmax(x, dim=-1), (link_losses + ent_losses)
Code example #7
    def forward(self, data, mask):
        #  x0, edge_index0, edge_weight0 = data.x, data.edge_index, data.edge_attr


        edge_index0, _ = dropout_adj(
            data.edge_index, p=self.initial_dropout_adj, force_undirected=True,
            num_nodes=data.num_nodes, training=self.training)
        x0 = F.dropout(data.x, p=self.initial_dropout_nodes, training=self.training)

        # level 0 conv  
        x0_ = self.gcn0_in(x0, edge_index0)

        # pooled 1 
        s1 = F.relu(self.conv_pool1(x0_, edge_index0))
        x1, adj1, l1, e1 = dense_diff_pool(x0_, data.adj, s1, mask)
        x1 = torch.squeeze(x1)
        
        # get edge index level 1
        adj1_sparse_tuple = dense_to_sparse(torch.squeeze(adj1))
        edge_index1 = adj1_sparse_tuple[0]
        edge_weight1 = adj1_sparse_tuple[1]
                
        # level 1 conv
        x1_ = self.gcn1_in(x1, edge_index1, edge_weight1)
        
        # pooled 2 
        s2 = self.conv_pool2(x1_, edge_index1, edge_weight1)
        s2 = F.relu(s2)
        x2, adj2, l2, e2 = dense_diff_pool(x1_, adj1, s2)
        x2 = torch.squeeze(x2)
        
        # get edge index level 2
        adj2_sparse_tuple = dense_to_sparse(torch.squeeze(adj2))
        edge_index2 = adj2_sparse_tuple[0]
        edge_weight2 = adj2_sparse_tuple[1]
        
        # level 2 conv
        x2_out = self.gcn2_in(x2, edge_index2, edge_weight2)
        x2_out_up = torch.matmul(s2, x2_out) # unpool level 2
        
        # output level 1
        x1_out = self.gcn1_out(torch.cat((x1_, x2_out_up), 1), edge_index1, edge_weight1)
        x1_out_up = torch.matmul(s1, x1_out) # unpool level 1
        
        # output level 0 
        x0_out = self.gcn0_out(torch.cat((x0_, x1_out_up), 1), edge_index0)
    
        edge_loss = l1 + e1 + l2 + e2
        
        edges = {'e1' :{'e': edge_index1, 'w': edge_weight1},
                 'e2' :{'e': edge_index2, 'w': edge_weight2}}

        output_dict = {'prediction': F.log_softmax(x0_out, dim=1), 's01': s1,
                       'edge_loss': edge_loss, 'adj1': adj1, 'edges': edges}

        return output_dict
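
Code example #7 runs sparse convolutions between pooling levels, so after every dense_diff_pool call it converts the coarsened, weighted dense adjacency back into an edge_index / edge_weight pair with dense_to_sparse. Below is a minimal single-graph sketch of that round trip; the sizes and the random assignment matrix are illustrative assumptions.

import torch
from torch_geometric.nn import dense_diff_pool
from torch_geometric.utils import dense_to_sparse

num_nodes, channels, num_clusters = 12, 16, 4
x = torch.randn(1, num_nodes, channels)
adj = torch.rand(1, num_nodes, num_nodes)
s = torch.relu(torch.randn(1, num_nodes, num_clusters))  # stand-in for conv_pool1's output

x1, adj1, l1, e1 = dense_diff_pool(x, adj, s)

# The coarsened adjacency is dense and weighted; squeeze the batch dimension and
# recover (edge_index, edge_weight) so that sparse convolutions can consume the graph.
edge_index1, edge_weight1 = dense_to_sparse(adj1.squeeze(0))
print(x1.shape, edge_index1.shape, edge_weight1.shape)
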
Code example #8
    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask

        s = self.pool_block1(x, adj, mask, add_loop=True)
        s_return = s.clone().detach()
        x = self.embed_block1(x, adj, mask, add_loop=True)
        if self.jp:
            xs = [x.mean(dim=1)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)
        adj_return = adj.clone().detach()

        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            s = pool_block(x, adj)
            x = embed_block(x, adj)
            if i < len(self.embed_blocks) - 1:
                if self.jp:
                    xs.append(x.mean(dim=1))
                x, adj, _, _ = dense_diff_pool(x, adj, s)
                
        s = self.pool_block_last(x, adj)
        x, _, _, _ = dense_diff_pool(x, adj, s)
        if self.jp:
            xs.append(x.squeeze())
            x = self.jump(xs)
            x = F.relu(self.lin1(x)) 
            # return graph embedding
            if self.ge:
                return x
            # !!! 
            x = x.squeeze().reshape(x.size(0)//self.num_patches, self.num_patches, -1).max(dim=1)[0]
            if self.dropout:
                x = F.dropout(x, p=0.2, training=self.training)
            x = self.lin2(x)
              
        else:
            # return graph embedding
            if self.ge:
                return x.squeeze()
            num_patients = x.size(0)//self.num_patches
            #!!! switch to average graph embeddings / maximum of graph embeddings
            x = x.squeeze().reshape(num_patients, self.num_patches, -1).max(dim=1)[0]
            if self.dropout:
                x = F.dropout(x, p=0.2, training=self.training)
            x = self.lin1(x)
            
                
        # multiple assignment matrices s are not handled yet
        if self.plot:
            return F.softmax(x, dim=-1), (s_return, adj_return)
        # Apply LogSoftmax first so that NLLLoss can be used when computing the loss outside; this is equivalent to using CrossEntropyLoss directly
        return F.log_softmax(x, dim=-1)
Code example #9
    def encode(self, x, adj, lengs, mask, maxNodes):

        ### 1
        hidden = self.sage1(x, adj)
        hidden = F.leaky_relu(hidden)  ## BxNxL1
        #hidden=self.bano1(hidden)
        hidden1 = self.drop3(hidden)
        """
        ### 2
        hidden=self.sage2(hidden,adj)
        hidden=self.bano2(hidden)
        hidden=F.leaky_relu(hidden) ## BxNxL2
        hidden=self.drop(hidden)
        """

        ### Pool1
        pool1 = self.poolit1(hidden)

        hidden, adj, _, _ = dense_diff_pool(hidden, adj, pool1, mask)

        ### 3
        hidden = self.sage3(hidden, adj)
        hidden = F.leaky_relu(hidden)
        #hidden=self.bano3(hidden)
        hidden = self.drop4(hidden)
        """
        ### 4 
        hidden=self.sage4(hidden,adj)
        hidden=F.leaky_relu(hidden) 
        #hidden=self.bano4(hidden)
        hidden=self.drop3(hidden)
        """

        ### Pool2
        pool2 = self.poolit2(hidden)
        hidden, adj, _, _ = dense_diff_pool(hidden, adj, pool2)

        hidden = self.sage5(hidden, adj)
        hidden = F.leaky_relu(hidden)
        #hidden=self.bano5(hidden)
        hidden = self.drop2(hidden)

        ### 5
        hidden = self.tr1(hidden)
        hidden = F.leaky_relu(hidden)

        hidden = self.tr2(hidden)
        hidden = F.leaky_relu(hidden)

        hidden = self.fin(hidden.squeeze_(2))

        return torch.sigmoid(hidden)
Code example #10
    def forward(self, x, adj, mask=None):

        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)  # , print(x.shape)
        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)  # , print(x.shape)

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)  # , print(x.shape)
        x, adj, l2, e2 = dense_diff_pool(x, adj, s)  # , print(x.shape)

        x = self.gnn3_embed(x, adj)  # , print(x.shape)

        x = x.mean(dim=1)
        return x  # print(x.shape) # x = F.relu(self.lin1(x)) # x = self.lin2(x) # return F.log_softmax(x, dim=-1), l1+l2, e1+e2
Code example #11
    def forward(self, nodes, adjs):
        edge, _ = dense_to_sparse(adjs)
        x = self.sage1(nodes, edge)
        s = self.sage2(nodes, edge)
        s = torch.reshape(s, (1, nodes.size(0), 128))

        x = torch.reshape(x, (1, nodes.size(0), 128))

        adjs = torch.reshape(adjs, (1, nodes.size(0), nodes.size(0)))

        x, edge, link_loss1, ent_loss1 = dense_diff_pool(x, adjs, s)

        x = torch.reshape(x, (128, 128))

        edge = torch.reshape(edge, (128, 128))
        #for i in range(edge.size(0)):
        #    edge[i,:] = torch.where(edge[i,:] == torch.max(edge[i,:]),torch.ones(1,128).cuda(), torch.zeros(1,128).cuda())

        edge_out = edge
        edge, _ = dense_to_sparse(edge)
        #nodes_out = x
        x = self.sage3(x, edge)
        nodes_out = torch.tanh(x)

        #x = self.sage4(nodes_out, edge)

        edge = torch.Tensor(
            convert.to_scipy_sparse_matrix(edge).todense()).cuda()
        edge = torch.reshape(edge, (1, 128, 128))

        x = torch.reshape(x, (1, 128, 2))

        s = torch.ones(1, 128, 1).cuda()
        x, edge, link_loss2, ent_loss2 = dense_diff_pool(x, edge, s)

        x = x.reshape(-1)
        link_loss = link_loss1 + link_loss2
        ent_loss = ent_loss1 + ent_loss2
        #print(x.shape, edge.shape)
        #print(asd)
        """ x_out = torch.reshape(x, (128,2))
        edge = torch.reshape(edge, (128,128))
        for i in range(edge.size(0)):
            edge[i,:] = torch.where(edge[i,:] == torch.max(edge[i,:]),torch.ones(1,128).cuda(), torch.zeros(1,128).cuda())
        edge, _ = dense_to_sparse(edge)
        x = self.sage3(x_out, edge)
        x = torch.reshape(x, (128,)) """

        return x, link_loss, ent_loss, nodes_out, edge_out
Code example #12
    def forward(self, x, adj, mask=None):
        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)

        return x, l1 + l2, e1 + e2
Code example #13
File: gcn_hpool.py Project: anonymous429/AGMC
    def forward(self,
                embedding_tensor,
                pool_x_tensor,
                edge_index,
                adj,
                embedding_mask=None):

        pooling_tensor = self.gcn_forward(pool_x_tensor, adj,
                                          self.pool_conv_first,
                                          self.pool_conv_block,
                                          self.pool_conv_last, embedding_mask)
        pooling_tensor = F.softmax(self.pool_linear(pooling_tensor), dim=-1)

        if embedding_mask is not None:
            pooling_tensor = pooling_tensor * embedding_mask

        x_pool, adj_pool, _, _ = dense_diff_pool(embedding_tensor, adj,
                                                 pooling_tensor)

        embedding_tensor = self.gcn_forward(
            x_pool,
            adj_pool,
            self.embed_conv_first,
            self.embed_conv_block,
            self.embed_conv_last,
        )
        output, _ = torch.max(embedding_tensor, dim=1)

        self.pool_tensor = pooling_tensor
        return output, adj_pool, x_pool, embedding_tensor
Code example #14
    def forward(self, x, adj, mask=None):
        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = self.lin1(x).relu()
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2
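
Many of the models above (examples #1, #14, #16 and #17, among others) return the two auxiliary dense_diff_pool terms, the link prediction loss l1 + l2 and the entropy loss e1 + e2, alongside the class log-probabilities. Below is a sketch of how a training step could consume those three return values; model, loader, optimizer and device are assumed to exist, and adding the auxiliary terms unweighted follows the common DiffPool recipe (any weighting is a tuning choice).

import torch.nn.functional as F

def train_epoch(model, loader, optimizer, device):
    model.train()
    total_loss = 0.0
    for data in loader:
        data = data.to(device)
        optimizer.zero_grad()
        out, link_loss, ent_loss = model(data.x, data.adj, data.mask)
        # NLL loss on the log-softmax output plus the DiffPool auxiliary terms.
        loss = F.nll_loss(out, data.y.view(-1)) + link_loss + ent_loss
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * data.num_graphs
    return total_loss / len(loader.dataset)
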
Code example #15
File: diffpool.py Project: ozen/ml_commons
    def encode(self, data):
        x, adj, mask = data.x, data.adj, data.mask

        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        return x
Code example #16
    def forward(self, data):
        x = data.x
        adj = data.adj
        mask = data.mask

        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)
        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)
        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2
Code example #17
    def forward(self, x, adj, mask=None):
        print("//////////////first pool///////////////")
        s = self.gnn1_pool(x, adj, mask)
        print('size of s: ', s.size())
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)
        print("//////////////second pool///////////////")
        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2
Code example #18
    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask

        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for embed, pool in zip(self.embed_blocks, self.pool_blocks):
            s = pool(x, adj)
            x = F.relu(embed(x, adj))
            xs.append(x.mean(dim=1))
            x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
Code example #19
    def forward(self, x, adj, mask):
        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))

        xs = [self.att(x, mask)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for i, (embed,
                pool) in enumerate(zip(self.embed_blocks, self.pool_blocks)):
            s = pool(x, adj)
            x = F.relu(embed(x, adj))
            xs.append(self.att(x))
            if i < (len(self.embed_blocks) - 1):
                x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return x
Code example #20
def test_dense_diff_pool():
    batch_size, num_nodes, channels, num_clusters = (2, 20, 16, 10)
    x = torch.randn((batch_size, num_nodes, channels))
    adj = torch.rand((batch_size, num_nodes, num_nodes))
    s = torch.randn((batch_size, num_nodes, num_clusters))
    mask = torch.randint(0, 2, (batch_size, num_nodes), dtype=torch.uint8)

    x, adj, reg = dense_diff_pool(x, adj, s, mask)
    assert x.size() == (2, 10, 16)
    assert adj.size() == (2, 10, 10)
    assert reg.item() >= 0
Code example #21
    def forward(self, x, adj, mask=None):
        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, reg = dense_diff_pool(x, adj, s, mask)

        x = self.gnn2_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), reg
Code example #22
    def forward(self, data):
        x, adj = data.x, data.adj

        s = self.gnn1_pool(x, adj)
        x = self.gnn1_embed(x, adj)
        x, adj, reg1 = dense_diff_pool(x, adj, s, data.mask)
        x = self.gnn2_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), reg1
Code example #23
File: test_diff_pool.py Project: YukeWang96/pyG-orig
def test_dense_diff_pool():
    batch_size, num_nodes, channels, num_clusters = (2, 20, 16, 10)
    x = torch.randn((batch_size, num_nodes, channels))
    adj = torch.rand((batch_size, num_nodes, num_nodes))
    s = torch.randn((batch_size, num_nodes, num_clusters))
    mask = torch.randint(0, 2, (batch_size, num_nodes), dtype=torch.bool)

    x, adj, link_loss, ent_loss = dense_diff_pool(x, adj, s, mask)
    assert x.size() == (2, 10, 16)
    assert adj.size() == (2, 10, 10)
    assert link_loss.item() >= 0
    assert ent_loss.item() >= 0
Code example #24
    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask

        s = self.pool_block1(x, adj, mask)
        x = F.relu(self.embed_block1(x, adj, mask))
        xs = [x.mean(dim=1)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            s = pool_block(x, adj)
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks) - 1:
                x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
Code example #25
    def forward(self, data):
        x, adj = data.x, data.adj
        # mask = data.mask.unsqueeze(-1).to(torch.float)

        s = self.gnn1_pool(x, adj)
        x = self.gnn1_embed(x, adj)
        x, adj, reg1 = dense_diff_pool(x, adj, s)
        x = self.gnn2_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), reg1
Code example #26
    def forward(self, x, adj, mask=None):

        s = self.gnn1_pool(x, adj, mask)
        x = self.gnn1_embed(x, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        #print('time for gnn2')

        s = self.gnn2_pool(x, adj)
        x = self.gnn2_embed(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))

        x = self.lin2(x)

        return x, l1 + l2, e1 + e2
Code example #27
    def forward(self, x, adj, mask=None):
        som1 = MiniSom(8, 8, 3, sigma=0.3, learning_rate=0.5)
        qnt = []
        for i in range(x.size()[0]):
            data1 = x[i].cpu().numpy()
            som1.train_random(data1, 10)
            # np.append returns a new array instead of extending in place,
            # so collect the per-graph quantizations in a list and stack them.
            qnt.append(som1.quantization(data1))

        x = self.gnn1_embed(x, adj, mask)
        qnt = torch.from_numpy(np.stack(qnt)).float().to(device)

        s = self.gnn1_pool(qnt, adj, mask)

        x, adj, l1, e1 = dense_diff_pool(x, adj, s, mask)

        #som2 = MiniSom(5,5,3, sigma=0.3, learning_rate=0.5)
        #data2 = x.reshape(-1,3)
        #data2 = data2.cpu().detach().numpy()
        #som2.train_batch(data2,10)
        x = self.gnn2_embed(x, adj)
        #qnt2 = som2.quantization(data2)

        #qnt2 = torch.from_numpy(qnt2).float().to(device)
        #qnt2 = qnt2.reshape(-1,25,64*3)
        s = self.gnn2_pool(x, adj)

        x, adj, l2, e2 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2
Code example #28
    def forward(self, x, adj, s, short_cut=False):
        """
        Returns the pooled node feature matrix, the coarsened adjacency matrix and
        the auxiliary link prediction objective.

        Args:
            adj: Adjacency matrix with shape [num_nodes, num_nodes]
        """
        out_x, out_adj, reg = dense_diff_pool(x, adj, s)
        out_adj = out_adj.squeeze(0) if out_adj.dim() == 3 else out_adj
        out_x = out_x.squeeze(0) if out_x.dim() == 3 else out_x
        if not short_cut:
            out_edge_index, out_edge_attr = adj_to_edge_index(out_adj.detach())
        else:
            out_edge_index, out_edge_attr = None, None

        return out_x, out_edge_index, out_edge_attr, out_adj, reg
Code example #29
    def forward(self, x, adj, mask=None):

        if self.pooling_type == 'gnn':
            s = self.gnn_pool(x, adj, mask)
        else:
            if self.invariant:
                s = self.rm.unsqueeze(dim=0).expand(x.size(0), -1, -1)
                s = s.to(x.device)
                s = x.detach().matmul(s)
            else:
                s = self.rm[:x.size(1), :].unsqueeze(dim=0)
                s = s.expand(x.size(0), -1, -1)
                s = s.to(x.device)
        x = self.gnn_embed(x, adj, mask)

        x, adj, l, e = dense_diff_pool(x, adj, s, mask)
        return x, adj, l, e
Code example #30
File: model.py Project: yanglan0225/s3net
    def forward(self, data):
        seq_len = data['s']

        inputs = data['c'].reshape((len(seq_len), -1, 3))

        inputs = inputs.reshape((len(seq_len), -1, 3))
        _, idx_sort = torch.sort(seq_len, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)
        input_x = inputs.index_select(0, Variable(idx_sort))
        length_list = list(seq_len[idx_sort])
        input_x = input_x.float()
        pack = nn.utils.rnn.pack_padded_sequence(input_x,
                                                 length_list,
                                                 batch_first=True)
        out, state = self.lstm(pack)
        del state
        un_padded = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
        un_padded = un_padded[0].index_select(0, Variable(idx_unsort))
        out = self.dropout(un_padded)
        feature = self.fc(out)
        batch_feature = None
        del out, pack, un_padded
        for i in range(data.num_graphs):
            emptyfeature = torch.zeros((1, self.fea_dim)).to(device)
            fea = torch.cat((feature[i][:(seq_len[i])], emptyfeature))
            if batch_feature is None:
                batch_feature = fea
            else:
                batch_feature = torch.cat((batch_feature, fea))

        data['x'] = batch_feature
        x, edge_index = data.x, data.edge_index
        dense_x = utils.to_dense_batch(x, batch=data.batch)
        x = dense_x[0]
        adj = utils.to_dense_adj(data.edge_index, batch=data.batch)
        s = self.gnn1_pool(x, adj)
        x = self.gnn1_embed(x, adj)
        x, adj, l1, e1 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x1 = self.lin1(x)
        x = F.relu(x1)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), x1, l1, e1