Example #1
	def forward(self, data):
		# x has shape [N, in_channels]
		# edge_index has shape [2, E]

		x          = data.x
		edge_index = data.edge_index
		batch      = data.batch
		edge_attr  = data.edge_attr

		x = F.relu(self.conv1(x, edge_index))
		x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
		x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = F.relu(self.conv2(x, edge_index))
		x, edge_index, edge_attr, batch, _, _ = self.pool2(x, edge_index, edge_attr, batch)
		x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = F.relu(self.conv3(x, edge_index))
		x, edge_index, edge_attr, batch, _, _ = self.pool3(x, edge_index, edge_attr, batch)
		x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = x1 + x2 + x3

		x = F.relu(self.lin1(x))
		x = self.lin2(x)

		x = x.reshape(-1)
		return x
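
The catalog only shows forward passes. A minimal constructor this first example could run against, assuming torch_geometric's GraphConv and TopKPooling (the class name and layer sizes are illustrative, not the original project's):

import torch
import torch.nn.functional as F
from torch_geometric.nn import GraphConv, TopKPooling
from torch_geometric.nn import global_max_pool as gmp, global_mean_pool as gap


class Net(torch.nn.Module):
    def __init__(self, in_channels, hidden=128):
        super().__init__()
        # TopKPooling returns (x, edge_index, edge_attr, batch, perm, score),
        # matching the six-value unpacking in the forward pass above.
        self.conv1 = GraphConv(in_channels, hidden)
        self.pool1 = TopKPooling(hidden, ratio=0.8)
        self.conv2 = GraphConv(hidden, hidden)
        self.pool2 = TopKPooling(hidden, ratio=0.8)
        self.conv3 = GraphConv(hidden, hidden)
        self.pool3 = TopKPooling(hidden, ratio=0.8)
        # gmp and gap outputs are concatenated, so the readout is 2 * hidden wide.
        self.lin1 = torch.nn.Linear(2 * hidden, hidden)
        self.lin2 = torch.nn.Linear(hidden, 1)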
Example #2
    def forward(self, x, edge_index, batch, inference=False):
        x = self.item_embedding(x)
        x = x.squeeze(1)

        x = F.leaky_relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.leaky_relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.leaky_relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3

        x = self.lin1(x)
        x = self.bn1(self.act1(x))
        x = self.lin2(x)
        x = self.bn2(self.act2(x))
        x = F.dropout(x, p=0.5, training=self.training)

        x = self.lin3(x).squeeze(1)

        if inference:
            x_out = x1 + x2 + x3
            return x, x_out
        else:
            return x
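
A hedged usage sketch for the `inference` flag above: alongside the prediction, the model returns the summed readout `x1 + x2 + x3` as a graph-level embedding. Variable names here are illustrative, and `data` is assumed to be a torch_geometric Batch:

model.eval()
with torch.no_grad():
    pred, graph_emb = model(data.x, data.edge_index, data.batch, inference=True)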
Example #3
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_attr = None

        x = F.relu(self.conv1(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr,
                                                     batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr,
                                                     batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_attr))
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(x1) + F.relu(x2) + F.relu(x3)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin3(x)
        pred = F.log_softmax(x, dim=-1)

        if data.y is not None:
            loss = F.nll_loss(pred, data.y)
            return pred, loss
        return pred, None
Example #4
File: models.py Project: hujilin1229/GMI
    def forward(self, data, neg_num, samp_bias1, samp_bias2):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_attr = None

        original_x = x
        x = F.relu(self.conv1(x, edge_index, edge_attr))
        # inputs: x, edge_index, edge_attr, batch, h, neg_num, samp_bias1, samp_bias2
        x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr, batch, original_x,
                                                     neg_num, samp_bias1, samp_bias2)

        original_x = x
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr, batch, original_x,
                                                     neg_num, samp_bias1, samp_bias2)

        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_attr))
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(x1) + F.relu(x2) + F.relu(x3)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x
Example #5
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        if isinstance(edge_index, tuple):
            edge_index = torch.stack(edge_index)
        edge_attr = None

        x = F.relu(self.conv1(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr,
                                                     batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr,
                                                     batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_attr))
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(x1) + F.relu(x2) + F.relu(x3)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        pred = self.lin3(x)

        return pred
Example #6
    def forward(self, x, edge_index, batch, edge_attr):
        # edge_attr = edge_attr.squeeze()
        # edge_index, edge_attr = self.augment_adj(edge_index, edge_attr, x.size(0))
        x = self.conv1(x, edge_index)
        if x.norm(p=2, dim=-1).min() == 0:
            print('x is zeros')
        x, edge_index, edge_attr, batch, perm, score1 = self.pool1(
            x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        edge_attr = edge_attr.squeeze()
        edge_index, edge_attr = self.augment_adj(edge_index, edge_attr,
                                                 x.size(0))

        x = self.conv2(x, edge_index)
        x, edge_index, edge_attr, batch, perm, score2 = self.pool2(
            x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = torch.cat([x1, x2], dim=1)  # concatenate the two readout scales

        x = self.bn4(F.relu(self.fc1(x)))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.bn5(F.relu(self.fc2(x)))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.log_softmax(self.fc3(x), dim=-1)

        return x, score1, score2
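
`augment_adj` is not shown in this example. One plausible definition, modeled on `GraphUNet.augment_adj` from torch_geometric (an assumption, not this project's actual code): square the adjacency so nodes that survive pooling stay connected through two-hop paths.

from torch_geometric.utils import add_self_loops, sort_edge_index
from torch_sparse import spspmm

    def augment_adj(self, edge_index, edge_weight, num_nodes):
        # Self-loops keep each node reachable from itself; the sparse product
        # A @ A then links every pair of nodes sharing a two-hop path.
        edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                                 num_nodes=num_nodes)
        edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,
                                                  num_nodes)
        edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                         edge_weight, num_nodes, num_nodes,
                                         num_nodes)
        return edge_index, edge_weight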
Example #7
    def forward(self, x, edge_index, batch=None):

        if batch is None:
            batch = torch.zeros(x.shape[0], dtype=torch.long, device=x.device)

        x = F.relu(self.conv1(x, edge_index))
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        node_embs = x

        x = x1 + x2 + x3

        graph_emb = x

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.p, training=self.training)
        x = F.relu(self.lin2(x))

        x = self.lin3(x)

        return x, (node_embs.detach(), graph_emb.detach())
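
A short usage sketch (names are ours, and a classification objective is assumed): the detached tuple can feed visualization or probing code without gradients flowing back into the encoder.

logits, (node_embs, graph_emb) = model(data.x, data.edge_index, data.batch)
loss = F.cross_entropy(logits, data.y)  # gradients flow through logits only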
Example #8
    def forward(self, data):
        # Node features stay float; edge indices and batch vectors stay long.
        x, edge_index, batch = data.x.float(), data.edge_index.long(), data.batch.long()

        x = F.relu(self.conv1(x, edge_index))

        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))

        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))

        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3

        x = self.lin1(x)
        x = self.act1(x)
        x = self.lin2(x)
        x = self.act2(x)
        x = F.dropout(x, p=0.2, training=self.training)

        x = torch.sigmoid(self.lin3(x)).squeeze(1)

        return x
Example #9
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        if self.training:
            num_graphs = batch.size(0) // self.node_per_graph
            mask, torchmask = random_drop_node(self.node_per_graph, num_graphs, 0.75, 0.9)
            x = x[mask]
            batch = batch[mask]
            edge_index, _ = subgraph(torchmask, edge_index, relabel_nodes=True)

        x = self.linprev(x, edge_index)

        x = F.relu(self.conv1(x, edge_index))
        x = self.bn1(x)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv2(x, edge_index))
        x = self.bn2(x)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv3(x, edge_index))
        x = self.bn3(x)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv4(x, edge_index))
        x = self.bn4(x)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = torch.cat([x1,x2,x3,x4], dim=1)
        x = self.mlp(x)
        x = F.log_softmax(x, dim=-1)

        return x
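
`random_drop_node` is not defined in this snippet. A guess at its contract from the call site (the signature and drop bounds come from above; the implementation is ours): return node indices usable both for `x[mask]` and for `subgraph(..., relabel_nodes=True)`.

import torch

def random_drop_node(node_per_graph, num_graphs, low, high):
    # Every graph in the batch has node_per_graph nodes; keep a random
    # fraction drawn uniformly from [low, high] in each one.
    kept = []
    for g in range(num_graphs):
        ratio = low + (high - low) * torch.rand(1).item()
        k = max(1, int(node_per_graph * ratio))
        perm = torch.randperm(node_per_graph)[:k] + g * node_per_graph
        kept.append(perm)
    mask = torch.cat(kept).sort().values
    # subgraph() accepts an index tensor, so the same tensor serves twice.
    return mask, mask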
Example #10
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x, 100, batch)
        edge_index, _ = dropout_adj(edge_index, p=0.3)

        x = F.leaky_relu(self.conv1(x, edge_index))
        x1 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = F.leaky_relu(self.conv2(x, edge_index))
        x2 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = F.leaky_relu(self.conv3(x, edge_index))
        x3 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)

        x = torch.cat([x1, x2, x3], dim=1)

        x = self.batchnorm1(x)

        x = F.leaky_relu(self.linear1(x))

        x = self.drop(x)
        x = F.leaky_relu(self.linear2(x))
        x = F.leaky_relu(self.linear3(x))
        x = F.leaky_relu(self.linear4(x))
        x = F.leaky_relu(self.linear5(x))

        x = self.out(x)
        if self.classification:
            x = torch.sigmoid(x)
        x = x.view(-1)

        return x
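
Unlike the other examples, this one ignores any precomputed edges: `knn_graph` connects each point to its 100 nearest neighbors in feature space, and `dropout_adj` drops 30% of those edges on every pass as regularization. A dummy call (shapes are illustrative):

import torch
from torch_geometric.data import Data

# 500 points with 16 features in a single graph; no edge_index is needed
# because forward() builds one with knn_graph and thins it with dropout_adj.
data = Data(x=torch.randn(500, 16))
data.batch = torch.zeros(500, dtype=torch.long)
out = model(data)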
Example #11
File: models.py Project: imSeaton/Lookhops
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_weight = None

        x_out = 0
        for idx in range(self.num_layers - 1):
            self.edges.append(edge_index)
            x, x_score = self.convs[idx](x, edge_index, edge_weight, batch)
            x = F.relu(x)  # [num_nodes, in_dim] -> [num_nodes, hidden]
            x, edge_index, edge_weight, batch = self.pool(
                x, x_score, edge_index, edge_weight, batch)  # fewer nodes kept
            x_ = torch.cat([gmp(x, batch), gap(x, batch)],
                           dim=1)  # [num_graphs, 2 * hidden]
            x_out += F.relu(x_)

        x, x_score = self.convs[self.num_layers - 1](x, edge_index,
                                                     edge_weight, batch)
        x = F.relu(x)
        x_ = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x_out += F.relu(x_)

        x = F.relu(self.lin1(x_out))
        x = F.relu(self.lin2(x))
        x = F.log_softmax(self.lin3(x), dim=-1)
        return x
Example #12
    def forward(self, **kwargs):
        data = kwargs['x_grph']
        data = NormalizeFeaturesV2()(data)
        data = NormalizeEdgesV2()(data)
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool1(
            x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool2(
            x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool3(
            x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        features = F.relu(self.lin2(x))
        out = self.lin3(features)
        if self.act is not None:
            out = self.act(out)

            if isinstance(self.act, nn.Sigmoid):
                out = out * self.output_range + self.output_shift

        return features, out
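
When `self.act` is a sigmoid, the final affine step rescales the (0, 1) output into a task-specific interval. A hedged constructor fragment (the interval is a placeholder, not the project's value):

        # e.g. map sigmoid outputs into (-3, 3):
        self.act = nn.Sigmoid()
        self.output_range = nn.Parameter(torch.tensor([6.0]), requires_grad=False)
        self.output_shift = nn.Parameter(torch.tensor([-3.0]), requires_grad=False)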
Example #13
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.item_embedding(x)  # initialize node embeddings from item IDs
        x = x.squeeze(1)

        x = F.relu(self.conv1(x, edge_index))  # SAGEConv convolution
        x, edge_index, _, batch, _ = self.pool1(
            x, edge_index, None, batch)  # TopK pooling: keep kN nodes (k=ratio), similar in spirit to dropout
        x1 = torch.cat([gmp(x, batch), gap(x, batch)],
                       dim=1)  # shape: [*, 128 x 2 = 256]

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3  # shape:[*, 128x2=256]

        x = self.lin1(x)  # shape:[*, 128]
        x = self.act1(x)
        x = self.lin2(x)  # shape:[*, 64]
        x = self.act2(x)
        x = F.dropout(x, p=0.5, training=self.training)

        x = torch.sigmoid(self.lin3(x)).squeeze(1)  # shape:[*, 1]

        return x
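
The lookup above assumes `data.x` holds integer item IDs of shape [N, 1]; `squeeze(1)` then flattens the result back to [N, 128], matching the shape comments. A minimal layer definition (`num_items` is an assumed vocabulary size):

        self.item_embedding = torch.nn.Embedding(num_embeddings=num_items,
                                                 embedding_dim=128)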
Example #14
    def forward(self, x, edge_index, batch=None):
        if len(x.shape) == 3:  # needed for EXAI (explainability); the batch must contain a single molecule
            data_list = []
            for x_i, edge_index_i in zip(x, edge_index):
                data_list.append(Data(x=x_i, edge_index=edge_index_i))
            data = Batch.from_data_list(data_list).to(self.device)
            x = data.x
            batch = data.batch
            edge_index = data.edge_index
        shape = x.shape
        x = x.reshape(-1, shape[-1])
        x = self.atom_embedding(x)
        x = x.reshape(shape)
        x = x.squeeze(1)

        x = F.relu(self.conv1(x, edge_index))

        x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))

        x, edge_index, batch, _ = self.pool2(x, edge_index, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = x1 + x2
        x = self.linear(x)
        #x = torch.sigmoid(x)
        x = x.squeeze(1)
        return x
Example #15
File: gnn2.py Project: vthost/DAGNN
    def forward(self, batched_data):
        x, edge_index, edge_attr, node_depth, batch = (
            batched_data.x, batched_data.edge_index, batched_data.edge_attr,
            batched_data.node_depth, batched_data.batch)

        x = self.node_encoder(x, node_depth.view(-1,))

        xs = []
        x = F.relu(self.conv1(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
        xs += [torch.cat([gmp(x, batch), gap(x, batch)], dim=1)]

        for i in range(self.num_layers-1):
            x = F.relu(self.convs[i](x, edge_index, edge_attr))
            x, edge_index, edge_attr, batch, _, _ = self.pools[i](x, edge_index, edge_attr, batch)
            xs += [torch.cat([gmp(x, batch), gap(x, batch)], dim=1)]

        x = xs[0]
        for i in range(1, len(xs)):
            x += xs[i]

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))

        pred_list = []
        for i in range(self.max_seq_len):
            pred_list.append(self.graph_pred_linear_list[i](x))

        return pred_list
Example #16
    def forward(self, x, edge_index, batch):
        # x, edge_index, batch = data.x, data.edge_index, data.batch

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv4(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool4(x, edge_index, None, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv5(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool5(x, edge_index, None, batch)
        x5 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3 + x4 + x5

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.softmax(self.lin3(x), dim=-1)

        return x
Example #17
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x = F.relu(self.conv1(x, edge_index))
        # x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        # x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x1 = gmp(x, batch)

        x = F.relu(self.conv2(x, edge_index))
        # x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        # x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x2 = gmp(x, batch)

        x = F.relu(self.conv3(x, edge_index))
        # x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        # x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x3 = gmp(x, batch)

        x = x1 + x2 + x3

        # x = F.relu(self.lin1(x))
        # x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x
Example #18
    def forward(self, data):
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
        x = self.embedding_layer(x)
        # edge_attr = self.embedding_layer(edge_attr)

        x = F.relu(self.conv1(x, edge_index))
        x_local = self.l1(x)
        x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool2(x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, edge_attr, batch, _, _ = self.pool3(x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv4(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool4(x, edge_index, edge_attr, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x_global = x1 + x2 + x3 + x4
        x = torch.cat((x_local, x_global.repeat(x_local.shape[0], 1)), dim=1)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.3, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.log_softmax(self.lin3(x), dim=-1)
        return x
Example #19
    def forward(self, x, edge_index, batch, edge_attr):
        edge_attr = edge_attr.squeeze()
        edge_index, edge_attr = self.augment_adj(edge_index, edge_attr,
                                                 x.size(0))
        x = self.conv1(x, edge_index, edge_attr)
        x, edge_index, edge_attr, batch, perm, score1 = self.pool1(
            x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        edge_attr = edge_attr.squeeze()
        edge_index, edge_attr = self.augment_adj(edge_index, edge_attr,
                                                 x.size(0))
        x = self.conv2(x, edge_index, edge_attr)
        x, edge_index, edge_attr, batch, perm, score2 = self.pool2(
            x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = torch.cat([x1, x2], dim=1)

        x = F.relu(self.bn4(self.fc1(x)))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.bn5(self.fc2(x)))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.log_softmax(self.fc3(x), dim=-1)

        return x, score1, score2
Example #20
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.item_embedding(x)
        x = x.squeeze(1)        

        x = F.relu(self.conv1(x, edge_index))

        x, edge_index, _, batch, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = F.relu(self.conv2(x, edge_index))
     
        x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))

        x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3

        x = self.lin1(x)
        x = self.act1(x)
        x = self.lin2(x)
        x = self.act2(x)      
        x = F.dropout(x, p=0.5, training=self.training)

        x = torch.sigmoid(self.lin3(x)).squeeze(1)

        return x
Example #21
    def forward(self, data):
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
        x = self.embedding_layer(x)
        edge_attr = self.embedding_layer(edge_attr)

        x = F.relu(self.conv1(x, edge_index, edge_weight=edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool1(
            x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_weight=edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool2(
            x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_weight=edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool3(
            x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv4(x, edge_index, edge_weight=edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pool4(
            x, edge_index, edge_attr, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3 + x4

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.3, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x
Example #22
	def forward(self, data):
		x, edge_index, batch, weight = data.x, data.edge_index, data.batch, data.weight

		x = self.gbn1(F.relu(self.conv1(x, edge_index, weight)))
		x, edge_index, weight, batch, _, _ = self.pool1(x, edge_index, weight, batch)
		x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = self.gbn2(F.relu(self.conv2(x, edge_index, weight)))
		x, edge_index, weight, batch, _, _ = self.pool2(x, edge_index, weight, batch)
		x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = self.gbn3(F.relu(self.conv3(x, edge_index, weight)))
		x, edge_index, weight, batch, _, _ = self.pool3(x, edge_index, weight, batch)
		x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = self.gbn4(F.relu(self.conv4(x, edge_index, weight)))
		x, edge_index, _, batch, _, _ = self.pool4(x, edge_index, weight, batch)
		x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

		x = torch.cat([x1, x2, x3, x4], dim=1)

		
		# x = self.set2set(x, batch)
		x = F.relu(self.lin1(x))
		x = self.bn1(x)
		x = F.dropout(x, p=0.2, training=self.training)
		
		x = F.relu(self.lin2(x))
		x = self.bn2(x)
		#x = F.dropout(x, p=.5, training = self.training)
		x = self.lin3(x)
		return x.view(-1)
Example #23
File: topkpool.py Project: zhuyawen/AutoGL
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        graph_feature = data.gf

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3
        x = torch.cat([x, graph_feature], dim=-1)
        x = self.lin1(x)
        x = activate_func(x, self.args["act"])
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        x = activate_func(x, self.args["act"])
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x
Example #24
    def forward(self, batch):
        x = batch.x
        edge_index = batch.edge_index
        batch_h = batch.batch

        x = F.relu(self.conv_layer_1(x, edge_index))
        x, edge_index, _, batch_h, _ = self.pool_layer_1(
            x, edge_index, None, batch_h)
        out = torch.cat([gmp(x, batch_h), gap(x, batch_h)], dim=1)

        x = F.relu(self.conv_layer_2(x, edge_index))
        x, edge_index, _, batch_h, _ = self.pool_layer_2(
            x, edge_index, None, batch_h)
        out += torch.cat([gmp(x, batch_h), gap(x, batch_h)], dim=1)

        x = F.relu(self.conv_layer_3(x, edge_index))
        x, edge_index, _, batch_h, _ = self.pool_layer_3(
            x, edge_index, None, batch_h)
        out += torch.cat([gmp(x, batch_h), gap(x, batch_h)], dim=1)

        out = F.relu(self.lin_layer_1(out))
        out = F.dropout(out, p=self.dropout, training=self.training)
        out = F.relu(self.lin_layer_2(out))
        out = F.log_softmax(self.lin_layer_3(out), dim=-1)
        if batch.y is not None:
            loss = F.nll_loss(out, batch.y)
            return out, loss
        return out, None
Example #25
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_attr = None

        x = F.relu(self.conv1(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _ = self.pool1(x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _ = self.pool2(x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _ = self.pool3(x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(x1) + F.relu(x2) + F.relu(x3)

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x
Example #26
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x, 100, batch)
        edge_index, _ = dropout_adj(edge_index, p=0.3)

        x = F.leaky_relu(self.conv1(x, edge_index))
        x1 = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)
        convlist = [x1]

        for f in range(self.conv_depth - 1):
            x = F.leaky_relu(self.convfkt[f](x, edge_index))
            xi = torch.cat([gap(x, batch), gmp(x, batch)], dim=1)
            convlist.append(xi)

        x = torch.cat(convlist, dim=1)

        x = self.batchnorm1(x)
        for g in range(self.lin_depth):
            x = F.leaky_relu(self.linearfkt[g](x))
            # g = 1, 4, 7, ... and at least two more layers still follow
            if (g - 1) % 3 == 0 and self.lin_depth - 1 > g:
                x = self.drop[g](x)

        x = self.out(x)
        if self.classification:
            x = torch.sigmoid(x)
        x = x.view(-1)

        return x
Example #27
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        edge_attr = None
        if x is None:
            x = torch.ones((batch.shape[0], 1), device=batch.device)
        xs = []
        batches = []
        
        x = F.relu(self.conv1(x, edge_index, edge_attr))
        xs.append(x)
        batches.append(batch)
        x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr, batch)
        xs.append(x)
        batches.append(batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        
        x = F.relu(self.conv2(x, edge_index, edge_attr))
        xs.append(x)
        batches.append(batch)
        x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr, batch)
        xs.append(x)
        batches.append(batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_attr))
        xs.append(x)
        batches.append(batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        summary = F.relu(x1) + F.relu(x2) + F.relu(x3)
        xs = [xs[i] for i in self.args.takeout]
        batches = [batches[i] for i in self.args.takeout]
        return summary, xs, batches
Example #28
    def forward(self, data):
        x, edge_index, batch = data
        edge_attr = None
        edge_index = edge_index.transpose(0, 1)

        x = self.relu(self.conv1(x, edge_index, edge_attr), negative_slope=0.1)
        x, edge_index, edge_attr, batch, _, _ = self.pool1(
            x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = self.relu(self.conv2(x, edge_index, edge_attr), negative_slope=0.1)
        x, edge_index, edge_attr, batch, _, _ = self.pool2(
            x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = self.relu(self.conv3(x, edge_index, edge_attr), negative_slope=0.1)
        x, edge_index, edge_attr, batch, _, _ = self.pool3(
            x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x_information_score = self.calc_information_score(x, edge_index)
        score = torch.sum(torch.abs(x_information_score), dim=1)

        x = self.relu(x1, negative_slope=0.1) + self.relu(
            x2, negative_slope=0.1) + self.relu(x3, negative_slope=0.1)

        graph_emb = x
        x = self.relu(self.lin1(x), negative_slope=0.1)
        x = self.relu(self.lin2(x), negative_slope=0.1)
        x = self.lin3(x)

        return x, score.mean(), graph_emb
Example #29
    def forward(self, data, conv_train=False):

        x = data.x

        edge_index = data.edge_index
        x1 = self.norm1(self.act1(self.conv1(x, edge_index)))
        x = self.dropout(x1)

        x2 = self.norm2(self.act2(self.conv2(x, edge_index)))
        x = self.dropout(x2)

        x3 = self.norm3(self.act3(self.conv3(x, edge_index)))

        h_conv = torch.cat([x1, x2, x3], dim=1)

        # compute GNN-only output

        conv_batch_avg = gap(h_conv, data.batch)
        conv_batch_add = gadd(h_conv, data.batch)
        conv_batch_max = gmp(h_conv, data.batch)

        h_GNN = torch.cat([conv_batch_avg, conv_batch_add, conv_batch_max],
                          dim=1)

        gnn_out = self.out_fun(self.lin_GNN(h_GNN))

        if conv_train:
            return None, None, gnn_out

        #SOM
        _, _, som_out_1 = self.som1(x1)
        _, _, som_out_2 = self.som2(x2)
        _, _, som_out_3 = self.som3(x3)

        #READOUT
        h1 = self.out_norm1(self.act1(self.out_conv1(som_out_1, edge_index)))
        h2 = self.out_norm2(self.act2(self.out_conv2(som_out_2, edge_index)))
        h3 = self.out_norm3(self.act3(self.out_conv3(som_out_3, edge_index)))

        som_out_conv = torch.cat([h1, h2, h3], dim=1)

        som_batch_avg = gap(som_out_conv, data.batch)
        som_batch_add = gadd(som_out_conv, data.batch)
        som_batch_max = gmp(som_out_conv, data.batch)

        h = torch.cat([som_batch_avg, som_batch_add, som_batch_max], dim=1)

        h = self.out_norm4(h)

        h = self.out_act(self.lin_out1(h))
        h = self.dropout(h)

        h = self.out_act(self.lin_out2(h))
        h = self.dropout(h)

        h = self.out_fun(self.lin_out3(h))

        return h, h_conv, gnn_out
Example #30
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x1 = F.relu(self.conv1(x, edge_index))

        x2 = F.relu(self.conv2(x1, edge_index))
        x2 = x1 + x2

        x3 = F.relu(self.conv3(x2, edge_index))
        x3 = x2 + x3

        x = F.relu(self.conv4(x3, edge_index))
        x, edge_index, _, batch, _ = self.pool1(x, edge_index, None, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        #x4 = x3 + x4

        x5 = F.relu(self.conv5(x, edge_index))
        x5 = x + x5

        x6 = F.relu(self.conv6(x5, edge_index))
        x6 = x5 + x6

        x7 = F.relu(self.conv7(x6, edge_index))
        x7 = x6 + x7

        x8 = F.relu(self.conv8(x7, edge_index))
        x8 = x7 + x8

        x = F.relu(self.conv9(x8, edge_index))
        x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
        x9 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        #x9 = x8 + x9

        x10 = F.relu(self.conv10(x, edge_index))
        x10 = x + x10

        x11 = F.relu(self.conv11(x10, edge_index))
        x11 = x10 + x11

        x12 = F.relu(self.conv12(x11, edge_index))
        x12 = x11 + x12

        x13 = F.relu(self.conv13(x12, edge_index))
        x13 = x12 + x13

        x = F.relu(self.conv14(x13, edge_index))
        x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
        x14 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        #x14 = x13 + x14

        x = x4 + x9 + x14

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))
        x = F.log_softmax(self.lin3(x), dim=-1)

        return x