# Shared imports assumed by the snippets below. Each `forward` belongs to its own
# torch.nn.Module subclass; layers (conv*, pool*, lin*, ...) and project-specific
# helpers (random_drop_node, augment_adj, ...) are defined elsewhere.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.data import Batch, Data
from torch_geometric.nn import (global_add_pool, global_add_pool as gadd,
                                global_max_pool as gmp, global_mean_pool as gap,
                                knn_graph)
from torch_geometric.utils import dropout_adj, get_laplacian, subgraph


def forward(self, data):
    # x: [N, in_channels], edge_index: [2, E]
    x, edge_index = data.x, data.edge_index
    batch, edge_attr = data.batch, data.edge_attr

    # Three conv -> pool stages, each followed by a max+mean readout.
    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, edge_attr, batch, _, _ = self.pool2(x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, edge_attr, batch, _, _ = self.pool3(x, edge_index, edge_attr, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Sum the per-stage readouts, then regress one value per graph.
    x = x1 + x2 + x3
    x = F.relu(self.lin1(x))
    x = self.lin2(x)
    return x.reshape(-1)

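A minimal constructor sketch, assuming GCNConv and TopKPooling from PyTorch Geometric: the class name, layer types, and sizes are illustrative guesses, chosen only so that the forward above runs end to end (TopKPooling matches the six-value unpacking used throughout these snippets).

import torch
from torch_geometric.nn import GCNConv, TopKPooling

class TopKNet(torch.nn.Module):
    """Hypothetical host module for the forward above; all sizes are illustrative."""
    def __init__(self, in_channels, hidden=128):
        super().__init__()
        self.conv1 = GCNConv(in_channels, hidden)
        self.pool1 = TopKPooling(hidden, ratio=0.8)
        self.conv2 = GCNConv(hidden, hidden)
        self.pool2 = TopKPooling(hidden, ratio=0.8)
        self.conv3 = GCNConv(hidden, hidden)
        self.pool3 = TopKPooling(hidden, ratio=0.8)
        self.lin1 = torch.nn.Linear(2 * hidden, hidden)  # 2x: concat of max + mean readout
        self.lin2 = torch.nn.Linear(hidden, 1)           # one regression value per graph
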
def forward(self, x, edge_index, batch, inference=False):
    x = self.item_embedding(x)
    x = x.squeeze(1)  # [N, 1, emb] -> [N, emb]

    x = F.leaky_relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.leaky_relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.leaky_relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3
    x = self.lin1(x)
    x = self.bn1(self.act1(x))
    x = self.lin2(x)
    x = self.bn2(self.act2(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.lin3(x).squeeze(1)

    if inference:
        # Also expose the pooled graph embedding for downstream use.
        x_out = x1 + x2 + x3
        return x, x_out
    return x

def forward(self, x, edge_index, batch):
    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv4(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool4(x, edge_index, None, batch)
    x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv5(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool5(x, edge_index, None, batch)
    x5 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3 + x4 + x5
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.log_softmax(self.lin3(x), dim=-1)
    return x

def forward(self, x, edge_index, batch, edge_attr):
    # Augment the adjacency before each conv/pool stage (see sketch below).
    edge_attr = edge_attr.squeeze()
    edge_index, edge_attr = self.augment_adj(edge_index, edge_attr, x.size(0))
    x = self.conv1(x, edge_index, edge_attr)
    x, edge_index, edge_attr, batch, perm, score1 = self.pool1(
        x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    edge_attr = edge_attr.squeeze()
    edge_index, edge_attr = self.augment_adj(edge_index, edge_attr, x.size(0))
    x = self.conv2(x, edge_index, edge_attr)
    x, edge_index, edge_attr, batch, perm, score2 = self.pool2(
        x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = torch.cat([x1, x2], dim=1)
    x = F.relu(self.bn4(self.fc1(x)))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.relu(self.bn5(self.fc2(x)))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.log_softmax(self.fc3(x), dim=-1)
    return x, score1, score2

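`augment_adj` is not defined in this snippet. PyTorch Geometric's GraphUNet has a helper of the same name that squares the adjacency so that two-hop neighbors become direct edges; a sketch along those lines (an assumption here, and it requires torch_sparse.spspmm) would be:

import torch
from torch_sparse import spspmm
from torch_geometric.utils import add_self_loops, remove_self_loops, sort_edge_index

def augment_adj(edge_index, edge_weight, num_nodes):
    """Hypothetical two-hop augmentation in the style of GraphUNet.augment_adj."""
    edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)
    edge_index, edge_weight = add_self_loops(edge_index, edge_weight, num_nodes=num_nodes)
    edge_index, edge_weight = sort_edge_index(edge_index, edge_weight, num_nodes)
    # Sparse-sparse matmul of the adjacency with itself: A @ A adds 2-hop edges.
    edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                     edge_weight, num_nodes, num_nodes, num_nodes)
    return remove_self_loops(edge_index, edge_weight)
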
def forward(self, x, edge_index, batch=None):
    if len(x.shape) == 3:
        # Needed for explainability (XAI); note the input must contain exactly
        # one molecule, passed as stacked tensors that are re-batched here.
        data_list = [Data(x=x_i, edge_index=edge_index_i)
                     for x_i, edge_index_i in zip(x, edge_index)]
        data = Batch.from_data_list(data_list).to(self.device)
        x, batch, edge_index = data.x, data.batch, data.edge_index

    # Embed atoms, preserving the original tensor shape.
    shape = x.shape
    x = x.reshape(-1, shape[-1])
    x = self.atom_embedding(x)
    x = x.reshape(shape)
    x = x.squeeze(1)

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, batch, _ = self.pool2(x, edge_index, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2
    x = self.linear(x)
    # x = torch.sigmoid(x)
    return x.squeeze(1)

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    edge_attr = None

    x = F.relu(self.conv1(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index, edge_attr))
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(x1) + F.relu(x2) + F.relu(x3)
    before_fc = x  # keep the pooled embedding for the classification head

    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout_ratio, training=self.training)
    x = self.lin2(x)
    x = F.dropout(x, p=self.dropout_ratio, training=self.training)

    # Classification head on the shared pooled embedding.
    x_cls = F.relu(self.lin3(before_fc))
    x_cls = F.dropout(x_cls, p=self.dropout_ratio, training=self.training)
    x_cls = self.lin4(x_cls)
    x_cls = F.dropout(x_cls, p=self.dropout_ratio, training=self.training)
    x_cls = F.log_softmax(self.lin5(x_cls), dim=-1)
    return x, x_cls

def forward(self, x, edge_index, batch, edge_attr):
    x = self.conv1(x, edge_index)
    if x.norm(p=2, dim=-1).min() == 0:  # debug guard: warn on all-zero node features
        print('x is zeros')
    x, edge_index, edge_attr, batch, perm, score1 = self.pool1(
        x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    edge_attr = edge_attr.squeeze()
    edge_index, edge_attr = self.augment_adj(edge_index, edge_attr, x.size(0))
    x = self.conv2(x, edge_index)
    x, edge_index, edge_attr, batch, perm, score2 = self.pool2(
        x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = torch.cat([x1, x2], dim=1)  # concatenate the two stage readouts
    x = self.bn4(F.relu(self.fc1(x)))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.bn5(F.relu(self.fc2(x)))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.log_softmax(self.fc3(x), dim=-1)
    return x, score1, score2

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    if isinstance(edge_index, tuple):  # some loaders hand edge_index over as a tuple
        edge_index = torch.stack(edge_index)
    edge_attr = None

    x = F.relu(self.conv1(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index, edge_attr))
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(x1) + F.relu(x2) + F.relu(x3)
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.dropout(x, p=self.dropout, training=self.training)
    pred = self.lin3(x)
    return pred

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    edge_attr = None

    x = F.relu(self.conv1(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool1(x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch = self.pool2(x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index, edge_attr))
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(x1) + F.relu(x2) + F.relu(x3)
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.dropout(x, p=self.dropout, training=self.training)
    x = self.lin3(x)
    pred = F.log_softmax(x, dim=-1)

    # Return the NLL loss alongside the prediction when labels are available.
    if data.y is not None:
        loss = F.nll_loss(pred, data.y)
        return pred, loss
    return pred, None

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.item_embedding(x)
    x = x.squeeze(1)

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3
    x = self.act1(self.lin1(x))
    x = self.act2(self.lin2(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = torch.sigmoid(self.lin3(x)).squeeze(1)
    return x

def forward(self, data):
    x, edge_index_prior, batch, edge_weight_prior = (
        data.x, data.edge_index, data.batch, data.edge_attr)

    # Tile the learned (shared) graph across every sample in the batch.
    edge_weight_learn = self.edge_weight_learn
    edge_index_learn = self.edge_index_learn
    batch_size = x.shape[0] // self.num_channels
    _edge_weight_learn = edge_weight_learn
    for _ in range(batch_size - 1):
        edge_weight_learn = torch.cat((edge_weight_learn, _edge_weight_learn), dim=0)
    edge_index_learn = edge_index_learn.repeat(1, batch_size).to(device)  # `device` is a module-level global here

    # Prior-graph representation.
    x_prior = F.relu(self.conv_prior_1(x, edge_index_prior, edge_weight_prior))
    x_prior = F.relu(self.conv_prior_2(x_prior, edge_index_prior, edge_weight_prior))
    x_prior = gap(x_prior, batch)

    # Learned-graph representation.
    x_learn = F.relu(self.conv_learn_1(x, edge_index_learn, edge_weight_learn))
    x_learn = F.relu(self.conv_learn_2(x_learn, edge_index_learn, edge_weight_learn))
    x_learn = gap(x_learn, batch)

    x = x_prior + x_learn  # alternative: torch.cat([x_prior, x_learn], dim=1)
    return self.fc(x)

def forward(self, batched_data):
    x, edge_index, edge_attr, node_depth, batch = (
        batched_data.x, batched_data.edge_index, batched_data.edge_attr,
        batched_data.node_depth, batched_data.batch)
    x = self.node_encoder(x, node_depth.view(-1,))

    xs = []
    x = F.relu(self.conv1(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
    xs.append(torch.cat([gmp(x, batch), gap(x, batch)], dim=1))

    for i in range(self.num_layers - 1):
        x = F.relu(self.convs[i](x, edge_index, edge_attr))
        x, edge_index, edge_attr, batch, _, _ = self.pools[i](x, edge_index, edge_attr, batch)
        xs.append(torch.cat([gmp(x, batch), gap(x, batch)], dim=1))

    # Sum the per-layer readouts.
    x = xs[0]
    for i in range(1, len(xs)):
        x = x + xs[i]

    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout_ratio, training=self.training)
    x = F.relu(self.lin2(x))

    # One linear head per output position in the target sequence.
    return [self.graph_pred_linear_list[i](x) for i in range(self.max_seq_len)]

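The indexed `self.convs[i]` / `self.pools[i]` lookups above imply `nn.ModuleList` containers; a hedged fragment of how the assumed `__init__` could build them (layer types and sizes are guesses):

import torch.nn as nn
from torch_geometric.nn import GraphConv, TopKPooling

def build_stacks(hidden: int, num_layers: int):
    """Hypothetical helper mirroring the assumed registration of convs/pools."""
    convs = nn.ModuleList([GraphConv(hidden, hidden) for _ in range(num_layers - 1)])
    pools = nn.ModuleList([TopKPooling(hidden, ratio=0.8) for _ in range(num_layers - 1)])
    return convs, pools  # assigned to self.convs / self.pools in __init__
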
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    # Block 1: residual conv chain, then pool and read out.
    x1 = F.relu(self.conv1(x, edge_index))
    x2 = x1 + F.relu(self.conv2(x1, edge_index))
    x3 = x2 + F.relu(self.conv3(x2, edge_index))
    x = F.relu(self.conv4(x3, edge_index))
    x, edge_index, _, batch, _ = self.pool1(x, edge_index, None, batch)
    x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Block 2.
    x5 = x + F.relu(self.conv5(x, edge_index))
    x6 = x5 + F.relu(self.conv6(x5, edge_index))
    x7 = x6 + F.relu(self.conv7(x6, edge_index))
    x8 = x7 + F.relu(self.conv8(x7, edge_index))
    x = F.relu(self.conv9(x8, edge_index))
    x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
    x9 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Block 3.
    x10 = x + F.relu(self.conv10(x, edge_index))
    x11 = x10 + F.relu(self.conv11(x10, edge_index))
    x12 = x11 + F.relu(self.conv12(x11, edge_index))
    x13 = x12 + F.relu(self.conv13(x12, edge_index))
    x = F.relu(self.conv14(x13, edge_index))
    x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
    x14 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Sum the three block readouts, then classify.
    x = x4 + x9 + x14
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout_ratio, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.log_softmax(self.lin3(x), dim=-1)
    return x

def forward(self, data):
    x, edge_index, batch = data
    edge_attr = None
    edge_index = edge_index.transpose(0, 1)  # [E, 2] -> [2, E]

    x = self.relu(self.conv1(x, edge_index, edge_attr), negative_slope=0.1)
    x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = self.relu(self.conv2(x, edge_index, edge_attr), negative_slope=0.1)
    x, edge_index, edge_attr, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # The third stage pools without a preceding convolution (conv3 is disabled),
    # as are the fourth and fifth conv/pool stages.
    x, edge_index, edge_attr, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Auxiliary per-node information score on the remaining nodes.
    x_information_score = self.calc_information_score(x, edge_index)
    score = torch.sum(torch.abs(x_information_score), dim=1)

    x = (self.relu(x1, negative_slope=0.1) + self.relu(x2, negative_slope=0.1)
         + self.relu(x3, negative_slope=0.1))
    graph_emb = x

    x = self.relu(self.lin1(x), negative_slope=0.1)
    x = self.relu(self.lin2(x), negative_slope=0.1)
    x = self.lin3(x)
    return x, score.mean(), graph_emb

def forward(self, data, conv_train=False):
    x = data.x
    edge_index = data.edge_index

    x1 = self.norm1(self.act1(self.conv1(x, edge_index)))
    x = self.dropout(x1)
    x2 = self.norm2(self.act2(self.conv2(x, edge_index)))
    x = self.dropout(x2)
    x3 = self.norm3(self.act3(self.conv3(x, edge_index)))
    h_conv = torch.cat([x1, x2, x3], dim=1)

    # Compute the GNN-only output.
    conv_batch_avg = gap(h_conv, data.batch)
    conv_batch_add = gadd(h_conv, data.batch)
    conv_batch_max = gmp(h_conv, data.batch)
    h_GNN = torch.cat([conv_batch_avg, conv_batch_add, conv_batch_max], dim=1)
    gnn_out = self.out_fun(self.lin_GNN(h_GNN))
    if conv_train:
        return None, None, gnn_out

    # SOM layers, one per conv level.
    _, _, som_out_1 = self.som1(x1)
    _, _, som_out_2 = self.som2(x2)
    _, _, som_out_3 = self.som3(x3)

    # Readout over the SOM outputs.
    h1 = self.out_norm1(self.act1(self.out_conv1(som_out_1, edge_index)))
    h2 = self.out_norm2(self.act2(self.out_conv2(som_out_2, edge_index)))
    h3 = self.out_norm3(self.act3(self.out_conv3(som_out_3, edge_index)))
    som_out_conv = torch.cat([h1, h2, h3], dim=1)

    som_batch_avg = gap(som_out_conv, data.batch)
    som_batch_add = gadd(som_out_conv, data.batch)
    som_batch_max = gmp(som_out_conv, data.batch)
    h = torch.cat([som_batch_avg, som_batch_add, som_batch_max], dim=1)

    h = self.out_norm4(h)
    h = self.out_act(self.lin_out1(h))
    h = self.dropout(h)
    h = self.out_act(self.lin_out2(h))
    h = self.dropout(h)
    h = self.out_fun(self.lin_out3(h))
    return h, h_conv, gnn_out

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    # Embed item id and category, then concatenate them as node features.
    item_id = x[:, :, 0]
    category = x[:, :, 1]
    emb_item = self.item_embedding(item_id).squeeze(1)
    emb_category = self.category_embedding(category).squeeze(1)
    x = torch.cat([emb_item, emb_category], dim=1)

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3
    x = self.act1(self.lin1(x))
    x = self.lin2(x)
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.act2(x)

    # Score each item embedding against its graph's representation.
    outputs = []
    for i in range(x.size(0)):
        outputs.append(torch.matmul(emb_item[data.batch == i], x[i, :]))
    x = torch.cat(outputs, dim=0)
    return torch.sigmoid(x)

def forward(self, data):
    # The edge weight is passed in the edge-attribute slot so that it can be masked as well.
    modular_data, ddi_edge_index, ddi_edge_attr = data
    modular_output = []
    for modular_id in list(modular_data.keys()):
        x, edge_index, edge_weight, batch = modular_data[modular_id]
        x = x.to(self.args.device)
        edge_index = edge_index.to(self.args.device)
        edge_weight = edge_weight.to(self.args.device)
        batch = batch.to(self.args.device).long()

        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool1(
            x, edge_index, edge_weight, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool2(
            x, edge_index, edge_weight, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool3(
            x, edge_index, edge_weight, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        modular_output.append(torch.cat((x1, x2, x3), dim=1))

    modular_feature = torch.cat(tuple(modular_output))
    modular_feature = nn.Dropout(self.args.dropout_ratio)(modular_feature)

    # x = F.relu(self.conv4(modular_feature, ddi_edge_index, ddi_edge_attr))
    x = F.relu(self.conv_noattn(modular_feature, ddi_edge_index))
    pos_source, pos_target, neg_source, neg_target = self.feature_split(
        x, ddi_edge_index)

    pos_feat_x = torch.sigmoid(self.lin1(pos_source))
    pos_feat_y = torch.sigmoid(self.lin2(pos_target))
    neg_feat_x = torch.sigmoid(self.lin1(neg_source))
    neg_feat_y = torch.sigmoid(self.lin2(neg_target))
    pos_attr = torch.sigmoid(self.lin3(ddi_edge_attr[:self.args.batch_size]))
    neg_attr = torch.sigmoid(self.lin3(ddi_edge_attr[self.args.batch_size:]))

    # Translation-style objective: source + relation should land near target.
    loss_pos_vec = pos_feat_x + pos_attr - pos_feat_y
    loss_neg_vec = neg_feat_x + neg_attr - neg_feat_y
    norm_pos = torch.norm(loss_pos_vec, p=2, dim=1)
    norm_neg = torch.norm(loss_neg_vec, p=2, dim=1)
    loss = 2 * self.ddi_nhid - norm_pos + self.args.neg_decay * norm_neg
    return loss, norm_pos, norm_neg, pos_feat_x

def forward(self, x, batch, c_size):
    if self.cfg == 'ATT':
        # Graph context vector from the mean-pooled node features.
        x_sum = gap(x, batch)
        c = torch.tanh(torch.matmul(x_sum, self.W))
        # Broadcast each graph's context to its c_size[idx] nodes.
        c = torch.cat([p.repeat(c_size[idx], 1) for idx, p in enumerate(c)], dim=0)
        # Per-node attention weight, then attention-weighted sum readout.
        alpha = torch.sigmoid((x * c).sum(dim=1).view(-1, 1))
        return global_add_pool(x * alpha, batch)
    elif self.cfg == 'AVG':
        return gap(x, batch)

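A sketch of the state this readout assumes (illustrative, not taken from the source): `self.W` is a square learnable matrix over the hidden size, and `c_size` holds each graph's node count, which is recoverable from `batch`:

import torch
import torch.nn as nn

class AttReadout(nn.Module):
    """Hypothetical host for the readout above; hidden_dim is illustrative."""
    def __init__(self, hidden_dim: int, cfg: str = 'ATT'):
        super().__init__()
        self.cfg = cfg
        # Context-projection matrix used by torch.matmul(x_sum, self.W).
        self.W = nn.Parameter(torch.empty(hidden_dim, hidden_dim))
        nn.init.xavier_uniform_(self.W)

# At the call site, nodes per graph can be recovered from the batch vector:
# c_size = torch.bincount(batch)
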
def forward(self, x, edge_index, batch):
    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv4(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool4(x, edge_index, None, batch)
    x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv5(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool5(x, edge_index, None, batch)
    x5 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Average the five stage readouts and keep a copy for the gate below.
    x = (x1 + x2 + x3 + x4 + x5) / 5
    sx = x
    x = x.unsqueeze(dim=2)

    # Attention layers: a stack of 1-D convolutions over the readout vector.
    x = self.poolAtt1(F.relu(self.convAtt1(x)))
    x = self.poolAtt2(F.relu(self.convAtt2(x)))
    x = self.poolAtt3(F.relu(self.convAtt3(x)))
    x = self.poolAtt4(F.relu(self.convAtt4(x)))
    x = self.poolAtt5(F.relu(self.convAtt5(x)))
    x = self.poolAtt6(F.relu(self.convAtt6(x)))
    x = x.squeeze()  # note: also drops the batch dim when the batch size is 1

    # Gate the averaged readout with the learned attention.
    x = (x + 1) * sx
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.log_softmax(self.lin3(x), dim=-1)
    return x

def forward(self, data):
    # The edge weight is passed in the edge-attribute slot so that it can be masked as well.
    modular_data, ddi_edge_index, neg_edge_index, ddi_edge_attr, neg_edge_attr = data
    modular_output = []
    for modular_id in list(modular_data.keys()):
        x, edge_index, edge_weight, batch = modular_data[modular_id]
        x = x.to(self.args.device)
        edge_index = edge_index.to(self.args.device)
        edge_weight = edge_weight.to(self.args.device)
        batch = batch.to(self.args.device).long()

        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool1(
            x, edge_index, edge_weight, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool2(
            x, edge_index, edge_weight, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index, edge_weight))
        x, edge_index, edge_weight, batch, _, _ = self.pool3(
            x, edge_index, edge_weight, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        modular_output.append(torch.cat((x1, x2, x3), dim=1))

    modular_feature = torch.cat(tuple(modular_output))
    modular_feature = nn.Dropout(self.args.dropout_ratio)(modular_feature)

    x = F.relu(self.conv4(modular_feature, ddi_edge_index, ddi_edge_attr))
    pos_source, pos_target, neg_source, neg_target = self.feature_split(
        x, ddi_edge_index, neg_edge_index)

    # Sigmoid, softmax, or nothing could be applied here; ReLU is another option.
    pos_feat_x = self.lin1(pos_source)
    pos_feat_y = self.lin2(pos_target)
    neg_feat_x = self.lin1(neg_source)
    neg_feat_y = self.lin2(neg_target)
    pos_attr = self.lin3(ddi_edge_attr)  # relation embeddings (not used by xent_loss)
    neg_attr = self.lin3(neg_edge_attr)

    norm_pos, norm_neg = self.xent_loss(pos_feat_x, pos_feat_y, neg_feat_x, neg_feat_y)
    pos_tgt = torch.ones_like(norm_pos)
    neg_tgt = torch.zeros_like(norm_neg)
    # BCE on positive/negative edge scores against all-ones / all-zeros targets.
    loss = (nn.BCEWithLogitsLoss()(norm_pos, pos_tgt)
            + nn.BCEWithLogitsLoss()(norm_neg, neg_tgt))
    return loss, norm_pos, norm_neg, pos_feat_x

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    x = F.relu(self.input_GCL(x, edge_index))
    x, edge_index, _, batch, _, _ = self.input_GPL(x, edge_index, None, batch)
    # Per-graph readout: [batch_size, 2 * hidden].
    out = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    for i in range(self.layer_num - 1):
        x = F.relu(getattr(self, f"hidden_GCL{i}")(x, edge_index))
        x, edge_index, _, batch, _, _ = getattr(self, f"hidden_GPL{i}")(
            x, edge_index, None, batch)
        out = out + torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    return out

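The `getattr(self, f"hidden_GCL{i}")` lookups imply layers registered by name at construction time; a hedged constructor sketch (GCNConv/TopKPooling and the sizes are guesses):

import torch
from torch_geometric.nn import GCNConv, TopKPooling

class HierNet(torch.nn.Module):
    """Hypothetical constructor matching the getattr lookups above."""
    def __init__(self, hidden: int, layer_num: int):
        super().__init__()
        self.layer_num = layer_num
        self.input_GCL = GCNConv(hidden, hidden)
        self.input_GPL = TopKPooling(hidden, ratio=0.8)
        # setattr on an nn.Module registers each layer as a trainable submodule.
        for i in range(layer_num - 1):
            setattr(self, f"hidden_GCL{i}", GCNConv(hidden, hidden))
            setattr(self, f"hidden_GPL{i}", TopKPooling(hidden, ratio=0.8))
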
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    if self.training:
        # Randomly drop nodes during training and restrict edges to the kept nodes.
        num_graphs = batch.size(0) // self.node_per_graph
        mask, torchmask = random_drop_node(self.node_per_graph, num_graphs, 0.5, 0.5)
        x = x[mask]
        batch = batch[mask]
        edge_index, _ = subgraph(torchmask, edge_index, relabel_nodes=True)

    x0 = self.linprev(x, edge_index)
    x1 = self.bn1(F.relu(self.conv1(x0, edge_index)))
    x2 = self.bn2(F.relu(self.conv2(x1, edge_index)))
    x3 = self.bn3(F.relu(self.conv3(x2, edge_index)))
    x4 = self.bn4(F.relu(self.conv4(x3, edge_index)))

    # Jumping-knowledge style: concatenate all layer outputs, then pool.
    out = torch.cat([x0, x1, x2, x3, x4], dim=1)
    out = torch.cat([gmp(out, batch), gap(out, batch)], dim=1)
    out = self.mlp(out)
    return F.log_softmax(out, dim=-1)

def forward(self, data):
    x, batch = data.x, data.batch
    # Rebuild a 100-NN graph over the point features each call, then randomly drop edges.
    edge_index = knn_graph(x, 100, batch)
    edge_index, _ = dropout_adj(edge_index, p=0.3)

    y = data.x
    y = self.point1(y, edge_index)  # dim = n_intermediate
    pointlist = [y]
    for f in range(self.point_depth - 1):
        y = self.pointfkt[f](y, edge_index)
        pointlist.append(y)
    y = torch.cat(pointlist, dim=1)  # dim = n_intermediate * point_depth
    y = torch.cat([gap(y, batch), gmp(y, batch)], dim=1)

    x = self.batchnorm1(y)
    for g in range(self.lin_depth):
        x = F.leaky_relu(self.linearfkt[g](x))
        if (g - 1) % 3 == 0 and self.lin_depth - 1 > g:
            # g = 1, 4, 7, ... and at least two more layers follow
            x = self.drop[g](x)
    x = self.out(x)
    if self.classification:
        x = torch.sigmoid(x)
    x = x.view(-1)
    return x

def forward_single(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    target = data.target

    # Graph (drug) branch.
    x = self.relu(self.conv1(x, edge_index))
    x = self.relu(self.conv2(x, edge_index))
    # Apply global max pooling (gmp) and global mean pooling (gap).
    x = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    x = self.relu(self.fc_g1(x))
    x = self.dropout(x)
    x = self.fc_g2(x)

    # Protein (target) branch: embed, 1-D conv, flatten.
    embedded_xt = self.embedding_xt(target)
    conv_xt = self.conv_xt_1(embedded_xt)
    xt = conv_xt.view(-1, 32 * 121)
    xt = self.fc1_xt(xt)

    # Concatenate both branches and run the dense head.
    xc = torch.cat((x, xt), 1)
    xc = self.relu(self.fc1(xc))
    xc = self.dropout(xc)
    xc = self.relu(self.fc2(xc))
    xc = self.dropout(xc)
    return self.out(xc)

def forward(self, data, hidden_layer_aggregator=None):
    X = data.x
    k = self.max_k

    # Symmetric normalized Laplacian, densified for repeated matmuls.
    L_edge_index, L_values = get_laplacian(data.edge_index, normalization="sym")
    L = torch.sparse.FloatTensor(
        L_edge_index, L_values, torch.Size([X.shape[0], X.shape[0]])).to_dense()

    # Stack [X, LX, L^2 X, ..., L^(k-1) X] along the feature dimension.
    H = [X]
    for i in range(k - 1):
        H.append(torch.mm(torch.matrix_power(L, i + 1), X))
    H = self.lin(torch.cat(H, dim=1), self.xhi_layer_mask)
    H = self.reservoir_act_fun(H)
    H = self.bn_hidden_rec(H)

    # Mean / add / max readouts, concatenated.
    H_avg = gap(H, data.batch)
    H_add = gadd(H, data.batch)
    H_max = gmp(H, data.batch)
    H = torch.cat([H_avg, H_add, H_max], dim=1)

    if self.output == "funnel" or self.output is None:
        return self.funnel_output(H)
    elif self.output == "one_layer":
        return self.one_layer_out(H)
    elif self.output == "restricted_funnel":
        return self.restricted_funnel_output(H)
    else:
        raise ValueError(f"unknown output stage: {self.output}")

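Since `torch.matrix_power(L, i + 1)` recomputes each power from scratch, the same `[X, LX, ..., L^(k-1) X]` stack can be built with one multiplication per term; a behavior-equivalent replacement for the loop above (up to floating-point error):

# Incremental computation of the Laplacian-power feature stack.
H, cur = [X], X
for _ in range(k - 1):
    cur = L @ cur  # cur now holds L^(i+1) X
    H.append(cur)
H = torch.cat(H, dim=1)
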
def forward(self, data):
    """Encode the drug graph and the protein sequence, then combine them for prediction."""
    # Get graph input.
    x, edge_index, batch = data.x, data.edge_index, data.batch
    # Get protein input.
    target = data.target

    x = self.relu(self.conv1(x, edge_index))
    x = self.relu(self.conv2(x, edge_index))
    # Apply global max pooling (gmp) and global mean pooling (gap).
    x = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    x = self.relu(self.fc_g1(x))
    x = self.dropout(x)
    x = self.fc_g2(x)

    embedded_xt = self.embedding_xt(target)
    conv_xt = self.conv_xt_1(embedded_xt)
    # Flatten.
    xt = conv_xt.view(-1, 32 * 121)
    xt = self.fc1_xt(xt)

    # Concat both branches, then run the dense head.
    xc = torch.cat((x, xt), 1)
    xc = self.relu(self.fc1(xc))
    xc = self.dropout(xc)
    xc = self.relu(self.fc2(xc))
    xc = self.dropout(xc)
    return self.out(xc)

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    # Embed item id and category, then concatenate them as node features.
    item_id = x[:, :, 0]
    category = x[:, :, 1]
    emb_item = self.item_embedding(item_id).squeeze(1)
    emb_category = self.category_embedding(category).squeeze(1)
    x = torch.cat([emb_item, emb_category], dim=1)

    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _ = self.pool1(x, edge_index, None, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _ = self.pool2(x, edge_index, None, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _ = self.pool3(x, edge_index, None, batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = x1 + x2 + x3
    x = self.act1(self.lin1(x))
    x = self.lin2(x)
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.act2(x)

    # Score each item embedding against its graph's representation.
    outputs = []
    for i in range(x.size(0)):
        outputs.append(torch.matmul(emb_item[data.batch == i], x[i, :]))
    x = torch.cat(outputs, dim=0)
    return torch.sigmoid(x)

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.bn1(x)
    x = F.relu(self.bn2(self.conv1(x, edge_index)))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
    x = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    return self.lin1(x)

def forward(self, data):
    # Adapted from https://github.com/rusty1s/pytorch_geometric/blob/master/examples/enzymes_topk_pool.py
    x, edge_index, edge_attr, batch = (
        data.x, data.edge_index, data.edge_attr, data.batch)

    x = F.relu(self.conv1(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch, _, _ = self.pool1(
        x, edge_index, edge_attr, batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    x = F.relu(self.conv2(x, edge_index, edge_attr))
    x, edge_index, edge_attr, batch, _, _ = self.pool2(
        x, edge_index, edge_attr, batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

    # Unlike the reference example, the third conv/pool stage is omitted here,
    # so only the first two readouts are summed.
    x = x1 + x2
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = F.relu(self.lin2(x))
    x = F.log_softmax(self.lin3(x), dim=-1)
    return x

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.conv1(x, edge_index)
    x = self.conv2(x, edge_index)
    x = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    # x = torch.sigmoid(self.fc(x)).squeeze(1)
    return self.fc(x)

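A hedged end-to-end usage sketch for the `data`-object variants above; the dataset choice and the `TopKNet` class (from the first sketch, assuming the first snippet's forward is attached to it) are illustrative assumptions, not part of the source:

from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader

dataset = TUDataset(root='data/PROTEINS', name='PROTEINS')  # illustrative choice
loader = DataLoader(dataset, batch_size=32, shuffle=True)
model = TopKNet(dataset.num_node_features)

for data in loader:
    out = model(data)  # the forward reads data.x, data.edge_index, data.batch, data.edge_attr
    break              # one batch is enough for a smoke test
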