def get_skipgram_pred(self, overlapped_rep, overlapped_context_size, neighbor_rep):
    """Score positive and negative (context-node, neighbor) pairs, skip-gram style.

    Args:
        overlapped_rep: (total_context_nodes, dim) representations of the
            overlapped context nodes, concatenated over the batch.
        overlapped_context_size: per-graph count of overlapped context nodes;
            its entries must sum to overlapped_rep.size(0).
        neighbor_rep: (batch_size, dim) one neighborhood representation per graph.

    Returns:
        (pos_scores, neg_scores): dot-product scores for positive pairs
        (total_context_nodes,) and for negatives
        (total_context_nodes * self.negative_samples,).
    """
    # Expand each graph's neighbor embedding so that it lines up row-by-row
    # with that graph's overlapped context rows.
    expanded_neighbor_rep = torch.cat(
        [
            neighbor_rep[i].repeat(overlapped_context_size[i], 1)
            for i in range(len(neighbor_rep))
        ],
        dim=0,
    )
    assert overlapped_rep.shape == expanded_neighbor_rep.shape
    pos_scores = torch.sum(expanded_neighbor_rep * overlapped_rep, dim=1)

    batch_size = neighbor_rep.size(0)
    neg_scores = []
    for i in range(self.negative_samples):
        # Shift the batch by (i + 1) to pair each graph with another
        # graph's neighbor representation as a negative.
        neg_neighbor_rep = neighbor_rep[cycle_index(batch_size, i + 1)]
        # BUG FIX: the original indexed `neg_neighbor_rep[i]` (the outer
        # negative-sample counter) instead of `[k]`, repeating one single
        # row for every graph in the batch (and reading out of range when
        # negative_samples >= batch_size). Index with `k` so each graph
        # gets its own shifted negative neighbor, mirroring the positive
        # expansion above.
        expanded_neg_neighbor_rep = torch.cat(
            [
                neg_neighbor_rep[k].repeat(overlapped_context_size[k], 1)
                for k in range(len(neg_neighbor_rep))
            ],
            dim=0,
        )
        neg_scores.append(
            torch.sum(expanded_neg_neighbor_rep * overlapped_rep, dim=1)
        )
    neg_scores = torch.cat(neg_scores)
    return pos_scores, neg_scores
def get_cbow_pred(self, overlapped_rep, overlapped_context, neighbor_rep):
    """Score neighbor representations against pooled context, CBOW style.

    Pools the overlapped node representations into one context vector per
    graph (sum or mean, per ``self.context_pooling``), then scores each
    graph's neighbor representation against its own context (positives)
    and against batch-shifted contexts (negatives).

    Returns:
        (pos_scores, neg_scores): dot-product scores of shape
        (batch_size,) and (batch_size * self.negative_samples,).
    """
    # One pooled context vector per graph.
    if self.context_pooling == "sum":
        context_rep = batch_sum_pooling(overlapped_rep, overlapped_context)
    elif self.context_pooling == "mean":
        context_rep = batch_mean_pooling(overlapped_rep, overlapped_context)
    else:
        raise NotImplementedError

    num_graphs = context_rep.size(0)

    # Build negatives by cyclically shifting the batch of pooled contexts.
    shifted_contexts = []
    for shift in range(self.negative_samples):
        shifted_contexts.append(context_rep[cycle_index(num_graphs, shift + 1)])
    neg_context_rep = torch.cat(shifted_contexts, dim=0)

    pos_scores = (neighbor_rep * context_rep).sum(dim=1)
    tiled_neighbor_rep = neighbor_rep.repeat(self.negative_samples, 1)
    neg_scores = (tiled_neighbor_rep * neg_context_rep).sum(dim=1)
    return pos_scores, neg_scores
def _train_step(self):
    """Run one training epoch of infomax-style contrastive training.

    For each batch: encode nodes, pool a per-graph summary, and train the
    discriminator to score (node, own-graph summary) pairs positively and
    (node, other-graph summary) pairs negatively.

    Returns:
        (mean_loss, mean_accuracy) over the epoch, both as Python floats.

    Raises:
        ZeroDivisionError: if ``self.dataloader`` yields no batches.
    """
    loss_items = []
    acc_items = []
    self.model.train()
    for batch in self.dataloader:
        batch = batch.to(self.device)
        hidden = self.model(
            x=batch.x,
            edge_index=batch.edge_index,
            edge_attr=batch.edge_attr,
            self_loop_index=self.self_loop_index,
            self_loop_type=self.self_loop_type,
        )
        # Per-graph summary vector, squashed to (0, 1).
        summary_h = torch.sigmoid(batch_mean_pooling(hidden, batch.batch))
        # Positive: each node paired with its own graph's summary.
        pos_summary = summary_h[batch.batch]
        # Negative: pair each node with the summary of the next graph
        # in the batch (cyclic shift by 1).
        neg_summary_h = summary_h[cycle_index(summary_h.size(0), 1)]
        neg_summary = neg_summary_h[batch.batch]
        pos_scores = self.discriminator(hidden, pos_summary)
        neg_scores = self.discriminator(hidden, neg_summary)
        self.optimizer.zero_grad()
        loss = (
            self.loss_fn(pos_scores, torch.ones_like(pos_scores))
            + self.loss_fn(neg_scores, torch.zeros_like(neg_scores))
        )
        loss.backward()
        self.optimizer.step()
        loss_items.append(loss.item())
        # FIX: store a Python float via .item() instead of a device tensor.
        # The original appended tensors, which keeps per-batch GPU memory
        # alive for the whole epoch and makes the returned mean accuracy a
        # tensor — inconsistent with the loss, which already uses .item().
        correct = (pos_scores > 0).float().sum() + (neg_scores < 0).float().sum()
        acc_items.append((correct / (pos_scores.shape[0] * 2)).item())
    return sum(loss_items) / len(loss_items), sum(acc_items) / len(acc_items)