def aggr(self, x, edge_index, num_nodes):
    """Aggregate node features according to ``self.pooling``.

    Pools rows of ``x`` grouped by ``edge_index[0]`` (presumably the
    source-node ids of each edge — confirm against the message-passing
    caller). ``num_nodes`` is accepted but unused here.

    Raises:
        NotImplementedError: if ``self.pooling`` is neither "mean" nor "sum".
    """
    mode = self.pooling
    if mode == "sum":
        return batch_sum_pooling(x, edge_index[0])
    if mode == "mean":
        return batch_mean_pooling(x, edge_index[0])
    raise NotImplementedError
 def pool(self, x, batch):
     if self.graph_pooling == "mean":
         return batch_mean_pooling(x, batch)
     elif self.graph_pooling == "sum":
         return batch_sum_pooling(x, batch)
     else:
         raise NotImplementedError
# Beispiel #3 (score: 0) — example-listing separator left over from the
# source aggregation site; not executable code.
    def get_cbow_pred(
            self,
            overlapped_rep,
            overlapped_context,
            neighbor_rep
    ):
        """Score neighbor representations against pooled context vectors.

        Pools ``overlapped_rep`` grouped by ``overlapped_context`` into one
        context vector per sample, then computes dot-product scores of
        ``neighbor_rep`` against its own context (positive) and against
        cyclically shifted contexts from other samples (negative).

        Returns:
            (pos_scores, neg_scores): 1-D score tensors; ``neg_scores`` has
            ``self.negative_samples`` times as many entries as ``pos_scores``.

        Raises:
            NotImplementedError: if ``self.context_pooling`` is neither
            "mean" nor "sum".
        """
        if self.context_pooling == "mean":
            context_rep = batch_mean_pooling(overlapped_rep, overlapped_context)
        elif self.context_pooling == "sum":
            context_rep = batch_sum_pooling(overlapped_rep, overlapped_context)
        else:
            raise NotImplementedError

        n = context_rep.size(0)

        # Negatives: shift the batch cyclically by 1..negative_samples so each
        # neighbor is paired with contexts belonging to other samples.
        shifted_contexts = [
            context_rep[cycle_index(n, shift + 1)]
            for shift in range(self.negative_samples)
        ]
        neg_context_rep = torch.cat(shifted_contexts, dim=0)

        pos_scores = (neighbor_rep * context_rep).sum(dim=1)
        tiled_neighbor = neighbor_rep.repeat(self.negative_samples, 1)
        neg_scores = (tiled_neighbor * neg_context_rep).sum(dim=1)
        return pos_scores, neg_scores
    def _train_step(self):
        """Run one training epoch of the InfoMax-style objective.

        For every batch: embed nodes, build a sigmoid-squashed mean-pooled
        summary per graph, and train ``self.discriminator`` to score each
        node against its own graph's summary (label 1) versus a cyclically
        shifted, mismatched summary (label 0).

        Returns:
            (mean_loss, mean_accuracy) averaged over all batches; accuracy
            is the fraction of positive scores > 0 plus negative scores < 0.
        """
        epoch_losses = []
        epoch_accs = []

        self.model.train()
        for batch in self.dataloader:
            batch = batch.to(self.device)
            node_emb = self.model(
                x=batch.x,
                edge_index=batch.edge_index,
                edge_attr=batch.edge_attr,
                self_loop_index=self.self_loop_index,
                self_loop_type=self.self_loop_type,
            )
            # One summary vector per graph: sigmoid of mean-pooled node embeddings.
            graph_summary = torch.sigmoid(batch_mean_pooling(node_emb, batch.batch))

            # Positive pairing: each node with its own graph's summary.
            pos_summary = graph_summary[batch.batch]
            # Negative pairing: summaries shifted by one graph, so each node
            # sees a summary from a different graph.
            shifted_summary = graph_summary[cycle_index(graph_summary.size(0), 1)]
            neg_summary = shifted_summary[batch.batch]

            pos_scores = self.discriminator(node_emb, pos_summary)
            neg_scores = self.discriminator(node_emb, neg_summary)

            self.optimizer.zero_grad()
            pos_loss = self.loss_fn(pos_scores, torch.ones_like(pos_scores))
            neg_loss = self.loss_fn(neg_scores, torch.zeros_like(neg_scores))
            loss = pos_loss + neg_loss
            loss.backward()
            self.optimizer.step()

            epoch_losses.append(loss.item())
            correct = (pos_scores > 0).float().sum() + (neg_scores < 0).float().sum()
            epoch_accs.append(correct / (pos_scores.shape[0] * 2))
        return (sum(epoch_losses) / len(epoch_losses),
                sum(epoch_accs) / len(epoch_accs))