def forward(self, inputs, rel_rec, rel_send):  # rel_rec/rel_send are unused here
    # NaN check: a value is unequal to itself only if it is NaN.
    if torch.sum(self.adj_A != self.adj_A):
        print('nan error\n')

    # Amplify the values of A to accelerate convergence.
    adj_A1 = torch.sinh(3. * self.adj_A)

    # Transform A for the encoder; in DAG-GNN-style code this is (I - A^T)
    # (see the sketch of preprocess_adj_new below).
    adj_Aforz = preprocess_adj_new(adj_A1)

    adj_A = torch.eye(adj_A1.size()[0]).double()

    # Embed the discrete inputs, then encode with a two-layer MLP.
    bninput = self.embed(inputs.long().view(-1, inputs.size(2)))
    bninput = bninput.view(*inputs.size(), -1).squeeze()
    H1 = F.relu(self.fc1(bninput))
    x = self.fc2(H1)

    # Linear-SEM encoder step: logits = adj_Aforz @ (x + Wa) - Wa.
    logits = torch.matmul(adj_Aforz, x + self.Wa) - self.Wa

    prob = my_softmax(logits, -1)
    alpha = my_softmax(self.alpha, -1)

    return x, prob, adj_A1, adj_A, self.z, self.z_positive, self.adj_A, self.Wa, alpha
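# The helpers preprocess_adj_new and my_softmax are called above but not
# shown in this snippet. A minimal sketch of plausible implementations,
# assuming DAG-GNN/NRI-style conventions (a reconstruction, not the
# authoritative code):

import torch
import torch.nn.functional as F

def preprocess_adj_new(adj):
    # Assumed to return (I - A^T), the transform applied to (x + Wa) above.
    return torch.eye(adj.shape[0]).double() - adj.transpose(0, 1)

def my_softmax(input, axis=1):
    # Assumed to apply softmax along an arbitrary axis by moving that axis
    # to the front, normalizing over it, and moving it back.
    trans_input = input.transpose(axis, 0).contiguous()
    soft_max_1d = F.softmax(trans_input, dim=0)
    return soft_max_1d.transpose(axis, 0)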
def forward(self, inputs):
    # Input shape: [num_sims * num_edges, num_dims, num_timesteps]
    x = F.relu(self.conv1(inputs))
    x = self.bn1(x)
    x = F.dropout(x, self.dropout_prob, training=self.training)
    x = self.pool(x)
    x = F.relu(self.conv2(x))
    x = self.bn2(x)

    # Per-timestep edge predictions, pooled with softmax attention over time.
    pred = self.conv_predict(x)
    attention = my_softmax(self.conv_attention(x), axis=2)
    edge_prob = (pred * attention).mean(dim=2)
    return edge_prob
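# Quick shape check of the attention-weighted pooling at the end of the
# forward above, with hypothetical sizes (16 edge instances, 8 prediction
# channels, 10 timesteps). my_softmax(z, axis=2) is equivalent to
# F.softmax(z, dim=2) here.

import torch
import torch.nn.functional as F

pred = torch.randn(16, 8, 10)                         # stand-in for conv_predict output
attention = F.softmax(torch.randn(16, 1, 10), dim=2)  # stand-in for conv_attention output
edge_prob = (pred * attention).mean(dim=2)            # broadcast over channels, pool over time
print(edge_prob.shape)                                # torch.Size([16, 8])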