def forward(self, inputs, rel_rec, rel_send):
    """Linear SEM encoder forward pass.

    Args:
        inputs: observed data tensor; assumes node dimension is broadcastable
            against ``adj_A`` (presumably (batch, n_nodes, feat)) — TODO confirm.
        rel_rec, rel_send: relation matrices; unused here, kept for interface
            parity with the other encoder variants.

    Returns:
        Tuple ``(inputs - meanF, logits, adj_A1, adj_A, z, z_positive, adj_A)``.
    """
    # NaN guard: isnan().any() is the idiomatic form of the old
    # torch.sum(x != x) self-comparison trick; truthiness is identical.
    if torch.isnan(self.adj_A).any():
        print('nan error \n')

    # Amplify the value of A to accelerate convergence (sinh is monotonic, odd).
    adj_A1 = torch.sinh(3. * self.adj_A)

    # adj_A = I - A^T, adj_A_inv = (I - A^T)^(-1)  (project-local helpers)
    adj_A = preprocess_adj_new(adj_A1)
    adj_A_inv = preprocess_adj_new1(adj_A1)

    # Mean taken in the transformed space, then mapped back through the inverse.
    meanF = torch.matmul(adj_A_inv, torch.mean(torch.matmul(adj_A, inputs), 0))
    logits = torch.matmul(adj_A, inputs - meanF)

    return inputs - meanF, logits, adj_A1, adj_A, self.z, self.z_positive, self.adj_A
def forward(self, inputs, rel_rec, rel_send):
    """MLP encoder forward pass.

    Args:
        inputs: input data tensor fed through ``fc1``/``fc2``; assumed
            (batch, n_nodes, feat) — TODO confirm against caller.
        rel_rec, rel_send: relation matrices; unused here, kept for interface
            parity with the other encoder variants.

    Returns:
        Tuple ``(x, logits, adj_A1, adj_A, z, z_positive, adj_A, Wa)`` where
        ``adj_A`` is an identity placeholder returned for interface parity.
    """
    # NaN guard: isnan().any() is the idiomatic form of the old
    # torch.sum(x != x) self-comparison trick; truthiness is identical.
    if torch.isnan(self.adj_A).any():
        print('nan error \n')

    # To amplify the value of A and accelerate convergence.
    adj_A1 = torch.sinh(3. * self.adj_A)

    # adj_Aforz = I - A^T  (project-local helper)
    adj_Aforz = preprocess_adj_new(adj_A1)

    # Identity placeholder; kept double to match the rest of the model's dtype.
    adj_A = torch.eye(adj_A1.size(0)).double()

    H1 = F.relu(self.fc1(inputs))
    x = self.fc2(H1)

    # Affine transform through (I - A^T) with a learned shift Wa.
    logits = torch.matmul(adj_Aforz, x + self.Wa) - self.Wa

    return x, logits, adj_A1, adj_A, self.z, self.z_positive, self.adj_A, self.Wa
def forward(self, inputs, rel_rec, rel_send):
    """Discrete (embedding-based) encoder forward pass.

    Args:
        inputs: integer-valued input tensor; flattened over its last data axis,
            embedded, then passed through ``fc1``/``fc2``. Presumably
            (batch, n_nodes, 1) of category indices — TODO confirm.
        rel_rec, rel_send: relation matrices; unused here, kept for interface
            parity with the other encoder variants.

    Returns:
        Tuple ``(x, prob, adj_A1, adj_A, z, z_positive, adj_A, Wa, alpha)``
        where ``prob`` and ``alpha`` are softmax-normalized over the last axis.
    """
    # NaN guard: isnan().any() is the idiomatic form of the old
    # torch.sum(x != x) self-comparison trick; truthiness is identical.
    if torch.isnan(self.adj_A).any():
        print('nan error \n')

    # Amplify the value of A to accelerate convergence.
    adj_A1 = torch.sinh(3. * self.adj_A)

    # adj_Aforz = I - A^T  (project-local helper)
    adj_Aforz = preprocess_adj_new(adj_A1)

    # Identity placeholder returned for interface parity with other encoders.
    adj_A = torch.eye(adj_A1.size(0)).double()

    # Embed categorical inputs, then restore the leading dimensions.
    bninput = self.embed(inputs.long().view(-1, inputs.size(2)))
    bninput = bninput.view(*inputs.size(), -1).squeeze()

    H1 = F.relu(self.fc1(bninput))
    x = self.fc2(H1)

    # Affine transform through (I - A^T) with a learned shift Wa.
    logits = torch.matmul(adj_Aforz, x + self.Wa) - self.Wa

    # Normalize over the last axis (project-local softmax helper).
    prob = my_softmax(logits, -1)
    alpha = my_softmax(self.alpha, -1)

    return x, prob, adj_A1, adj_A, self.z, self.z_positive, self.adj_A, self.Wa, alpha