def get_queries(self, queries: torch.Tensor):
    """Compute embedding and biases of queries.

    Args:
        queries: torch.LongTensor of (head, relation) index pairs;
            column 0 is the head entity, column 1 the relation.
            (assumed from the indexing pattern — TODO confirm against caller)

    Returns:
        Tuple of (query embedding, head bias) tensors.
    """
    rel_idx = queries[:, 1]
    head = self.entity(queries[:, 0])
    # Rotate the head embedding by the relation's Givens parameters,
    # then translate by the relation embedding.
    rotated = givens_rotations(self.rel_diag(rel_idx), head)
    lhs_e = rotated + self.rel(rel_idx)
    lhs_biases = self.bh(queries[:, 0])
    return lhs_e, lhs_biases
def get_queries(self, queries):
    """Compute embedding and biases of queries.

    Args:
        queries: torch.LongTensor of (head, relation) index pairs;
            column 0 is the head entity, column 1 the relation.
            (assumed from the indexing pattern — TODO confirm against caller)

    Returns:
        Tuple of ((query embedding, curvature), head bias).
    """
    rel_idx = queries[:, 1]
    # Per-relation curvature, kept positive via softplus.
    c = F.softplus(self.c[rel_idx])
    # Map the head entity onto the hyperbolic manifold.
    head = expmap0(self.entity(queries[:, 0]), c)
    # The relation embedding holds two translation vectors side by side.
    trans_pre, trans_post = torch.chunk(self.rel(rel_idx), 2, dim=1)
    trans_pre = expmap0(trans_pre, c)
    trans_post = expmap0(trans_post, c)
    # Translate, rotate, then translate again — all in hyperbolic space.
    shifted = project(mobius_add(head, trans_pre, c), c)
    rotated = givens_rotations(self.rel_diag(rel_idx), shifted)
    lhs_e = mobius_add(rotated, trans_post, c)
    return (lhs_e, c), self.bh(queries[:, 0])
def get_queries(self, queries):
    """Compute embedding and biases of queries.

    Args:
        queries: torch.LongTensor of (head, relation) index pairs;
            column 0 is the head entity, column 1 the relation.
            (assumed from the indexing pattern — TODO confirm against caller)

    Returns:
        Tuple of ((query embedding, curvature), head bias).
    """
    rel_idx = queries[:, 1]
    # Per-relation curvature, kept positive via softplus.
    c = F.softplus(self.c[rel_idx])
    head = self.entity(queries[:, 0])
    # Split the relation's diagonal parameters into rotation and reflection halves,
    # and build both candidate transforms of the head embedding.
    rot_params, ref_params = torch.chunk(self.rel_diag(rel_idx), 2, dim=1)
    rot_q = givens_rotations(rot_params, head).view((-1, 1, self.rank))
    ref_q = givens_reflection(ref_params, head).view((-1, 1, self.rank))
    cands = torch.cat([ref_q, rot_q], dim=1)
    # Score each candidate against the relation-specific context vector
    # and combine them with attention weights.
    context_vec = self.context_vec(rel_idx).view((-1, 1, self.rank))
    scores = torch.sum(context_vec * cands * self.scale, dim=-1, keepdim=True)
    weights = self.act(scores)
    combined = torch.sum(weights * cands, dim=1)
    # Map the attended representation to the manifold and translate by
    # the first half of the relation embedding.
    query_hyp = expmap0(combined, c)
    rel_trans, _ = torch.chunk(self.rel(rel_idx), 2, dim=1)
    rel_trans = expmap0(rel_trans, c)
    lhs_e = project(mobius_add(query_hyp, rel_trans, c), c)
    return (lhs_e, c), self.bh(queries[:, 0])
def get_rotation_queries(self, queries):
    """Return head embeddings rotated by their relation's rotation parameters.

    Args:
        queries: torch.LongTensor of (head, relation) index pairs;
            column 0 is the head entity, column 1 the relation.
            (assumed from the indexing pattern — TODO confirm against caller)

    Returns:
        Tensor of rotated head entity embeddings.
    """
    head = self.entity(queries[:, 0])
    return givens_rotations(self.rot(queries[:, 1]), head)