Example 1: element-wise (GMF-style) triple interaction
    def forward(self, ps, qs, rs):
        # Project the first FC weight matrix back onto its constraint
        # set before the forward pass (see the constrain sketch below).
        constrain(next(self._FC.parameters()))

        # Look up the embedding vectors of the three input fields.
        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)

        # GMF-style interaction: element-wise product of the three
        # embeddings, fed to a fully connected scoring layer.
        inferences = self._FC(ps_embedding * qs_embedding * rs_embedding)
        # L2 regularization term on the raw embeddings.
        regs = self.reg * (torch.norm(ps_embedding) +
                           torch.norm(qs_embedding) + torch.norm(rs_embedding))
        return inferences, regs
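The constrain helper is never defined in these snippets, though every example calls it on weight matrices before the forward pass. A minimal sketch of a plausible implementation, assuming it performs an in-place norm projection (the behavior and the max_norm parameter are assumptions, not the original code):

    import torch

    def constrain(param, max_norm=1.0):
        # Hypothetical sketch: project each row of a weight matrix
        # onto the L2 ball of radius max_norm, in place.
        with torch.no_grad():
            norms = param.norm(p=2, dim=-1, keepdim=True).clamp(min=max_norm)
            param.mul_(max_norm / norms)

Rows whose norm is already below max_norm are left untouched, since the clamp pins the divisor at max_norm.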
Example 2: concatenation (MLP-style) triple interaction
    def forward(self, ps, qs, rs):
        # Constrain every weight matrix of the MLP; 1-D parameters
        # (biases) are skipped.
        for p in self._FC.parameters():
            if len(p.size()) == 1:
                continue
            constrain(p)

        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)

        # MLP-style interaction: concatenate the three embeddings along
        # the feature dimension and score them with the network.
        inferences = self._FC(
            torch.cat([ps_embedding, qs_embedding, rs_embedding], dim=-1))
        regs = self.reg * (torch.norm(ps_embedding) +
                           torch.norm(qs_embedding) + torch.norm(rs_embedding))

        return inferences, regs
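All five forward methods assume the same module attributes (_PsEmbedding, _QsEmbedding, _RsEmbedding, _FC, reg). A minimal constructor skeleton consistent with Example 2 (the class name and all sizes are hypothetical):

    import torch.nn as nn

    class TripleNet(nn.Module):
        # Hypothetical skeleton inferred from the attribute names used
        # in the snippets; every size here is an assumption.
        def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
            super().__init__()
            self._PsEmbedding = nn.Embedding(num_ps, embedding_dim)
            self._QsEmbedding = nn.Embedding(num_qs, embedding_dim)
            self._RsEmbedding = nn.Embedding(num_rs, embedding_dim)
            # Example 2 concatenates the three embeddings, so its head
            # takes 3 * embedding_dim features and emits one score.
            self._FC = nn.Linear(3 * embedding_dim, 1)
            self.reg = reg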
Example 3: NeuMF-style fusion of the GMF and MLP paths
    def forward(self, ps, qs, rs):
        # Constrain the first weight matrix of both paths.
        constrain(next(self._FC.parameters()))
        constrain(next(self._W.parameters()))

        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)

        # NeuMF-style fusion: a GMF path (element-wise product) and an
        # MLP path (concatenation through _W) are summed, passed
        # through a ReLU, and scored by the final FC layer.
        gmf_out = ps_embedding * qs_embedding * rs_embedding
        mlp_out = self._W(
            torch.cat([ps_embedding, qs_embedding, rs_embedding], dim=-1))
        inferences = self._FC(F.relu(gmf_out + mlp_out))
        regs = self.reg * (torch.norm(ps_embedding) +
                           torch.norm(qs_embedding) + torch.norm(rs_embedding))
        return inferences, regs
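Note that gmf_out + mlp_out only typechecks if _W projects the concatenation of three d-dimensional embeddings back down to the d features of the element-wise product. A toy shape check with hypothetical sizes:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    d = 8                     # hypothetical embedding dimension
    _W = nn.Linear(3 * d, d)  # maps the concatenation back to d features
    _FC = nn.Linear(d, 1)     # final scoring layer

    p, q, r = (torch.randn(4, d) for _ in range(3))
    gmf_out = p * q * r
    mlp_out = _W(torch.cat([p, q, r], dim=-1))
    score = _FC(F.relu(gmf_out + mlp_out))  # shape: (4, 1)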
Example 4: searched interaction operator via ops_triple
    def forward(self, ps, qs, rs):
        constrain(next(self._FC.parameters()))
        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)

        # Feed each embedding through its own per-field MLP one scalar
        # at a time, then restore the original shape.
        ps_embedding_trans = self.mlp_p(ps_embedding.view(-1, 1)).view(
            ps_embedding.size())
        qs_embedding_trans = self.mlp_q(qs_embedding.view(-1, 1)).view(
            qs_embedding.size())
        rs_embedding_trans = self.mlp_r(rs_embedding.view(-1, 1)).view(
            rs_embedding.size())

        # Apply the interaction operator chosen by the searched
        # architecture (see the ops_triple sketch below).
        inferences = self._FC(
            ops_triple(self.arch['triple'], ps_embedding_trans,
                       qs_embedding_trans, rs_embedding_trans))
        regs = self.reg * (torch.norm(ps_embedding) +
                           torch.norm(qs_embedding) + torch.norm(rs_embedding))
        return inferences, regs
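ops_triple is not shown either; it evidently selects an interaction function by the name stored in self.arch['triple']. A hedged sketch of such a dispatcher (the candidate set here is an assumption; the repository's actual primitives may differ):

    import torch

    def ops_triple(op_name, p, q, r):
        # Hypothetical dispatcher over a few common interaction ops.
        if op_name == 'mult':
            return p * q * r
        if op_name == 'plus':
            return p + q + r
        if op_name == 'max':
            return torch.max(torch.max(p, q), r)
        if op_name == 'min':
            return torch.min(torch.min(p, q), r)
        if op_name == 'concat':
            return torch.cat([p, q, r], dim=-1)
        raise ValueError('unknown triple op: %s' % op_name)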
Example 5: mixed operation over all candidate ops (MixedTriple)
    def forward(self, ps, qs, rs):
        # One FC head per candidate interaction op; constrain the
        # first weight matrix of each head.
        for i in range(len(PRIMITIVES_TRIPLE)):
            constrain(next(self._FC[i].parameters()))

        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)

        # Transform each embedding scalar-wise with the searchable
        # per-field MLPs, restoring the original shape afterwards.
        ps_embedding_trans = self._arch_parameters['mlp']['p'](
            ps_embedding.view(-1, 1)).view(ps_embedding.size())
        qs_embedding_trans = self._arch_parameters['mlp']['q'](
            qs_embedding.view(-1, 1)).view(qs_embedding.size())
        rs_embedding_trans = self._arch_parameters['mlp']['r'](
            rs_embedding.view(-1, 1)).view(rs_embedding.size())

        # The architecture weights are already binarized: exactly one
        # candidate op is active, so the mixed op reduces to a single
        # branch (see the MixedTriple sketch below).
        assert self._arch_parameters['triple'].sum() == 1.
        inferences = MixedTriple(ps_embedding_trans, qs_embedding_trans,
                                 rs_embedding_trans,
                                 self._arch_parameters['triple'], self._FC)

        regs = self.reg * (torch.norm(ps_embedding) +
                           torch.norm(qs_embedding) + torch.norm(rs_embedding))
        return inferences, regs
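MixedTriple is likewise undefined here. Given the one-hot assertion and the per-op _FC[i] heads, a plausible sketch, reusing the hypothetical ops_triple above (this is an assumption, not the original implementation):

    # Hypothetical candidate list; the real PRIMITIVES_TRIPLE may differ.
    PRIMITIVES_TRIPLE = ['mult', 'plus', 'max', 'min', 'concat']

    def MixedTriple(p, q, r, weights, fc_layers):
        # Weighted sum over all candidate ops, each scored by its own
        # FC head; with one-hot weights exactly one branch contributes.
        return sum(w * fc(ops_triple(name, p, q, r))
                   for name, w, fc in zip(PRIMITIVES_TRIPLE, weights, fc_layers))

Per-op heads are a sensible design here: candidates such as 'concat' and 'mult' produce different widths, and each fc_layers[i] can map its op's output to a common scalar score before the weighted sum.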