Example #1
    def evaluateRec(self, u_ids, all_i_ids=None):
        batch_size = len(u_ids)
        # Embed only the given candidate items when ids are provided and
        # sharing is enabled; otherwise score against the full item table.
        all_i = self.item_embeddings(
            all_i_ids
        ) if all_i_ids is not None and self.is_share else self.item_embeddings.weight
        item_total, dim = all_i.size()

        u = self.user_embeddings(u_ids)
        # expand u and i to pairwise matches: batch * item * dim
        u_e = u.expand(item_total, batch_size, dim).permute(1, 0, 2)

        i_e = all_i.expand(batch_size, item_total, dim)

        # Map items to their aligned KG entities, padding unmatched items
        # with the last entity id (ent_total - 1).
        e_ids = self.paddingItems(
            all_i_ids.data if all_i_ids is not None else self.i_map,
            self.ent_total - 1)
        e_var = to_gpu(V(torch.LongTensor(e_ids)))
        e_e = self.ent_embeddings(e_var).expand(batch_size, item_total, dim)

        # item representation fuses the item and its linked entity embedding
        ie_e = i_e + e_e

        # batch * item * dim
        _, r_e, norm = self.getPreferences(u_e,
                                           ie_e,
                                           use_st_gumbel=self.use_st_gumbel)

        proj_u_e = projection_transH_pytorch(u_e, norm)
        proj_i_e = projection_transH_pytorch(ie_e, norm)

        # batch * item
        if self.L1_flag:
            score = torch.sum(torch.abs(proj_u_e + r_e - proj_i_e), 2)
        else:
            score = torch.sum((proj_u_e + r_e - proj_i_e)**2, 2)
        return score
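Every snippet here leans on a projection_transH_pytorch helper that the examples do not show. A minimal sketch of it, assuming the standard TransH hyperplane projection e_perp = e - (w^T e) * w with w the relation's (unit-norm) normal vector; summing over the last axis lets the same code serve both the 2-D (batch * dim) and 3-D (batch * item * dim) calls above:

import torch

def projection_transH_pytorch(original, norm):
    # e_perp = e - (w^T e) * w: drop the component of `original` along `norm`.
    # The sum over the trailing dim is the scalar w^T e, kept as size 1 so it
    # broadcasts back over dim; works for 2-D and 3-D inputs alike.
    return original - torch.sum(original * norm, dim=-1, keepdim=True) * norm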
Example #2
    def evaluateTail(self, h, r, all_e_ids=None):
        batch_size = len(h)
        # Embed only the given candidate entities when ids are provided and
        # sharing is enabled; otherwise score against the full entity table.
        all_e = self.ent_embeddings(
            all_e_ids
        ) if all_e_ids is not None and self.is_share else self.ent_embeddings.weight
        ent_total, dim = all_e.size()
        # batch * dim
        h_e = self.ent_embeddings(h)
        r_e = self.rel_embeddings(r)
        norm_e = self.norm_embeddings(r)

        proj_h_e = projection_transH_pytorch(h_e, norm_e)
        # candidate tail: projected head translated by the relation
        c_t_e = proj_h_e + r_e

        # batch * entity * dim
        c_t_expand = c_t_e.expand(ent_total, batch_size, dim).permute(1, 0, 2)

        # batch * entity * dim
        norm_expand = norm_e.expand(ent_total, batch_size,
                                    dim).permute(1, 0, 2)

        ent_expand = all_e.expand(batch_size, ent_total, dim)
        proj_ent_e = projection_transH_pytorch(ent_expand, norm_expand)

        # batch * entity
        if self.L1_flag:
            score = torch.sum(torch.abs(c_t_expand - proj_ent_e), 2)
        else:
            score = torch.sum((c_t_expand - proj_ent_e)**2, 2)
        return score
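The returned batch * entity matrix holds TransH distances, so lower is better. As one hedged sketch of how such scores can feed ranking metrics (this helper is illustrative, not part of the original code):

import torch

def tail_ranks(scores, true_tail_ids):
    # scores: (batch, ent_total) distances from evaluateTail; smaller = better.
    # Rank of each gold tail = 1 + count of entities with strictly lower score.
    gold = scores.gather(1, true_tail_ids.view(-1, 1))  # (batch, 1)
    ranks = 1 + (scores < gold).sum(dim=1)               # (batch,)
    hits_at_10 = (ranks <= 10).float().mean().item()
    return ranks, hits_at_10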
Example #3
    def forward(self, h, t, r):
        h_e = self.ent_embeddings(h)
        t_e = self.ent_embeddings(t)
        r_e = self.rel_embeddings(r)
        norm_e = self.norm_embeddings(r)

        proj_h_e = projection_transH_pytorch(h_e, norm_e)
        proj_t_e = projection_transH_pytorch(t_e, norm_e)

        if self.L1_flag:
            score = torch.sum(torch.abs(proj_h_e + r_e - proj_t_e), 1)
        else:
            score = torch.sum((proj_h_e + r_e - proj_t_e) ** 2, 1)
        return score
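Example #3 is the plain TransH score f(h, r, t) = ||h_perp + r - t_perp||, with head and tail projected onto the relation-specific hyperplane before the translation is applied. A self-contained sanity check using random tensors and the projection sketch above (all shapes and values are illustrative):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch, dim = 4, 8
h_e, t_e, r_e = (torch.randn(batch, dim) for _ in range(3))
# TransH assumes unit-norm hyperplane normals w_r.
norm_e = F.normalize(torch.randn(batch, dim), dim=1)

proj_h_e = projection_transH_pytorch(h_e, norm_e)
proj_t_e = projection_transH_pytorch(t_e, norm_e)
score = torch.sum(torch.abs(proj_h_e + r_e - proj_t_e), 1)  # L1 variant
print(score.shape)  # torch.Size([4]); lower = more plausible triple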
Example #4
    def forward(self, u_ids, i_ids):
        u_e = self.user_embeddings(u_ids)
        i_e = self.item_embeddings(i_ids)

        _, r_e, norm = self.getPreferences(u_e,
                                           i_e,
                                           use_st_gumbel=self.use_st_gumbel)

        proj_u_e = projection_transH_pytorch(u_e, norm)
        proj_i_e = projection_transH_pytorch(i_e, norm)

        if self.L1_flag:
            score = torch.sum(torch.abs(proj_u_e + r_e - proj_i_e), 1)
        else:
            score = torch.sum((proj_u_e + r_e - proj_i_e)**2, 1)
        return score
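During training, per-pair scores like these are usually compared against scores for sampled negative items under a margin-based ranking loss. A hedged sketch (this loss helper is an assumption, not the repository's code):

import torch
import torch.nn as nn

def margin_rank_loss(pos_score, neg_score, margin=1.0):
    # Lower score = better fit, so drive pos_score below neg_score by `margin`.
    # target = -1 tells MarginRankingLoss to prefer the second input larger.
    target = -torch.ones_like(pos_score)
    return nn.MarginRankingLoss(margin=margin)(pos_score, neg_score, target)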
Example #5
    def forward(self, ratings, triples, is_rec=True):

        # recommendation branch: score (user, item) pairs
        if is_rec and ratings is not None:
            u_ids, i_ids = ratings

            # align items with their KG entities, padding with ent_total - 1
            e_ids = self.paddingItems(i_ids.data, self.ent_total - 1)
            e_var = to_gpu(V(torch.LongTensor(e_ids)))

            u_e = self.user_embeddings(u_ids)
            i_e = self.item_embeddings(i_ids)
            e_e = self.ent_embeddings(e_var)
            ie_e = i_e + e_e

            _, r_e, norm = self.getPreferences(
                u_e, ie_e, use_st_gumbel=self.use_st_gumbel)

            proj_u_e = projection_transH_pytorch(u_e, norm)
            proj_i_e = projection_transH_pytorch(ie_e, norm)

            if self.L1_flag:
                score = torch.sum(torch.abs(proj_u_e + r_e - proj_i_e), 1)
            else:
                score = torch.sum((proj_u_e + r_e - proj_i_e)**2, 1)
        # KG branch: score (head, tail, relation) triples with TransH
        elif not is_rec and triples is not None:
            h, t, r = triples
            h_e = self.ent_embeddings(h)
            t_e = self.ent_embeddings(t)
            r_e = self.rel_embeddings(r)
            norm_e = self.norm_embeddings(r)

            proj_h_e = projection_transH_pytorch(h_e, norm_e)
            proj_t_e = projection_transH_pytorch(t_e, norm_e)

            if self.L1_flag:
                score = torch.sum(torch.abs(proj_h_e + r_e - proj_t_e), 1)
            else:
                score = torch.sum((proj_h_e + r_e - proj_t_e)**2, 1)
        else:
            raise NotImplementedError

        return score
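One way the two branches of this joint forward might be driven in a training loop, alternating recommendation and KG batches; the model handle, the batch tensors, and the margin_rank_loss helper from the previous sketch are all assumptions:

# Recommendation step: observed vs. sampled-negative items for the same users.
rec_pos = model((u_ids, pos_i_ids), None, is_rec=True)
rec_neg = model((u_ids, neg_i_ids), None, is_rec=True)
rec_loss = margin_rank_loss(rec_pos, rec_neg)

# KG step: gold triples vs. tail-corrupted negatives.
kg_pos = model(None, (h, t, r), is_rec=False)
kg_neg = model(None, (h, t_corrupt, r), is_rec=False)
kg_loss = margin_rank_loss(kg_pos, kg_neg)

(rec_loss + kg_loss).backward()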
Example #6
    def evaluate(self, u_ids):
        batch_size = len(u_ids)
        u = self.user_embeddings(u_ids)
        # expand u and i to pairwise matches: batch * item * dim
        u_e = u.expand(self.item_total, batch_size,
                       self.embedding_size).permute(1, 0, 2)
        i_e = self.item_embeddings.weight.expand(batch_size, self.item_total,
                                                 self.embedding_size)

        # batch * item * dim
        _, r_e, norm = self.getPreferences(u_e,
                                           i_e,
                                           use_st_gumbel=self.use_st_gumbel)

        proj_u_e = projection_transH_pytorch(u_e, norm)
        proj_i_e = projection_transH_pytorch(i_e, norm)

        # batch * item
        if self.L1_flag:
            score = torch.sum(torch.abs(proj_u_e + r_e - proj_i_e), 2)
        else:
            score = torch.sum((proj_u_e + r_e - proj_i_e)**2, 2)
        return score
Example #7
    def evaluateTail(self, h, r):
        batch_size = len(h)
        # batch * dim
        h_e = self.ent_embeddings(h)
        r_e = self.rel_embeddings(r)
        norm_e = self.norm_embeddings(r)

        proj_h_e = projection_transH_pytorch(h_e, norm_e)
        c_t_e = proj_h_e + r_e
        
        # batch * entity * dim
        c_t_expand = c_t_e.expand(self.ent_total, batch_size,
                                  self.embedding_size).permute(1, 0, 2)

        # batch * entity * dim
        norm_expand = norm_e.expand(self.ent_total, batch_size,
                                    self.embedding_size).permute(1, 0, 2)
        ent_expand = self.ent_embeddings.weight.expand(
            batch_size, self.ent_total, self.embedding_size)
        proj_ent_e = projection_transH_pytorch(ent_expand, norm_expand)

        # batch * entity
        if self.L1_flag:
            score = torch.sum(torch.abs(c_t_expand - proj_ent_e), 2)
        else:
            score = torch.sum((c_t_expand - proj_ent_e) ** 2, 2)
        return score