Example #1
    def tail_forward(self, pos_triplets, neg_triplets, real_ent_ids):
        entity_emb = self.entity_embedding(real_ent_ids)
        if not self.args.cpu_emb:
            self.entity_embedding.curr_emb = entity_emb

        # Static entity features are gathered in numpy and concatenated with
        # the trainable embeddings to form the lookup table.
        entity_feat = paddle.to_tensor(self.entity_feat[real_ent_ids.numpy()]
                                       .astype('float32'))
        emb = paddle.concat([entity_emb, entity_feat], axis=-1)

        pos_head = self.transform_net.embed_entity(
            F.embedding(pos_triplets[0], emb))
        pos_tail = self.transform_net.embed_entity(
            F.embedding(pos_triplets[2], emb))
        neg_head = self.transform_net.embed_entity(
            F.embedding(neg_triplets[0], emb))
        neg_tail = self.transform_net.embed_entity(
            F.embedding(neg_triplets[2], emb))

        pos_rel = self.transform_net.embed_relation(paddle.concat(
            [self.relation_embedding(pos_triplets[1]),
             paddle.to_tensor(self.relation_feat[pos_triplets[1].numpy()]
                              .astype('float32'))], axis=-1))
        neg_rel = self.transform_net.embed_relation(paddle.concat(
            [self.relation_embedding(neg_triplets[1]),
             paddle.to_tensor(self.relation_feat[neg_triplets[1].numpy()]
                              .astype('float32'))], axis=-1))
        batch_size = pos_head.shape[0]
        # The number of negatives cannot exceed the actual batch size.
        neg_sample_size = min(batch_size, self.args.neg_sample_size)
        pos_score = self.score_function.get_score(pos_head, pos_rel, pos_tail)
        neg_score = self.score_function.get_neg_score(
            pos_head, pos_rel, pos_tail, batch_size, neg_sample_size,
            neg_sample_size, False)
        loss = self.loss_func.get_total_loss(pos_score, neg_score)
        return loss
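All of these snippets call paddle.nn.functional.embedding with a plain 2-D weight tensor. In the example above the table is built on the fly by concatenating trainable embeddings with static features and then indexed with the positional form F.embedding(ids, weight). A minimal sketch of that lookup, with made-up shapes:

import paddle
import paddle.nn.functional as F

# Hypothetical sizes: 10 entities, 4 trainable dims plus 3 static feature dims.
entity_emb = paddle.rand([10, 4])
entity_feat = paddle.rand([10, 3])
emb = paddle.concat([entity_emb, entity_feat], axis=-1)  # [10, 7] lookup table

ids = paddle.to_tensor([0, 2, 5])
vecs = F.embedding(ids, emb)  # [3, 7], one row per id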
Example #2
    def forward(self, input):
        dtype = input.dtype
        flatten = input.reshape([-1, self.dim])
        # Squared L2 distance between each row of `flatten` and each code
        # vector (the columns of self.embed): |x|^2 - 2*x.e + |e|^2.
        dist = (flatten.pow(2).sum(1, keepdim=True) -
                2 * flatten.matmul(self.embed) +
                self.embed.pow(2).sum(0, keepdim=True))
        # Pick the nearest code per row, then fetch the code vectors by an
        # embedding lookup into the transposed codebook ([n_embed, dim]).
        embed_ind = (-dist).argmax(1)
        embed_onehot = F.one_hot(embed_ind, self.n_embed).astype(dtype)
        embed_ind = embed_ind.reshape(input.shape[:-1])
        quantize = F.embedding(embed_ind,
                               self.embed.transpose([1, 0]),
                               padding_idx=-1)

        if self.training:
            # Exponential-moving-average codebook update: accumulate per-code
            # counts and feature sums (all-reduced across workers), smooth the
            # counts, and renormalize the running averages into new codes.
            embed_onehot_sum = embed_onehot.sum(0)
            embed_sum = flatten.transpose([1, 0]).matmul(embed_onehot)

            if dist_fn.get_world_size() > 1:
                dist_fn.all_reduce(embed_onehot_sum)
                dist_fn.all_reduce(embed_sum)

            ema_inplace(self.cluster_size, embed_onehot_sum, self.decay)
            ema_inplace(self.embed_avg, embed_sum, self.decay)
            cluster_size = laplace_smoothing(
                self.cluster_size, self.n_embed,
                self.eps) * self.cluster_size.sum()
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
            self.embed[:] = embed_normalized

        # Commitment loss plus the straight-through estimator, so gradients
        # flow to `input` as if quantization were the identity.
        loss = F.mse_loss(quantize.detach(), input) * self.commitment
        quantize = input + (quantize - input).detach()
        return quantize, embed_ind, loss
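The quantizer's core step is a nearest-codebook lookup: squared distances to every code column, an argmax over negated distances, and an F.embedding fetch from the transposed codebook. A toy, self-contained sketch of that step (sizes are invented, not taken from the module above):

import paddle
import paddle.nn.functional as F

dim, n_embed = 4, 8
embed = paddle.rand([dim, n_embed])      # codebook stored as [dim, n_embed]
flatten = paddle.rand([10, dim])         # flattened inputs

dist = (flatten.pow(2).sum(1, keepdim=True) -
        2 * flatten.matmul(embed) +
        embed.pow(2).sum(0, keepdim=True))               # [10, n_embed]
embed_ind = (-dist).argmax(1)                            # nearest code per row
quantize = F.embedding(embed_ind, embed.transpose([1, 0]))  # [10, dim]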
Example #3
    def forward(self, inputs):
        """
        u_idx, v_idx, neg_idx = inputs
        """
        u_idx, v_idx, neg_idx = inputs

        final_user_embeddings, final_item_embeddings = self.infer_embedding()

        # create self-supervised loss
        ss_loss = 0.0
        ss_loss += self.hierarchical_self_supervision(
            self.self_supervised_gating(final_user_embeddings, 1), self.H_s)
        ss_loss += self.hierarchical_self_supervision(
            self.self_supervised_gating(final_user_embeddings, 2), self.H_j)
        ss_loss += self.hierarchical_self_supervision(
            self.self_supervised_gating(final_user_embeddings, 3), self.H_p)

        # embedding look-up
        batch_neg_item_emb = F.embedding(weight=final_item_embeddings,
                                         x=neg_idx)
        batch_user_emb = F.embedding(weight=final_user_embeddings, x=u_idx)
        batch_pos_item_emb = F.embedding(weight=final_item_embeddings, x=v_idx)

        return batch_user_emb, batch_pos_item_emb, batch_neg_item_emb, ss_loss
Example #4
    def forward(self, input, expand_ratio=None, channel=None):
        assert (
            expand_ratio is None or channel is None
        ), "expand_ratio and channel cannot both be set at the same time."
        if expand_ratio is not None:
            out_nc = int(expand_ratio * self.base_output_dim)
        elif channel is not None:
            out_nc = int(channel)
        else:
            out_nc = self._embedding_dim

        # Slice the full table to the requested number of output channels.
        weight = self.weight[:, :out_nc]
        return F.embedding(
            input,
            weight=weight,
            padding_idx=self._padding_idx,
            sparse=self._sparse,
            name=self._name)
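Because F.embedding accepts any 2-D tensor as weight, slicing columns off a larger table, as this layer does, behaves like a narrower embedding. A small sketch with hypothetical shapes:

import paddle
import paddle.nn.functional as F

full_weight = paddle.rand([100, 64])   # hypothetical full-width table
narrow = full_weight[:, :32]           # keep only the first 32 channels

ids = paddle.to_tensor([[3, 7], [0, 9]])
out = F.embedding(ids, weight=narrow, padding_idx=None, sparse=False)
print(out.shape)                       # [2, 2, 32]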
Example #5
    def forward(self, x):
        if self.is_mp:
            # Model-parallel path: each rank looks up its local shard of the
            # vocabulary, then the partial results are summed across ranks.
            output_parallel = paddle.distributed.collective._c_lookup_table(
                self.weight,
                x,
                start_index=self.vocab_start_index,
                name=self._name)
            output = paddle.distributed.collective._mp_allreduce(
                output_parallel,
                group=self.model_parallel_group,
                use_calc_stream=True,
                use_model_parallel=True)
        else:
            output = F.embedding(x,
                                 weight=self.weight,
                                 padding_idx=None,
                                 sparse=False,
                                 name=self._name)
        return output
Example #6
    def forward(self,
                input_u,
                item_attribute,
                input_ur=None,
                item_bind_M=None):
        user_feature_emb = self.user_feature_emb(input_u)
        summed_user_emb = user_feature_emb.sum(1)
        all_item_feature_emb = self.all_item_feature_emb(item_attribute)
        summed_all_item_emb = all_item_feature_emb.sum(1)
        # FM-style second-order interactions:
        # 0.5 * ((sum of embeddings)^2 - sum of squared embeddings).
        user_cross = 0.5 * (summed_user_emb**2 - (user_feature_emb**2).sum(1))
        item_cross = 0.5 * (summed_all_item_emb**2 -
                            (all_item_feature_emb**2).sum(1))
        user_cross_score = user_cross.matmul(self.H_s)
        item_cross_score = item_cross.matmul(self.H_s)
        user_bias = self.user_bias(input_u).sum(1)
        item_bias = self.item_bias(item_attribute).sum(1)

        I = paddle.ones([input_u.shape[0], 1])
        p_emb = paddle.concat(
            [summed_user_emb, user_cross_score + user_bias + self.bias, I], 1)

        I = paddle.ones([summed_all_item_emb.shape[0], 1])
        q_emb = paddle.concat(
            [summed_all_item_emb, I, item_cross_score + item_bias], 1)
        H_i_emb = paddle.concat(
            [self.H_i,
             paddle.to_tensor([[1.0]]),
             paddle.to_tensor([[1.0]])], 0)
        dot = paddle.einsum('ac,bc->abc', p_emb, q_emb)
        pre = paddle.einsum('ajk,kl->aj', dot, H_i_emb)
        if input_ur is None:
            return (pre, )

        pos_item = F.embedding(input_ur, q_emb)
        pos_num_r = (input_ur != item_bind_M).astype(default_type)
        pos_item = paddle.einsum('ab,abc->abc', pos_num_r, pos_item)

        pos_r = paddle.einsum('ac,abc->abc', p_emb, pos_item)
        pos_r = paddle.einsum('ajk,kl->ajl', pos_r, H_i_emb).flatten(1)
        return pre, pos_r, q_emb, p_emb, H_i_emb
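The two einsum calls carry the scoring logic: 'ac,bc->abc' forms the element-wise product for every user/item pair, and 'ajk,kl->aj' contracts the last axis against a column of weights. A tiny illustration with invented shapes:

import paddle

p_emb = paddle.rand([2, 5])   # 2 users, 5-dim representation
q_emb = paddle.rand([3, 5])   # 3 items
H = paddle.rand([5, 1])       # column of interaction weights

dot = paddle.einsum('ac,bc->abc', p_emb, q_emb)   # [2, 3, 5] pairwise products
pre = paddle.einsum('ajk,kl->aj', dot, H)         # [2, 3] scores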
Example #7
            def test_bad_x():
                initializer = fluid.initializer.NumpyArrayInitializer(
                    np.random.random(size=(128, 100)))

                param_attr = fluid.ParamAttr(name="emb_weight",
                                             learning_rate=0.5,
                                             initializer=initializer,
                                             trainable=True)

                weight = prog.global_block().create_parameter((128, 100),
                                                              attr=param_attr,
                                                              dtype="float32")

                label = fluid.layers.data(name="label",
                                          shape=[4],
                                          append_batch_size=False,
                                          dtype="int64")

                emb = functional.embedding(x=label,
                                           weight=weight,
                                           sparse=True,
                                           name="embedding")