Example no. 1
0
    def call(self, inputs, training=None, **kwargs):
        """Label-aware attention over interest capsules.

        inputs: ``[keys, query]`` or ``[keys, query, hist_len]``.
        Presumably ``keys`` holds the interest capsules and ``query`` the
        target-item embedding -- TODO confirm against the caller.
        Returns the attention-weighted sum of ``keys`` over axis 1.
        """
        keys = inputs[0]
        query = inputs[1]
        # Dot-product score per capsule, kept as [x,k_max,1].
        weight = reduce_sum(keys * query, axis=-1, keep_dims=True)
        # pow_p sharpens (or flattens) the attention distribution.
        weight = tf.pow(weight, self.pow_p)  # [x,k_max,1]

        if len(inputs) == 3:
            # Number of valid capsules per user:
            # clamp(log2(1 + hist_len), 1, k_max), cast to int64.
            k_user = tf.cast(
                tf.maximum(
                    1.,
                    tf.minimum(
                        tf.cast(self.k_max, dtype="float32"),  # k_max
                        tf.log1p(tf.cast(inputs[2], dtype="float32")) /
                        tf.log(2.)  # hist_len
                    )),
                dtype="int64")
            # Transpose the mask so it broadcasts against weight.
            seq_mask = tf.transpose(tf.sequence_mask(k_user, self.k_max),
                                    [0, 2, 1])
            # Large negative fill so masked slots become ~0 after softmax.
            padding = tf.ones_like(seq_mask, dtype=tf.float32) * (
                -2**32 + 1)  # [x,k_max,1]
            weight = tf.where(seq_mask, weight, padding)

        # Normalize over the capsule axis, then pool the capsules.
        weight = softmax(weight, dim=1, name="weight")
        output = reduce_sum(keys * weight, axis=1)

        return output
Example no. 2
0
 def call(self, inputs, **kwargs):
     """Score a (query, candidate) embedding pair.

     Computes the inner product of the two embeddings; when
     ``self.type == "cos"`` the product is normalized by the vector
     norms to a cosine similarity. The result is clipped to [-1, 1]
     and scaled by ``self.gamma``.
     """
     query, candidate = inputs
     # Raw inner product along the last axis.
     score = reduce_sum(tf.multiply(query, candidate), -1)
     if self.type == "cos":
         # 1e-8 in the denominator guards against division by zero.
         query_norm = tf.norm(query, axis=self.axis)
         candidate_norm = tf.norm(candidate, axis=self.axis)
         score = div(score, query_norm * candidate_norm + 1e-8)
     # NOTE(review): the clip applies even in inner-product mode --
     # confirm that is intended.
     return tf.clip_by_value(score, -1, 1.0) * self.gamma
Example no. 3
0
    def call(self, inputs, **kwargs):
        """One forward pass of dynamic routing producing interest capsules.

        inputs: ``[behavior_embddings, seq_len]``. Runs
        ``self.iteration_times`` rounds of routing between the behavior
        embeddings and ``self.k_max`` capsules.
        Returns the capsules reshaped to ``[-1, k_max, out_units]``.
        """
        behavior_embddings, seq_len = inputs
        batch_size = tf.shape(behavior_embddings)[0]
        # One sequence-length entry per capsule: tiled to [?, k_max].
        seq_len_tile = tf.tile(seq_len, [1, self.k_max])

        for i in range(self.iteration_times):
            # Valid-position mask per capsule over the padded sequence.
            mask = tf.sequence_mask(seq_len_tile, self.max_len)
            # Large negative fill so padded positions vanish in softmax.
            pad = tf.ones_like(mask, dtype=tf.float32) * (-2**32 + 1)
            # Shared routing logits are tiled across the batch, then
            # masked where the behavior sequence is padding.
            routing_logits_with_padding = tf.where(
                mask, tf.tile(self.routing_logits, [batch_size, 1, 1]), pad)
            weight = tf.nn.softmax(routing_logits_with_padding)
            # Map behaviors through the shared bilinear matrix.
            behavior_embdding_mapping = tf.tensordot(
                behavior_embddings, self.bilinear_mapping_matrix, axes=1)
            Z = tf.matmul(weight, behavior_embdding_mapping)
            interest_capsules = squash(Z)
            # Capsule/behavior agreement, summed over the batch (axis=0)
            # because the routing logits are shared across examples.
            delta_routing_logits = reduce_sum(tf.matmul(
                interest_capsules,
                tf.transpose(behavior_embdding_mapping, perm=[0, 2, 1])),
                                              axis=0,
                                              keep_dims=True)
            # NOTE(review): in-place update mutates layer state on every
            # call -- confirm this is intended outside of training.
            self.routing_logits.assign_add(delta_routing_logits)
        # NOTE(review): assumes iteration_times >= 1; with 0 iterations
        # `interest_capsules` would be unbound here.
        interest_capsules = tf.reshape(interest_capsules,
                                       [-1, self.k_max, self.out_units])
        return interest_capsules
Example no. 4
0
 def call(self, seq_value_len_list, mask=None, **kwargs):
     """Combine a list of tensors element-wise (mean / sum / max).

     A single (non-list) input, or a one-element list, is returned
     unchanged. Otherwise each tensor gains a trailing axis, all are
     concatenated on it, and the stack is reduced per ``self.mode``.
     """
     if not isinstance(seq_value_len_list, list):
         seq_value_len_list = [seq_value_len_list]
     if len(seq_value_len_list) == 1:
         # Nothing to combine.
         return seq_value_len_list[0]
     expanded = [tf.expand_dims(t, axis=-1) for t in seq_value_len_list]
     stacked = concat_func(expanded)
     if self.mode == "mean":
         hist = reduce_mean(stacked, axis=-1)
     elif self.mode == "sum":
         hist = reduce_sum(stacked, axis=-1)
     elif self.mode == "max":
         hist = reduce_max(stacked, axis=-1)
     # NOTE(review): an unrecognized mode leaves `hist` unbound and
     # raises NameError, matching the original behavior.
     return hist
Example no. 5
0
def FM(user_feature_columns,
       item_feature_columns,
       l2_reg_embedding=1e-6,
       init_std=0.0001,
       seed=1024,
       metric='cos'):
    """Instantiates the FM architecture.

    :param user_feature_columns: An iterable containing user's features used by  the model.
    :param item_feature_columns: An iterable containing item's features used by  the model.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param metric: str, ``"cos"`` for  cosine  or  ``"ip"`` for inner product
    :return: A Keras model instance.

    """

    # One embedding table shared by both towers.
    shared_embeddings = create_embedding_matrix(user_feature_columns +
                                                item_feature_columns,
                                                l2_reg_embedding,
                                                init_std,
                                                seed,
                                                seq_mask_zero=True)

    def _tower(feature_columns):
        # Build the input layers and the summed embedding vector for
        # one side (user or item) of the two-tower model.
        features = build_input_features(feature_columns)
        inputs = list(features.values())
        sparse_embeddings, _ = input_from_feature_columns(
            features,
            feature_columns,
            l2_reg_embedding,
            init_std,
            seed,
            support_dense=False,
            embedding_matrix_dict=shared_embeddings)
        dnn_input = concat_func(sparse_embeddings, axis=1)
        vector = Lambda(lambda x: reduce_sum(x, axis=1, keep_dims=False))(
            dnn_input)
        return inputs, vector

    user_inputs, user_vector_sum = _tower(user_feature_columns)
    item_inputs, item_vector_sum = _tower(item_feature_columns)

    # Cosine or inner-product similarity, then a binary prediction head.
    score = Similarity(type=metric)([user_vector_sum, item_vector_sum])
    output = PredictionLayer("binary", False)(score)

    model = Model(inputs=user_inputs + item_inputs, outputs=output)

    # Expose the tower inputs/embeddings for downstream retrieval use.
    setattr(model, "user_input", user_inputs)
    setattr(model, "user_embedding", user_vector_sum)
    setattr(model, "item_input", item_inputs)
    setattr(model, "item_embedding", item_vector_sum)

    return model
Example no. 6
0
def squash(inputs):
    """Capsule-network squash nonlinearity.

    Rescales vectors along the last axis so their length lies in
    [0, 1) while preserving direction; 1e-8 under the square root
    avoids division by zero for all-zero vectors.
    """
    squared_norm = reduce_sum(tf.square(inputs), axis=-1, keep_dims=True)
    scale = squared_norm / (1 + squared_norm) / tf.sqrt(squared_norm + 1e-8)
    return scale * inputs