def KDD_DIN(dnn_feature_columns,
            history_feature_list,
            dnn_use_bn=False,
            dnn_hidden_units=(200, 80),
            dnn_activation='relu',
            att_hidden_size=(80, 40),
            att_activation="dice",
            att_weight_normalization=False,
            l2_reg_dnn=0,
            l2_reg_embedding=1e-6,
            dnn_dropout=0,
            init_std=0.0001,
            seed=1024,
            task='binary'):
    """Instantiates the Deep Interest Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param history_feature_list: list, to indicate the sequence sparse fields.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param dnn_activation: Activation function to use in the deep net.
    :param att_hidden_size: list of positive integers, the layer number and units in each layer of the attention net.
    :param att_activation: Activation function to use in the attention net.
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.

    """

    features = build_input_features(dnn_feature_columns)

    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat),
               dnn_feature_columns)) if dnn_feature_columns else []

    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)

    inputs_list = list(features.values())

    embedding_dict = kdd_create_embedding_matrix(dnn_feature_columns,
                                                 l2_reg_embedding,
                                                 init_std,
                                                 seed,
                                                 prefix="")

    query_emb_list = embedding_lookup(embedding_dict,
                                      features,
                                      sparse_feature_columns,
                                      history_feature_list,
                                      history_feature_list,
                                      to_list=True)
    keys_emb_list = embedding_lookup(embedding_dict,
                                     features,
                                     history_feature_columns,
                                     history_fc_names,
                                     history_fc_names,
                                     to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_dict,
                                          features,
                                          sparse_feature_columns,
                                          mask_feat_list=history_feature_list,
                                          to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict,
        features,
        sparse_varlen_feature_columns,
        to_list=True)

    dnn_input_emb_list += sequence_embed_list

    keys_emb = concat_func(keys_emb_list, mask=True)
    deep_input_emb = concat_func(dnn_input_emb_list)
    query_emb = concat_func(query_emb_list, mask=True)
    hist = AttentionSequencePoolingLayer(
        att_hidden_size,
        att_activation,
        weight_normalization=att_weight_normalization,
        supports_masking=True)([query_emb, keys_emb])

    deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
    deep_input_emb = Flatten()(deep_input_emb)
    dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                 dnn_use_bn, seed)(dnn_input)
    final_logit = Dense(1, use_bias=False)(output)

    output = PredictionLayer(task)(final_logit)

    model = Model(inputs=inputs_list, outputs=output)
    return model
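These snippets assume DeepCTR-style helpers are already in scope. A plausible import preamble for the example above is sketched below (DeepCTR ~0.7.x layout assumed; module paths differ between releases, and kdd_create_embedding_matrix is a project-local wrapper rather than a library function):

# Hedged import sketch for the KDD_DIN snippet above. Module paths follow the
# DeepCTR ~0.7.x layout and may differ in other releases; kdd_create_embedding_matrix
# is assumed to be defined elsewhere in the project.
from tensorflow.python.keras.layers import Concatenate, Dense, Flatten
from tensorflow.python.keras.models import Model

from deepctr.inputs import (SparseFeat, DenseFeat, VarLenSparseFeat,
                            build_input_features, embedding_lookup,
                            get_dense_input, varlen_embedding_lookup,
                            get_varlen_pooling_list, combined_dnn_input)
from deepctr.layers.core import DNN, PredictionLayer
from deepctr.layers.sequence import AttentionSequencePoolingLayer
from deepctr.layers.utils import NoMask, concat_func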
Example 2
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           embedding_size=8,
           use_fm=True,
           use_only_dnn=False,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding size.
    :param use_fm: bool, whether to use the FM part.
    :param use_only_dnn: bool, if True the final logit is the DNN logit alone.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    fm_logit = FM()(fm_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    # The generic DNN block is replaced here by two fixed 128-unit Dense layers,
    # so dnn_hidden_units only affects the logit-selection branches below.
    # dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
    #                dnn_use_bn, seed)(dnn_input)

    dnn_out_1 = Dense(128, dnn_activation)(dnn_input)
    dnn_out_2 = Dense(128, dnn_activation)(dnn_out_1)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_out_2)

    if use_only_dnn:
        final_logit = dnn_logit
    elif len(dnn_hidden_units) == 0 and not use_fm:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and use_fm:  # linear + FM
        final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    elif len(dnn_hidden_units) > 0 and not use_fm:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    elif len(dnn_hidden_units) > 0 and use_fm:  # linear + FM + Deep
        final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
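A minimal usage sketch, assuming a pre-0.7 DeepCTR release where SparseFeat/DenseFeat take (name, dimension) and live in deepctr.inputs; the field names are made up:

# Hedged usage sketch for the DeepFM variant above; feature names are illustrative
# and the SparseFeat/DenseFeat signatures follow the older (name, dimension) form.
from deepctr.inputs import SparseFeat, DenseFeat

sparse_cols = [SparseFeat('user_id', 10000), SparseFeat('item_id', 50000)]
dense_cols = [DenseFeat('price', 1)]
feature_columns = sparse_cols + dense_cols

model = DeepFM(feature_columns, feature_columns, embedding_size=8, use_fm=True)
model.compile('adam', 'binary_crossentropy')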
Example 3
def DSIN(feature_dim_dict,
         sess_feature_list,
         embedding_size=8,
         sess_max_count=5,
         sess_len_max=10,
         att_embedding_size=1,
         att_head_num=8,
         dnn_hidden_units=(200, 80),
         dnn_activation='sigmoid',
         l2_reg_dnn=0,
         l2_reg_embedding=1e-6,
         task='binary',
         dnn_dropout=0,
         init_std=0.0001,
         seed=1024,
         bias_encoding=False):
    check_feature_config_dict(feature_dim_dict)

    sparse_input, dense_input, user_behavior_input_dict, _, user_sess_length = get_input(
        feature_dim_dict, sess_feature_list, sess_max_count, sess_len_max)

    sparse_embedding_dict = {
        feat.name:
        Embedding(feat.dimension,
                  embedding_size,
                  embeddings_initializer=RandomNormal(mean=0.0,
                                                      stddev=init_std,
                                                      seed=seed),
                  embeddings_regularizer=l2(l2_reg_embedding),
                  name='sparse_emb_' + str(i) + '-' + feat.name,
                  mask_zero=(feat.name in sess_feature_list))
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }

    query_emb_list = get_embedding_vec_list(sparse_embedding_dict,
                                            sparse_input,
                                            feature_dim_dict["sparse"],
                                            sess_feature_list,
                                            sess_feature_list)

    query_emb = concat_fun(query_emb_list)

    deep_input_emb_list = get_embedding_vec_list(
        sparse_embedding_dict,
        sparse_input,
        feature_dim_dict["sparse"],
        mask_feat_list=sess_feature_list)
    deep_input_emb = concat_fun(deep_input_emb_list)
    deep_input_emb = Flatten()(NoMask()(deep_input_emb))

    tr_input = sess_interest_division(sparse_embedding_dict,
                                      user_behavior_input_dict,
                                      feature_dim_dict['sparse'],
                                      sess_feature_list,
                                      sess_max_count,
                                      bias_encoding=bias_encoding)

    Self_Attention = Transformer(att_embedding_size,
                                 att_head_num,
                                 dropout_rate=0,
                                 use_layer_norm=False,
                                 use_positional_encoding=(not bias_encoding),
                                 seed=seed,
                                 supports_masking=True,
                                 blinding=True)
    sess_fea = sess_interest_extractor(tr_input, sess_max_count,
                                       Self_Attention)

    interest_attention_layer = AttentionSequencePoolingLayer(
        att_hidden_units=(64, 16),
        weight_normalization=True,
        supports_masking=False)([query_emb, sess_fea, user_sess_length])

    lstm_outputs = BiLSTM(
        len(sess_feature_list) * embedding_size,
        layers=2,
        res_layers=0,
        dropout_rate=0.2,
    )(sess_fea)
    lstm_attention_layer = AttentionSequencePoolingLayer(
        att_hidden_units=(64, 16),
        weight_normalization=True)([query_emb, lstm_outputs, user_sess_length])

    deep_input_emb = Concatenate()([
        deep_input_emb,
        Flatten()(interest_attention_layer),
        Flatten()(lstm_attention_layer)
    ])
    if len(dense_input) > 0:
        deep_input_emb = Concatenate()([deep_input_emb] +
                                       list(dense_input.values()))

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                 False, seed)(deep_input_emb)
    output = Dense(1, use_bias=False, activation=None)(output)
    output = PredictionLayer(task)(output)

    sess_input_list = []
    # sess_input_length_list = []
    for i in range(sess_max_count):
        sess_name = "sess_" + str(i)
        sess_input_list.extend(
            get_inputs_list([user_behavior_input_dict[sess_name]]))
        # sess_input_length_list.append(user_behavior_length_dict[sess_name])

    model_input_list = get_inputs_list(
        [sparse_input, dense_input]) + sess_input_list + [user_sess_length]

    model = Model(inputs=model_input_list, outputs=output)

    return model
Example 4
def DSSM(user_feature_columns,
         item_feature_columns,
         user_dnn_hidden_units=(64, 32),
         item_dnn_hidden_units=(64, 32),
         dnn_activation='tanh',
         dnn_use_bn=False,
         l2_reg_dnn=0,
         l2_reg_embedding=1e-6,
         dnn_dropout=0,
         init_std=0.0001,
         seed=1024,
         metric='cos'):

    embedding_matrix_dict = create_embedding_matrix(user_feature_columns +
                                                    item_feature_columns,
                                                    l2_reg_embedding,
                                                    init_std,
                                                    seed,
                                                    seq_mask_zero=True)

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features,
        user_feature_columns,
        l2_reg_embedding,
        init_std,
        seed,
        embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features,
        item_feature_columns,
        l2_reg_embedding,
        init_std,
        seed,
        embedding_matrix_dict=embedding_matrix_dict)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(
        user_dnn_hidden_units,
        dnn_activation,
        l2_reg_dnn,
        dnn_dropout,
        dnn_use_bn,
        seed,
    )(user_dnn_input)

    item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(item_dnn_input)

    score = Similarity(type=metric)([user_dnn_out, item_dnn_out])

    output = PredictionLayer("binary", False)(score)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    plot_model(model, to_file='dnn.png', show_shapes=True)
    print("go")
    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)
    model.__setattr__("item_embedding", item_dnn_out)
    return model
Example 5
def xDeepFM(feature_dim_dict,
            embedding_size=8,
            seed=1024,
            init_std=0.0001,
            l2_reg_linear=0.00001,
            l2_reg_embedding=0.00001,
            cin_layer_size=(256, 256),
            cin_split_half=True,
            cin_activation='relu',
            hidden_size=(256, 256),
            activation='relu',
            keep_prob=1,
            use_bn=False,
            l2_reg_deep=0,
            final_activation='sigmoid',
            use_video=False,
            use_audio=False):

    check_feature_config_dict(feature_dim_dict)
    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear,
        init_std, seed, True)

    if use_video:
        video_input = tf.keras.layers.Input(shape=(128, ), name='video')
        video_emb = tf.keras.layers.Dense(
            embedding_size,
            use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(video_input)
        video_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size, ))(video_emb)
        deep_emb_list.append(video_emb)
        inputs_list.append(video_input)

    if use_audio:
        audio_input = tf.keras.layers.Input(shape=(128, ), name='audio')
        audio_emb = tf.keras.layers.Dense(
            embedding_size,
            use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(audio_input)
        audio_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size, ))(audio_emb)
        deep_emb_list.append(audio_emb)
        inputs_list.append(audio_input)

    fm_input = concat_fun(deep_emb_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1,
            activation=None,
        )(exFM_out)

    deep_input = tf.keras.layers.Flatten()(fm_input)

    deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob, use_bn,
                   seed)(deep_input)
    deep_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(deep_out)

    logit_list = [linear_logit, deep_logit]
    if len(cin_layer_size) > 0:  # only add the CIN logit when a CIN is actually built
        logit_list.append(exFM_logit)
    final_logit = tf.keras.layers.add(logit_list)
    output = PredictionLayer(final_activation, name='output')(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
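A sketch of feeding the optional multimodal inputs, assuming the model was built with use_video=True and use_audio=True; base_inputs and labels are hypothetical arrays keyed by the remaining input names:

# Hypothetical feed for the 128-d video/audio Inputs declared above.
import numpy as np

n_samples = 1024
multimodal_inputs = dict(
    base_inputs,  # assumed dict of arrays for the sparse/dense feature inputs
    video=np.random.randn(n_samples, 128).astype('float32'),
    audio=np.random.randn(n_samples, 128).astype('float32'),
)
model.fit(multimodal_inputs, labels, batch_size=256, epochs=1)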
Example 6
File: dsin.py Project: zwcdp/DSIN
def DSIN(
    feature_dim_dict,
    sess_feature_list,
    embedding_size=8,
    sess_max_count=5,
    sess_len_max=10,
    bias_encoding=False,
    att_embedding_size=1,
    att_head_num=8,
    dnn_hidden_units=(200, 80),
    dnn_activation='sigmoid',
    dnn_dropout=0,
    dnn_use_bn=False,
    l2_reg_dnn=0,
    l2_reg_embedding=1e-6,
    init_std=0.0001,
    seed=1024,
    task='binary',
):
    """Instantiates the Deep Session Interest Network architecture.

    :param feature_dim_dict: dict, to indicate the sparse fields (**currently only sparse features are supported**), e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]}
    :param sess_feature_list: list, to indicate the session sparse fields (**currently only sparse features are supported**); must be a subset of ``feature_dim_dict["sparse"]``
    :param embedding_size: positive integer, sparse feature embedding size.
    :param sess_max_count: positive int, the maximum number of sessions.
    :param sess_len_max: positive int, the maximum length of each session.
    :param bias_encoding: bool. Whether to use bias encoding or positional encoding.
    :param att_embedding_size: positive int, the embedding size of each attention head.
    :param att_head_num: positive int, the number of attention heads.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param dnn_activation: Activation function to use in the deep net.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.

    """
    check_feature_config_dict(feature_dim_dict)

    if (att_embedding_size * att_head_num !=
            len(sess_feature_list) * embedding_size):
        raise ValueError(
            "len(sess_feature_list) * embedding_size must equal att_embedding_size * att_head_num, got %d * %d != %d * %d"
            % (len(sess_feature_list), embedding_size, att_embedding_size,
               att_head_num))

    sparse_input, dense_input, user_behavior_input_dict, _, user_sess_length = get_input(
        feature_dim_dict, sess_feature_list, sess_max_count, sess_len_max)

    sparse_embedding_dict = {
        feat.name:
        Embedding(feat.dimension,
                  embedding_size,
                  embeddings_initializer=RandomNormal(mean=0.0,
                                                      stddev=init_std,
                                                      seed=seed),
                  embeddings_regularizer=l2(l2_reg_embedding),
                  name='sparse_emb_' + str(i) + '-' + feat.name,
                  mask_zero=(feat.name in sess_feature_list))
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }

    query_emb_list = get_embedding_vec_list(sparse_embedding_dict,
                                            sparse_input,
                                            feature_dim_dict["sparse"],
                                            sess_feature_list,
                                            sess_feature_list)

    query_emb = concat_fun(query_emb_list)

    deep_input_emb_list = get_embedding_vec_list(
        sparse_embedding_dict,
        sparse_input,
        feature_dim_dict["sparse"],
        mask_feat_list=sess_feature_list)
    deep_input_emb = concat_fun(deep_input_emb_list)
    deep_input_emb = Flatten()(NoMask()(deep_input_emb))

    tr_input = sess_interest_division(sparse_embedding_dict,
                                      user_behavior_input_dict,
                                      feature_dim_dict['sparse'],
                                      sess_feature_list,
                                      sess_max_count,
                                      bias_encoding=bias_encoding)

    Self_Attention = Transformer(att_embedding_size,
                                 att_head_num,
                                 dropout_rate=0,
                                 use_layer_norm=False,
                                 use_positional_encoding=(not bias_encoding),
                                 seed=seed,
                                 supports_masking=True,
                                 blinding=True)
    sess_fea = sess_interest_extractor(tr_input, sess_max_count,
                                       Self_Attention)

    interest_attention_layer = AttentionSequencePoolingLayer(
        att_hidden_units=(64, 16),
        weight_normalization=True,
        supports_masking=False)([query_emb, sess_fea, user_sess_length])

    lstm_outputs = BiLSTM(
        len(sess_feature_list) * embedding_size,
        layers=2,
        res_layers=0,
        dropout_rate=0.2,
    )(sess_fea)
    lstm_attention_layer = AttentionSequencePoolingLayer(
        att_hidden_units=(64, 16),
        weight_normalization=True)([query_emb, lstm_outputs, user_sess_length])

    deep_input_emb = Concatenate()([
        deep_input_emb,
        Flatten()(interest_attention_layer),
        Flatten()(lstm_attention_layer)
    ])
    if len(dense_input) > 0:
        deep_input_emb = Concatenate()([deep_input_emb] +
                                       list(dense_input.values()))

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                 dnn_use_bn, seed)(deep_input_emb)
    output = Dense(1, use_bias=False, activation=None)(output)
    output = PredictionLayer(task)(output)

    sess_input_list = []
    # sess_input_length_list = []
    for i in range(sess_max_count):
        sess_name = "sess_" + str(i)
        sess_input_list.extend(
            get_inputs_list([user_behavior_input_dict[sess_name]]))
        # sess_input_length_list.append(user_behavior_length_dict[sess_name])

    model_input_list = get_inputs_list(
        [sparse_input, dense_input]) + sess_input_list + [user_sess_length]

    model = Model(inputs=model_input_list, outputs=output)

    return model
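A quick sketch of satisfying the dimension check above (feature_dim_dict is assumed to be built elsewhere; field names are illustrative):

# With 2 session fields and embedding_size 4, the keys are 2 * 4 = 8 dims wide,
# so att_embedding_size * att_head_num must also equal 8 (e.g. 1 * 8).
sess_feature_list = ['item_id', 'cate_id']
model = DSIN(feature_dim_dict, sess_feature_list,
             embedding_size=4,
             att_embedding_size=1, att_head_num=8,
             sess_max_count=5, sess_len_max=10)
model.compile('adam', 'binary_crossentropy')
Example 7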
def MMOE(dnn_feature_columns,
         num_tasks,
         tasks,
         num_experts=4,
         expert_dim=8,
         dnn_hidden_units=(128, 128),
         l2_reg_embedding=1e-5,
         l2_reg_dnn=0,
         task_dnn_units=None,
         seed=1024,
         dnn_dropout=0,
         dnn_activation='relu'):
    """Instantiates the Multi-gate Mixture-of-Experts architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param num_tasks: integer, number of tasks, equal to the number of outputs; must be greater than 1.
    :param tasks: list of str, indicating the loss of each task, ``"binary"`` for binary logloss, ``"regression"`` for regression loss, e.g. ['binary', 'regression'].
    :param num_experts: integer, number of experts.
    :param expert_dim: integer, the hidden units of each expert.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the shared-bottom DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param task_dnn_units: list of positive integers or None, the layer number and units in each layer of the task-specific DNN.
    :param seed: integer, to use as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.

    :return: a Keras model instance
    """
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if len(tasks) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of tasks")
    for task in tasks:
        if task not in ['binary', 'regression']:
            raise ValueError(
                "task must be binary or regression, {} is illegal".format(
                    task))

    features = build_input_features(dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units,
                  dnn_activation,
                  l2_reg_dnn,
                  dnn_dropout,
                  False,
                  seed=seed)(dnn_input)
    mmoe_outs = MMOELayer(num_tasks, num_experts, expert_dim)(dnn_out)
    if task_dnn_units is not None:
        mmoe_outs = [
            DNN(task_dnn_units,
                dnn_activation,
                l2_reg_dnn,
                dnn_dropout,
                False,
                seed=seed)(mmoe_out) for mmoe_out in mmoe_outs
        ]

    task_outputs = []
    for mmoe_out, task in zip(mmoe_outs, tasks):
        logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(mmoe_out)
        output = PredictionLayer(task)(logit)
        task_outputs.append(output)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=task_outputs)
    return model
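A hedged two-task example (CTR as binary, dwell time as regression; the feature column definitions are assumed to exist):

# Hypothetical usage: the outputs follow the order of `tasks`, so losses are
# passed as a list in the same order.
model = MMOE(dnn_feature_columns, num_tasks=2, tasks=['binary', 'regression'],
             num_experts=4, expert_dim=8, task_dnn_units=(32,))
model.compile('adam',
              loss=['binary_crossentropy', 'mse'],
              loss_weights=[1.0, 0.2])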
Example 8
File: din.py Project: zwcdp/DSIN
def DIN(feature_dim_dict,
        seq_feature_list,
        embedding_size=8,
        hist_len_max=16,
        dnn_use_bn=False,
        dnn_hidden_units=(200, 80),
        dnn_activation='relu',
        att_hidden_size=(80, 40),
        att_activation="dice",
        att_weight_normalization=False,
        l2_reg_dnn=0,
        l2_reg_embedding=1e-6,
        dnn_dropout=0,
        init_std=0.0001,
        seed=1024,
        task='binary'):
    """Instantiates the Deep Interest Network architecture.

    :param feature_dim_dict: dict, to indicate the sparse fields (**currently only sparse features are supported**), e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]}
    :param seq_feature_list: list, to indicate the sequence sparse fields (**currently only sparse features are supported**); must be a subset of ``feature_dim_dict["sparse"]``
    :param embedding_size: positive integer, sparse feature embedding size.
    :param hist_len_max: positive int, the maximum length of the sequence input.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param dnn_activation: Activation function to use in the deep net.
    :param att_hidden_size: list of positive integers, the layer number and units in each layer of the attention net.
    :param att_activation: Activation function to use in the attention net.
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.

    """
    check_feature_config_dict(feature_dim_dict)

    sparse_input, dense_input, user_behavior_input = get_input(
        feature_dim_dict, seq_feature_list, hist_len_max)

    sparse_embedding_dict = {
        feat.name:
        Embedding(feat.dimension,
                  embedding_size,
                  embeddings_initializer=RandomNormal(mean=0.0,
                                                      stddev=init_std,
                                                      seed=seed),
                  embeddings_regularizer=l2(l2_reg_embedding),
                  name='sparse_emb_' + str(i) + '-' + feat.name,
                  mask_zero=(feat.name in seq_feature_list))
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }

    query_emb_list = get_embedding_vec_list(sparse_embedding_dict,
                                            sparse_input,
                                            feature_dim_dict['sparse'],
                                            seq_feature_list, seq_feature_list)

    keys_emb_list = get_embedding_vec_list(sparse_embedding_dict,
                                           user_behavior_input,
                                           feature_dim_dict['sparse'],
                                           seq_feature_list, seq_feature_list)

    deep_input_emb_list = get_embedding_vec_list(
        sparse_embedding_dict,
        sparse_input,
        feature_dim_dict['sparse'],
        mask_feat_list=seq_feature_list)

    keys_emb = concat_fun(keys_emb_list)
    deep_input_emb = concat_fun(deep_input_emb_list)

    query_emb = concat_fun(query_emb_list)

    hist = AttentionSequencePoolingLayer(
        att_hidden_size,
        att_activation,
        weight_normalization=att_weight_normalization,
        supports_masking=True)([query_emb, keys_emb])

    deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
    deep_input_emb = Flatten()(deep_input_emb)
    if len(dense_input) > 0:
        deep_input_emb = Concatenate()([deep_input_emb] +
                                       list(dense_input.values()))

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                 dnn_use_bn, seed)(deep_input_emb)
    final_logit = Dense(1, use_bias=False)(output)

    output = PredictionLayer(task)(final_logit)
    model_input_list = get_inputs_list(
        [sparse_input, dense_input, user_behavior_input])

    model = Model(inputs=model_input_list, outputs=output)
    return model
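A minimal usage sketch with the dict-based feature configuration this older API expects; the Feat namedtuple is only a stand-in for DeepCTR's SingleFeat(name, dimension), and the field names are made up:

from collections import namedtuple

# Stand-in for the old DeepCTR SingleFeat(name, dimension) namedtuple (illustrative).
Feat = namedtuple('Feat', ['name', 'dimension'])

feature_dim_dict = {
    'sparse': [Feat('user', 1000), Feat('gender', 2),
               Feat('item', 5000), Feat('item_gender', 3)],
    'dense': [],
}
# Behavior sequences are supplied for 'item' and 'item_gender'.
model = DIN(feature_dim_dict, ['item', 'item_gender'],
            embedding_size=8, hist_len_max=16)
model.compile('adam', 'binary_crossentropy')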
Example 9
def Baseline(feature_dim_dict,
             attention_feature_name=None,
             with_linear=False,
             embedding_size=5,
             dnn_hidden_units=(135, 67),
             l2_reg_linear=0.00001,
             l2_reg_embedding=0.00001,
             l2_reg_dnn=0,
             init_std=0.0001,
             seed=1024,
             dnn_dropout=0,
             dnn_activation='relu',
             dnn_use_bn=False,
             task='binary'):
    """Instantiates the Baseline Network architecture.

    :param feature_dim_dict: dict, to indicate the sparse and dense fields, e.g.
           {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param attention_feature_name: str or None, name of the variable-length feature pooled with attention (None disables it).
    :param with_linear: bool, whether to add the linear logit to the deep logit.
    :param embedding_size: positive integer, sparse feature embedding size.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = \
        preprocess_input_embedding(feature_dim_dict,
                                   embedding_size,
                                   l2_reg_embedding,
                                   l2_reg_linear, init_std,
                                   seed,
                                   create_linear_weight=True,
                                   use_var_attention=(
                                       True if attention_feature_name else False),
                                   attention_feature_name=attention_feature_name)

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)

    deep_input = concat_fun(deep_emb_list, axis=1)
    deep_input = tf.keras.layers.Flatten()(deep_input)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(deep_input)
    deep_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(deep_out)

    if len(dnn_hidden_units) == 0:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) > 0 and with_linear:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, deep_logit])
    elif len(dnn_hidden_units) > 0 and not with_linear:
        final_logit = deep_logit
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
Example 10
def DSSM(user_feature_columns,
         item_feature_columns,
         user_dnn_hidden_units=(64, 32),
         item_dnn_hidden_units=(64, 32),
         dnn_activation='tanh',
         dnn_use_bn=False,
         l2_reg_dnn=0,
         l2_reg_embedding=1e-6,
         dnn_dropout=0,
         init_std=0.0001,
         seed=1024,
         metric='cos'):
    """Instantiates the Deep Structured Semantic Model architecture.

    :param user_feature_columns: An iterable containing the user features used by the model.
    :param item_feature_columns: An iterable containing the item features used by the model.
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower.
    :param item_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the item tower.
    :param dnn_activation: Activation function to use in the deep net.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param metric: str, ``"cos"`` for cosine similarity or ``"ip"`` for inner product.
    :return: A Keras model instance.

    """

    embedding_matrix_dict = create_embedding_matrix(user_feature_columns +
                                                    item_feature_columns,
                                                    l2_reg_embedding,
                                                    init_std,
                                                    seed,
                                                    seq_mask_zero=True)

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features,
        user_feature_columns,
        l2_reg_embedding,
        init_std,
        seed,
        embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features,
        item_feature_columns,
        l2_reg_embedding,
        init_std,
        seed,
        embedding_matrix_dict=embedding_matrix_dict)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(
        user_dnn_hidden_units,
        dnn_activation,
        l2_reg_dnn,
        dnn_dropout,
        dnn_use_bn,
        seed,
    )(user_dnn_input)

    item_dnn_out = DNN(item_dnn_hidden_units, dnn_activation, l2_reg_dnn,
                       dnn_dropout, dnn_use_bn, seed)(item_dnn_input)

    score = Similarity(type=metric)([user_dnn_out, item_dnn_out])

    output = PredictionLayer("binary", False)(score)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)
    model.__setattr__("item_embedding", item_dnn_out)

    return model
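Because the user/item input lists and tower outputs are attached to the model above, the two towers can be split out for retrieval after training; user_model_input and item_profile below are hypothetical feed dicts:

import tensorflow as tf

# Export each tower as its own Keras model using the attributes set above.
user_embedding_model = tf.keras.Model(inputs=model.user_input, outputs=model.user_embedding)
item_embedding_model = tf.keras.Model(inputs=model.item_input, outputs=model.item_embedding)

user_vecs = user_embedding_model.predict(user_model_input, batch_size=4096)
item_vecs = item_embedding_model.predict(item_profile, batch_size=4096)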
Example 11
def xDeepFM_MTL(linear_feature_columns,
                dnn_feature_columns,
                dnn_hidden_units=(256, 256),
                task_net_size=(128, ),
                cin_layer_size=(
                    128,
                    128,
                ),
                cin_split_half=True,
                cin_activation='relu',
                l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001,
                l2_reg_dnn=0,
                l2_reg_cin=0,
                seed=1024,
                dnn_dropout=0,
                dnn_activation='relu',
                dnn_use_bn=False,
                task='binary'):
    """Instantiates the xDeepFM architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the shared deep net.
    :param task_net_size: list of positive integers, the layer number and units in each task-specific tower.
    :param cin_layer_size: list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network.
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit.
    :param cin_activation: activation function used on the feature maps.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the deep net.
    :param l2_reg_cin: float. L2 regularizer strength applied to the CIN.
    :param seed: integer, to use as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)

    fm_input = concat_func(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       l2_reg_cin, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_output = DNN(dnn_hidden_units,
                     dnn_activation,
                     l2_reg_dnn,
                     dnn_dropout,
                     dnn_use_bn,
                     seed=seed)(dnn_input)

    finish_out = DNN(task_net_size)(dnn_output)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out = DNN(task_net_size)(dnn_output)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    if len(cin_layer_size) > 0:  # only add the CIN logit when a CIN is actually built
        finish_logit = tf.keras.layers.add(
            [linear_logit, finish_logit, exFM_logit])
        like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])
    else:
        finish_logit = tf.keras.layers.add([linear_logit, finish_logit])
        like_logit = tf.keras.layers.add([linear_logit, like_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
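A hedged compile sketch for the two heads; the output names 'finish' and 'like' come from the PredictionLayer names above:

model.compile('adagrad',
              loss={'finish': 'binary_crossentropy', 'like': 'binary_crossentropy'},
              loss_weights={'finish': 1.0, 'like': 1.0})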
Example 12
def DIEN(feature_dim_dict, seq_feature_list, embedding_size=8, hist_len_max=16,
         gru_type="GRU", use_negsampling=False, alpha=1.0, use_bn=False, dnn_hidden_units=(200, 80),
         dnn_activation='relu',
         att_hidden_units=(64, 16), att_activation="dice", att_weight_normalization=True,
         l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'):
    """Instantiates the Deep Interest Evolution Network architecture.

    :param feature_dim_dict: dict, to indicate the sparse fields (**currently only sparse features are supported**), e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]}
    :param seq_feature_list: list, to indicate the sequence sparse fields (**currently only sparse features are supported**); must be a subset of ``feature_dim_dict["sparse"]``
    :param embedding_size: positive integer, sparse feature embedding size.
    :param hist_len_max: positive int, the maximum length of the sequence input.
    :param gru_type: str, one of ``"GRU"``, ``"AIGRU"``, ``"AGRU"`` or ``"AUGRU"``.
    :param use_negsampling: bool, whether to use negative sampling.
    :param alpha: float, weight of the auxiliary loss.
    :param use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param dnn_activation: Activation function to use in the DNN.
    :param att_hidden_units: list of positive integers, the layer number and units in each layer of the attention net.
    :param att_activation: Activation function to use in the attention net.
    :param att_weight_normalization: bool. Whether to normalize the attention score of the local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.

    """
    check_feature_config_dict(feature_dim_dict)

    sparse_input, dense_input, user_behavior_input, user_behavior_length = get_input(
        feature_dim_dict, seq_feature_list, hist_len_max)
    sparse_embedding_dict = {feat.name: Embedding(feat.dimension, embedding_size,
                                                  embeddings_initializer=RandomNormal(
                                                      mean=0.0, stddev=init_std, seed=seed),
                                                  embeddings_regularizer=l2(
                                                      l2_reg_embedding),
                                                  name='sparse_emb_' + str(i) + '-' + feat.name) for i, feat in
                             enumerate(feature_dim_dict["sparse"])}

    query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"],
                                            return_feat_list=seq_feature_list)
    keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input, feature_dim_dict['sparse'],
                                           return_feat_list=seq_feature_list)
    deep_input_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict['sparse'])

    query_emb = concat_fun(query_emb_list)
    keys_emb = concat_fun(keys_emb_list)
    deep_input_emb = concat_fun(deep_input_emb_list)

    if use_negsampling:
        neg_user_behavior_input = OrderedDict()
        for i, feat in enumerate(seq_feature_list):
            neg_user_behavior_input[feat] = Input(shape=(hist_len_max,), name='neg_seq_' + str(i) + '-' + feat)

        neg_uiseq_embed_list = get_embedding_vec_list(sparse_embedding_dict, neg_user_behavior_input,
                                                      feature_dim_dict["sparse"], seq_feature_list, )
        # [sparse_embedding_dict[feat](
        # neg_user_behavior_input[feat]) for feat in seq_feature_list]
        neg_concat_behavior = concat_fun(neg_uiseq_embed_list)

    else:
        neg_concat_behavior = None

    hist, aux_loss_1 = interest_evolution(keys_emb, query_emb, user_behavior_length, gru_type=gru_type,
                                          use_neg=use_negsampling, neg_concat_behavior=neg_concat_behavior,
                                          embedding_size=embedding_size, att_hidden_size=att_hidden_units,
                                          att_activation=att_activation,
                                          att_weight_normalization=att_weight_normalization, )

    deep_input_emb = Concatenate()([deep_input_emb, hist])

    deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
    if len(dense_input) > 0:
        deep_input_emb = Concatenate()(
            [deep_input_emb] + list(dense_input.values()))

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
                 dnn_dropout, use_bn, seed)(deep_input_emb)
    final_logit = Dense(1, use_bias=False)(output)
    output = PredictionLayer(task)(final_logit)

    model_input_list = get_inputs_list(
        [sparse_input, dense_input, user_behavior_input])

    if use_negsampling:
        model_input_list += list(neg_user_behavior_input.values())

    model_input_list += [user_behavior_length]

    model = tf.keras.models.Model(inputs=model_input_list, outputs=output)

    if use_negsampling:
        model.add_loss(alpha * aux_loss_1)
    # TF 1.x only: explicitly initialize variables in the backing Keras session.
    tf.keras.backend.get_session().run(tf.global_variables_initializer())
    return model
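When use_negsampling=True, the model expects one extra negative behavior sequence per sequence feature, named 'neg_seq_<i>-<feat>' with shape (hist_len_max,), and adds alpha times the auxiliary loss. A hypothetical feed (base_inputs and labels are assumed to hold the regular inputs):

import numpy as np

# Sketch for seq_feature_list = ['item'] and hist_len_max = 16.
n, hist_len_max = 512, 16
extra_inputs = {'neg_seq_0-item': np.random.randint(1, 1000, size=(n, hist_len_max))}
model.fit({**base_inputs, **extra_inputs}, labels, batch_size=128)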
Example 13
def BST(feature_dim_dict, seq_feature_list, embedding_size=4, hist_len_max=16, use_bn=False, dnn_hidden_units=(200, 80),
        dnn_activation='relu', att_embedding_size=1, att_head_num=8,
        l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary'):
    """Instantiates the Deep Interest Evolution Network architecture.

    :param feature_dim_dict: dict, to indicate the sparse fields (**currently only sparse features are supported**), e.g. {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]}
    :param seq_feature_list: list, to indicate the sequence sparse fields (**currently only sparse features are supported**); must be a subset of ``feature_dim_dict["sparse"]``
    :param embedding_size: positive integer, sparse feature embedding size.
    :param hist_len_max: positive int, the maximum length of the sequence input.
    :param use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param dnn_activation: Activation function to use in the DNN.
    :param att_embedding_size: positive int, the embedding size of each self-attention head.
    :param att_head_num: positive int, the number of self-attention heads.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.

    """
    check_feature_config_dict(feature_dim_dict)

    sparse_input, dense_input, user_behavior_input, user_behavior_length = get_input(
        feature_dim_dict, seq_feature_list, hist_len_max)
    # sparse_embedding_dict = {feat.name: Embedding(feat.dimension, embedding_size,
    #                                               embeddings_initializer=RandomNormal(
    #                                                   mean=0.0, stddev=init_std, seed=seed),
    #                                               embeddings_regularizer=l2(
    #                                                   l2_reg_embedding),
    #                                               name='sparse_emb_' + str(i) + '-' + feat.name) for i, feat in
    #                          enumerate(feature_dim_dict["sparse"])}
    # print(sparse_embedding_dict)
    sparse_embedding_dict = {feat.name: Embedding(tf.cast(feat.dimension, tf.int32), embedding_size,
                                                  embeddings_initializer=RandomNormal(
                                                      mean=0.0, stddev=init_std, seed=seed),
                                                  embeddings_regularizer=l2(
                                                      l2_reg_embedding),
                                                  name='sparse_emb_' +
                                                       str(i) + '-' + feat.name,
                                                  mask_zero=(feat.name in seq_feature_list)) for i, feat in
                             enumerate(feature_dim_dict["sparse"])}
    # deep_emb_list = get_embedding_vec_list(
    # deep_sparse_emb_dict, sparse_input_dict, feature_dim_dict['sparse'])
    query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"],
                                            return_feat_list=seq_feature_list)
    keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input, feature_dim_dict['sparse'],
                                           return_feat_list=seq_feature_list)
    deep_input_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict['sparse'])

    query_emb = concat_fun(query_emb_list)
    keys_emb = concat_fun(keys_emb_list)
    print("prev: {0}".format(keys_emb))
    # hist_cap = Capsule(
    #     num_capsule=8, dim_capsule=2,
    #     routings=3, share_weights=True)(NoMask()(keys_emb))
    # print("now: {0}".format(hist_cap))
    # # exit(0)
    # # keys_emb = concat_fun(keys_emb_list)
    # hist_cap = Reshape([1, 16])(hist_cap)
    deep_input_emb = concat_fun(deep_input_emb_list)
    print("deep input emb: ", deep_input_emb)
    # print("hist_cap: ", hist_cap)
    Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False,
                                 use_positional_encoding=True, seed=seed, supports_masking=False,
                                 blinding=True)
    # print("now: {0}".format(hist))
    hists = []
    for key_emb in keys_emb_list:
        hist = Self_Attention([key_emb, key_emb, user_behavior_length, user_behavior_length])
        hists.append(hist)
    hist = concat_fun(hists)

    # Tensor("concatenate_2/concat:0", shape=(?, 50, 8), dtype=float32)
    # <tf.Tensor 'concatenate_3/concat:0' shape=(?, 4, 8) dtype=float32>
    deep_input_emb = Concatenate()([deep_input_emb, hist])
    # print(deep_input_emb)
    deep_input_emb = tf.keras.layers.Flatten()(NoMask()(deep_input_emb))
    if len(dense_input) > 0:
        deep_input_emb = Concatenate()(
            [deep_input_emb] + list(dense_input.values()))

    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
                 dnn_dropout, use_bn, seed)(deep_input_emb)
    final_logit = Dense(1, use_bias=False)(output)
    output = PredictionLayer(task)(final_logit)

    model_input_list = get_inputs_list(
        [sparse_input, dense_input, user_behavior_input])

    model_input_list += [user_behavior_length]

    model = tf.keras.models.Model(inputs=model_input_list, outputs=output)

    # TF 1.x only: explicitly initialize variables in the backing Keras session.
    tf.keras.backend.get_session().run(tf.global_variables_initializer())
    return model
Example 14
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           embedding_size=8,
           use_fm=True,
           only_dnn=False,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding size.
    :param use_fm: bool, whether to use the FM part.
    :param only_dnn: bool, if True use only the DNN logit (only referenced in the commented-out logit selection below).
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, standard deviation used to initialize the embedding vectors.
    :param seed: integer, to use as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """

    ## Build an Input[1,] layer for every feature; features ==> {'feature1': Input[1,], ...}
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    ## [Input1, Input2, ... ]
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)
    ## [embedding layer for feature_1, connected to feature_1's Input[1,] layer, ...], [Input[1,] layer for each dense feature, ...]

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')
    ## Linear transformation layer with no activation function

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    ## Concatenate the sparse embedding layers

    fm_logit = FM()(fm_input)
    ## Second-order output of the FM part; the first-order term and bias are not included
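    # For reference, the second-order term computed by FM() over stacked field
    # embeddings e_1..e_m is 0.5 * sum_k((sum_i e_ik)^2 - sum_i (e_ik)^2).
    # A minimal NumPy sketch of that identity (illustrative only, not part of
    # the model graph):
    #
    #     import numpy as np
    #     emb = np.random.randn(4, 8)                      # 4 fields, 8-dim embeddings
    #     square_of_sum = np.square(emb.sum(axis=0))
    #     sum_of_square = np.square(emb).sum(axis=0)
    #     fm_second_order = 0.5 * (square_of_sum - sum_of_square).sum()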

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    # dnn_out = Dense(128, dnn_activation, l2_reg_dnn, dnn_dropout,
    #               dnn_use_bn, seed)(dnn_input)

    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)

    finish_out = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(dnn_out)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out = Dense(128, dnn_activation,
                     kernel_regularizer=l2(l2_reg_dnn))(dnn_out)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_out)
    # if len(dnn_hidden_units) > 0 and only_dnn == True:
    #     final_logit = dnn_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == False:  # only linear
    #     final_logit = linear_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == True:  # linear + FM
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == False:  # linear + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == True:  # linear + FM + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    # else:
    #     raise NotImplementedError

    # finish_logit = tf.keras.layers.add([linear_logit, fm_logit, finish_logit])
    # like_logit = tf.keras.layers.add([linear_logit, fm_logit, like_logit])

    output_finish = PredictionLayer('binary',
                                    name='finish_output')(finish_logit)
    output_like = PredictionLayer('binary', name='like_output')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
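
# A minimal usage sketch for the two-task DeepFM above (hypothetical feature
# columns; the SparseFeat/DenseFeat constructor signatures differ across
# DeepCTR versions, so adjust to the installed one):
#
#     sparse_cols = [SparseFeat('uid', 10000), SparseFeat('item_id', 50000)]
#     dense_cols = [DenseFeat('duration', 1)]
#     feature_cols = sparse_cols + dense_cols
#     model = DeepFM(feature_cols, feature_cols, embedding_size=8)
#     model.compile('adam',
#                   loss={'finish_output': 'binary_crossentropy',
#                         'like_output': 'binary_crossentropy'})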
# Example 15
def xDeepFM_MTL(
    linear_feature_columns,
    dnn_feature_columns,
    embedding_size=8,
    dnn_hidden_units=(256, 256),
    cin_layer_size=(
        256,
        256,
    ),
    cin_split_half=True,
    init_std=0.0001,
    l2_reg_dnn=0,
    dnn_dropout=0,
    dnn_activation='relu',
    dnn_use_bn=False,
    task_net_size=(128, ),
    l2_reg_linear=0.00001,
    l2_reg_embedding=0.00001,
    seed=1024,
):
    # check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0,
                       seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1,
            activation=None,
        )(exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)

    like_out = DNN(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output_like)
    return model
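
# A minimal usage sketch for the single-task xDeepFM_MTL above (hypothetical
# feature columns; the SparseFeat constructor signature differs across DeepCTR
# versions). The only output is the 'like' prediction:
#
#     sparse_cols = [SparseFeat('uid', 10000), SparseFeat('item_id', 50000)]
#     model = xDeepFM_MTL(sparse_cols, sparse_cols, embedding_size=8)
#     model.compile('adam', loss={'like': 'binary_crossentropy'})
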
def create_model(linear_feature_columns,
                 dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME],
                 dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001,
                 l2_reg_dnn=0,
                 seed=1024,
                 dnn_dropout=0,
                 dnn_activation='relu',
                 dnn_use_bn=False,
                 task='binary'):

    K.clear_session()
    #!################################################################################################################
    inputs_all = [
        #         get_input_feature_layer(name = 'user_0',feature_shape = dense_feature_size),
        #                  get_input_feature_layer(name = 'item_0',feature_shape = dense_feature_size),
        get_input_feature_layer(name='user_1',
                                feature_shape=dense_feature_size),
        get_input_feature_layer(name='item_1',
                                feature_shape=dense_feature_size)
    ]
    # slotid_nettype
    #     layer_user_0 = inputs_all[0]
    #     layer_user_0 = K.expand_dims(layer_user_0, 1)
    #     layer_item_0 = inputs_all[1]
    #     layer_item_0 = K.expand_dims(layer_item_0, 1)
    layer_user_1 = inputs_all[0]
    layer_user_1 = K.expand_dims(layer_user_1, 1)
    layer_item_1 = inputs_all[1]
    layer_item_1 = K.expand_dims(layer_item_1, 1)
    #     cross_emb_out0 = cross_net(layer_user_0,layer_item_0)
    cross_emb_out1 = cross_net(layer_user_1, layer_item_1)
    #     cross_emb_out = tf.keras.layers.concatenate([cross_emb_out0,cross_emb_out1])
    cross_emb_out = tf.squeeze(cross_emb_out1, [1])
    #!################################################################################################################
    seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers
    masks = tf.equal(seq_inputs_dict['task_id'], 0)
    # ordinary sequences + label sequences
    layers2concat = []
    for index, col in enumerate(arr_name_list):
        print(col, 'get embedding!')
        emb_layer = get_emb_layer(col,
                                  trainable=TRAINABLE_DICT[col],
                                  emb_matrix=id_list_dict_emb_all[col][1])
        x = emb_layer(seq_inputs_dict[col])
        if conv1d_info_dict[col] > -1:
            cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
                                               kernel_size=1,
                                               activation='relu')
            x = cov_layer(x)
        layers2concat.append(x)
    x = keras.layers.concatenate(layers2concat)
    #!################################################################################################################
    #!mix1
    x = trans_net(x, masks, hidden_unit=256)
    max_pool = tf.keras.layers.GlobalMaxPooling1D()
    average_pool = tf.keras.layers.GlobalAveragePooling1D()
    xmaxpool = max_pool(x)
    xmeanpool = average_pool(x)

    trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([cross_emb_out, trans_output, dnn_input],
                      axis=-1)  # !#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    model = Model(inputs=inputs_all + inputs_list, outputs=[output])
    model.summary()
    return model
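
# A minimal compile sketch for create_model above. The function depends on
# several module-level globals being defined elsewhere (dense_feature_size,
# arr_name_list, TRAINABLE_DICT, id_list_dict_emb_all, conv1d_info_dict,
# get_input_feature_layer, get_seq_input_layers, get_emb_layer, cross_net,
# trans_net); the feature columns follow the newer DeepCTR API, where
# SparseFeat takes (name, vocabulary_size, embedding_dim):
#
#     sparse_cols = [SparseFeat('uid', 10000, 8), SparseFeat('item_id', 50000, 8)]
#     model = create_model(sparse_cols, sparse_cols)
#     model.compile('adam', loss='binary_crossentropy')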
# Example 17
def DSSM(user_dnn_feature_columns,
         item_dnn_feature_columns,
         gamma=1,
         dnn_use_bn=True,
         dnn_hidden_units=(300, 300, 128),
         dnn_activation='tanh',
         l2_reg_dnn=0,
         l2_reg_embedding=1e-6,
         dnn_dropout=0,
         init_std=0.0001,
         seed=1024,
         task='binary'):
    """Instantiates the Deep Structured Semantic Model architecture.
    :param user_dnn_feature_columns: An iterable containing the user features used by the deep part of the model.
    :param item_dnn_feature_columns: An iterable containing the item features used by the deep part of the model.
    :param gamma: smoothing factor in the softmax function for DSSM
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
    :param dnn_activation: Activation function to use in deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    user_features = build_input_features(user_dnn_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(
        user_features, user_dnn_feature_columns, l2_reg_embedding, init_std,
        seed)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list,
                                        user_dense_value_list)

    item_features = build_input_features(item_dnn_feature_columns)
    item_inputs_list = list(item_features.values())
    item_sparse_embedding_list, item_dense_value_list = input_from_feature_columns(
        item_features, item_dnn_feature_columns, l2_reg_embedding, init_std,
        seed)
    item_dnn_input = combined_dnn_input(item_sparse_embedding_list,
                                        item_dense_value_list)

    user_dnn_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed,
                       name="user_embedding")(user_dnn_input)

    item_dnn_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed,
                       name="item_embedding")(item_dnn_input)

    score = Cosine_Similarity(user_dnn_out, item_dnn_out, gamma=gamma)

    output = PredictionLayer(task, False)(score)

    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    return model
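
# A minimal usage sketch for the two-tower DSSM above (hypothetical user/item
# feature columns; the SparseFeat constructor signature depends on the DeepCTR
# version):
#
#     user_cols = [SparseFeat('uid', 10000), SparseFeat('age', 100)]
#     item_cols = [SparseFeat('item_id', 50000), SparseFeat('cate_id', 1000)]
#     model = DSSM(user_cols, item_cols)
#     model.compile('adam', loss='binary_crossentropy')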
# Example 18
def xDeepFM_MTL(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(256, 256,),
                cin_split_half=True,
                task_net_size=(128,), l2_reg_linear=0.00001, l2_reg_embedding=0.00001,
                seed=1024, ):
    """

    :param feature_dim_dict: 特征词典,包括特征名和特征列表
    :param embedding_size:
    :param hidden_size:
    :param cin_layer_size:
    :param cin_split_half:
    :param task_net_size: 网络层数
    :param l2_reg_linear:
    :param l2_reg_embedding:
    :param seed:
    :return:
    """
    # Check that the sparse and dense feature configuration is well-formed
    check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    # Todo, add text sequence embedding
    deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(
        feature_dim_dict, embedding_size, l2_reg_embedding, l2_reg_linear, 0.0001, seed)

    # video_input = tf.keras.layers.Input((128,))
    # inputs_list.append(video_input)

    # TODO, add other feature
    if 'txt' in feature_dim_dict:
        # txt_input = OrderedDict()
        for i, feat in enumerate(feature_dim_dict["txt"]):
            txt_input = tf.keras.layers.Input(
                shape=(feat.dimension,), name='txt_' + str(i) + '-' + feat.name)
            inputs_list.append(txt_input)

    fm_input = concat_fun(deep_emb_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu',
                       cin_split_half, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None, )(exFM_out)

    deep_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = MLP(hidden_size)(deep_input)

    finish_out = MLP(task_net_size)(deep_out)
    finish_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(finish_out)

    like_out = MLP(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(
        1, use_bias=False, activation=None)(like_out)

    finish_logit = tf.keras.layers.add(
        [linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add(
        [linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('sigmoid', name='finish')(finish_logit)
    output_like = PredictionLayer('sigmoid', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=[
                                  output_finish, output_like])
    return model
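
# A minimal sketch of the feature_dim_dict expected by the old-style API above.
# The exact feature type (e.g. DeepCTR's SingleFeat) depends on the library
# version; a namedtuple with just the fields the code reads (.name, .dimension)
# is used here purely for illustration:
#
#     from collections import namedtuple
#     Feat = namedtuple('Feat', ['name', 'dimension'])
#     feature_dim_dict = {
#         'sparse': [Feat('uid', 10000), Feat('item_id', 50000)],
#         'dense': [Feat('duration', 1)],
#         'txt': [Feat('title_vec', 128)],  # optional text-vector features
#     }
#     model = xDeepFM_MTL(feature_dim_dict)
#     model.compile('adam', loss={'finish': 'binary_crossentropy',
#                                 'like': 'binary_crossentropy'})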
# Example 19
def DeepAutoInt(
    linear_feature_columns,
    dnn_feature_columns,
    att_layer_num=3,
    att_embedding_size=8,
    att_head_num=2,
    att_res=True,
    dnn_hidden_units=(256, 256),
    dnn_activation='relu',
    l2_reg_linear=1e-5,
    l2_reg_embedding=1e-5,
    l2_reg_dnn=0,
    dnn_use_bn=False,
    dnn_dropout=0,
    seed=1024,
    fm_group=[DEFAULT_GROUP_NAME],
    task='binary',
):
    """Instantiates the AutoInt Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param att_layer_num: int.The InteractingLayer number to be used.
    :param att_embedding_size: int.The embedding size in multi-head self-attention network.
    :param att_head_num: int.The head number in multi-head  self-attention network.
    :param att_res: bool.Whether or not use standard residual connections before output.
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param dnn_activation: Activation function to use in DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param dnn_use_bn:  bool. Whether use BatchNormalization before activation or not in DNN
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer ,to use as random seed.
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    if len(dnn_hidden_units) <= 0 and att_layer_num <= 0:
        raise ValueError("Either hidden_layer or att_layer_num must > 0")

    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)
    sparse_embedding_list = list(
        chain.from_iterable(group_embedding_dict.values()))

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    # sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
    #                                                                      l2_reg_embedding, seed)
    att_input = concat_func(sparse_embedding_list, axis=1)

    for _ in range(att_layer_num):
        att_input = InteractingLayer(att_embedding_size, att_head_num,
                                     att_res)(att_input)
    att_output = tf.keras.layers.Flatten()(att_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    if len(dnn_hidden_units) > 0 and att_layer_num > 0:  # Deep & Interacting Layer
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                stack_out)
    elif len(dnn_hidden_units) > 0:  # Only Deep
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                deep_out)
    elif att_layer_num > 0:  # Only Interacting Layer
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                att_output)
    else:  # Error
        raise NotImplementedError
    # final_logit = tf.keras.layers.Dense(
    #     1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(att_output)
    final_logit = add_func([linear_logit, fm_logit, final_logit])

    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)

    return model
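
# A minimal usage sketch for DeepAutoInt above. Setting dnn_hidden_units=()
# gives the attention-only variant and att_layer_num=0 the DNN-only variant,
# matching the branches in the function body (hypothetical feature columns;
# the newer SparseFeat(name, vocabulary_size, embedding_dim) signature is
# assumed):
#
#     sparse_cols = [SparseFeat('uid', 10000, 8), SparseFeat('item_id', 50000, 8)]
#     model = DeepAutoInt(sparse_cols, sparse_cols, att_layer_num=3)
#     model.compile('adam', loss='binary_crossentropy')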