Code Example #1
def DeepFM2(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128),
            l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0,
            dnn_activation='relu', dnn_use_bn=False, task='binary', use_attention=True, attention_factor=8,
            l2_reg_att=1e-5, afm_dropout=0):
    """Instantiates the DeepFM Network architecture.

    :param use_attention: bool. Whether to use the attentional FM (AFM) layer in place of the plain FM layer.
    :param attention_factor: int. The units of the attention network in the AFM layer.
    :param l2_reg_att: float. L2 regularizer strength applied to the attention network.
    :param afm_dropout: float in [0,1). Dropout probability applied to the AFM attention weights.
    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param fm_group: list, group_name of features that will be used to do feature interactions.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(
        linear_feature_columns + dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,
                                                                        seed, support_group=True)

    if use_attention:
        fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout,
                                      seed)(list(v)) for k, v in group_embedding_dict.items() if k in fm_group])
    else:
        fm_logit = add_func([FM()(concat_func(v, axis=1))
                             for k, v in group_embedding_dict.items() if k in fm_group])

    dnn_input = combined_dnn_input(list(chain.from_iterable(
        group_embedding_dict.values())), dense_value_list)
    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(
        1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed=seed))(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
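
A minimal usage sketch for DeepFM2, assuming the DeepCTR feature-column API (SparseFeat/DenseFeat) that the surrounding helpers come from; the feature names and sizes below are illustrative, not from the original project.

# Hypothetical usage of DeepFM2 defined above (names/sizes are illustrative).
import numpy as np
from deepctr.feature_column import SparseFeat, DenseFeat

sparse_cols = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
               SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]
dense_cols = [DenseFeat('price', 1)]
linear_cols = dnn_cols = sparse_cols + dense_cols

model = DeepFM2(linear_cols, dnn_cols, use_attention=True, attention_factor=8)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])

# Inputs are fed as a dict keyed by feature name, matching build_input_features.
x = {'user_id': np.random.randint(0, 1000, 256),
     'item_id': np.random.randint(0, 5000, 256),
     'price': np.random.rand(256)}
y = np.random.randint(0, 2, 256)
model.fit(x, y, batch_size=64, epochs=1)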
Code Example #2
def MMOE(dnn_feature_columns, num_tasks, tasks, num_experts=4, expert_dim=8, dnn_hidden_units=(128, 128),
         l2_reg_embedding=1e-5, l2_reg_dnn=0, task_dnn_units=None, seed=1024, dnn_dropout=0, dnn_activation='relu'):
    """Instantiates the Multi-gate Mixture-of-Experts architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param num_tasks: integer, number of tasks, equal to number of outputs, must be greater than 1.
    :param tasks: list of str, indicating the loss of each task, ``"binary"`` for binary logloss, ``"regression"`` for regression loss. e.g. ['binary', 'regression']
    :param num_experts: integer, number of experts.
    :param expert_dim: integer, the hidden units of each expert.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the shared-bottom DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param task_dnn_units: list of positive integers or empty list, the layer number and units in each layer of the task-specific DNN
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN

    :return: a Keras model instance
    """
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if len(tasks) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of tasks")
    for task in tasks:
        if task not in ['binary', 'regression']:
            raise ValueError("task must be binary or regression, {} is illegal".format(task))

    features = build_input_features(dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                         l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  False, seed=seed)(dnn_input)
    mmoe_outs = MMOELayer(num_tasks, num_experts, expert_dim)(dnn_out)
    if task_dnn_units is not None:
        mmoe_outs = [DNN(task_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed)(mmoe_out) for mmoe_out in
                     mmoe_outs]

    task_outputs = []
    for mmoe_out, task in zip(mmoe_outs, tasks):
        logit = tf.keras.layers.Dense(
            1, use_bias=False, activation=None)(mmoe_out)
        output = PredictionLayer(task)(logit)
        task_outputs.append(output)

    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=task_outputs)
    return model
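
A hedged two-task usage sketch for MMOE; the feature columns and targets are illustrative, assuming the DeepCTR SparseFeat API used elsewhere in these examples.

# Hypothetical usage of MMOE defined above: one binary and one regression head.
import numpy as np
from deepctr.feature_column import SparseFeat

dnn_cols = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
            SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8)]

model = MMOE(dnn_cols, num_tasks=2, tasks=['binary', 'regression'],
             num_experts=4, expert_dim=8, task_dnn_units=(32,))
# One loss per task head, in the same order as `tasks`.
model.compile(optimizer='adam', loss=['binary_crossentropy', 'mse'])

x = {'user_id': np.random.randint(0, 1000, 256),
     'item_id': np.random.randint(0, 5000, 256)}
y_click = np.random.randint(0, 2, 256)    # binary task target
y_rating = np.random.rand(256)            # regression task target
model.fit(x, [y_click, y_rating], batch_size=64, epochs=1)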
Code Example #3
File: deepfm_trans.py  Project: yrqUni/DiGIX-2020
def M(emb1,
      emb1_label,
      emb2,
      emb2_label,
      emb3,
      emb3_label,
      emb4,
      emb4_label,
      emb5,
      emb5_label,
      linear_feature_columns,
      dnn_feature_columns,
      fm_group=[DEFAULT_GROUP_NAME],
      dnn_hidden_units=(128, 128),
      l2_reg_linear=0.00001,
      l2_reg_embedding=0.00001,
      l2_reg_dnn=0,
      seed=1024,
      dnn_dropout=0,
      dnn_activation='relu',
      dnn_use_bn=False,
      task='binary'):

    #!################################################################################################################

    feed_forward_size_trans_1 = 2048
    max_seq_len_trans_1 = 40
    model_dim_trans_1 = 128

    input_trans_1 = Input(shape=(max_seq_len_trans_1, ),
                          name='input_trans_1_layer')
    input_trans_1_label = Input(shape=(max_seq_len_trans_1, ),
                                name='input_trans_1_label_layer')

    x = Embedding(input_dim=5307 + 1,
                  output_dim=128,
                  weights=[emb1],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_1)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb1_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_1_label)

    encodings = PositionEncoding(model_dim_trans_1)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_1, 0)

    # self-attention over the sequence: (bs, 40, 128)
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_1, feed_forward_size_trans_1)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_1 = Dense(5,
                           activation='softmax',
                           name='output_trans_1_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_2 = 2048
    max_seq_len_trans_2 = 40
    model_dim_trans_2 = 128

    input_trans_2 = Input(shape=(max_seq_len_trans_2, ),
                          name='input_trans_2_layer')
    input_trans_2_label = Input(shape=(max_seq_len_trans_2, ),
                                name='input_trans_2_label_layer')

    x = Embedding(input_dim=101 + 1,
                  output_dim=128,
                  weights=[emb2],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_2)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb2_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_2_label)

    encodings = PositionEncoding(model_dim_trans_2)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_2, 0)

    # self-attention over the sequence: (bs, 40, 128)
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_2, feed_forward_size_trans_2)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_2 = Dense(5,
                           activation='softmax',
                           name='output_trans_2_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_3 = 2048
    max_seq_len_trans_3 = 40
    model_dim_trans_3 = 128

    input_trans_3 = Input(shape=(max_seq_len_trans_3, ),
                          name='input_trans_3_layer')
    input_trans_3_label = Input(shape=(max_seq_len_trans_3, ),
                                name='input_trans_3_label_layer')

    x = Embedding(input_dim=8 + 1,
                  output_dim=128,
                  weights=[emb3],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_3)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb3_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_3_label)

    encodings = PositionEncoding(model_dim_trans_3)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_3, 0)

    # self-attention over the sequence: (bs, 40, 128)
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_3, feed_forward_size_trans_3)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_3 = Dense(5,
                           activation='softmax',
                           name='output_trans_3_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_4 = 2048
    max_seq_len_trans_4 = 40
    model_dim_trans_4 = 128

    input_trans_4 = Input(shape=(max_seq_len_trans_4, ),
                          name='input_trans_4_layer')
    input_trans_4_label = Input(shape=(max_seq_len_trans_4, ),
                                name='input_trans_4_label_layer')

    x = Embedding(input_dim=38 + 1,
                  output_dim=128,
                  weights=[emb4],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_4)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb4_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_4_label)

    encodings = PositionEncoding(model_dim_trans_4)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_4, 0)

    # self-attention over the sequence: (bs, 40, 128)
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_4, feed_forward_size_trans_4)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_4 = Dense(5,
                           activation='softmax',
                           name='output_trans_4_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_5 = 2048
    max_seq_len_trans_5 = 40
    model_dim_trans_5 = 128

    input_trans_5 = Input(shape=(max_seq_len_trans_5, ),
                          name='input_trans_5_layer')
    input_trans_5_label = Input(shape=(max_seq_len_trans_5, ),
                                name='input_trans_5_label_layer')

    x = Embedding(input_dim=4317 + 1,
                  output_dim=128,
                  weights=[emb5],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_5)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb5_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_5_label)

    encodings = PositionEncoding(model_dim_trans_5)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_5, 0)

    # self-attention over the sequence: (bs, 40, 128)
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_5, feed_forward_size_trans_5)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_5 = Dense(5,
                           activation='softmax',
                           name='output_trans_5_layer')(encodings)

    #!################################################################################################################

    trans_output = concatenate([output_trans_1, output_trans_2], axis=-1)
    trans_output = concatenate([trans_output, output_trans_3], axis=-1)
    trans_output = concatenate([trans_output, output_trans_4], axis=-1)
    trans_output = concatenate([trans_output, output_trans_5], axis=-1)

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([trans_output, dnn_input], axis=-1)  #!#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    model = Model(inputs=[
        input_trans_1, input_trans_1_label, input_trans_2, input_trans_2_label,
        input_trans_3, input_trans_3_label, input_trans_4, input_trans_4_label,
        input_trans_5, input_trans_5_label
    ] + inputs_list,
                  outputs=[output])

    model.compile(optimizer=optimizers.Adam(2.5e-4),
                  loss={'prediction_layer': losses.binary_crossentropy},
                  metrics=['AUC'])
    return model
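
The five transformer branches above are identical except for their vocabulary sizes and pretrained embedding matrices. A hedged refactoring sketch follows (same layers, one helper instead of five copies; it assumes the custom PositionEncoding, MultiHeadAttention, and PositionWiseFeedForward layers used above are in scope):

# Sketch: one parameterized branch replacing the five duplicated blocks.
def transformer_branch(idx, vocab_size, emb_matrix, emb_label_matrix,
                       seq_len=40, model_dim=128, ff_size=2048):
    seq_in = Input(shape=(seq_len,), name=f'input_trans_{idx}_layer')
    label_in = Input(shape=(seq_len,), name=f'input_trans_{idx}_label_layer')
    x = Embedding(vocab_size + 1, model_dim, weights=[emb_matrix],
                  trainable=False, input_length=seq_len, mask_zero=True)(seq_in)
    x_label = Embedding(2 + 1, model_dim, weights=[emb_label_matrix],
                        trainable=False, input_length=seq_len,
                        mask_zero=True)(label_in)
    enc = Add()([x, PositionEncoding(model_dim)(x)])
    enc = Add()([x_label, enc])
    masks = tf.equal(seq_in, 0)
    att = MultiHeadAttention(4, 32)([enc, enc, enc, masks])
    att = LayerNormalization()(att + enc)    # Add & Norm
    ff = PositionWiseFeedForward(model_dim, ff_size)(att)
    enc = LayerNormalization()(ff + att)     # Add & Norm
    enc = Dropout(0.2)(GlobalMaxPooling1D()(enc))
    out = Dense(5, activation='softmax', name=f'output_trans_{idx}_layer')(enc)
    return seq_in, label_in, out

# e.g. the first branch: transformer_branch(1, 5307, emb1, emb1_label)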
Code Example #4
File: reconstruct_sota.py  Project: yrqUni/DiGIX-2020
def create_model(linear_feature_columns,
                 dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME],
                 dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001,
                 l2_reg_dnn=0,
                 seed=1024,
                 dnn_dropout=0,
                 dnn_activation='relu',
                 dnn_use_bn=False,
                 task='binary'):

    K.clear_session()
    #!################################################################################################################
    inputs_all = [
        get_input_feature_layer(name='slotid_nettype',
                                feature_shape=dense_feature_size)
    ]
    # slotid_nettype
    layer_slotid_nettype = inputs_all[0]
    layer_slotid_nettype = K.expand_dims(layer_slotid_nettype, 1)
    #!################################################################################################################
    # seq_inputs_dict = get_cross_seq_input_layers(cols=cross_arr_name_list)
    # inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers, for feature crossing

    # cross_emb_out = []
    # last_col = ''
    # for index, col in enumerate(cross_arr_name_list):
    #     #         print(col, 'get embedding!')
    #     emb_layer = get_emb_layer(
    #         col, trainable=False, emb_matrix=dict_cross_emb_all[col])
    #     x = emb_layer(inputs_all[1+index])
    #     if col.split('_')[-1] == 'i':
    #         cross_user_item_i = x
    #         last_col = col
    #         continue
    #     else:
    #         print(f'crossing net add {last_col} and {col}')
    #         cross_emb_out.append(
    #             cross_net(cross_user_item_i, x, layer_slotid_nettype, hidden_unit=4))
    # cross_emb_out = tf.keras.layers.concatenate(cross_emb_out)
    # cross_emb_out = tf.squeeze(cross_emb_out, [1])
    #!################################################################################################################
    # seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    # inputs_all = inputs_all+list(seq_inputs_dict.values())  # list of input layers
    # masks = tf.equal(seq_inputs_dict['task_id'], 0)
    # # plain id sequences + label sequences
    # layers2concat = []
    # for index, col in enumerate(arr_name_list):
    #     print(col, 'get embedding!')
    #     emb_layer = get_emb_layer(
    #         col, trainable=TRAINABLE_DICT[col], emb_matrix=id_list_dict_emb_all[col][1])
    #     x = emb_layer(seq_inputs_dict[col])
    #     if conv1d_info_dict[col] > -1:
    #         cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
    #                                            kernel_size=1,
    #                                            activation='relu')
    #         x = cov_layer(x)
    #     layers2concat.append(x)
    # x = tf.keras.layers.concatenate(layers2concat)
    #!################################################################################################################
    #!mix1
    # x = trans_net(x, masks, hidden_unit=256)
    # max_pool = tf.keras.layers.GlobalMaxPooling1D()
    # average_pool = tf.keras.layers.GlobalAveragePooling1D()
    # xmaxpool = max_pool(x)
    # xmeanpool = average_pool(x)

    # trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    # mix = concatenate([cross_emb_out, trans_output,
    #    dnn_input], axis=-1)  # !#mix
    mix = dnn_input

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    model = Model(inputs=inputs_list, outputs=[output])
    model.summary()
    return model
Code Example #5
def MIND(user_feature_columns,
         item_feature_columns,
         num_sampled=5,
         k_max=2,
         p=1.0,
         dynamic_k=False,
         user_dnn_hidden_units=(64, 32),
         dnn_activation='relu',
         dnn_use_bn=False,
         l2_reg_dnn=0,
         l2_reg_embedding=1e-6,
         dnn_dropout=0,
         output_activation='linear',
         seed=1024):
    """Instantiates the MIND Model architecture.

    :param user_feature_columns: An iterable containing user's features used by  the model.
    :param item_feature_columns: An iterable containing item's features used by  the model.
    :param num_sampled: int, the number of classes to randomly sample per batch.
    :param k_max: int, the max size of user interest embedding
    :param p: float,the parameter for adjusting the attention distribution in LabelAwareAttention.
    :param dynamic_k: bool, whether or not to use a dynamic interest number
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower
    :param dnn_activation: Activation function to use in the deep net
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :param output_activation: Activation function to use in output layer
    :return: A Keras model instance.

    """

    if len(item_feature_columns) > 1:
        raise ValueError("MIND only supports a single item feature, e.g. item_id")
    item_feature_column = item_feature_columns[0]
    item_feature_name = item_feature_column.name
    item_vocabulary_size = item_feature_column.vocabulary_size
    item_embedding_dim = item_feature_column.embedding_dim

    history_feature_list = [item_feature_name]

    features = build_input_features(user_feature_columns)
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat),
               user_feature_columns)) if user_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat),
               user_feature_columns)) if user_feature_columns else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat),
               user_feature_columns)) if user_feature_columns else []
    history_feature_columns = []
    sparse_varlen_feature_columns = []
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)
    seq_max_len = history_feature_columns[0].maxlen
    inputs_list = list(features.values())

    embedding_matrix_dict = create_embedding_matrix(user_feature_columns +
                                                    item_feature_columns,
                                                    l2_reg_embedding,
                                                    seed=seed,
                                                    prefix="")

    item_features = build_input_features(item_feature_columns)

    query_emb_list = embedding_lookup(embedding_matrix_dict,
                                      item_features,
                                      item_feature_columns,
                                      history_feature_list,
                                      history_feature_list,
                                      to_list=True)
    keys_emb_list = embedding_lookup(embedding_matrix_dict,
                                     features,
                                     history_feature_columns,
                                     history_fc_names,
                                     history_fc_names,
                                     to_list=True)
    dnn_input_emb_list = embedding_lookup(embedding_matrix_dict,
                                          features,
                                          sparse_feature_columns,
                                          mask_feat_list=history_feature_list,
                                          to_list=True)
    dense_value_list = get_dense_input(features, dense_feature_columns)

    sequence_embed_dict = varlen_embedding_lookup(
        embedding_matrix_dict, features, sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(
        sequence_embed_dict,
        features,
        sparse_varlen_feature_columns,
        to_list=True)

    dnn_input_emb_list += sequence_embed_list


    history_emb = PoolingLayer()(NoMask()(keys_emb_list))
    target_emb = PoolingLayer()(NoMask()(query_emb_list))

    hist_len = features['hist_len']

    high_capsule = CapsuleLayer(input_units=item_embedding_dim,
                                out_units=item_embedding_dim,
                                max_len=seq_max_len,
                                k_max=k_max)((history_emb, hist_len))

    if len(dnn_input_emb_list) > 0 or len(dense_value_list) > 0:
        user_other_feature = combined_dnn_input(dnn_input_emb_list,
                                                dense_value_list)

        other_feature_tile = tf.keras.layers.Lambda(
            tile_user_otherfeat, arguments={'k_max':
                                            k_max})(user_other_feature)

        user_deep_input = Concatenate()(
            [NoMask()(other_feature_tile), high_capsule])
    else:
        user_deep_input = high_capsule

    user_embeddings = DNN(user_dnn_hidden_units,
                          dnn_activation,
                          l2_reg_dnn,
                          dnn_dropout,
                          dnn_use_bn,
                          output_activation=output_activation,
                          seed=seed,
                          name="user_embedding")(user_deep_input)
    item_inputs_list = list(item_features.values())

    item_embedding_matrix = embedding_matrix_dict[item_feature_name]

    item_index = EmbeddingIndex(list(range(item_vocabulary_size)))(
        item_features[item_feature_name])

    item_embedding_weight = NoMask()(item_embedding_matrix(item_index))

    pooling_item_embedding_weight = PoolingLayer()([item_embedding_weight])

    if dynamic_k:
        user_embedding_final = LabelAwareAttention(
            k_max=k_max,
            pow_p=p,
        )((user_embeddings, target_emb, hist_len))
    else:
        user_embedding_final = LabelAwareAttention(
            k_max=k_max,
            pow_p=p,
        )((user_embeddings, target_emb))

    output = SampledSoftmaxLayer(num_sampled=num_sampled)([
        pooling_item_embedding_weight, user_embedding_final,
        item_features[item_feature_name]
    ])
    model = Model(inputs=inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", inputs_list)
    model.__setattr__("user_embedding", user_embeddings)

    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__(
        "item_embedding",
        get_item_embedding(pooling_item_embedding_weight,
                           item_features[item_feature_name]))

    return model
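
A hedged usage sketch for MIND. It assumes the DeepCTR feature-column API and DeepMatch's sampledsoftmaxloss helper; the feature names, sizes, and the shared embedding via embedding_name are illustrative.

# Hypothetical usage of MIND defined above (names/sizes are illustrative).
import numpy as np
from deepctr.feature_column import SparseFeat, VarLenSparseFeat
from deepmatch.utils import sampledsoftmaxloss  # assumed DeepMatch helper

seq_len = 20
user_cols = [SparseFeat('user_id', 1000, 16),
             VarLenSparseFeat(SparseFeat('hist_item_id', 5000, 16,
                                         embedding_name='item_id'),
                              maxlen=seq_len, length_name='hist_len')]
item_cols = [SparseFeat('item_id', 5000, 16)]

model = MIND(user_cols, item_cols, num_sampled=5, k_max=2)
model.compile(optimizer='adam', loss=sampledsoftmaxloss)

x = {'user_id': np.random.randint(0, 1000, 256),
     'hist_item_id': np.random.randint(1, 5000, (256, seq_len)),
     'hist_len': np.random.randint(1, seq_len, 256),
     'item_id': np.random.randint(0, 5000, 256)}
y = np.zeros(256)  # dummy target: SampledSoftmaxLayer already outputs the loss
model.fit(x, y, batch_size=64, epochs=1)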
Code Example #6
    def _model_fn(features, labels, mode, config):
        train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
        with variable_scope(DNN_SCOPE_NAME):
            sparse_feature_columns = []
            dense_feature_columns = []
            varlen_sparse_feature_columns = []

            for feat in dnn_feature_columns:

                new_feat_name = list(feat.parse_example_spec.keys())[0]
                if new_feat_name in ['hist_price_id', 'hist_des_id']:
                    varlen_sparse_feature_columns.append(
                        VarLenSparseFeat(SparseFeat(new_feat_name,
                                                    vocabulary_size=100,
                                                    embedding_dim=32,
                                                    use_hash=False),
                                         maxlen=3))
                elif is_embedding(feat):
                    sparse_feature_columns.append(
                        SparseFeat(new_feat_name,
                                   vocabulary_size=feat[0]._num_buckets + 1,
                                   embedding_dim=feat.dimension))
                else:
                    dense_feature_columns.append(DenseFeat(new_feat_name))

            history_feature_columns = []
            sparse_varlen_feature_columns = []
            history_fc_names = list(
                map(lambda x: "hist_" + x, history_feature_list))
            for fc in varlen_sparse_feature_columns:
                feature_name = fc.name
                if feature_name in history_fc_names:
                    history_feature_columns.append(fc)
                else:
                    sparse_varlen_feature_columns.append(fc)
            my_feature_columns = sparse_feature_columns + dense_feature_columns + varlen_sparse_feature_columns
            embedding_dict = create_embedding_matrix(my_feature_columns,
                                                     l2_reg_embedding,
                                                     seed,
                                                     prefix="")

            query_emb_list = embedding_lookup(embedding_dict,
                                              features,
                                              sparse_feature_columns,
                                              history_feature_list,
                                              history_feature_list,
                                              to_list=True)
            keys_emb_list = embedding_lookup(embedding_dict,
                                             features,
                                             history_feature_columns,
                                             history_fc_names,
                                             history_fc_names,
                                             to_list=True)
            dnn_input_emb_list = embedding_lookup(
                embedding_dict,
                features,
                sparse_feature_columns,
                mask_feat_list=history_feature_list,
                to_list=True)
            dense_value_list = get_dense_input(features, dense_feature_columns)
            sequence_embed_dict = varlen_embedding_lookup(
                embedding_dict, features, sparse_varlen_feature_columns)
            sequence_embed_list = get_varlen_pooling_list(
                sequence_embed_dict,
                features,
                sparse_varlen_feature_columns,
                to_list=True)

            dnn_input_emb_list += sequence_embed_list

            keys_emb = concat_func(keys_emb_list, mask=True)
            deep_input_emb = concat_func(dnn_input_emb_list)
            query_emb = concat_func(query_emb_list, mask=True)
            hist = AttentionSequencePoolingLayer(
                att_hidden_size,
                att_activation,
                weight_normalization=att_weight_normalization,
                supports_masking=True)([query_emb, keys_emb])

            deep_input_emb = tf.keras.layers.Concatenate()(
                [NoMask()(deep_input_emb), hist])
            deep_input_emb = tf.keras.layers.Flatten()(deep_input_emb)
            dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
            output = DNN(dnn_hidden_units,
                         dnn_activation,
                         l2_reg_dnn,
                         dnn_dropout,
                         dnn_use_bn,
                         seed=seed)(dnn_input)
            final_logit = tf.keras.layers.Dense(
                1,
                use_bias=False,
                kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                    output)
        return deepctr_model_fn(features,
                                mode,
                                final_logit,
                                labels,
                                task,
                                linear_optimizer,
                                dnn_optimizer,
                                training_chief_hooks=training_chief_hooks)
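
A hedged sketch of how a model_fn like this is typically wired up; the closure variables (dnn_feature_columns, history_feature_list, task, the optimizers, and the attention hyperparameters) are assumed to be bound by an enclosing builder function, and the paths are illustrative.

# Sketch: serving the closure-based _model_fn through tf.estimator (TF 1.x style).
import tensorflow as tf

estimator = tf.estimator.Estimator(
    model_fn=_model_fn,          # closes over dnn_feature_columns, task, ...
    model_dir='/tmp/din_model',  # illustrative checkpoint directory
    config=tf.estimator.RunConfig(save_checkpoints_steps=1000))

# input_fn must yield (features, labels) batches matching parse_example_spec:
# estimator.train(input_fn=train_input_fn, max_steps=10000)
# estimator.evaluate(input_fn=eval_input_fn)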
Code Example #7
File: youtubednn.py  Project: zlin-zou/DeepMatch
def YoutubeDNN(user_feature_columns, item_feature_columns, num_sampled=5,
               user_dnn_hidden_units=(64, 32),
               dnn_activation='relu', dnn_use_bn=False,
               l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, output_activation='linear', seed=1024, ):
    """Instantiates the YoutubeDNN Model architecture.

    :param user_feature_columns: An iterable containing user's features used by  the model.
    :param item_feature_columns: An iterable containing item's features used by  the model.
    :param num_sampled: int, the number of classes to randomly sample per batch.
    :param user_dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the user tower
    :param dnn_activation: Activation function to use in deep net
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in deep net
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :param output_activation: Activation function to use in output layer
    :return: A Keras model instance.

    """

    if len(item_feature_columns) > 1:
        raise ValueError("YoutubeDNN only supports a single item feature, e.g. item_id")
    item_feature_name = item_feature_columns[0].name
    item_vocabulary_size = item_feature_columns[0].vocabulary_size

    embedding_matrix_dict = create_embedding_matrix(user_feature_columns + item_feature_columns, l2_reg_embedding,
                                                    seed=seed)

    user_features = build_input_features(user_feature_columns)
    user_inputs_list = list(user_features.values())
    user_sparse_embedding_list, user_dense_value_list = input_from_feature_columns(user_features, user_feature_columns,
                                                                                   l2_reg_embedding, seed=seed,
                                                                                   embedding_matrix_dict=embedding_matrix_dict)
    user_dnn_input = combined_dnn_input(user_sparse_embedding_list, user_dense_value_list)

    item_features = build_input_features(item_feature_columns)
    item_inputs_list = list(item_features.values())
    user_dnn_out = DNN(user_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                       dnn_use_bn, output_activation=output_activation, seed=seed)(user_dnn_input)

    item_index = EmbeddingIndex(list(range(item_vocabulary_size)))(item_features[item_feature_name])

    item_embedding_matrix = embedding_matrix_dict[item_feature_name]
    item_embedding_weight = NoMask()(item_embedding_matrix(item_index))

    pooling_item_embedding_weight = PoolingLayer()([item_embedding_weight])

    output = SampledSoftmaxLayer(num_sampled=num_sampled)(
        [pooling_item_embedding_weight, user_dnn_out, item_features[item_feature_name]])
    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    model.__setattr__("user_input", user_inputs_list)
    model.__setattr__("user_embedding", user_dnn_out)

    model.__setattr__("item_input", item_inputs_list)
    model.__setattr__("item_embedding",
                      get_item_embedding(pooling_item_embedding_weight, item_features[item_feature_name]))

    return model
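
Because the function attaches user_input/user_embedding and item_input/item_embedding to the model, the two towers can be split out after training for retrieval; a minimal sketch (the predict inputs and the ANN step are assumptions):

# Sketch: extracting the user and item towers for ANN-based retrieval.
from tensorflow.keras.models import Model

user_model = Model(inputs=model.user_input, outputs=model.user_embedding)
item_model = Model(inputs=model.item_input, outputs=model.item_embedding)

# user_vecs = user_model.predict(user_feature_dict, batch_size=4096)
# item_vecs = item_model.predict(item_feature_dict, batch_size=4096)
# Index item_vecs with an ANN library (e.g. faiss) and query with user_vecs.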
Code Example #8
def create_model(linear_feature_columns,
                 dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME],
                 dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001,
                 l2_reg_dnn=0,
                 seed=1024,
                 dnn_dropout=0,
                 dnn_activation='relu',
                 dnn_use_bn=False,
                 task='binary'):

    K.clear_session()
    #!################################################################################################################
    inputs_all = [
        get_input_feature_layer(name='user_1',
                                feature_shape=dense_feature_size),
        get_input_feature_layer(name='item_1',
                                feature_shape=dense_feature_size)
    ]
    layer_user_1 = inputs_all[0]
    layer_user_1 = K.expand_dims(layer_user_1, 1)
    layer_item_1 = inputs_all[1]
    layer_item_1 = K.expand_dims(layer_item_1, 1)
    cross_emb_out1 = cross_net(layer_user_1, layer_item_1)
    cross_emb_out = tf.squeeze(cross_emb_out1, [1])
    #!################################################################################################################
    seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers
    masks = tf.equal(seq_inputs_dict['task_id'], 0)
    # plain id sequences + label sequences
    layers2concat = []
    for index, col in enumerate(arr_name_list):
        print(col, 'get embedding!')
        emb_layer = get_emb_layer(col,
                                  trainable=TRAINABLE_DICT[col],
                                  emb_matrix=id_list_dict_emb_all[col][1])
        x = emb_layer(seq_inputs_dict[col])
        if conv1d_info_dict[col] > -1:
            cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
                                               kernel_size=1,
                                               activation='relu')
            x = cov_layer(x)
        layers2concat.append(x)
    x = tf.keras.layers.concatenate(layers2concat)
    #!################################################################################################################
    #!mix1
    x = trans_net(x, masks, hidden_unit=256)
    max_pool = tf.keras.layers.GlobalMaxPooling1D()
    average_pool = tf.keras.layers.GlobalAveragePooling1D()
    xmaxpool = max_pool(x)
    xmeanpool = average_pool(x)

    trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([cross_emb_out, trans_output, dnn_input],
                      axis=-1)  # !#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    model = Model(inputs=inputs_all + inputs_list, outputs=[output])
    model.summary()
    return model
Code Example #9
def xDeepFM_MTL(linear_feature_columns,
                dnn_feature_columns,
                dnn_hidden_units=(256, 256),
                task_net_size=(128, ),
                cin_layer_size=(
                    128,
                    128,
                ),
                cin_split_half=True,
                cin_activation='relu',
                l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001,
                l2_reg_dnn=0,
                l2_reg_cin=0,
                seed=1024,
                dnn_dropout=0,
                dnn_activation='relu',
                dnn_use_bn=False,
                task='binary'):
    """Instantiates the xDeepFM architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of deep net
    :param cin_layer_size: list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit
    :param cin_activation: activation function used on feature maps
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: L2 regularizer strength applied to deep net
    :param l2_reg_cin: L2 regularizer strength applied to CIN.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)

    fm_input = concat_func(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       l2_reg_cin, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                exFM_out)
    else:
        # Both task heads add exFM_logit below, so the CIN must be enabled.
        raise ValueError("cin_layer_size must be a non-empty list")

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_output = DNN(dnn_hidden_units,
                     dnn_activation,
                     l2_reg_dnn,
                     dnn_dropout,
                     dnn_use_bn,
                     seed=seed)(dnn_input)

    finish_out = DNN(task_net_size)(dnn_output)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out = DNN(task_net_size)(dnn_output)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    finish_logit = tf.keras.layers.add(
        [linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
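
A hedged usage sketch for the two-head xDeepFM_MTL; the losses are keyed by the output names 'finish' and 'like' set in the PredictionLayer calls above, and the feature columns are illustrative.

# Hypothetical usage of xDeepFM_MTL defined above (names/sizes illustrative).
import numpy as np
from deepctr.feature_column import SparseFeat

cols = [SparseFeat('uid', 10000, 8), SparseFeat('item_id', 50000, 8)]
model = xDeepFM_MTL(cols, cols)
model.compile(optimizer='adagrad',
              loss={'finish': 'binary_crossentropy',
                    'like': 'binary_crossentropy'},
              loss_weights={'finish': 1.0, 'like': 1.0})

x = {'uid': np.random.randint(0, 10000, 256),
     'item_id': np.random.randint(0, 50000, 256)}
y = {'finish': np.random.randint(0, 2, 256),
     'like': np.random.randint(0, 2, 256)}
model.fit(x, y, batch_size=64, epochs=1)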
Code Example #10
File: deepautoint.py  Project: Launche/sflen
def DeepAutoInt(
    linear_feature_columns,
    dnn_feature_columns,
    att_layer_num=3,
    att_embedding_size=8,
    att_head_num=2,
    att_res=True,
    dnn_hidden_units=(256, 256),
    dnn_activation='relu',
    l2_reg_linear=1e-5,
    l2_reg_embedding=1e-5,
    l2_reg_dnn=0,
    dnn_use_bn=False,
    dnn_dropout=0,
    seed=1024,
    fm_group=[DEFAULT_GROUP_NAME],
    task='binary',
):
    """Instantiates the AutoInt Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param att_layer_num: int.The InteractingLayer number to be used.
    :param att_embedding_size: int.The embedding size in multi-head self-attention network.
    :param att_head_num: int.The head number in multi-head  self-attention network.
    :param att_res: bool.Whether or not use standard residual connections before output.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of DNN
    :param dnn_activation: Activation function to use in DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in DNN
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    if len(dnn_hidden_units) <= 0 and att_layer_num <= 0:
        raise ValueError("Either dnn_hidden_units or att_layer_num must be > 0")

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)
    sparse_embedding_list = list(
        chain.from_iterable(group_embedding_dict.values()))

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    att_input = concat_func(sparse_embedding_list, axis=1)

    for _ in range(att_layer_num):
        att_input = InteractingLayer(att_embedding_size, att_head_num,
                                     att_res)(att_input)
    att_output = tf.keras.layers.Flatten()(att_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    if len(dnn_hidden_units) > 0 and att_layer_num > 0:  # Deep & Interacting Layer
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                stack_out)
    elif len(dnn_hidden_units) > 0:  # Only Deep
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                deep_out)
    elif att_layer_num > 0:  # Only Interacting Layer
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                att_output)
    else:  # Error
        raise NotImplementedError
    final_logit = add_func([linear_logit, fm_logit, final_logit])

    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)

    return model
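
The branch structure above means the final head can be attention-only, deep-only, or both; a short sketch of the two extremes (feature columns are illustrative, same assumed DeepCTR API):

# Sketch: the interacting-only and combined configurations of DeepAutoInt.
from deepctr.feature_column import SparseFeat

cols = [SparseFeat('user_id', 1000, 8), SparseFeat('item_id', 5000, 8)]
att_only = DeepAutoInt(cols, cols, att_layer_num=3, dnn_hidden_units=())
combined = DeepAutoInt(cols, cols, att_layer_num=3, dnn_hidden_units=(256, 256))
# Both variants still add the linear and FM logits before PredictionLayer.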