Example #1
def DeepFM2(linear_feature_columns, dnn_feature_columns, fm_group=[DEFAULT_GROUP_NAME], dnn_hidden_units=(128, 128),
            l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0,
            dnn_activation='relu', dnn_use_bn=False, task='binary', use_attention=True, attention_factor=8,
            l2_reg_att=1e-5, afm_dropout=0):
    """Instantiates the DeepFM Network architecture.

    :param afm_dropout:
    :param l2_reg_att:
    :param attention_factor:
    :param use_attention:
    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param fm_group: list, group_name of features that will be used to do feature interactions.
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(
        linear_feature_columns + dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding,
                                                                        seed, support_group=True)

    if use_attention:
        fm_logit = add_func([AFMLayer(attention_factor, l2_reg_att, afm_dropout,
                                      seed)(list(v)) for k, v in group_embedding_dict.items() if k in fm_group])
    else:
        fm_logit = add_func([FM()(concat_func(v, axis=1))
                             for k, v in group_embedding_dict.items() if k in fm_group])

    dnn_input = combined_dnn_input(list(chain.from_iterable(
        group_embedding_dict.values())), dense_value_list)
    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(
        1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed=seed))(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
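A minimal usage sketch, not part of the original example: it assumes deepctr-style feature columns (SparseFeat, DenseFeat, get_feature_names from deepctr.feature_column, as in deepctr >= 0.8) and synthetic data, purely to show how DeepFM2 is wired up and trained.

import numpy as np
from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names

feature_columns = [SparseFeat('user_id', vocabulary_size=100, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=200, embedding_dim=8),
                   DenseFeat('price', 1)]
# The same columns feed the linear and deep parts here; they may differ in general.
model = DeepFM2(feature_columns, feature_columns, use_attention=True)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])

n = 256  # synthetic sample count
data = {'user_id': np.random.randint(0, 100, n),
        'item_id': np.random.randint(0, 200, n),
        'price': np.random.random(n)}
labels = np.random.randint(0, 2, n)
model.fit({name: data[name] for name in get_feature_names(feature_columns)},
          labels, batch_size=64, epochs=1)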
Example #2
def MMOE(dnn_feature_columns, num_tasks, tasks, num_experts=4, expert_dim=8, dnn_hidden_units=(128, 128),
         l2_reg_embedding=1e-5, l2_reg_dnn=0, task_dnn_units=None, seed=1024, dnn_dropout=0, dnn_activation='relu'):
    """Instantiates the Multi-gate Mixture-of-Experts architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param num_tasks: integer, number of tasks, equal to the number of outputs; must be greater than 1.
    :param tasks: list of str, indicating the loss of each task, ``"binary"`` for binary logloss, ``"regression"`` for regression loss, e.g. ['binary', 'regression'].
    :param num_experts: integer, number of experts.
    :param expert_dim: integer, the hidden units of each expert.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the shared-bottom DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param task_dnn_units: list of positive integers or empty list, the layer number and units in each layer of the task-specific DNNs.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.

    :return: a Keras model instance
    """
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if len(tasks) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of tasks")
    for task in tasks:
        if task not in ['binary', 'regression']:
            raise ValueError("task must be binary or regression, {} is illegal".format(task))

    features = build_input_features(dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                         l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  False, seed=seed)(dnn_input)
    mmoe_outs = MMOELayer(num_tasks, num_experts, expert_dim)(dnn_out)
    if task_dnn_units is not None:
        # optional task-specific towers on top of each gate's expert mixture
        mmoe_outs = [DNN(task_dnn_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(mmoe_out)
                     for mmoe_out in mmoe_outs]

    task_outputs = []
    for mmoe_out, task in zip(mmoe_outs, tasks):
        logit = tf.keras.layers.Dense(
            1, use_bias=False, activation=None)(mmoe_out)
        output = PredictionLayer(task)(logit)
        task_outputs.append(output)

    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=task_outputs)
    return model
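A minimal usage sketch, again assuming deepctr-style feature columns and synthetic data; the per-head losses are given in the same order as `tasks`.

import numpy as np
from deepctr.feature_column import SparseFeat

dnn_feature_columns = [SparseFeat('user_id', vocabulary_size=100, embedding_dim=8),
                       SparseFeat('item_id', vocabulary_size=200, embedding_dim=8)]
model = MMOE(dnn_feature_columns, num_tasks=2, tasks=['binary', 'regression'])
model.compile(optimizer='adam', loss=['binary_crossentropy', 'mse'])

n = 256  # synthetic sample count
data = {'user_id': np.random.randint(0, 100, n),
        'item_id': np.random.randint(0, 200, n)}
labels = [np.random.randint(0, 2, n), np.random.random(n)]
model.fit(data, labels, batch_size=64, epochs=1)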
Example #3
def M(emb1,
      emb1_label,
      emb2,
      emb2_label,
      emb3,
      emb3_label,
      emb4,
      emb4_label,
      emb5,
      emb5_label,
      linear_feature_columns,
      dnn_feature_columns,
      fm_group=[DEFAULT_GROUP_NAME],
      dnn_hidden_units=(128, 128),
      l2_reg_linear=0.00001,
      l2_reg_embedding=0.00001,
      l2_reg_dnn=0,
      seed=1024,
      dnn_dropout=0,
      dnn_activation='relu',
      dnn_use_bn=False,
      task='binary'):

    #!################################################################################################################

    feed_forward_size_trans_1 = 2048
    max_seq_len_trans_1 = 40
    model_dim_trans_1 = 128

    input_trans_1 = Input(shape=(max_seq_len_trans_1, ),
                          name='input_trans_1_layer')
    input_trans_1_label = Input(shape=(max_seq_len_trans_1, ),
                                name='input_trans_1_label_layer')

    x = Embedding(input_dim=5307 + 1,
                  output_dim=128,
                  weights=[emb1],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_1)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb1_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_1_label)

    encodings = PositionEncoding(model_dim_trans_1)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_1, 0)  # True at padded (id 0) positions

    # attention output shape: (bs, 40, 128) -- 4 heads * 32 dims per head
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_1, feed_forward_size_trans_1)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_1 = Dense(5,
                           activation='softmax',
                           name='output_trans_1_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_2 = 2048
    max_seq_len_trans_2 = 40
    model_dim_trans_2 = 128

    input_trans_2 = Input(shape=(max_seq_len_trans_2, ),
                          name='input_trans_2_layer')
    input_trans_2_label = Input(shape=(max_seq_len_trans_2, ),
                                name='input_trans_2_label_layer')

    x = Embedding(input_dim=101 + 1,
                  output_dim=128,
                  weights=[emb2],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_2)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb2_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_2_label)

    encodings = PositionEncoding(model_dim_trans_2)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_2, 0)  # True at padded (id 0) positions

    # attention output shape: (bs, 40, 128) -- 4 heads * 32 dims per head
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_2, feed_forward_size_trans_2)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_2 = Dense(5,
                           activation='softmax',
                           name='output_trans_2_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_3 = 2048
    max_seq_len_trans_3 = 40
    model_dim_trans_3 = 128

    input_trans_3 = Input(shape=(max_seq_len_trans_3, ),
                          name='input_trans_3_layer')
    input_trans_3_label = Input(shape=(max_seq_len_trans_3, ),
                                name='input_trans_3_label_layer')

    x = Embedding(input_dim=8 + 1,
                  output_dim=128,
                  weights=[emb3],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_3)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb3_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_3_label)

    encodings = PositionEncoding(model_dim_trans_3)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_3, 0)  # True at padded (id 0) positions

    # attention output shape: (bs, 40, 128) -- 4 heads * 32 dims per head
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_3, feed_forward_size_trans_3)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_3 = Dense(5,
                           activation='softmax',
                           name='output_trans_3_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_4 = 2048
    max_seq_len_trans_4 = 40
    model_dim_trans_4 = 128

    input_trans_4 = Input(shape=(max_seq_len_trans_4, ),
                          name='input_trans_4_layer')
    input_trans_4_label = Input(shape=(max_seq_len_trans_4, ),
                                name='input_trans_4_label_layer')

    x = Embedding(input_dim=38 + 1,
                  output_dim=128,
                  weights=[emb4],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_4)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb4_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_4_label)

    encodings = PositionEncoding(model_dim_trans_4)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_4, 0)  # True at padded (id 0) positions

    # attention output shape: (bs, 40, 128) -- 4 heads * 32 dims per head
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_4, feed_forward_size_trans_4)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_4 = Dense(5,
                           activation='softmax',
                           name='output_trans_4_layer')(encodings)

    #!################################################################################################################

    feed_forward_size_trans_5 = 2048
    max_seq_len_trans_5 = 40
    model_dim_trans_5 = 128

    input_trans_5 = Input(shape=(max_seq_len_trans_5, ),
                          name='input_trans_5_layer')
    input_trans_5_label = Input(shape=(max_seq_len_trans_5, ),
                                name='input_trans_5_label_layer')

    x = Embedding(input_dim=4317 + 1,
                  output_dim=128,
                  weights=[emb5],
                  trainable=False,
                  input_length=40,
                  mask_zero=True)(input_trans_5)

    x_label = Embedding(input_dim=2 + 1,
                        output_dim=128,
                        weights=[emb5_label],
                        trainable=False,
                        input_length=40,
                        mask_zero=True)(input_trans_5_label)

    encodings = PositionEncoding(model_dim_trans_5)(x)
    encodings = Add()([x, encodings])
    encodings = Add()([x_label, encodings])

    masks = tf.equal(input_trans_5, 0)  # True at padded (id 0) positions

    # attention output shape: (bs, 40, 128) -- 4 heads * 32 dims per head
    attention_out = MultiHeadAttention(
        4, 32)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim_trans_5, feed_forward_size_trans_5)
    ff_out = ff(attention_out)
    # Add & Norm
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    output_trans_5 = Dense(5,
                           activation='softmax',
                           name='output_trans_5_layer')(encodings)

    #!################################################################################################################

    trans_output = concatenate([output_trans_1, output_trans_2, output_trans_3,
                                output_trans_4, output_trans_5], axis=-1)

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([trans_output, dnn_input], axis=-1)  #!#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    # `features` is an OrderedDict of name -> Input tensor; the Model needs the
    # tensors themselves, which `inputs_list` already collects.
    model = Model(inputs=[
        input_trans_1, input_trans_1_label, input_trans_2, input_trans_2_label,
        input_trans_3, input_trans_3_label, input_trans_4, input_trans_4_label,
        input_trans_5, input_trans_5_label
    ] + inputs_list,
                  outputs=[output])

    model.compile(optimizer=optimizers.Adam(2.5e-4),
                  loss={'prediction_layer': losses.binary_crossentropy},
                  metrics=['AUC'])
    return model
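A smoke-test sketch, not from the source: the embedding matrices are random placeholders whose shapes match the hard-coded Embedding layers above (vocabulary + 1 rows, 128 columns); in the original pipeline they would be pretrained and loaded from disk. The feature columns are illustrative deepctr SparseFeat columns.

import numpy as np
from deepctr.feature_column import SparseFeat

emb1 = np.random.normal(size=(5307 + 1, 128))
emb2 = np.random.normal(size=(101 + 1, 128))
emb3 = np.random.normal(size=(8 + 1, 128))
emb4 = np.random.normal(size=(38 + 1, 128))
emb5 = np.random.normal(size=(4317 + 1, 128))
emb_label = np.random.normal(size=(2 + 1, 128))  # shared placeholder for all *_label matrices

feature_columns = [SparseFeat('uid', vocabulary_size=100, embedding_dim=8)]
# M compiles the model internally, so this builds a trainable Keras model.
model = M(emb1, emb_label, emb2, emb_label, emb3, emb_label, emb4, emb_label,
          emb5, emb_label, feature_columns, feature_columns)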
Example #4
def create_model(linear_feature_columns,
                 dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME],
                 dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001,
                 l2_reg_dnn=0,
                 seed=1024,
                 dnn_dropout=0,
                 dnn_activation='relu',
                 dnn_use_bn=False,
                 task='binary'):

    K.clear_session()
    #!################################################################################################################
    inputs_all = [
        get_input_feature_layer(name='slotid_nettype',
                                feature_shape=dense_feature_size)
    ]
    # slotid_nettype
    layer_slotid_nettype = inputs_all[0]
    layer_slotid_nettype = K.expand_dims(layer_slotid_nettype, 1)
    #!################################################################################################################
    # seq_inputs_dict = get_cross_seq_input_layers(cols=cross_arr_name_list)
    # inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers, for feature crossing

    # cross_emb_out = []
    # last_col = ''
    # for index, col in enumerate(cross_arr_name_list):
    #     #         print(col, 'get embedding!')
    #     emb_layer = get_emb_layer(
    #         col, trainable=False, emb_matrix=dict_cross_emb_all[col])
    #     x = emb_layer(inputs_all[1+index])
    #     if col.split('_')[-1] == 'i':
    #         cross_user_item_i = x
    #         last_col = col
    #         continue
    #     else:
    #         print(f'crossing net add {last_col} and {col}')
    #         cross_emb_out.append(
    #             cross_net(cross_user_item_i, x, layer_slotid_nettype, hidden_unit=4))
    # cross_emb_out = tf.keras.layers.concatenate(cross_emb_out)
    # cross_emb_out = tf.squeeze(cross_emb_out, [1])
    #!################################################################################################################
    # seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    # inputs_all = inputs_all+list(seq_inputs_dict.values())  # 输入层list
    # masks = tf.equal(seq_inputs_dict['task_id'], 0)
    # # 普通序列+label序列
    # layers2concat = []
    # for index, col in enumerate(arr_name_list):
    #     print(col, 'get embedding!')
    #     emb_layer = get_emb_layer(
    #         col, trainable=TRAINABLE_DICT[col], emb_matrix=id_list_dict_emb_all[col][1])
    #     x = emb_layer(seq_inputs_dict[col])
    #     if conv1d_info_dict[col] > -1:
    #         cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
    #                                            kernel_size=1,
    #                                            activation='relu')
    #         x = cov_layer(x)
    #     layers2concat.append(x)
    # x = tf.keras.layers.concatenate(layers2concat)
    #!################################################################################################################
    #!mix1
    # x = trans_net(x, masks, hidden_unit=256)
    # max_pool = tf.keras.layers.GlobalMaxPooling1D()
    # average_pool = tf.keras.layers.GlobalAveragePooling1D()
    # xmaxpool = max_pool(x)
    # xmeanpool = average_pool(x)

    # trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    # mix = concatenate([cross_emb_out, trans_output,
    #    dnn_input], axis=-1)  # !#mix
    mix = dnn_input

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    model = Model(inputs=inputs_list, outputs=[output])
    model.summary()
    return model
Example #5
def NCF(user_feature_columns,
        item_feature_columns,
        user_gmf_embedding_dim=20,
        item_gmf_embedding_dim=20,
        user_mlp_embedding_dim=20,
        item_mlp_embedding_dim=20,
        dnn_use_bn=False,
        dnn_hidden_units=(64, 32),
        dnn_activation='relu',
        l2_reg_dnn=0,
        l2_reg_embedding=1e-6,
        dnn_dropout=0,
        seed=1024):
    """Instantiates the NCF Model architecture.

    :param user_feature_columns: A dict mapping each user feature name to its vocabulary size.
    :param item_feature_columns: A dict mapping each item feature name to its vocabulary size.
    :param user_gmf_embedding_dim: int. Embedding dimension of the user features in the GMF part.
    :param item_gmf_embedding_dim: int. Embedding dimension of the item features in the GMF part.
    :param user_mlp_embedding_dim: int. Embedding dimension of the user features in the MLP part.
    :param item_mlp_embedding_dim: int. Embedding dimension of the item features in the MLP part.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net.
    :param dnn_activation: Activation function to use in the deep net.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :return: A Keras model instance.

    """

    # Make the flattened user and item GMF vectors the same length (their least
    # common multiple) so they can be multiplied element-wise below.
    user_dim = len(user_feature_columns) * user_gmf_embedding_dim
    item_dim = len(item_feature_columns) * item_gmf_embedding_dim
    dim = (user_dim * item_dim) // math.gcd(user_dim, item_dim)
    user_gmf_embedding_dim = dim // len(user_feature_columns)
    item_gmf_embedding_dim = dim // len(item_feature_columns)

    # Generalized Matrix Factorization (GMF) Part
    user_gmf_feature_columns = [
        SparseFeat(feat,
                   vocabulary_size=size,
                   embedding_dim=user_gmf_embedding_dim)
        for feat, size in user_feature_columns.items()
    ]
    user_features = build_input_features(user_gmf_feature_columns)
    user_inputs_list = list(user_features.values())
    user_gmf_sparse_embedding_list, user_gmf_dense_value_list = input_from_feature_columns(
        user_features,
        user_gmf_feature_columns,
        l2_reg_embedding,
        seed=seed,
        prefix='gmf_')
    user_gmf_input = combined_dnn_input(user_gmf_sparse_embedding_list, [])
    user_gmf_out = Lambda(lambda x: x,
                          name="user_gmf_embedding")(user_gmf_input)

    item_gmf_feature_columns = [
        SparseFeat(feat,
                   vocabulary_size=size,
                   embedding_dim=item_gmf_embedding_dim)
        for feat, size in item_feature_columns.items()
    ]
    item_features = build_input_features(item_gmf_feature_columns)
    item_inputs_list = list(item_features.values())
    item_gmf_sparse_embedding_list, item_gmf_dense_value_list = input_from_feature_columns(
        item_features,
        item_gmf_feature_columns,
        l2_reg_embedding,
        seed=seed,
        prefix='gmf_')
    item_gmf_input = combined_dnn_input(item_gmf_sparse_embedding_list, [])
    item_gmf_out = Lambda(lambda x: x,
                          name="item_gmf_embedding")(item_gmf_input)

    gmf_out = Multiply()([user_gmf_out, item_gmf_out])

    # Multi-Layer Perceptron (MLP) Part
    user_mlp_feature_columns = [
        SparseFeat(feat,
                   vocabulary_size=size,
                   embedding_dim=user_mlp_embedding_dim)
        for feat, size in user_feature_columns.items()
    ]
    user_mlp_sparse_embedding_list, user_mlp_dense_value_list = input_from_feature_columns(
        user_features,
        user_mlp_feature_columns,
        l2_reg_embedding,
        seed=seed,
        prefix='mlp_')
    user_mlp_input = combined_dnn_input(user_mlp_sparse_embedding_list,
                                        user_mlp_dense_value_list)
    user_mlp_out = Lambda(lambda x: x,
                          name="user_mlp_embedding")(user_mlp_input)

    item_mlp_feature_columns = [
        SparseFeat(feat,
                   vocabulary_size=size,
                   embedding_dim=item_mlp_embedding_dim)
        for feat, size in item_feature_columns.items()
    ]

    item_mlp_sparse_embedding_list, item_mlp_dense_value_list = input_from_feature_columns(
        item_features,
        item_mlp_feature_columns,
        l2_reg_embedding,
        seed=seed,
        prefix='mlp_')
    item_mlp_input = combined_dnn_input(item_mlp_sparse_embedding_list,
                                        item_mlp_dense_value_list)
    item_mlp_out = Lambda(lambda x: x,
                          name="item_mlp_embedding")(item_mlp_input)

    mlp_input = Concatenate(axis=1)([user_mlp_out, item_mlp_out])
    mlp_out = DNN(dnn_hidden_units,
                  dnn_activation,
                  l2_reg_dnn,
                  dnn_dropout,
                  dnn_use_bn,
                  seed=seed,
                  name="mlp_embedding")(mlp_input)

    # Fusion of GMF and MLP
    neumf_input = Concatenate(axis=1)([gmf_out, mlp_out])
    neumf_out = DNN(hidden_units=[1], activation='sigmoid',
                    seed=seed)(neumf_input)
    output = Lambda(lambda x: x, name='neumf_out')(neumf_out)


    model = Model(inputs=user_inputs_list + item_inputs_list, outputs=output)

    return model
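A minimal usage sketch: the feature-column arguments are plain dicts mapping feature name to vocabulary size, as the docstring describes; data is synthetic.

import numpy as np

user_feature_columns = {'user_id': 100, 'user_age': 10}
item_feature_columns = {'item_id': 200}
model = NCF(user_feature_columns, item_feature_columns)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])

n = 256  # synthetic sample count
data = {'user_id': np.random.randint(0, 100, n),
        'user_age': np.random.randint(0, 10, n),
        'item_id': np.random.randint(0, 200, n)}
labels = np.random.randint(0, 2, n)
model.fit(data, labels, batch_size=64, epochs=1)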
Example #6
def create_model(linear_feature_columns,
                 dnn_feature_columns,
                 fm_group=[DEFAULT_GROUP_NAME],
                 dnn_hidden_units=(128, 128),
                 l2_reg_linear=0.00001,
                 l2_reg_embedding=0.00001,
                 l2_reg_dnn=0,
                 seed=1024,
                 dnn_dropout=0,
                 dnn_activation='relu',
                 dnn_use_bn=False,
                 task='binary'):

    K.clear_session()
    #!################################################################################################################
    inputs_all = [
        #         get_input_feature_layer(name = 'user_0',feature_shape = dense_feature_size),
        #                  get_input_feature_layer(name = 'item_0',feature_shape = dense_feature_size),
        get_input_feature_layer(name='user_1',
                                feature_shape=dense_feature_size),
        get_input_feature_layer(name='item_1',
                                feature_shape=dense_feature_size)
    ]
    # slotid_nettype
    #     layer_user_0 = inputs_all[0]
    #     layer_user_0 = K.expand_dims(layer_user_0, 1)
    #     layer_item_0 = inputs_all[1]
    #     layer_item_0 = K.expand_dims(layer_item_0, 1)
    layer_user_1 = inputs_all[0]
    layer_user_1 = K.expand_dims(layer_user_1, 1)
    layer_item_1 = inputs_all[1]
    layer_item_1 = K.expand_dims(layer_item_1, 1)
    #     cross_emb_out0 = cross_net(layer_user_0,layer_item_0)
    cross_emb_out1 = cross_net(layer_user_1, layer_item_1)
    #     cross_emb_out = tf.keras.layers.concatenate([cross_emb_out0,cross_emb_out1])
    cross_emb_out = tf.squeeze(cross_emb_out1, [1])
    #!################################################################################################################
    seq_inputs_dict = get_seq_input_layers(cols=arr_name_list)
    inputs_all = inputs_all + list(seq_inputs_dict.values())  # list of input layers
    masks = tf.equal(seq_inputs_dict['task_id'], 0)
    # ordinary sequences + label sequences
    layers2concat = []
    for index, col in enumerate(arr_name_list):
        print(col, 'get embedding!')
        emb_layer = get_emb_layer(col,
                                  trainable=TRAINABLE_DICT[col],
                                  emb_matrix=id_list_dict_emb_all[col][1])
        x = emb_layer(seq_inputs_dict[col])
        if conv1d_info_dict[col] > -1:
            cov_layer = tf.keras.layers.Conv1D(filters=conv1d_info_dict[col],
                                               kernel_size=1,
                                               activation='relu')
            x = cov_layer(x)
        layers2concat.append(x)
    x = tf.keras.layers.concatenate(layers2concat)
    #!################################################################################################################
    #!mix1
    x = trans_net(x, masks, hidden_unit=256)
    max_pool = tf.keras.layers.GlobalMaxPooling1D()
    average_pool = tf.keras.layers.GlobalAveragePooling1D()
    xmaxpool = max_pool(x)
    xmeanpool = average_pool(x)

    trans_output = tf.keras.layers.concatenate([xmaxpool, xmeanpool])

    #!################################################################################################################
    #!mix2
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)

    mix = concatenate([cross_emb_out, trans_output, dnn_input],
                      axis=-1)  # !#mix

    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(mix)

    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    #!################################################################################################################

    # `features` is an OrderedDict; pass the Input tensors (inputs_list) instead.
    model = Model(inputs=inputs_all + inputs_list, outputs=[output])
    model.summary()
    return model
Example #7
def xDeepFM_MTL(linear_feature_columns,
                dnn_feature_columns,
                dnn_hidden_units=(256, 256),
                task_net_size=(128, ),
                cin_layer_size=(
                    128,
                    128,
                ),
                cin_split_half=True,
                cin_activation='relu',
                l2_reg_linear=0.00001,
                l2_reg_embedding=0.00001,
                l2_reg_dnn=0,
                l2_reg_cin=0,
                seed=1024,
                dnn_dropout=0,
                dnn_activation='relu',
                dnn_use_bn=False,
                task='binary'):
    """Instantiates the xDeepFM architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the shared deep net.
    :param task_net_size: list of positive integers, the layer number and units in each task-specific tower.
    :param cin_layer_size: list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network.
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit.
    :param cin_activation: activation function used on the feature maps.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the deep net.
    :param l2_reg_cin: float. L2 regularizer strength applied to the CIN.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss. (Unused here: both heads are binary.)
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, l2_reg_embedding, seed)

    fm_input = concat_func(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       l2_reg_cin, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_output = DNN(dnn_hidden_units,
                     dnn_activation,
                     l2_reg_dnn,
                     dnn_dropout,
                     dnn_use_bn,
                     seed=seed)(dnn_input)

    finish_out = DNN(task_net_size)(dnn_output)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out = DNN(task_net_size)(dnn_output)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    # Add the CIN logit only when the CIN branch was built above; otherwise
    # exFM_logit is undefined and must be left out.
    extra = [exFM_logit] if len(cin_layer_size) > 0 else []
    finish_logit = tf.keras.layers.add([linear_logit, finish_logit] + extra)
    like_logit = tf.keras.layers.add([linear_logit, like_logit] + extra)

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
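A minimal usage sketch: the two PredictionLayer outputs are named 'finish' and 'like', so losses can be assigned per head by name; the deepctr-style columns and data are synthetic stand-ins.

import numpy as np
from deepctr.feature_column import SparseFeat

feature_columns = [SparseFeat('uid', vocabulary_size=100, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=200, embedding_dim=8)]
model = xDeepFM_MTL(feature_columns, feature_columns)
model.compile(optimizer='adam',
              loss={'finish': 'binary_crossentropy', 'like': 'binary_crossentropy'})

n = 256  # synthetic sample count
data = {'uid': np.random.randint(0, 100, n),
        'item_id': np.random.randint(0, 200, n)}
labels = [np.random.randint(0, 2, n), np.random.randint(0, 2, n)]
model.fit(data, labels, batch_size=64, epochs=1)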
Example #8
def DeepAutoInt(
    linear_feature_columns,
    dnn_feature_columns,
    att_layer_num=3,
    att_embedding_size=8,
    att_head_num=2,
    att_res=True,
    dnn_hidden_units=(256, 256),
    dnn_activation='relu',
    l2_reg_linear=1e-5,
    l2_reg_embedding=1e-5,
    l2_reg_dnn=0,
    dnn_use_bn=False,
    dnn_dropout=0,
    seed=1024,
    fm_group=[DEFAULT_GROUP_NAME],
    task='binary',
):
    """Instantiates the AutoInt Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param att_layer_num: int. The number of InteractingLayers to be used.
    :param att_embedding_size: int. The embedding size in the multi-head self-attention network.
    :param att_head_num: int. The head number in the multi-head self-attention network.
    :param att_res: bool. Whether or not to use standard residual connections before output.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param dnn_activation: Activation function to use in the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :param fm_group: list, group_name of features that will be used to do feature interactions.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """

    if len(dnn_hidden_units) <= 0 and att_layer_num <= 0:
        raise ValueError("Either dnn_hidden_units or att_layer_num must be > 0")

    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        seed,
        support_group=True)
    sparse_embedding_list = list(
        chain.from_iterable(group_embedding_dict.values()))

    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    att_input = concat_func(sparse_embedding_list, axis=1)

    for _ in range(att_layer_num):
        att_input = InteractingLayer(att_embedding_size, att_head_num,
                                     att_res)(att_input)
    att_output = tf.keras.layers.Flatten()(att_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    if len(dnn_hidden_units) > 0 and att_layer_num > 0:  # Deep & Interacting Layer
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                stack_out)
    elif len(dnn_hidden_units) > 0:  # Only Deep
        deep_out = DNN(dnn_hidden_units,
                       dnn_activation,
                       l2_reg_dnn,
                       dnn_dropout,
                       dnn_use_bn,
                       seed=seed)(dnn_input)
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                deep_out)
    elif att_layer_num > 0:  # Only Interacting Layer
        final_logit = tf.keras.layers.Dense(
            1,
            use_bias=False,
            kernel_initializer=tf.keras.initializers.glorot_normal(seed))(
                att_output)
    else:  # Error
        raise NotImplementedError
    final_logit = add_func([linear_logit, fm_logit, final_logit])

    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)

    return model
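A minimal usage sketch; note that stacking InteractingLayers over concat_func(..., axis=1) requires every sparse feature to use the same embedding_dim, so both illustrative columns use 8 here.

import numpy as np
from deepctr.feature_column import SparseFeat

feature_columns = [SparseFeat('uid', vocabulary_size=100, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=200, embedding_dim=8)]
model = DeepAutoInt(feature_columns, feature_columns, att_layer_num=2)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])

n = 256  # synthetic sample count
data = {'uid': np.random.randint(0, 100, n),
        'item_id': np.random.randint(0, 200, n)}
labels = np.random.randint(0, 2, n)
model.fit(data, labels, batch_size=64, epochs=1)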