Example #1
def CCPM(feature_dim_dict, embedding_size=8, conv_kernel_width=(6, 5), conv_filters=(4, 4), dnn_hidden_units=(256,),
         l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_dnn=0, dnn_dropout=0, init_std=0.0001, seed=1024,
         task='binary', ):
    """Instantiates the Convolutional Click Prediction Model architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param conv_kernel_width: list of positive integers or empty list, the width of the filter in each conv layer.
    :param conv_filters: list of positive integers or empty list, the number of filters in each conv layer.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    check_feature_config_dict(feature_dim_dict)
    if len(conv_kernel_width) != len(conv_filters):
        raise ValueError(
            "conv_kernel_width must have same element with conv_filters")

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(feature_dim_dict,
                                                                                               embedding_size,
                                                                                               l2_reg_embedding,
                                                                                               l2_reg_linear, init_std,
                                                                                               seed,
                                                                                               create_linear_weight=True)

    linear_logit = get_linear_logit(
        linear_emb_list, dense_input_dict, l2_reg_linear)
    n = len(deep_emb_list)  # number of feature fields
    l = len(conv_filters)  # number of conv layers

    conv_input = concat_fun(deep_emb_list, axis=1)
    pooling_result = tf.keras.layers.Lambda(
        lambda x: tf.expand_dims(x, axis=3))(conv_input)

    for i in range(1, l + 1):
        filters = conv_filters[i - 1]
        width = conv_kernel_width[i - 1]
        # flexible p-max pooling schedule: keep more positions in early layers, a fixed 3 in the last
        k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3

        conv_result = tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1), padding='same',
                                             activation='tanh', use_bias=True, )(pooling_result)
        pooling_result = KMaxPooling(
            k=min(k, conv_result.shape[1].value), axis=1)(conv_result)

    flatten_result = tf.keras.layers.Flatten()(pooling_result)
    final_logit = DNN(dnn_hidden_units, l2_reg=l2_reg_dnn,
                      dropout_rate=dnn_dropout)(flatten_result)
    final_logit = tf.keras.layers.Dense(1, use_bias=False)(final_logit)

    final_logit = tf.keras.layers.add([final_logit, linear_logit])
    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
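The pooling sizes in the loop above follow CCPM's flexible p-max pooling schedule: early conv layers keep a larger fraction of the n field positions, and the last layer keeps a fixed 3. A small standalone sketch (plain Python, not part of the library) makes the schedule concrete:

# Illustrative sketch of the flexible p-max pooling schedule used in CCPM above.
# n: number of feature fields, l: number of conv layers, i: 1-based layer index.
def kmax_schedule(n, l):
    ks = []
    for i in range(1, l + 1):
        k = max(1, int((1 - pow(i / l, l - i)) * n)) if i < l else 3
        ks.append(k)
    return ks

print(kmax_schedule(n=26, l=2))  # [13, 3]: keep 13 positions after layer 1, then 3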
Example #2
def AFM(
    feature_dim_dict,
    embedding_size=8,
    use_attention=True,
    attention_factor=8,
    l2_reg_linear=1e-5,
    l2_reg_embedding=1e-5,
    l2_reg_att=1e-5,
    afm_dropout=0,
    init_std=0.0001,
    seed=1024,
    task='binary',
):
    """Instantiates the Attentonal Factorization Machine architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param use_attention: bool, whether to use attention; if set to ``False``, it is the same as a **standard Factorization Machine**
    :param attention_factor: positive integer, units in the attention net
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_att: float. L2 regularizer strength applied to the attention net
    :param afm_dropout: float in [0,1), fraction of the attention net output units to drop out.
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(
        feature_dim_dict,
        embedding_size,
        l2_reg_embedding,
        l2_reg_linear,
        init_std,
        seed,
        create_linear_weight=True)

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)

    fm_input = concat_fun(deep_emb_list, axis=1)
    if use_attention:
        fm_logit = AFMLayer(attention_factor, l2_reg_att, afm_dropout,
                            seed)(deep_emb_list, )
    else:
        fm_logit = FM()(fm_input)

    final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
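For reference, the attention-based pooling performed by AFMLayer can be written out in a few lines of NumPy: form all pairwise element-wise products of the field embeddings, score each pair with a small attention net, and sum the pairs with softmax weights. This is a simplified sketch with randomly initialized weights, not the library's AFMLayer implementation:

import numpy as np
from itertools import combinations

def afm_logit(embeds, attention_factor=8, seed=0):
    # embeds: list of field embedding vectors for one sample, each of shape (k,)
    rng = np.random.default_rng(seed)
    k = embeds[0].shape[0]
    W = rng.normal(size=(k, attention_factor))  # attention net weights (random here)
    b = np.zeros(attention_factor)
    h = rng.normal(size=attention_factor)
    p = rng.normal(size=k)                      # projects the pooled vector to a scalar logit

    pairs = [vi * vj for vi, vj in combinations(embeds, 2)]           # element-wise products
    scores = np.array([h @ np.maximum(e @ W + b, 0) for e in pairs])  # attention scores
    att = np.exp(scores) / np.exp(scores).sum()                       # softmax over pairs
    pooled = sum(a * e for a, e in zip(att, pairs))                   # attention-weighted sum
    return float(pooled @ p)

print(afm_logit([np.ones(4), np.arange(4.0), np.full(4, 0.5)]))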
Example #3
def FNN(
    feature_dim_dict,
    embedding_size=8,
    dnn_hidden_units=(128, 128),
    l2_reg_embedding=1e-5,
    l2_reg_linear=1e-5,
    l2_reg_dnn=0,
    init_std=0.0001,
    seed=1024,
    dnn_dropout=0,
    dnn_activation='relu',
    task='binary',
):
    """Instantiates the Factorization-supported Neural Network architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear weights
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(
        feature_dim_dict,
        embedding_size,
        l2_reg_embedding,
        l2_reg_linear,
        init_std,
        seed,
        create_linear_weight=True)

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)

    deep_input = tf.keras.layers.Flatten()(concat_fun(deep_emb_list))
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   False, seed)(deep_input)
    deep_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(deep_out)
    final_logit = tf.keras.layers.add([deep_logit, linear_logit])
    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
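Each of these builders returns a standard tf.keras Model, so training is the usual compile/fit flow. A minimal usage sketch; feature_dim_dict, model_input and labels are hypothetical placeholders that must match the input format expected by this DeepCTR version:

# Hypothetical usage sketch; feature_dim_dict, model_input and labels are not defined here.
model = FNN(feature_dim_dict, embedding_size=8, dnn_hidden_units=(128, 128))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model_input: list of arrays ordered like the model's inputs; labels: binary targets
history = model.fit(model_input, labels, batch_size=256, epochs=10, validation_split=0.1)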
Example #4
def preprocess_input_embedding(
    feature_dim_dict,
    embedding_size,
    l2_reg_embedding,
    l2_reg_linear,
    init_std,
    seed,
    return_linear_logit=True,
):
    # build Keras Input tensors for single-value sparse and dense features
    sparse_input_dict, dense_input_dict = create_singlefeat_inputdict(
        feature_dim_dict)
    # build Input tensors for variable-length (sequence) features
    sequence_input_dict, sequence_input_len_dict, sequence_max_len_dict = create_varlenfeat_inputdict(
        feature_dim_dict)
    # create the input list plus the deep and linear embeddings for all features
    inputs_list, deep_emb_list, linear_emb_list = get_inputs_embedding(
        feature_dim_dict,
        embedding_size,
        l2_reg_embedding,
        l2_reg_linear,
        init_std,
        seed,
        sparse_input_dict,
        dense_input_dict,
        sequence_input_dict,
        sequence_input_len_dict,
        sequence_max_len_dict,
        return_linear_logit,
        prefix='')
    # a second set of deep embeddings built under the 'fg' name prefix
    _, fg_deep_emb_list, _ = get_inputs_embedding(feature_dim_dict,
                                                  embedding_size,
                                                  l2_reg_embedding,
                                                  l2_reg_linear,
                                                  init_std,
                                                  seed,
                                                  sparse_input_dict,
                                                  dense_input_dict,
                                                  sequence_input_dict,
                                                  sequence_input_len_dict,
                                                  sequence_max_len_dict,
                                                  False,
                                                  prefix='fg')
    if return_linear_logit:
        linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                        l2_reg_linear)
    else:
        linear_logit = None
    return deep_emb_list, fg_deep_emb_list, linear_logit, inputs_list
Example #5
def DeepFM(feature_dim_dict,
           embedding_size=8,
           use_fm=True,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param use_fm: bool, whether to use the FM part or not
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(
        feature_dim_dict,
        embedding_size,
        l2_reg_embedding,
        l2_reg_linear,
        init_std,
        seed,
        create_linear_weight=True)

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)  # sum of the linear transforms of all inputs

    fm_input = concat_fun(deep_emb_list,
                          axis=1)  # concatenated feature embeddings, shape (?, X, embedding_size)
    deep_input = tf.keras.layers.Flatten()(fm_input)  # shape (?, X * embedding_size)
    fm_out = FM()(fm_input)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(deep_input)
    deep_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(deep_out)

    if len(dnn_hidden_units) == 0 and not use_fm:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and use_fm:  # linear + FM
        final_logit = tf.keras.layers.add([linear_logit, fm_out])
    elif len(dnn_hidden_units) > 0 and not use_fm:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, deep_logit])
    elif len(dnn_hidden_units) > 0 and use_fm:  # linear + FM + Deep
        final_logit = tf.keras.layers.add([linear_logit, fm_out, deep_logit])
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
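The FM() layer applied to fm_input computes the standard second-order factorization machine term over the concatenated field embeddings. A minimal NumPy sketch of that computation (illustrative, not the library's FM layer):

import numpy as np

def fm_second_order(E):
    # E: field embeddings of shape (batch, num_fields, embedding_size)
    sum_then_square = np.square(E.sum(axis=1))   # (batch, embedding_size)
    square_then_sum = np.square(E).sum(axis=1)   # (batch, embedding_size)
    # 0.5 * sum_k[(sum_f v_fk)^2 - sum_f (v_fk)^2] equals the sum of all pairwise dot products
    return 0.5 * (sum_then_square - square_then_sum).sum(axis=1, keepdims=True)

E = np.random.rand(2, 5, 8)      # 2 samples, 5 fields, embedding size 8
print(fm_second_order(E).shape)  # (2, 1): one FM logit per sample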
Example #6
def xDeepFM(
    feature_dim_dict,
    embedding_size=8,
    dnn_hidden_units=(256, 256),
    cin_layer_size=(128, 128),
    cin_split_half=True,
    cin_activation='relu',
    l2_reg_linear=0.00001,
    l2_reg_embedding=0.00001,
    l2_reg_dnn=0,
    l2_reg_cin=0,
    init_std=0.0001,
    seed=1024,
    dnn_dropout=0,
    dnn_activation='relu',
    dnn_use_bn=False,
    task='binary',
):
    """Instantiates the xDeepFM architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param cin_layer_size: list of positive integers or empty list, the feature maps in each hidden layer of the Compressed Interaction Network
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer will connect to the output unit
    :param cin_activation: activation function used on the feature maps
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the deep net
    :param l2_reg_cin: float. L2 regularizer strength applied to the CIN.
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param dnn_activation: activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    check_feature_config_dict(feature_dim_dict)

    deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(
        feature_dim_dict,
        embedding_size,
        l2_reg_embedding,
        l2_reg_linear,
        init_std,
        seed,
        create_linear_weight=True)

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)

    fm_input = concat_fun(deep_emb_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation, cin_split_half,
                       l2_reg_cin, seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(1, activation=None)(exFM_out)

    deep_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(deep_input)
    deep_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(deep_out)

    if len(dnn_hidden_units) == 0 and len(cin_layer_size) == 0:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and len(cin_layer_size) > 0:  # linear + CIN
        final_logit = tf.keras.layers.add([linear_logit, exFM_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) == 0:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, deep_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) > 0:  # linear + CIN + Deep
        final_logit = tf.keras.layers.add(
            [linear_logit, deep_logit, exFM_logit])
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
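Each hidden layer stacked by CIN(...) takes the base field matrix X^0 and the previous layer's output X^(k-1), forms their pairwise element-wise interactions along the embedding dimension, and compresses them with learned feature maps. A NumPy/einsum sketch of one such layer for a single sample, with randomly initialized feature-map weights (illustrative, not the library's CIN layer):

import numpy as np

def cin_layer(x0, x_prev, W):
    # x0: (m, D) base field embeddings; x_prev: (h, D) previous CIN layer output
    # W:  (next_size, h, m) feature-map weights
    z = np.einsum('hd,md->hmd', x_prev, x0)   # pairwise interactions: (h, m, D)
    return np.einsum('fhm,hmd->fd', W, z)     # compress into next_size feature maps: (next_size, D)

m, D, next_size = 5, 8, 4
x0 = np.random.rand(m, D)
x1 = cin_layer(x0, x0, np.random.rand(next_size, m, m))  # first layer uses x_prev = x0
print(x1.shape)  # (4, 8)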
Example #7
def NFFM(
    feature_dim_dict,
    embedding_size=4,
    dnn_hidden_units=(128, 128),
    l2_reg_embedding=1e-5,
    l2_reg_linear=1e-5,
    l2_reg_dnn=0,
    dnn_dropout=0,
    init_std=0.0001,
    seed=1024,
    include_linear=True,
    use_bn=True,
    reduce_sum=False,
    task='binary',
):
    """Instantiates the Field-aware Neural Factorization Machine architecture.

    :param feature_dim_dict: dict, indicates the sparse and dense fields, e.g. {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2}, 'dense': ['field_4', 'field_5']}
    :param embedding_size: positive integer, embedding size of sparse features
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the deep net
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, standard deviation used to initialize the embedding vectors
    :param seed: integer, random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping out a given DNN coordinate.
    :param include_linear: bool, whether to include the linear term or not
    :param use_bn: bool, whether to apply BatchNormalization to the FFM output or not
    :param reduce_sum: bool, whether to apply reduce_sum on the cross vectors
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    check_feature_config_dict(feature_dim_dict)
    if 'sequence' in feature_dim_dict and len(
            feature_dim_dict['sequence']) > 0:
        raise ValueError(
            "sequence input is not yet supported in NFFM")  # TODO: support sequence input

    sparse_input_dict, dense_input_dict = create_singlefeat_inputdict(
        feature_dim_dict)

    sparse_embedding, dense_embedding, linear_embedding = create_embedding_dict(
        feature_dim_dict,
        embedding_size,
        init_std,
        seed,
        l2_reg_embedding,
        l2_reg_linear,
    )

    embed_list = []
    # field-aware pairwise interactions between sparse features
    for i, j in itertools.combinations(feature_dim_dict['sparse'], 2):
        i_input = sparse_input_dict[i.name]
        if i.hash_flag:
            i_input = Hash(i.dimension)(i_input)
        j_input = sparse_input_dict[j.name]
        if j.hash_flag:
            j_input = Hash(j.dimension)(j_input)

        element_wise_prod = multiply([
            sparse_embedding[i.name][j.name](i_input),
            sparse_embedding[j.name][i.name](j_input)
        ])
        if reduce_sum:
            element_wise_prod = Lambda(lambda element_wise_prod: K.sum(
                element_wise_prod, axis=-1))(element_wise_prod)
        embed_list.append(element_wise_prod)
    # field-aware pairwise interactions between dense features
    for i, j in itertools.combinations(feature_dim_dict['dense'], 2):
        element_wise_prod = multiply([
            dense_embedding[i.name][j.name](dense_input_dict[i.name]),
            dense_embedding[j.name][i.name](dense_input_dict[j.name])
        ])
        if reduce_sum:
            element_wise_prod = Lambda(lambda element_wise_prod: K.sum(
                element_wise_prod, axis=-1))(element_wise_prod)
        embed_list.append(
            Lambda(lambda x: K.expand_dims(x, axis=1))(element_wise_prod))

    # field-aware pairwise interactions between sparse and dense features
    for i in feature_dim_dict['sparse']:
        i_input = sparse_input_dict[i.name]
        if i.hash_flag:
            i_input = Hash(i.dimension)(i_input)
        for j in feature_dim_dict['dense']:
            element_wise_prod = multiply([
                sparse_embedding[i.name][j.name](i_input),
                dense_embedding[j.name][i.name](dense_input_dict[j.name])
            ])

            if reduce_sum:
                element_wise_prod = Lambda(lambda element_wise_prod: K.sum(
                    element_wise_prod, axis=-1))(element_wise_prod)
            embed_list.append(element_wise_prod)

    ffm_out = tf.keras.layers.Flatten()(concat_fun(embed_list, axis=1))
    if use_bn:
        ffm_out = tf.keras.layers.BatchNormalization()(ffm_out)
    ffm_out = DNN(dnn_hidden_units,
                  l2_reg=l2_reg_dnn,
                  dropout_rate=dnn_dropout)(ffm_out)
    final_logit = Dense(1, use_bias=False)(ffm_out)

    linear_emb_list = get_embedding_vec_list(linear_embedding,
                                             sparse_input_dict,
                                             feature_dim_dict['sparse'])

    linear_logit = get_linear_logit(linear_emb_list, dense_input_dict,
                                    l2_reg_linear)

    if include_linear:
        final_logit = add([final_logit, linear_logit])

    output = PredictionLayer(task)(final_logit)

    inputs_list = get_inputs_list([sparse_input_dict, dense_input_dict])
    model = Model(inputs=inputs_list, outputs=output)
    return model
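The nested sparse_embedding[i.name][j.name] lookups above implement field-aware embeddings: feature i keeps a separate embedding vector for every other field j, and the (i, j) interaction is the element-wise product of v_{i->j} and v_{j->i}. A tiny NumPy sketch of one such pair interaction (illustrative, with made-up embeddings):

import numpy as np

k = 4
v_i_to_j = np.random.rand(k)   # embedding of feature i specific to field j
v_j_to_i = np.random.rand(k)   # embedding of feature j specific to field i

interaction = v_i_to_j * v_j_to_i    # element-wise product, shape (k,)
interaction_sum = interaction.sum()  # what reduce_sum=True would produce
print(interaction, interaction_sum)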