Example #1
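The snippets below are DeepCTR-style model builders excerpted without their imports. A minimal import header along these lines is assumed (a sketch: module paths and helper spellings such as `concat_fun` vs. `concat_func` differ across DeepCTR releases, and `MMoE` in Example #3 is a project-local layer rather than a DeepCTR export):

import tensorflow as tf
from itertools import chain

from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.regularizers import l2

# Older DeepCTR releases (which most of these snippets match) keep the
# input helpers here:
from deepctr.inputs import (build_input_features, combined_dnn_input,
                            get_dense_input, get_linear_logit,
                            input_from_feature_columns)
from deepctr.layers.core import DNN, PredictionLayer
from deepctr.layers.interaction import CIN, FM
from deepctr.layers.utils import concat_fun
# Example #6 targets the newer grouped API instead, roughly:
# from deepctr.feature_column import DEFAULT_GROUP_NAME, build_input_features, ...
# from deepctr.layers.utils import add_func, concat_func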
def LR(linear_feature_columns,
       dnn_feature_columns,
       dnn_hidden_units=(128, 128),
       l2_reg_linear=1e-5,
       l2_reg_embedding=1e-5,
       l2_reg_dnn=0,
       init_std=0.0001,
       seed=1024,
       dnn_dropout=0,
       dnn_activation='relu',
       task='binary'):
    """Instantiates the Wide&Deep Learning architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to wide part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)

    output = PredictionLayer(task)(linear_logit)

    model = Model(inputs=inputs_list, outputs=output)
    return model
Example #2
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           embedding_size=8,
           use_fm=True,
           use_only_dnn=False,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding size
    :param use_fm: bool, whether to use the FM part
    :param use_only_dnn: bool, if True the final logit is the DNN logit alone
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, used as the initial std of the embedding vectors
    :param seed: integer, used as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    fm_logit = FM()(fm_input)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_out)

    if use_only_dnn:
        final_logit = dnn_logit
    elif len(dnn_hidden_units) == 0 and not use_fm:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and use_fm:  # linear + FM
        final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    elif len(dnn_hidden_units) > 0 and not use_fm:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    elif len(dnn_hidden_units) > 0 and use_fm:  # linear + FM + Deep
        final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    else:
        raise NotImplementedError

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
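A minimal usage sketch for the variant above, assuming the DeepCTR feature-column API of this code's era, in which `SparseFeat` takes a name and vocabulary size and the embedding size is a model-level argument; the column names and sizes are illustrative:

from deepctr.inputs import SparseFeat, DenseFeat  # module path varies by release

# Hypothetical feature columns: two categorical ids plus one dense feature.
feature_columns = [SparseFeat('user_id', 10000),
                   SparseFeat('item_id', 50000),
                   DenseFeat('price', 1)]

model = DeepFM(feature_columns, feature_columns,
               embedding_size=8, use_fm=True, task='binary')
model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
# model.fit(train_model_input, train_labels, batch_size=256, epochs=1),
# where train_model_input is a dict keyed by feature name.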
Example #3
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           embedding_size=8,
           use_fm=True,
           only_dnn=False,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param embedding_size: positive integer,sparse feature embedding_size
    :param use_fm: bool,use FM part or not
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    ## Create an Input[1,] for each feature; features ==> {'feature1': Input[1,], ...}
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    ## [Input1, Input2, ... ]
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)
    ## [embedding layer for each sparse feature, wired to that feature's Input[1,] layer, ...], [Input[1,] layer for each dense feature, ...]

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    # linear_logit_finish = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std,
    #                                 seed=seed, prefix='linear_finish')

    # linear_logit_like = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std,
    #                                 seed=seed, prefix='linear_like')
    ## Linear transform layer, no activation function

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    ## Concatenate the sparse embedding layers

    fm_logit = FM()(fm_input)
    # fm_logit_finish = FM()(fm_input)
    # fm_logit_like = FM()(fm_input)

    ## Output of the FM second-order interaction term; the first-order term and bias are not included

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    # dnn_out = Dense(128, dnn_activation, l2_reg_dnn, dnn_dropout,
    #               dnn_use_bn, seed)(dnn_input)

    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)
    # MMoE is a project-local multi-gate mixture-of-experts layer that
    # returns one tensor per task.
    mmoe_out = MMoE(units=16, num_experts=8, num_tasks=2)(dnn_out)

    [finish_in, like_in] = mmoe_out

    finish_out_1 = Dense(128,
                         dnn_activation,
                         kernel_regularizer=l2(l2_reg_dnn))(finish_in)
    finish_out = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(finish_out_1)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    like_out_1 = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(like_in)
    like_out = Dense(128, dnn_activation,
                     kernel_regularizer=l2(l2_reg_dnn))(like_out_1)

    # finish_logit_stop_grad = Lambda(lambda x: stop_gradient(x))(finish_out)
    # like_out_finish = concat_fun([like_out, finish_logit_stop_grad])

    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    # Single-task DeepFM logit kept from the original code; unused by the
    # multi-task outputs below.
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_out)
    # if len(dnn_hidden_units) > 0 and only_dnn == True:
    #     final_logit = dnn_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == False:  # only linear
    #     final_logit = linear_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == True:  # linear + FM
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == False:  # linear + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == True:  # linear + FM + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    # else:
    #     raise NotImplementedError

    finish_logit = tf.keras.layers.add([linear_logit, fm_logit, finish_logit])
    like_logit = tf.keras.layers.add([linear_logit, fm_logit, like_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
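Since this variant returns two named outputs, 'finish' and 'like', compiling pairs one binary loss with each head; a sketch with illustrative loss weights (feature columns as in the earlier sketch):

model = DeepFM(feature_columns, feature_columns, embedding_size=8)
model.compile('adagrad',
              loss={'finish': 'binary_crossentropy',
                    'like': 'binary_crossentropy'},
              loss_weights={'finish': 0.7, 'like': 0.3})  # illustrative weights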
Example #4
def MT_xDeepFM(linear_feature_columns, dnn_feature_columns, embedding_size=8,
               dnn_hidden_units=(256, 256), cin_layer_size=(128, 128,),
               cin_split_half=True, cin_activation='relu',
               l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
               l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0,
               dnn_activation='relu', dnn_use_bn=False, task='binary'):
    """Instantiates the xDeepFM architecture.

    :param flag_columns:
    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param embedding_size: positive integer,sparse feature embedding_size
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
    :param cin_layer_size: list,list of positive integer or empty list, the feature maps  in each hidden layer of Compressed Interaction Network
    :param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit
    :param cin_activation: activation function used on feature maps
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: L2 regularizer strength applied to deep net
    :param l2_reg_cin: L2 regularizer strength applied to CIN.
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.
    """


    features = build_input_features(linear_feature_columns + dnn_feature_columns)
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std,
                                    seed=seed, prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, cin_activation,
                       cin_split_half, l2_reg_cin, seed)(fm_input)
        # 4 units: the (finish, like) heads for each of the two regions
        exFM_logit = tf.keras.layers.Dense(4, activation=None)(exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)
    deep_logit = tf.keras.layers.Dense(
        4, use_bias=False, activation=None)(deep_out)

    if len(dnn_hidden_units) == 0 and len(cin_layer_size) == 0:  # only linear
        final_logit = linear_logit
    elif len(dnn_hidden_units) == 0 and len(cin_layer_size) > 0:  # linear + CIN
        final_logit = tf.keras.layers.add([linear_logit, exFM_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) == 0:  # linear + Deep
        final_logit = tf.keras.layers.add([linear_logit, deep_logit])
    elif len(dnn_hidden_units) > 0 and len(cin_layer_size) > 0:  # linear + CIN + Deep
        final_logit = tf.keras.layers.add(
            [linear_logit, deep_logit, exFM_logit])
    else:
        raise NotImplementedError


    output_units = PredictionLayer(task)(final_logit)
    # output = None
    # for i in range(len(flag_columns)):
    #     print(i)
    #     selected_index = [0, 1] if flag_columns[i] else [2, 3]
    #     if output != None:
    #         output = tf.concat([output, tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1))], axis=0)
    #     else:
    #         output = tf.reshape(tf.gather(output_units[i, :], selected_index), (1, -1))
    # Route the four output units by the binary 'u_region_id' feature:
    # units 0/1 are the 'finish' head and units 2/3 the 'like' head, one
    # unit per region. Slicing with i:i+1 keeps every term shaped (batch, 1).
    # NOTE: the original summed two (1 - region)-weighted terms for 'like';
    # symmetric region gating is assumed here.
    region = tf.cast(features['u_region_id'], dtype=tf.float32)
    finish = (1.0 - region) * output_units[:, 0:1] + region * output_units[:, 1:2]
    like = (1.0 - region) * output_units[:, 2:3] + region * output_units[:, 3:4]
    # mask = tf.cond(pred=tf.equal(features['u_region_id'], tf.constant(value = 1, dtype = tf.int32)),
    #                true_fn=lambda: [True, True, False, False], false_fn=lambda: [False, False, True, True])
    # output = tf.reshape(tf.boolean_mask(output_units, mask), shape=[-1, 2])
    # finish = output[:, 0]
    # like = output[:, 1]
    # print(output)

    model = tf.keras.models.Model(inputs=inputs_list, outputs=[finish, like])
    return model
Example #5
File: model_more.py Project: yysys/test
def xDeepFM_MTL(
    linear_feature_columns,
    dnn_feature_columns,
    gate_feature_columns,
    embedding_size=8,
    dnn_hidden_units=(256, 256),
    cin_layer_size=(
        256,
        256,
    ),
    cin_split_half=True,
    init_std=0.0001,
    l2_reg_dnn=0,
    dnn_dropout=0,
    dnn_activation='relu',
    dnn_use_bn=False,
    task_net_size=(128, ),
    l2_reg_linear=0.00001,
    l2_reg_embedding=0.00001,
    seed=1024,
):
    # check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns + gate_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)
    gate = get_dense_input(features, gate_feature_columns)[0]

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0,
                       seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1,
            activation=None,
        )(exFM_out)

    # Note: the dense features are dropped here; the DNN runs on the
    # flattened sparse embeddings only.
    # dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    dnn_input = tf.keras.layers.Flatten()(fm_input)
    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)

    finish_out1 = DNN(task_net_size)(deep_out)
    finish_logit1 = tf.keras.layers.Dense(1, use_bias=False,
                                          activation=None)(finish_out1)

    like_out1 = DNN(task_net_size)(deep_out)
    like_logit1 = tf.keras.layers.Dense(1, use_bias=False,
                                        activation=None)(like_out1)

    finish_out2 = DNN(task_net_size)(deep_out)
    finish_logit2 = tf.keras.layers.Dense(1, use_bias=False,
                                          activation=None)(finish_out2)

    like_out2 = DNN(task_net_size)(deep_out)
    like_logit2 = tf.keras.layers.Dense(1, use_bias=False,
                                        activation=None)(like_out2)

    # condition = tf.placeholder("float32", shape=[None, 1], name="condition")

    finish_logit = gate * finish_logit1 + (1.0 - gate) * finish_logit2
    like_logit = gate * like_logit1 + (1.0 - gate) * like_logit2

    finish_logit = tf.keras.layers.add(
        [linear_logit, finish_logit, exFM_logit])
    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)

    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
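`gate` is read with `get_dense_input`, so `gate_feature_columns` is expected to hold a single dense 0/1 feature; per sample, the gate switches between the two `finish`/`like` head pairs. A usage sketch (the gate feature name is hypothetical):

from deepctr.inputs import DenseFeat  # module path varies by release

gate_columns = [DenseFeat('is_region_a', 1)]  # hypothetical 0/1 gate feature
model = xDeepFM_MTL(feature_columns, feature_columns, gate_columns,
                    embedding_size=8)
model.compile('adam', loss={'finish': 'binary_crossentropy',
                            'like': 'binary_crossentropy'})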
Example #6
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           fm_group=[DEFAULT_GROUP_NAME],
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary',
           use_image=False,
           use_text=False,
           embedding_size=128):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param fm_group: list, group names of the features that will be used for feature interactions.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to the embedding vectors
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN
    :param init_std: float, used as the initial std of the embedding vectors
    :param seed: integer, used as the random seed.
    :param dnn_dropout: float in [0,1), the probability of dropping a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :param use_image: bool, whether to add a 128-dim image-embedding input named 'image'
    :param use_text: bool, whether to add a 128-dim text-embedding input named 'text'
    :param embedding_size: positive integer, projection size for the image/text embeddings
    :return: A Keras model instance.
    """
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    group_embedding_dict, dense_value_list = input_from_feature_columns(
        features,
        dnn_feature_columns,
        l2_reg_embedding,
        init_std,
        seed,
        support_group=True)

    if use_image:
        image_input = tf.keras.layers.Input(shape=(128, ), name='image')
        image_emb = tf.keras.layers.Dense(
            embedding_size,
            use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(image_input)
        image_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size, ))(image_emb)
        group_embedding_dict[DEFAULT_GROUP_NAME].append(image_emb)
        inputs_list.append(image_input)

    if use_text:
        text_input = tf.keras.layers.Input(shape=(128, ), name='text')
        text_emb = tf.keras.layers.Dense(
            embedding_size,
            use_bias=False,
            kernel_regularizer=l2(l2_reg_embedding))(text_input)
        text_emb = tf.keras.layers.Reshape(
            (1, embedding_size), input_shape=(embedding_size, ))(text_emb)
        group_embedding_dict[DEFAULT_GROUP_NAME].append(text_emb)
        inputs_list.append(text_input)

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear',
                                    l2_reg=l2_reg_linear)
    fm_logit = add_func([
        FM()(concat_func(v, axis=1)) for k, v in group_embedding_dict.items()
        if k in fm_group
    ])

    dnn_input = combined_dnn_input(
        list(chain.from_iterable(group_embedding_dict.values())),
        dense_value_list)
    dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                     dnn_use_bn, seed)(dnn_input)
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_output)

    final_logit = add_func([linear_logit, fm_logit, dnn_logit])

    output = PredictionLayer(task)(final_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
Example #7
def xDeepFM_MTL(
    linear_feature_columns,
    dnn_feature_columns,
    embedding_size=8,
    dnn_hidden_units=(256, 256),
    cin_layer_size=(
        256,
        256,
    ),
    cin_split_half=True,
    init_std=0.0001,
    l2_reg_dnn=0,
    dnn_dropout=0,
    dnn_activation='relu',
    dnn_use_bn=False,
    task_net_size=(128, ),
    l2_reg_linear=0.00001,
    l2_reg_embedding=0.00001,
    seed=1024,
):
    # check_feature_config_dict(feature_dim_dict)
    if len(task_net_size) < 1:
        raise ValueError('task_net_size must be at least one layer')

    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    fm_input = concat_fun(sparse_embedding_list, axis=1)

    if len(cin_layer_size) > 0:
        exFM_out = CIN(cin_layer_size, 'relu', cin_split_half, 0,
                       seed)(fm_input)
        exFM_logit = tf.keras.layers.Dense(
            1,
            activation=None,
        )(exFM_out)

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                   dnn_use_bn, seed)(dnn_input)

    like_out = DNN(task_net_size)(deep_out)
    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    like_logit = tf.keras.layers.add([linear_logit, like_logit, exFM_logit])

    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=output_like)
    return model