Exemple #1
0
def main():
    """Build, compile and train a two-task MMoE classifier on prepared data."""
    # Pre-split datasets plus per-task (units, name) metadata.
    (train_data, train_label, validation_data, validation_label,
     test_data, test_label, output_info) = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Shared input feeding the multi-gate mixture-of-experts layer.
    input_layer = Input(shape=(num_features,))
    mmoe_layers = MMoE(units=4, num_experts=8, num_tasks=2)(input_layer)

    # One relu tower plus a softmax head per task, named from output_info.
    output_layers = []
    for task_index, task_layer in enumerate(mmoe_layers):
        task_info = output_info[task_index]
        tower = Dense(units=8,
                      activation='relu',
                      kernel_initializer=VarianceScaling())(task_layer)
        head = Dense(units=task_info[0],
                     name=task_info[1],
                     activation='softmax',
                     kernel_initializer=VarianceScaling())(tower)
        output_layers.append(head)

    # Compile with one binary cross-entropy loss per named output.
    model = Model(inputs=[input_layer], outputs=output_layers)
    model.compile(
        loss={'income': 'binary_crossentropy', 'marital': 'binary_crossentropy'},
        optimizer=Adam(),
        metrics=['accuracy'])

    # Print out model architecture summary
    model.summary()

    # Fit while tracking ROC AUC on all three splits via the callback.
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              callbacks=[
                  ROCCallback(training_data=(train_data, train_label),
                              validation_data=(validation_data, validation_label),
                              test_data=(test_data, test_label))
              ],
              epochs=100)
Exemple #2
0
def main():
    """Build, compile and train a two-task MMoE regression model."""
    # Pre-split datasets; labels arrive as one array per task.
    (train_data, train_label, validation_data, validation_label,
     test_data, test_label) = data_preparation()
    print(np.shape(train_label))
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Shared input feeding the multi-gate mixture-of-experts layer.
    input_layer = Input(shape=(num_features,))
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(input_layer)

    # Output names for the two regression tasks.
    output_info = ['y0', 'y1']

    # One relu tower plus a 1-unit linear head per task.
    output_layers = []
    for task_index, task_layer in enumerate(mmoe_layers):
        tower = Dense(units=8,
                      activation='relu',
                      kernel_initializer=VarianceScaling())(task_layer)
        head = Dense(units=1,
                     name=output_info[task_index],
                     activation='linear',
                     kernel_initializer=VarianceScaling())(tower)
        output_layers.append(head)

    model = Model(inputs=[input_layer], outputs=output_layers)
    # Candidate learning rates; only the smallest is used here.
    learning_rates = [1e-4, 1e-3, 1e-2]
    model.compile(
        loss={'y0': 'mean_squared_error', 'y1': 'mean_squared_error'},
        optimizer=Adam(lr=learning_rates[0]),
        metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100)
def create_model(num_features):
    """Build an uncompiled single-task MMoE model.

    :param num_features: int, dimensionality of the input feature vector.
    :return: an uncompiled Keras Model with one 1-unit linear output named 'y0'.
    """
    # Local imports keep keras/mmoe out of module import time.
    # (Removed the unused `Sequential` import from the original.)
    from keras.layers import Input, Dense
    from keras.models import Model
    from mmoe import MMoE
    from keras.initializers import VarianceScaling

    # Shared input layer.
    input_layer = Input(shape=(num_features,))

    # Single-task MMoE over the shared input.
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=1)(input_layer)

    # One relu tower plus a linear regression head for the single task.
    tower_layer = Dense(units=8,
                        activation='relu',
                        kernel_initializer=VarianceScaling())(mmoe_layers)
    output_layer = Dense(units=1,
                         name='y0',
                         activation='linear',
                         kernel_initializer=VarianceScaling())(tower_layer)

    return Model(inputs=[input_layer], outputs=[output_layer])
Exemple #4
0
def DeepFM(linear_feature_columns,
           dnn_feature_columns,
           embedding_size=8,
           use_fm=True,
           only_dnn=False,
           dnn_hidden_units=(128, 128),
           l2_reg_linear=0.00001,
           l2_reg_embedding=0.00001,
           l2_reg_dnn=0,
           init_std=0.0001,
           seed=1024,
           dnn_dropout=0,
           dnn_activation='relu',
           dnn_use_bn=False,
           task='binary'):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param embedding_size: positive integer,sparse feature embedding_size
    :param use_fm: bool,use FM part or not
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
    :return: A Keras model instance.

    NOTE(review): despite the generic DeepFM signature, this variant feeds the
    DNN output through a two-task MMoE and returns a model with two 'binary'
    PredictionLayer heads named 'finish' and 'like'. The ``use_fm``,
    ``only_dnn`` and ``task`` arguments are currently ignored — the
    output-selection logic below is commented out.
    """

    ## Create one Input[1,] per feature; features ==> {'feature1': Input[1,], ...}
    features = build_input_features(linear_feature_columns +
                                    dnn_feature_columns)

    ## [Input1, Input2, ... ]
    inputs_list = list(features.values())

    sparse_embedding_list, dense_value_list = input_from_feature_columns(
        features, dnn_feature_columns, embedding_size, l2_reg_embedding,
        init_std, seed)
    ## [embedding layer for feature_1, wired to feature_1's Input[1,] layer, ...], [Input[1,] layer for each dense feature, ...]

    linear_logit = get_linear_logit(features,
                                    linear_feature_columns,
                                    l2_reg=l2_reg_linear,
                                    init_std=init_std,
                                    seed=seed,
                                    prefix='linear')

    # linear_logit_finish = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std,
    #                                 seed=seed, prefix='linear_finish')

    # linear_logit_like = get_linear_logit(features, linear_feature_columns, l2_reg=l2_reg_linear, init_std=init_std,
    #                                 seed=seed, prefix='linear_like')
    ## Linear transformation layer, no activation function

    fm_input = concat_fun(sparse_embedding_list, axis=1)
    ## Sparse embedding layers concatenated together

    fm_logit = FM()(fm_input)
    # fm_logit_finish = FM()(fm_input)
    # fm_logit_like = FM()(fm_input)

    ## Second-order part of FM; the first-order term and bias are not included

    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)

    # dnn_out = Dense(128, dnn_activation, l2_reg_dnn, dnn_dropout,
    #               dnn_use_bn, seed)(dnn_input)

    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  dnn_use_bn, seed)(dnn_input)
    # Two task-specific representations derived from the shared DNN output.
    mmoe_out = MMoE(units=16, num_experts=8, num_tasks=2)(dnn_out)

    [finish_in, like_in] = mmoe_out

    # Two-layer tower and a bias-free linear logit for the 'finish' task.
    finish_out_1 = Dense(128,
                         dnn_activation,
                         kernel_regularizer=l2(l2_reg_dnn))(finish_in)
    finish_out = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(finish_out_1)
    finish_logit = tf.keras.layers.Dense(1, use_bias=False,
                                         activation=None)(finish_out)

    # Two-layer tower and a bias-free linear logit for the 'like' task.
    like_out_1 = Dense(128, dnn_activation,
                       kernel_regularizer=l2(l2_reg_dnn))(like_in)
    like_out = Dense(128, dnn_activation,
                     kernel_regularizer=l2(l2_reg_dnn))(like_out_1)

    # finish_logit_stop_grad = Lambda(lambda x: stop_gradient(x))(finish_out)
    # like_out_finish = concat_fun([like_out, finish_logit_stop_grad])

    like_logit = tf.keras.layers.Dense(1, use_bias=False,
                                       activation=None)(like_out)

    # NOTE(review): dnn_logit is built but unused — the selection chain below
    # that would have consumed it is commented out.
    dnn_logit = tf.keras.layers.Dense(1, use_bias=False,
                                      activation=None)(dnn_out)
    # if len(dnn_hidden_units) > 0 and only_dnn == True:
    #     final_logit = dnn_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == False:  # only linear
    #     final_logit = linear_logit
    # elif len(dnn_hidden_units) == 0 and use_fm == True:  # linear + FM
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == False:  # linear + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, dnn_logit])
    # elif len(dnn_hidden_units) > 0 and use_fm == True:  # linear + FM + Deep
    #     final_logit = tf.keras.layers.add([linear_logit, fm_logit, dnn_logit])
    # else:
    #     raise NotImplementedError

    # Each task logit = shared linear part + shared FM part + its own tower logit.
    finish_logit = tf.keras.layers.add([linear_logit, fm_logit, finish_logit])
    like_logit = tf.keras.layers.add([linear_logit, fm_logit, like_logit])

    output_finish = PredictionLayer('binary', name='finish')(finish_logit)
    output_like = PredictionLayer('binary', name='like')(like_logit)
    model = tf.keras.models.Model(inputs=inputs_list,
                                  outputs=[output_finish, output_like])
    return model
Exemple #5
0
    dnn_feature_columns,
    embedding_size=8,
    l2_reg=0.00001,
    init_std=0.0001,
    seed=1024)

# Combine sparse embeddings and dense values into a single DNN input tensor.
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
# print('test_model_input info')
# print(len(test_model_input))
# print(type(test_model_input[0]))
# print(len(test_model_input[0]))
# print('num_features:',len(test_model_input[0]) )

# MMoE
# Shared 128-unit relu bottleneck before the two-task mixture-of-experts layer.
mmoe_input = tf.keras.layers.Dense(units=128, activation='relu')(dnn_input)
mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(mmoe_input)

print('passed')
output_layers = []

# Build tower layer from MMoE layer
# One 128-unit relu tower plus a 1-unit sigmoid head per task ('finish', 'like').
output_info = ['finish', 'like']
for index, task_layer in enumerate(mmoe_layers):
    tower_layer = tf.keras.layers.Dense(units=128,
                                        activation='relu')(task_layer)
    output_layer = tf.keras.layers.Dense(units=1,
                                         name=output_info[index],
                                         activation='sigmoid')(tower_layer)

    output_layers.append(output_layer)
Exemple #6
0
def train_ranking_model(df):
    """Train a 4-task MMoE ranking model on user/video interaction data.

    :param df: DataFrame with list-valued 'video_emb' and 'user_emb' columns
        (768-dim each), a 'view_count' column, and the label columns
        'user_click', 'user_rating', 'user_like', 'time_spend'.
    :return: the trained Keras Model.
    """
    label_cols = ['user_click', 'user_rating', 'user_like', 'time_spend']

    def _emb_matrix(frame, col):
        # List-valued embedding column -> dense float32 matrix.
        # (Avoids "Failed to convert a NumPy array to a Tensor" on object dtype.)
        return np.asarray(
            np.squeeze(frame[[col]].values.tolist())).astype(np.float32)

    def _inputs(frame):
        # Model inputs for one split: video emb, user emb, raw view count.
        return [_emb_matrix(frame, 'video_emb'),
                _emb_matrix(frame, 'user_emb'),
                frame[['view_count']].values
                ]  # todo: user demographics, device, time, and location

    # 70/15/15 train/val/test split.
    train, val_test = train_test_split(df, test_size=0.3)
    val, test = train_test_split(val_test, test_size=0.5)

    train_label = [train[c].values for c in label_cols]
    train_data = _inputs(train)

    validation_label = [val[c].values for c in label_cols]
    validation_data = _inputs(val)

    test_label = [test[c].values for c in label_cols]
    test_data = _inputs(test)

    print("Output is user_click, user_rating, user_like and time_spend...")
    output_info = [(1, 'user_click'), (1, 'user_rating'), (1, 'user_like'),
                   (1, 'time_spend')]

    # BUGFIX: the classification heads have a single unit, and softmax over a
    # single unit is constant 1.0, so those tasks could never learn. Use
    # sigmoid for the binary tasks; linear is kept for the regression tasks.
    output_activation = ['sigmoid', 'linear', 'sigmoid', 'linear']

    print('Training data shape = {}'.format(train.shape))
    print('Validation data shape = {}'.format(val.shape))
    print('Test data shape = {}'.format(test.shape))

    # Three inputs concatenated, then a shared ReLU before the MMoE layer.
    input_video_emb = Input(shape=(768, ))
    input_user_emb = Input(shape=(768, ))
    input_other_features = Input(shape=(1, ))
    input = Concatenate()(
        [input_video_emb, input_user_emb, input_other_features])
    input_layer = ReLU()(input)

    # MMoE layer: 8 experts, 4-unit expert outputs, 4 tasks.
    mmoe_layers = MMoE(units=4, num_experts=8, num_tasks=4)(input_layer)

    # One small relu tower plus a named head per task.
    output_layers = []
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=output_info[index][0],
                             name=output_info[index][1],
                             activation=output_activation[index],
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model: cross-entropy for binary tasks, MSE for regression tasks.
    model = Model(
        inputs=[input_video_emb, input_user_emb, input_other_features],
        outputs=output_layers)
    model.compile(loss={
        'user_click': 'binary_crossentropy',
        'user_rating': 'MSE',
        'user_like': 'binary_crossentropy',
        'time_spend': 'MSE'
    },
                  optimizer=Adam(),
                  metrics=['accuracy'])

    # Print out model architecture summary
    model.summary()

    # Fit while tracking ROC AUC on all three splits via the callback.
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              callbacks=[
                  ROCCallback(training_data=(train_data, train_label),
                              validation_data=(validation_data,
                                               validation_label),
                              test_data=(test_data, test_label))
              ],
              epochs=100)

    return model
Exemple #7
0
    features,
    dnn_feature_columns,
    embedding_size=8,
    l2_reg=0.00001,
    init_std=0.0001,
    seed=1024)

# Combine sparse embeddings and dense values into a single DNN input tensor.
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
# print('test_model_input info')
# print(len(test_model_input))
# print(type(test_model_input[0]))
# print(len(test_model_input[0]))
# print('num_features:',len(test_model_input[0]) )

# MMoE
# First-level MMoE yields 8 task representations from the shared input.
mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=8)(dnn_input)

print('passed')
# Concatenate the 8 first-level task outputs into one tensor.
mmoe_cat_layer = concat_fun(mmoe_layers)

# Second-level, two-task MMoE variant fed both the concatenated first-level
# output and the raw dnn_input.
# NOTE(review): presumably dnn_input drives the gates — confirm in MMoEdiffGate.
mmoe_high_layers = MMoEdiffGate(units=16, num_experts=8,
                                num_tasks=2)([mmoe_cat_layer, dnn_input])

output_layers = []

# Build tower layer from MMoE layer
output_info = ['finish', 'like']
for index, task_layer in enumerate(mmoe_high_layers):
    tower_layer = tf.keras.layers.Dense(units=128,
                                        activation='relu')(task_layer)
    output_layer = tf.keras.layers.Dense(units=1,
Exemple #8
0
def main():
    """Train a two-task MMoE model on features augmented with extra columns.

    Loads the prepared splits, appends per-row features read from the
    ``*_result.csv`` files via ``gd.get_list``, then builds, compiles and
    fits the model while tracking ROC AUC on every split.
    """
    # dict_* label views are unused below but kept to match
    # data_preparation()'s return signature.
    (train_data, train_label, validation_data, validation_label, test_data,
     test_label, output_info, dict_train_labels, dict_test_labels,
     dict_validation_labels) = data_preparation()

    train_data_list = gd.get_list('train_result.csv')
    test_data_list = gd.get_list('test_result.csv')
    validation_data_list = gd.get_list('validation_result.csv')

    def _augment(extra_rows, base_data):
        # Row-wise concatenation: extra csv features first, then base features.
        # zip stops at the shorter input; the original index loop assumed the
        # two sides have equal length.
        base_rows = np.array(base_data).tolist()
        return DataFrame([extra + base
                          for extra, base in zip(extra_rows, base_rows)])

    data_train = _augment(train_data_list, train_data)
    data_test = _augment(test_data_list, test_data)
    data_validation = _augment(validation_data_list, validation_data)

    num_features = data_train.shape[1]

    print('Training data shape = {}'.format(data_train.shape))
    print('Validation data shape = {}'.format(data_validation.shape))
    print('Test data shape = {}'.format(data_test.shape))

    # Shared input feeding the multi-gate mixture-of-experts layer.
    input_layer = Input(shape=(num_features, ))
    mmoe_layers = MMoE(units=4, num_experts=8, num_tasks=2)(input_layer)

    # One relu tower plus a softmax head per task, named from output_info.
    output_layers = []
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=output_info[index][0],
                             name=output_info[index][1],
                             activation='softmax',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    model.compile(loss={
        'income': 'binary_crossentropy',
        'marital': 'binary_crossentropy'
    },
                  optimizer=Adam(),
                  metrics=['accuracy'])

    # Print out model architecture summary
    model.summary()

    # Fit while tracking ROC AUC on all three splits via the callback.
    model.fit(x=data_train,
              y=train_label,
              validation_data=(data_validation, validation_label),
              callbacks=[
                  ROCCallback(training_data=(data_train, train_label),
                              validation_data=(data_validation,
                                               validation_label),
                              test_data=(data_test, test_label))
              ],
              epochs=100)
Exemple #9
0
def train_ranking_model():  # def train_ranking_model(df):
    """Train a 4-task MMoE model on feed interaction data.

    Loads the offline train / evaluate splits, extracts 512 feed-embedding
    columns plus per-behavior (feed sum, user sum) features, and fits one
    sigmoid head per task: read_comment, like, click_avatar, forward.

    :return: the trained Keras Model.
    """
    label_cols = ['read_comment', 'like', 'click_avatar', 'forward']

    def _to_f32(frame, col):
        # Single column -> flat float32 array (handles list-valued cells).
        return np.asarray(
            np.squeeze(frame[[col]].values.tolist())).astype(np.float32)

    def _features(frame):
        # 512 feed-embedding columns followed by (feed sum, user sum) pairs
        # per behavior column — order must match input_feature below.
        feats = [_to_f32(frame, 'feed_emb_' + str(i + 1)) for i in range(512)]
        for b in FEA_COLUMN_LIST:
            feats.append(_to_f32(frame, b + "sum"))
            feats.append(_to_f32(frame, b + "sum_user"))
        return feats

    train_data = process_data("offline_train")
    val_test_data = process_data("evaluate")
    val_data, test_data = train_test_split(val_test_data, test_size=0.5)

    train_data_label = [train_data[c].values for c in label_cols]
    train_data_feature = _features(train_data)

    val_data_label = [val_data[c].values for c in label_cols]
    val_data_feature = _features(val_data)

    test_data_label = [test_data[c].values for c in label_cols]
    test_data_feature = _features(test_data)

    # BUGFIX: the original message named tasks from a different example
    # (user_click, user_rating, ...); report the actual ones.
    print("Output is read_comment, like, click_avatar and forward...")
    output_info = [(1, 'read_comment'), (1, 'like'), (1, 'click_avatar'),
                   (1, 'forward')]

    # BUGFIX: softmax over a single unit is constant 1.0, so no head could
    # learn; sigmoid is the correct activation for 1-unit binary heads.
    output_activation = ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid']

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(val_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # One scalar Input per feature array, in the same order as _features().
    input_feature = []
    for i in range(512):
        input_feature.append(Input(shape=(1, )))
    for b in FEA_COLUMN_LIST:
        input_feature.append(Input(shape=(1, )))  # feed sum
        input_feature.append(Input(shape=(1, )))  # user sum
    input = Concatenate()(input_feature)
    input_layer = ReLU()(input)

    # MMoE layer: 8 experts, 4-unit expert outputs, 4 tasks.
    mmoe_layers = MMoE(units=4, num_experts=8, num_tasks=4)(input_layer)

    # One small relu tower plus a named head per task.
    output_layers = []
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=output_info[index][0],
                             name=output_info[index][1],
                             activation=output_activation[index],
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model: binary cross-entropy on every task.
    model = Model(inputs=input_feature, outputs=output_layers)
    model.compile(loss={
        'read_comment': 'binary_crossentropy',
        'like': 'binary_crossentropy',
        'click_avatar': 'binary_crossentropy',
        'forward': 'binary_crossentropy'
    },
                  optimizer=Adam(),
                  metrics=['accuracy'])

    # Print out model architecture summary
    model.summary()

    # Fit while tracking ROC AUC on all three splits via the callback.
    model.fit(x=train_data_feature,
              y=train_data_label,
              validation_data=(val_data_feature, val_data_label),
              callbacks=[
                  ROCCallback(training_data=(train_data_feature,
                                             train_data_label),
                              validation_data=(val_data_feature,
                                               val_data_label),
                              test_data=(test_data_feature, test_data_label))
              ],
              epochs=100)

    return model