def main():
    """Train a DeepFM CTR model repeatedly so results can be averaged.

    Builds the model inputs once via ``get_input`` and then trains a fresh
    DeepFM instance for each iteration, printing per-run progress.
    """
    input_params = {
        'use_text': True,    # whether to use text features
        'use_img': True,     # whether to use image features
        'target': 'isclick'  # prediction target column
    }
    feature_columns, train_model_input, train_labels, test_model_input, test_labels = get_input(
        **input_params)
    iterations = 10  # run several times and average the results
    for i in range(iterations):
        print(f'iteration {i + 1}/{iterations}')

        # DeepFM takes linear- and DNN-side feature columns; this model
        # feeds the same column set to both parts.
        model = DeepFM(feature_columns,
                       feature_columns,
                       use_image=input_params["use_img"],
                       use_text=input_params["use_text"],
                       embedding_size=10)
        model.compile("adagrad",
                      "binary_crossentropy",
                      metrics=["binary_crossentropy"])

        # The History object returned by fit() was never used, so the
        # unused binding has been dropped.
        model.fit(train_model_input,
                  train_labels,
                  batch_size=4096,
                  epochs=1,
                  verbose=1,
                  validation_data=(test_model_input, test_labels))
# ---- Example #2 ----
model.compile(loss=tf.keras.losses.binary_crossentropy,
              optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
              metrics=METRICS)

# ---- Early stopping ----
# Stop when validation AUC has not improved for 10 epochs and roll back to
# the best weights seen so far.
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_auc',
                                                  verbose=1,
                                                  patience=10,
                                                  mode='max',
                                                  restore_best_weights=True)

# NOTE: the original also passed validation_split=0.1 here, but Keras
# ignores validation_split whenever validation_data is supplied, so the
# dead argument has been removed.
model.fit(
    train_X,
    train_y,
    epochs=epochs,
    callbacks=[early_stopping, checkpoint],
    batch_size=batch_size,
    validation_data=(val_X, val_y))  # class_weight={0: 1, 1: 3} to rebalance classes

# assumes index 1 of evaluate() output corresponds to AUC (i.e. AUC is the
# first entry of METRICS) — TODO confirm against the METRICS definition
print('test AUC: %f' % model.evaluate(test_X, test_y)[1])

# ---- Model evaluation on the test dataset ----

train_predictions_weighted = model.predict(train_X, batch_size=batch_size)
test_predictions_weighted = model.predict(test_X, batch_size=batch_size)

# ---- Confusion-matrix / ROC plotting dependencies ----
from sklearn.metrics import confusion_matrix, roc_curve
import matplotlib.pyplot as plt
# ---- Example #3 ----
    # Hyperparameters for the DeepFM model.
    v_reg = 1e-4
    hidden_units = [256, 128, 64]
    output_dim = 1
    activation = 'relu'

    model = DeepFM(k, w_reg, v_reg, hidden_units, output_dim, activation)
    # NOTE: the original also built optimizers.SGD(0.01) here, but compile()
    # below uses 'rmsprop', so the unused optimizer (and the stale
    # commented-out manual training loop that referenced it) was removed.

    # Batch and prefetch the training data for the input pipeline.
    train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    train_dataset = train_dataset.batch(32).prefetch(
        tf.data.experimental.AUTOTUNE)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_dataset, epochs=100)
    # With metrics=['accuracy'], evaluate() returns [loss, accuracy]; the
    # original printed the second value as "AUC", which was mislabeled.
    logloss, accuracy = model.evaluate(X_test, y_test)
    print('logloss {}\naccuracy {}'.format(round(logloss, 2), round(accuracy, 2)))
    model.summary()
# ---- Example #4 ----
    # ---- Build the model under a multi-GPU mirrored strategy ----
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        model = DeepFM(feature_columns,
                       hidden_units=hidden_units,
                       dnn_dropout=dnn_dropout)
        model.summary()
        # ---- Compile (must also happen inside the strategy scope) ----
        model.compile(loss=binary_crossentropy,
                      optimizer=Adam(learning_rate=learning_rate),
                      metrics=[AUC()])
    # ---- Optional model checkpointing (currently disabled) ----
    # check_path = '../save/deepfm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
    #                                                 verbose=1, period=5)
    # ---- Fit ----
    # Stop after 2 epochs without val_loss improvement, restoring the best
    # weights seen so far.
    stopper = EarlyStopping(monitor='val_loss',
                            patience=2,
                            restore_best_weights=True)
    model.fit(train_X,
              train_y,
              epochs=epochs,
              callbacks=[stopper],  # checkpoint could be appended here
              batch_size=batch_size,
              validation_split=0.1)
    # ---- Test ----
    print('test AUC: %f' %
          model.evaluate(test_X, test_y, batch_size=batch_size)[1])
# ---- Example #5 ----
                                                         embed_dim=embed_dim,
                                                         read_part=read_part,
                                                         sample_num=sample_num,
                                                         test_size=test_size)
    # Unpack the prepared train/test splits.
    X_train, y_train = train
    X_test, y_test = test
    # ---- Build the model ----
    model = DeepFM(feature_columns,
                   k=k,
                   hidden_units=hidden_units,
                   dnn_dropout=dnn_dropout)
    model.summary()
    # ---- Optional model checkpointing (currently disabled) ----
    # check_path = '../save/deepfm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
    #                                                 verbose=1, period=5)
    # ---- Compile ----
    model.compile(loss=binary_crossentropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])
    # ---- Fit ----
    model.fit(X_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              validation_split=0.1)
    # ---- Test ----
    test_auc = model.evaluate(X_test, y_test)[1]
    print('test AUC: %f' % test_auc)