def main():
    input_params = {
        'use_text': True,  # whether to use text features
        'use_img': True,  # whether to use image features
        'target': 'isclick'  # prediction target
    }
    feature_columns, train_model_input, train_labels, test_model_input, test_labels = get_input(
        **input_params)
    iterations = 10  # run several times and average the results
    for i in range(iterations):
        print(f'iteration {i + 1}/{iterations}')

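        # The same feature_columns are passed twice below, apparently serving
        # as both the linear- and DNN-side columns (DeepCTR-style signature).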
        model = DeepFM(feature_columns,
                       feature_columns,
                       use_image=input_params["use_img"],
                       use_text=input_params["use_text"],
                       embedding_size=10)
        model.compile("adagrad",
                      "binary_crossentropy",
                      metrics=["binary_crossentropy"])

        history = model.fit(train_model_input,
                            train_labels,
                            batch_size=4096,
                            epochs=1,
                            verbose=1,
                            validation_data=(test_model_input, test_labels))
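        # --- Added evaluation sketch (not in the original snippet) ---
        # Score the held-out split each iteration so the averaged runs report
        # test LogLoss/AUC; assumes the Keras-style predict() API and sklearn.
        from sklearn.metrics import log_loss, roc_auc_score
        pred_ans = model.predict(test_model_input, batch_size=4096)
        print('test LogLoss:', round(log_loss(test_labels, pred_ans), 4))
        print('test AUC:', round(roc_auc_score(test_labels, pred_ans), 4))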
Example #2
check_path = './save/deepfm_weight.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path,
                                                save_weights_only=True,
                                                verbose=1,
                                                period=5)  # save every 5 epochs; 'period' is deprecated in newer tf.keras (see save_freq)
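
# To restore later, load the newest checkpoint (sketch; tf.train.latest_checkpoint
# resolves the most recent checkpoint prefix in the directory):
# model.load_weights(tf.train.latest_checkpoint('./save'))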

# ------------ model evaluate ------------
METRICS = [
    tf.keras.metrics.BinaryAccuracy(name='accuracy'),
    tf.keras.metrics.Precision(name='precision'),
    tf.keras.metrics.Recall(name='recall'),
    tf.keras.metrics.AUC(name='auc'),
]

model.compile(loss=tf.keras.losses.binary_crossentropy,
              optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
              metrics=METRICS)

# ------------ early stopping ------------
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_auc',
                                                  verbose=1,
                                                  patience=10,
                                                  mode='max',
                                                  restore_best_weights=True)

model.fit(
    train_X,
    train_y,
    epochs=epochs,
    # callbacks=[checkpoint],
    callbacks=[early_stopping, checkpoint],
    validation_split=0.1,  # assumed value; EarlyStopping monitors val_auc, so validation data is required
)
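
# --- Added evaluation sketch (not in the original snippet) ---
# Reports the compiled METRICS on held-out data; test_X/test_y are assumed to
# exist alongside train_X/train_y.
results = model.evaluate(test_X, test_y, verbose=0)
for name, value in zip(model.metrics_names, results):
    print('{}: {:.4f}'.format(name, value))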
Example #3
    # Load the data and split train/test (the call was truncated in the
    # original; the function name is assumed from the surrounding arguments)
    feature_columns, train, test = create_criteo_dataset(file=file,
                                                         embed_dim=embed_dim,
                                                         read_part=read_part,
                                                         sample_num=sample_num,
                                                         test_size=test_size)
    train_X, train_y = train
    test_X, test_y = test
    # ============================Build Model==========================
    mirrored_strategy = tf.distribute.MirroredStrategy()
    with mirrored_strategy.scope():
        model = DeepFM(feature_columns,
                       hidden_units=hidden_units,
                       dnn_dropout=dnn_dropout)
        model.summary()
        # ============================Compile============================
        model.compile(loss=binary_crossentropy,
                      optimizer=Adam(learning_rate=learning_rate),
                      metrics=[AUC()])
    # ============================model checkpoint======================
    # check_path = '../save/deepfm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
    #                                                 verbose=1, period=5)
    # ==============================Fit==============================
    model.fit(
        train_X,
        train_y,
        epochs=epochs,
        callbacks=[
            EarlyStopping(monitor='val_loss',
                          patience=2,
                          restore_best_weights=True)
        ],  # checkpoint,
        batch_size=batch_size,  # assumed; closes the call truncated in the original
        validation_split=0.1,  # assumed; EarlyStopping monitors val_loss
    )
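    # --- Added evaluation sketch (not in the original snippet) ---
    # AUC is the only compiled metric, so evaluate() returns [loss, auc];
    # batch_size is assumed to be defined, as in the fit call above.
    print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size)[1])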
Example #4
    k = 10  # dimension of the FM latent vectors
    w_reg = 1e-4  # L2 regularization on the first-order weights
    v_reg = 1e-4  # L2 regularization on the latent vectors
    hidden_units = [256, 128, 64]  # DNN layer sizes
    output_dim = 1
    activation = 'relu'

    model = DeepFM(k, w_reg, v_reg, hidden_units, output_dim, activation)
    optimizer = optimizers.SGD(0.01)
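    # NOTE: this SGD optimizer is only used by the commented-out manual
    # training loop at the bottom; the Keras path below compiles with 'rmsprop'.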

    train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    train_dataset = train_dataset.batch(32).prefetch(
        tf.data.experimental.AUTOTUNE)

    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=[tf.keras.metrics.AUC(name='auc')])  # AUC (not accuracy) so the unpacking below matches
    model.fit(train_dataset, epochs=100)
    logloss, auc = model.evaluate(X_test, y_test)  # returns [loss, auc]
    print('logloss {}\nAUC {}'.format(round(logloss, 2), round(auc, 2)))
    model.summary()

    # Alternative: a manual training loop with TensorBoard logging, using the
    # SGD optimizer defined above. The closing apply_gradients step is assumed,
    # as the snippet was truncated here.
    # summary_writer = tf.summary.create_file_writer('E:\\PycharmProjects\\tensorboard')
    # for i in range(500):
    #     with tf.GradientTape() as tape:
    #         y_pre = model(X_train)
    #         loss = tf.reduce_mean(losses.binary_crossentropy(y_true=y_train, y_pred=y_pre))
    #         print(loss.numpy())
    #     with summary_writer.as_default():
    #         tf.summary.scalar("loss", loss, step=i)
    #     grad = tape.gradient(loss, model.variables)
    #     optimizer.apply_gradients(zip(grad, model.variables))