Exemplo n.º 1
0
                                  embedding_dim=64)(input)
# MLP-branch embedding for the item-id column (column 1 of the shared input).
# NOTE(review): `input`, `user_emb_gmf`, `item_emb_gmf`, `user_emb_mlp` are
# defined in an earlier, truncated part of this example — confirm against
# the full snippet.
item_emb_mlp = LatentFactorMapper(column_id=1,
                                  num_of_entities=item_num,
                                  embedding_dim=64)(input)

# Step 2.2: Setup interactors to handle models
# GMF branch: inner product of the two GMF embeddings;
# MLP branch: both MLP embeddings through a multi-layer perceptron.
innerproduct_output = InnerProductInteraction()([user_emb_gmf, item_emb_gmf])
mlp_output = MLPInteraction()([user_emb_mlp, item_emb_mlp])

# Step 2.3: Setup optimizer to handle the target task
# Combine both branches into a single rating-prediction head.
output = RatingPredictionOptimizer()([innerproduct_output, mlp_output])
model = RPRecommender(inputs=input, outputs=output)

# Step 3: Build the searcher, which provides search algorithm
searcher = Search(model=model,
                  tuner='greedy',  # random, greedy
                  tuner_params={"max_trials": 5, 'overwrite': True}
                  )

# Step 4: Use the searcher to search the recommender
# Early stopping on validation loss keeps each of the 5 trials short.
searcher.search(x=[train_X_categorical],
                y=train_y,
                x_val=[val_X_categorical],
                y_val=val_y,
                objective='val_mse',
                batch_size=1024,
                epochs=1,
                callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=1)])
# Report mean squared error of the searched model on the validation split.
logger.info('Validation Accuracy (mse): {}'.format(searcher.evaluate(x=val_X_categorical,
                                                                     y_true=val_y)))

# Step 5: Evaluate the searched model
Exemplo n.º 2
0
# Step 2.2: Setup interactors to handle models
# Self-attention lets the dense and sparse feature embeddings attend to each
# other; a bottom MLP processes the dense embeddings alone, and a top MLP
# fuses both intermediate outputs.
attention_output = SelfAttentionInteraction()(
    [dense_feat_emb, sparse_feat_emb])
bottom_mlp_output = MLPInteraction()([dense_feat_emb])
top_mlp_output = MLPInteraction()([attention_output, bottom_mlp_output])

# Step 2.3: Setup optimizer to handle the target task
# CTR head: binary click prediction from the top MLP output.
output = CTRPredictionOptimizer()(top_mlp_output)
model = CTRRecommender(inputs=[dense_input_node, sparse_input_node],
                       outputs=output)

# Step 3: Build the searcher, which provides search algorithm
searcher = Search(
    model=model,
    tuner='random',
    tuner_params={
        'max_trials': 2,
        'overwrite': True
    },
)

# Step 4: Use the searcher to search the recommender
# NOTE(review): this call is truncated in the extracted snippet — the
# closing `])` of the callbacks list is missing from the source dump.
searcher.search(x=[train_X_numerical, train_X_categorical],
                y=train_y,
                x_val=[val_X_numerical, val_X_categorical],
                y_val=val_y,
                objective='val_BinaryCrossentropy',
                batch_size=10000,
                epochs=2,
                callbacks=[
                    tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=1)
Exemplo n.º 3
0
    # Select the model builder requested on the command line.
    # NOTE(review): independent `if`s (not elif) with no fallback — if
    # args.model matches none of these, `model` may be unbound below
    # unless it was assigned earlier in the enclosing (unseen) function.
    if args.model == 'mf':
        model = build_mf(user_num, item_num)
    if args.model == 'mlp':
        model = build_mlp(user_num, item_num)
    if args.model == 'gmf':
        model = build_gmf(user_num, item_num)
    if args.model == 'neumf':
        model = build_neumf(user_num, item_num)
    if args.model == 'autorec':
        model = build_autorec(user_num, item_num)

    # search and predict.
    searcher = Search(
        model=model,
        tuner=args.search,  ## hyperband, bayesian
        tuner_params={
            'max_trials': args.trials,
            'overwrite': True
        })

    # Time the search so total runtime can be reported afterwards.
    start_time = time.time()
    # NOTE(review): this call is truncated in the extracted snippet — the
    # closing `])` of the callbacks list is missing from the source dump.
    searcher.search(x=train_X,
                    y=train_y,
                    x_val=val_X,
                    y_val=val_y,
                    objective='val_mse',
                    batch_size=args.batch_size,
                    epochs=args.epochs,
                    callbacks=[
                        tf.keras.callbacks.EarlyStopping(
                            monitor='val_loss', patience=args.early_stop)
Exemplo n.º 4
0
                                  id_num=10000,
                                  embedding_dim=64)(input)
# MLP-branch item embedding (feature column 1, 10000-id vocabulary).
# NOTE(review): `input`, `user_emb_gmf`, `item_emb_gmf`, `user_emb_mlp` are
# defined in an earlier, truncated part of this example.
item_emb_mlp = LatentFactorMapper(feat_column_id=1,
                                  id_num=10000,
                                  embedding_dim=64)(input)
# NOTE(review): "innerporduct" looks like a typo for "innerproduct" — verify
# against ElementwiseInteraction's accepted elementwise_type values before
# changing it; the library itself may use this exact spelling.
innerproduct_output = ElementwiseInteraction(elementwise_type="innerporduct")(
    [user_emb_gmf, item_emb_gmf])
mlp_output = MLPInteraction()([user_emb_mlp, item_emb_mlp])
# Point-wise (binary) objective combining the GMF and MLP branches.
output = PointWiseOptimizer()([innerproduct_output, mlp_output])
model = CTRRecommender(inputs=input, outputs=output)

# AutoML search and predict.
searcher = Search(
    model=model,
    tuner='random',
    tuner_params={
        'max_trials': 10,
        'overwrite': True
    },
)
# Early stopping on validation loss within each of the 10 trials.
searcher.search(x=train_X,
                y=train_y,
                x_val=val_X,
                y_val=val_y,
                objective='val_BinaryCrossentropy',
                batch_size=256,
                epochs=20,
                callbacks=[
                    tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=1)
                ])
logger.info('Predicted Ratings: {}'.format(searcher.predict(x=val_X)))
Exemplo n.º 5
0
# Build the rating-prediction pipeline: two latent-factor embeddings feed a
# stack of four HyperInteraction layers, where every layer also receives the
# outputs of all earlier layers plus both raw embeddings.
pipeline_input = Input(shape=[2])
user_emb = LatentFactorMapper(feat_column_id=0,
                              id_num=user_num,
                              embedding_dim=64)(pipeline_input)
item_emb = LatentFactorMapper(feat_column_id=1,
                              id_num=item_num,
                              embedding_dim=64)(pipeline_input)

# Densely-connected interaction stack: layer k sees layers 1..k-1 and the
# two embeddings, exactly as the unrolled output1..output4 chain would.
interactions = []
for _ in range(4):
    interactions.append(
        HyperInteraction()(interactions + [user_emb, item_emb]))
output = RatingPredictionOptimizer()(interactions[-1])
model = RPRecommender(inputs=pipeline_input, outputs=output)

# AutoML search and predict.
searcher = Search(model=model,
                  tuner='random',  ## hyperband, bayesian
                  tuner_params={'max_trials': 100, 'overwrite': True},)
searcher.search(x=train_X,
                y=train_y,
                x_val=val_X,
                y_val=val_y,
                objective='val_mse',
                batch_size=1024,
                epochs=10,
                callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=1)])
logger.info('Predicting Val Dataset Accuracy (mse): {}'.format(searcher.evaluate(x=val_X, y_true=val_y)))
logger.info('Predicting Test Dataset Accuracy (mse): {}'.format(searcher.evaluate(x=test_X, y_true=test_y)))
Exemplo n.º 6
0
# MLP-branch user/item embeddings over the shared categorical input
# (columns 0 and 1, each with a 10000-id vocabulary).
# NOTE(review): `input`, `user_emb_gmf`, `item_emb_gmf` come from an
# earlier, truncated part of this example.
user_emb_mlp = LatentFactorMapper(column_id=0,
                                  num_of_entities=10000,
                                  embedding_dim=64)(input)
item_emb_mlp = LatentFactorMapper(column_id=1,
                                  num_of_entities=10000,
                                  embedding_dim=64)(input)
# GMF branch (inner product) and MLP branch feed a binary CTR head.
innerproduct_output = InnerProductInteraction()([user_emb_gmf, item_emb_gmf])
mlp_output = MLPInteraction()([user_emb_mlp, item_emb_mlp])
output = CTRPredictionOptimizer()([innerproduct_output, mlp_output])
model = CTRRecommender(inputs=input, outputs=output)

# AutoML search and predict.
searcher = Search(
    model=model,
    tuner='random',
    tuner_params={
        'max_trials': 10,
        'overwrite': True
    },
)
# Train/validate on the categorical slice of the Criteo data only.
searcher.search(x=[criteo.get_x_categorical(train_X)],
                y=train_y,
                x_val=[criteo.get_x_categorical(val_X)],
                y_val=val_y,
                objective='val_BinaryCrossentropy',
                batch_size=256,
                epochs=20,
                callbacks=[
                    tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=1)
                ])
# NOTE(review): this logging call is truncated in the extracted snippet.
logger.info('Predicted Ratings: {}'.format(
Exemplo n.º 7
0
# build the pipeline.
# Plain GMF-style recommender: inner product of user and item latent
# factors feeding a rating-prediction head.
input = Input(shape=[2])
user_emb = LatentFactorMapper(feat_column_id=0,
                              id_num=user_num,
                              embedding_dim=64)(input)
item_emb = LatentFactorMapper(feat_column_id=1,
                              id_num=item_num,
                              embedding_dim=64)(input)
# NOTE(review): "innerporduct" looks like a typo for "innerproduct" — verify
# against ElementwiseInteraction's accepted elementwise_type values before
# changing it; the library itself may use this exact spelling.
output = ElementwiseInteraction(elementwise_type="innerporduct")(
    [user_emb, item_emb])
output = RatingPredictionOptimizer()(output)
model = RPRecommender(inputs=input, outputs=output)

# AutoML search and predict
searcher = Search(
    model=model,
    tuner='greedy',  # hyperband, greedy, bayesian
    tuner_params={"max_trials": 5})

# Early stopping on validation loss within each of the 5 trials.
searcher.search(x=train_X,
                y=train_y,
                x_val=val_X,
                y_val=val_y,
                objective='val_mse',
                batch_size=1024,
                epochs=10,
                callbacks=[
                    tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=1)
                ])
logger.info('Predicting Val Dataset Accuracy (mse): {}'.format(
    searcher.evaluate(x=val_X, y_true=val_y)))
Exemplo n.º 8
0
    # Select the interaction-graph builder requested on the command line.
    # NOTE(review): 'crossnet' calls build_neumf and 'autoint' calls
    # build_autorec — these look like copy-paste mistakes (expected
    # build_crossnet / build_autoint); confirm against the builder
    # definitions before changing. Also: independent `if`s with no
    # fallback, so an unrecognized args.model leaves `output` unbound.
    if args.model == 'deepfm':
        output = build_deepfm(emb_dict)
    if args.model == 'crossnet':
        output = build_neumf(emb_dict)
    if args.model == 'autoint':
        output = build_autorec(emb_dict)
    if args.model == 'autorec':
        output = build_autorec(emb_dict)

    # Step 2.3: Setup optimizer to handle the target task
    # Binary CTR head on top of whichever interaction graph was built.
    output = CTRPredictionOptimizer()(output)
    model = CTRRecommender(inputs=input, outputs=output)

    # Step 3: Build the searcher, which provides search algorithm
    searcher = Search(model=model,
                      tuner=args.search,
                      tuner_params={'max_trials': args.trials, 'overwrite': True}
                      )

    # Step 4: Use the searcher to search the recommender
    # Time the search so total runtime can be reported below.
    start_time = time.time()
    searcher.search(x=train_X,
                    y=train_y,
                    x_val=val_X,
                    y_val=val_y,
                    objective='val_BinaryCrossentropy',
                    batch_size=args.batch_size,
                    epochs=1,
                    callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=1)]
                    )
    end_time = time.time()
    print("running time:", end_time - start_time)