Example #1
}

if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 100

    # Optimize
    best = fmin(fn=objective,
                space=item_cbf_space,
                algo=hp.tpe.suggest,
                max_evals=MAX_EVALS,
                trials=bayes_trials,
                verbose=True,
                points_to_evaluate=[{
                    "topK": 1,
                    "shrink": 13,
                    "similarity": "cosine",
                    "normalize": True
                }])

    ### best will return the best hyperparameter set

    params = space_eval(item_cbf_space, best)

    print("Best parameters:", params)
    RunRecommender.evaluate_on_test_set(ItemCBF,
                                        params,
                                        Kfold=4,
                                        parallel_fit=True)
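
# --- Hedged aside (not part of the original script): a minimal, self-contained
# hyperopt run illustrating the pattern above. points_to_evaluate takes a
# *list* of parameter dicts, hp.choice entries in those dicts are given by
# option index, and fmin returns raw values that space_eval decodes back into
# actual parameter values. toy_space/toy_objective are made up for this demo.
import hyperopt as hp
from hyperopt import fmin, space_eval

toy_space = {
    "topK": hp.hp.quniform('topK', 0, 1000, 5),
    "similarity": hp.hp.choice('similarity', ["cosine", "jaccard"]),
}

def toy_objective(params):
    # Stand-in loss: prefer small topK (purely illustrative)
    return params["topK"] / 1000.0

# No explicit Trials object here: fmin only honors points_to_evaluate when it
# builds its own Trials.
best = fmin(fn=toy_objective, space=toy_space, algo=hp.tpe.suggest,
            max_evals=10, verbose=False,
            points_to_evaluate=[{"topK": 5.0, "similarity": 0}])  # 0 -> "cosine"
print(space_eval(toy_space, best))  # decoded, e.g. {'similarity': 'cosine', 'topK': 5.0}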
Example #2
    best = fmin(fn=objective,
                space=search_space,
                algo=hp.tpe.suggest,
                max_evals=MAX_EVALS,
                trials=bayes_trials,
                verbose=True,
                points_to_evaluate=opt_eval)

    ### best will return the best hyperparameter set

    params = space_eval(search_space, best)
    params["topK"] = int(params["topK"])

    print("############### Best parameters ###############")
    print(params)

    print("############### Test set performance ###############")
    RunRecommender.evaluate_on_test_set(AlternatingLeastSquare,
                                        params,
                                        parallel_fit=False,
                                        Kfold=N_K_FOLD,
                                        parallelize_evaluation=True,
                                        user_group="warm")

    computer_sleep(verbose=False)
        'topK': 56
    }

    # RunRecommender.evaluate_on_validation_set(RP3betaRecommender, opt)
    # Optimize
    best = fmin(fn=objective,
                space=als_space,
                algo=hp.tpe.suggest,
                max_evals=MAX_EVALS,
                trials=bayes_trials,
                verbose=True,
                points_to_evaluate=[opt])

    ### best will return the best hyperparameter set

    best = space_eval(als_space, best)

    print("############### Best parameters ###############")

    print(best)
    RunRecommender.evaluate_on_test_set(RP3betaRecommender,
                                        best,
                                        Kfold=N_KFOLD,
                                        parallelize_evaluation=True,
                                        user_group="warm")

    computer_sleep(verbose=False)
    """ 
    MAP to beat: .03581871259565834 val
    .041 test
    """
Example #4
        if exclude_seen:
            unseen_items_mask = np.in1d(self.popular_items,
                                        self.URM_CSR[user_id].indices,
                                        assume_unique=True,
                                        invert=True)

            unseen_items = self.popular_items[unseen_items_mask]

            recommended_items = unseen_items[0:at]

        else:
            recommended_items = self.popular_items[0:at]

        return recommended_items


if __name__ == "__main__":

    # evaluator = Evaluator()
    # evaluator.split_data_randomly_2()

    top_popular = TopPopRecommender

    map10 = RunRecommender.evaluate_on_test_set(top_popular, {},
                                                Kfold=4,
                                                parallel_fit=True,
                                                user_group="cold")

    #print('{0:.128f}'.format(map10))
    print(map10)
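
# --- Hedged aside (toy data, not from the repo): how the np.in1d mask in
# TopPopRecommender.recommend drops items the user has already seen.
import numpy as np

popular_items = np.array([42, 7, 19, 3, 88])  # item ids sorted by popularity
seen_items = np.array([7, 3])                 # the user's profile (URM row indices)

unseen_mask = np.in1d(popular_items, seen_items, assume_unique=True, invert=True)
print(popular_items[unseen_mask][:3])  # [42 19 88] -> top-`at` unseen items
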
if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()

    # Optimize
    best = fmin(fn=objective,
                space=slim_bpr_space,
                algo=hp.tpe.suggest,
                max_evals=MAX_EVALS,
                trials=bayes_trials,
                verbose=True)

    ### best will return the best hyperparameter set

    params = space_eval(slim_bpr_space, best)

    params["topK"] = int(params["topK"])
    params["batch_size"] = int(params["batch_size"])
    params["random_seed"] = 1234
    params["epochs"] = 30

    print("############### Best parameters ###############")

    print(params)
    RunRecommender.evaluate_on_test_set(SLIM_BPR_Cython,
                                        params,
                                        Kfold=N_KFOLD,
                                        parallelize_evaluation=False,
                                        user_group="warm")
Example #6
    'item_cbf_weight': 0.037666643326618406,
    'item_cf_weight': 0.014294955186782246,
    'rp3_weight': 0.9314974601074552
}

# Optimize
best = fmin(fn=objective,
            space=search_space,
            algo=hp.tpe.suggest,
            max_evals=MAX_EVALS,
            trials=bayes_trials,
            verbose=True,
            points_to_evaluate=[opt, new_opt, last_opt])

best = space_eval(search_space, best)

# best will return the best hyperparameter set

print("\n############## Best Parameters ##############\n")
print(best, "\n\nEvaluating on test set now...")

RunRecommender.evaluate_on_test_set(HybridElasticNetICFUCFRP3Beta,
                                    best,
                                    Kfold=N_KFOLD)

####################################################
# Test Map to beat 0.05112213282549001             #
# MAP-10 score: 0.05107393648464094 on kfold, k = 4#
####################################################

# {'SLIM_weight': 0.7989266787188458, 'item_cbf_weight': 0.03258554983815878, 'item_cf_weight': 0.0077609799300920445, 'rp3_weight': 0.6740989817682256}
    "topK": hp.hp.quniform('topK', 0, 1000, 5),
    "shrink": hp.hp.uniformint('shrink', 0, 50),
    "bm_25_norm": hp.hp.choice('bm_25_norm', [True, False]),
    "normalize": hp.hp.choice('normalize', [True, False]),
    "similarity": hp.hp.choice('similarity', ["cosine", "jaccard", "dice"])
}


if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 100

    item_cf_parameters = {'shrink': 46.0, 'similarity': "jaccard", 'topK': 8}

    # Optimize
    best = fmin(fn=objective, space=search_space, algo=hp.tpe.suggest,
                max_evals=MAX_EVALS, trials=bayes_trials, verbose=True, points_to_evaluate=[item_cf_parameters])

    params = space_eval(search_space, best)

    ### best will return the best hyperparameter set


    print("Best parameters:")
    print(params)
    params["topK"] = int(params["topK"])


    RunRecommender.evaluate_on_test_set(ItemCollaborativeFilter, params, parallel_fit=False, Kfold=N_K_FOLD, parallelize_evaluation=True)
# last_opt = {'SSLIM_weight': 0.8737840927419455, 'item_cbf_weight': 0.037666643326618406, 'item_cf_weight': 0.014294955186782246, 'rp3_weight': 0.9314974601074552, 'user_cbf_weight': 0, 'user_cf_weight': 0}
#
opt = {'AlternatingLeastSquare': 0.07611985905191196, 'ItemCBF': 0.017561491230314447, 'ItemCollaborativeFilter': 0.0341817493248531, 'RP3betaRecommender': 0.9713719890744753, 'SLIMElasticNetRecommender': 0.9974897962716185, 'SLIM_BPR_Recommender': 0.8633266021278376}

# Optimize
best = fmin(fn=objective, space=search_space, algo=hp.tpe.suggest,
            max_evals=MAX_EVALS, trials=bayes_trials, verbose=True, points_to_evaluate=[opt])

best = space_eval(search_space, best)

# best will return the best hyperparameter set

print("\n############## Best Parameters ##############\n")
print(best, "\n\nEvaluating on test set now...")

RunRecommender.evaluate_on_test_set(Hybrid, {"weights": best}, Kfold=N_KFOLD,
                                    init_params={"recommenders": [MultiThreadSLIM_ElasticNet, RP3betaRecommender, ItemCBF, AlternatingLeastSquare, SLIM_BPR_Cython]},
                                    parallelize_evaluation=False,
                                    parallel_fit=False)

computer_sleep(verbose=False)


####################################################
# Test Map to beat 0.05112213282549001             #
# MAP-10 score: 0.05107393648464094 on kfold, k = 4#
####################################################

new_best = {'ItemCBF': 0.013769403495491125, 'ItemCollaborativeFilter': 0.015447034894805844, 'RP3betaRecommender': 0.9945281573130214, 'SLIMElasticNetRecommender': 0.7629862396511091}

# {'SLIM_weight': 0.7989266787188458, 'item_cbf_weight': 0.03258554983815878, 'item_cf_weight': 0.0077609799300920445, 'rp3_weight': 0.6740989817682256}
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == '__main__':

    new_params_cosine = {
        'normalize': False,
        'shrink': 50,
        'suppress_interactions': False,
        'topK': 540,
        'top_pop_weight': 0.0020283643469209793
    }
    params_test_bm_25 = {'bm_25_norm': True}
    RunRecommender.evaluate_on_test_set(HybridUserCBFRegionalTopPop, {},
                                        Kfold=10,
                                        user_group="cold",
                                        parallelize_evaluation=True,
                                        parallel_fit=False)
    RunRecommender.evaluate_on_test_set(HybridUserCBFRegionalTopPop,
                                        new_params_cosine,
                                        Kfold=10,
                                        user_group="cold",
                                        parallelize_evaluation=True,
                                        parallel_fit=False)
Example #10
def objective(params):
    loss = - RunRecommender.evaluate_on_validation_set(UserCollaborativeFilter, params, Kfold=N_K_FOLD, parallel_fit=False, parallelize_evaluation=True, user_group="warm")
    return loss

search_space = {
    "topK": hp.hp.quniform('topK', 0, 1000, 5),
    "shrink": hp.hp.uniformint('shrink', 0, 50),
    "bm_25_norm": hp.hp.choice('bm_25_norm', [True, False]),
    "normalize": hp.hp.choice('normalize', [True, False]),
    "similarity": hp.hp.choice('similarity', ["cosine", "jaccard", "dice"])
}


if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 100

    # Optimize
    best = fmin(fn=objective, space=search_space, algo=hp.tpe.suggest,
                max_evals=MAX_EVALS, trials=bayes_trials, verbose=True)

    ### best will return the best hyperparameter set

    params = space_eval(search_space, best)
    print("############### Best parameters ###############")
    params["topK"] = int(params["topK"])
    print(params)

    print("############### Test set performance ###############")
    RunRecommender.evaluate_on_test_set(UserCollaborativeFilter, params, parallel_fit=False, Kfold=N_K_FOLD, parallelize_evaluation=True, user_group="warm")
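
    # --- Hedged addition (not in the original script): the Trials object also
    # records the full optimization history, which can be inspected afterwards.
    print("Evaluations run:", len(bayes_trials.trials))
    print("Best loss:", min(bayes_trials.losses()))
    print("Best trial result:", bayes_trials.best_trial['result'])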
Example #11
        "top_pop_weight": 0.02139131367609725,
        "topK": 765,
        "shrink": 6,
        "normalize": True,
        "similarity": "jaccard",
        "suppress_interactions": False
    }

    # Optimize
    best = fmin(fn=objective,
                space=user_cbf_space,
                algo=hp.tpe.suggest,
                max_evals=MAX_EVALS,
                trials=bayes_trials,
                verbose=True,
                points_to_evaluate=[previous, last])

    ### best will return the best hyperparameter set

    print(best)
    params = space_eval(user_cbf_space, best)
    print(params)
    params["topK"] = int(params["topK"])

    print("############### Performance on test set #################")
    MAP = RunRecommender.evaluate_on_test_set(HybridUserCBFRegionalTopPop,
                                              params,
                                              Kfold=10,
                                              user_group="cold",
                                              parallel_fit=False,
                                              parallelize_evaluation=True)
Example #12
    def filter_seen(self, user_id, scores):
        start_pos = self.URM_train.indptr[user_id]
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == "__main__":
    # evaluator = Evaluator()
    # evaluator.split_data_randomly_2()
    ubcbf = UserCBF
    params = {
        'normalize': True,
        'shrink': 1.0,
        'similarity': "dice",
        'suppress_interactions': True,
        'topK': 93 * 5
    }
    # ubcbf.helper.split_ucm_region()
    RunRecommender.evaluate_on_test_set(ubcbf,
                                        params,
                                        user_group="cold",
                                        parallel_fit=True,
                                        Kfold=4)
    # print('{0:.128f}'.format(map10))
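
# --- Hedged aside (toy data, not from the repo): the CSR indexing used by
# filter_seen. For a scipy CSR matrix, indptr[u]:indptr[u+1] delimits row u,
# and the indices in that slice are the column (item) ids of its nonzeros.
import numpy as np
from scipy.sparse import csr_matrix

URM = csr_matrix(np.array([[1, 0, 1, 0],
                           [0, 1, 0, 0]]))
user_id = 0
user_profile = URM.indices[URM.indptr[user_id]:URM.indptr[user_id + 1]]
print(user_profile)  # [0 2] -> items seen by user 0

scores = np.array([0.9, 0.4, 0.8, 0.1])
scores[user_profile] = -np.inf  # seen items can never be recommended
print(scores)                   # [-inf  0.4 -inf  0.1]
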
    def filter_seen(self, user_id, scores):
        """Remove items that are in the user profile from recommendations

        :param user_id: array of user ids for which to compute recommendations
        :param scores: array containing the scores for each object"""

        start_pos = self.URM_train.indptr[user_id]
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == "__main__":
    # Train and test data are now loaded by the helper

    weights = {
        'SLIM_weight': 0.9313349587356776,
        'als_weight': 0.7463720610782647,
        'item_cf_weight': 0.335817947135043
    }
    hybrid_ucficf = HybridALSElasticNetCF

    # Evaluation is performed by RunRecommender
    RunRecommender.evaluate_on_test_set(hybrid_ucficf, weights)

    # RunRecommender.run(hybrid_ucficf, weights)
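
# --- Hedged aside (illustrative only; the real HybridALSElasticNetCF is not
# shown here): the weighted-sum idea behind the `weights` dict above, combining
# per-recommender score vectors for one user into a single hybrid ranking.
import numpy as np

slim_scores = np.array([0.2, 0.9, 0.1])     # toy per-item scores
als_scores = np.array([0.5, 0.3, 0.4])
item_cf_scores = np.array([0.1, 0.2, 0.8])

hybrid_scores = (0.9313349587356776 * slim_scores
                 + 0.7463720610782647 * als_scores
                 + 0.335817947135043 * item_cf_scores)
print(np.argsort(hybrid_scores)[::-1])  # items ranked by combined score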
Example #14
        # Compute scores of the recommendation
        scores = self.compute_scores(user_id)

        # Filter to exclude already seen items
        if exclude_seen:
            scores = self.filter_seen(user_id, scores)
        recommended_items = np.argsort(scores)
        recommended_items = np.flip(recommended_items, axis=0)
        return recommended_items[:at]

    def filter_seen(self, user_id, scores):
        start_pos = self.URM_train.indptr[user_id]
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == "__main__":

    # evaluator.split_data_randomly()

    parameters = {"topK": 4, "shrink": 8}
    cbf_recommender = AssetCBF

    RunRecommender.evaluate_on_test_set(cbf_recommender, parameters)
    # RunRecommender.run(cbf_recommender)
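
# --- Hedged aside (toy scores, not from the repo): the argsort + flip ranking
# used in recommend() above. np.argsort sorts ascending, so flipping it yields
# item ids ordered from highest to lowest score.
import numpy as np

scores = np.array([0.1, 0.7, -np.inf, 0.4])  # -inf marks a filtered seen item
ranking = np.flip(np.argsort(scores), axis=0)
print(ranking[:3])  # [1 3 0] -> the top-3 recommendation
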
def objective(params):
    print("Current parameters:")
    print(params)
    loss = -RunRecommender.evaluate_on_test_set(PureSVDRecommender, params)
    return loss

    def filter_seen(self, user_id, scores):
        """Remove items that are in the user profile from recommendations

        :param user_id: array of user ids for which to compute recommendations
        :param scores: array containing the scores for each object"""

        start_pos = self.URM_train.indptr[user_id]
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == "__main__":
    # Train and test data are now loaded by the helper

    #weights = {'SLIM_weight': 0.8950096358670148, 'item_cbf_weight': 0.034234727663263104, 'item_cf_weight': 0.011497379340447589, 'rp3_weight': 0.8894480634395567}

    #weights2 = {'SLIM_weight': 0.8525330515257261, 'item_cbf_weight': 0.03013686377319209, 'item_cf_weight': 0.01129668459365759, 'rp3_weight': 0.9360587800999112}
    weights = {'SLIM_weight': 0.8737840927419455, 'item_cbf_weight': 0.037666643326618406, 'item_cf_weight': 0.014294955186782246, 'rp3_weight': 0.9314974601074552}

    hybrid_ucficf = HybridElasticNetICFUCFRP3Beta

    # Evaluation is performed by RunRecommender
    # RunRecommender.evaluate_on_test_set(hybrid_ucficf, weights)

    RunRecommender.evaluate_on_test_set(hybrid_ucficf, weights, Kfold=4, parallelize_evaluation=True)
    #RunRecommender.evaluate_on_validation_set(hybrid_ucficf, weights)

    RunRecommender.run(hybrid_ucficf, weights)
Example #17
from SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from utils.run import RunRecommender

recommender = SLIM_BPR_Cython

RunRecommender.evaluate_on_test_set(SLIM_BPR_Cython, {})


def objective(params):
    loss = - RunRecommender.evaluate_on_validation_set(UserCBF, params, Kfold=4, user_group="cold")
    return loss

user_cbf_space = {
    "topK": hp.hp.choice('topK', np.arange(0, 500, 5)),
    "shrink": hp.hp.uniformint('shrink', 0, 50),
    "similarity": hp.hp.choice('similarity', ["cosine", "adjusted", "asymmetric", "pearson", "jaccard", "dice", "tversky", "tanimoto"]),
    "suppress_interactions": hp.hp.choice('suppress_interactions', [True, False]),
    "normalize": hp.hp.choice('normalize', [True, False])
}

if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 100

    previous = {'normalize': True, 'shrink': 5, 'similarity': "cosine", 'suppress_interactions': False, 'topK': 200}
    last = {'normalize': True, 'shrink': 1.0, 'similarity': "dice", 'suppress_interactions': True, 'topK': 93*5}

    # Optimize
    best = fmin(fn=objective, space=user_cbf_space, algo=hp.tpe.suggest,
                max_evals=MAX_EVALS, trials=bayes_trials, verbose=True, points_to_evaluate=[previous, last])

    ### best will return the best hyperparameter set

    print(best)
    params = space_eval(user_cbf_space, best)
    print(params)

    MAP = RunRecommender.evaluate_on_test_set(UserCBF, params, Kfold=4, user_group="cold")
Example #19
    def recommend(self, user_id, at=10, exclude_seen=True):

        scores = self.compute_scores(user_id)

        if exclude_seen:
            scores = self.filter_seen(user_id, scores)

        # rank items
        ranking = scores.argsort()[::-1]

        return ranking[:at]

    def filter_seen(self, user_id, scores):
        start_pos = self.URM_train.indptr[user_id]
        end_pos = self.URM_train.indptr[user_id + 1]

        user_profile = self.URM_train.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == "__main__":
    from utils.run import RunRecommender

    cb = ItemCollaborativeFilter

    map10 = RunRecommender.evaluate_on_test_set(cb, {})
    return loss

item_cbf_space = {
    "topK": hp.hp.choice('topK', np.arange(0, 500, 5)),
    "shrink": hp.hp.uniformint('shrink', 0, 50),
    "similarity": hp.hp.choice('similarity', ["cosine", "jaccard", "dice"]),
    "bm_25_normalization": hp.hp.choice('bm_25_normalization', [True, False]),

    "normalize": hp.hp.choice('normalize', [True, False])
}


if __name__ == '__main__':
    ### step 3 : storing the results of every iteration
    bayes_trials = Trials()
    MAX_EVALS = 100

    # Optimize
    best = fmin(fn=objective, space=item_cbf_space, algo=hp.tpe.suggest,
                max_evals=MAX_EVALS, trials=bayes_trials, verbose=True)

    ### best will return the best hyperparameter set

    params = space_eval(item_cbf_space, best)

    print('################### Best parameters ######################')
    print("Parameters:", params)

    print('################### Performance on Test set ######################')
    RunRecommender.evaluate_on_test_set(UserCBF, params, Kfold=N_FOLD, parallelize_evaluation=True, user_group="warm")
Example #21
def objective(params):
    print("Current parameters:")
    print(params)
    loss = -RunRecommender.evaluate_on_test_set(MF_MSE_PyTorch, params)
    return loss
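
# --- Hedged aside (assumed wiring, following the pattern used throughout these
# examples): how an objective like the one above is handed to hyperopt.
# mf_space is a hypothetical search space for MF_MSE_PyTorch, made up here.
import hyperopt as hp
from hyperopt import fmin, space_eval, Trials

mf_space = {
    "num_factors": hp.hp.uniformint('num_factors', 10, 300),
    "epochs": hp.hp.uniformint('epochs', 5, 50),
}

if __name__ == '__main__':
    bayes_trials = Trials()
    best = fmin(fn=objective, space=mf_space, algo=hp.tpe.suggest,
                max_evals=100, trials=bayes_trials, verbose=True)
    print("Best parameters:", space_eval(mf_space, best))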