Example #1
    def test_save_and_load(self):
        recommender_class = NCRWrapper

        recommender_instance_original, URM_train, URM_validation, URM_test, URM_negative = get_data_and_rec_instance(
            recommender_class)
        n_users, n_items = URM_train.shape

        evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                     URM_negative, [50],
                                                     exclude_seen=True)

        folder_path = "./temp_folder/"
        file_name = "temp_file"

        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        recommender_instance_original.fit()
        recommender_instance_original.save_model(folder_path=folder_path,
                                                 file_name=file_name)

        results_run_original, _ = evaluator_test.evaluateRecommender(
            recommender_instance_original)

        recommender_instance_loaded = recommender_class(URM_train)
        recommender_instance_loaded.load_model(folder_path=folder_path,
                                               file_name=file_name)

        results_run_loaded, _ = evaluator_test.evaluateRecommender(
            recommender_instance_loaded)

        print("Result original: {}\n".format(results_run_original))
        print("Result loaded: {}\n".format(results_run_loaded))

        # np.ediff1d on the CSR indptr gives the number of stored entries per row,
        # i.e. the number of test interactions per user; evaluate only users with
        # at least one (np.bool and np.int were removed in NumPy 1.24, use builtins)
        num_ratings = np.ediff1d(URM_test.indptr)
        users_to_evaluate_mask = num_ratings > 0
        user_id_list = np.arange(n_users, dtype=int)[users_to_evaluate_mask]

        # The boolean union of test positives and sampled negatives gives, for
        # each user, the candidate item set to rank
        URM_items_to_rank = sps.csr_matrix(URM_test.copy().astype(bool)) + \
            sps.csr_matrix(URM_negative.copy().astype(bool))
        URM_items_to_rank.eliminate_zeros()
        URM_items_to_rank.data = np.ones_like(URM_items_to_rank.data)

        for test_user in user_id_list:
            start_pos = URM_items_to_rank.indptr[test_user]
            end_pos = URM_items_to_rank.indptr[test_user + 1]
            items_to_compute = URM_items_to_rank.indices[start_pos:end_pos]

            item_scores_original = recommender_instance_original._compute_item_score(
                user_id_array=np.atleast_1d(test_user),
                items_to_compute=items_to_compute)

            item_scores_loaded = recommender_instance_loaded._compute_item_score(
                user_id_array=np.atleast_1d(test_user),
                items_to_compute=items_to_compute)

            self.assertTrue(
                np.allclose(item_scores_original, item_scores_loaded),
                "item_scores of the fitted model and of the loaded model are different"
            )

        shutil.rmtree(folder_path, ignore_errors=True)
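
The test above leans on scipy CSR internals to enumerate each user's candidate items: for a CSR matrix, the column indices of row u live in indices[indptr[u]:indptr[u + 1]]. A minimal, self-contained sketch of that slicing pattern (illustrative data, not part of the original test):

import numpy as np
import scipy.sparse as sps

URM = sps.csr_matrix(np.array([[1, 0, 1],
                               [0, 0, 0],
                               [0, 1, 0]]))

for user_id in range(URM.shape[0]):
    start_pos = URM.indptr[user_id]
    end_pos = URM.indptr[user_id + 1]
    # column indices of the nonzero entries in this row
    print(user_id, URM.indices[start_pos:end_pos])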
Example #2
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):

    from Conferences.KDD.MCRec_our_interface.Movielens100K.Movielens100KReader import Movielens100KReader

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "movielens100k":
        dataset = Movielens100KReader(result_folder_path)

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])
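    # (In this framework, "implicit" means every stored interaction has value 1.0,
    # and "disjoint" means no user-item pair appears in more than one matrix)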

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], result_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               result_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=[10])

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "PRECISION"
    n_cases = 50
    n_random_starts = 15

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)
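    # functools.partial pre-binds all the keyword arguments above, so the tuning
    # loop below only needs to supply the recommender class itself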

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Content Baselines

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Content(
                    ItemKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On CBF recommender for ICM {} Exception {}".format(
                    ICM_name, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Hybrid

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Hybrid(
                    ItemKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On recommender {} Exception {}".format(
                    ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        if dataset_name == "movielens100k":
            """
            The code provided by the original authors of MCRec can be used only for the original data.
            Here I am passing to the Wrapper the URM_train matrix that is only required for its shape,
            the train will be done using the preprocessed data the original authors provided
            """
            from Conferences.KDD.MCRec_github.code.Dataset import Dataset

            original_dataset_reader = Dataset(
                'Conferences/KDD/MCRec_github/data/' + 'ml-100k')

            MCRec_article_hyperparameters = {
                "epochs": 200,
                "latent_dim": 128,
                "reg_latent": 0,
                "layers": [512, 256, 128, 64],
                "reg_layes": [0, 0, 0, 0],
                "learning_rate": 1e-3,
                "batch_size": 256,
                "num_negatives": 4,
            }

            MCRec_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "evaluator_object": evaluator_validation,
                "lower_validations_allowed": 5,
                "validation_metric": metric_to_optimize
            }

            parameterSearch = SearchSingleCase(
                MCRecML100k_RecommenderWrapper,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                CONSTRUCTOR_POSITIONAL_ARGS=[
                    URM_train, original_dataset_reader
                ],
                FIT_KEYWORD_ARGS=MCRec_earlystopping_hyperparameters)

            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train + URM_validation
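            # For the final test evaluation the model is retrained on the union of
            # train and validation data, hence the replaced URM positional argument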

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=MCRec_article_hyperparameters,
                output_folder_path=result_folder_path,
                resume_from_saved=True,
                output_file_name_root=MCRecML100k_RecommenderWrapper.
                RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        ICM_names_to_report_list = list(dataset.ICM_DICT.keys())

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[MCRecML100k_RecommenderWrapper],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=ICM_names_to_report_list,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["PRECISION", "RECALL", "NDCG"],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
Example #3
    if input_flags.run_eval_ablation:

        for fold_index, dataSplitter_fold in enumerate(dataSplitter_k_fold):

            URM_train, URM_validation, URM_test = dataSplitter_fold.get_holdout_split()
            UCM_CoupledCF = dataSplitter_fold.get_UCM_from_name("UCM_all")
            ICM_CoupledCF = dataSplitter_fold.get_ICM_from_name("ICM_all")

            # Ensure negative items are consistent with positive items, accounting for removed cold users
            URM_test_negative_fold = get_URM_negatives_without_cold_users(dataSplitter_fold.removed_cold_users, URM_test_negative)

            # ensure IMPLICIT data
            assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative_fold])
            assert_disjoint_matrices([URM_train, URM_validation, URM_test])

            evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative_fold, cutoff_list=cutoff_list_validation)
            evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative_fold, cutoff_list=cutoff_list_test)
            
            recommender_input_args = SearchInputRecommenderArgs(CONSTRUCTOR_POSITIONAL_ARGS=[URM_train, UCM_CoupledCF, ICM_CoupledCF])


            # Ablation with training on selected mode
            for map_mode in ["all_map", "main_diagonal", "off_diagonal"]:

                result_folder_path = os.path.join(output_folder_path, "fit_ablation_{}/{}_{}/".format(map_mode, map_mode, fold_index))

            search_metadata = run_train_with_early_stopping(input_flags.dataset_name,
                                                            URM_train, URM_validation,
                                                            UCM_CoupledCF, ICM_CoupledCF,
                                                            evaluator_validation,
                                                            evaluator_test,
                                                            ...)  # remaining arguments truncated in the source snippet
Example #4
def run_recommender(recommender_class):

    temp_save_file_folder = "./result_experiments/__temp_model/"

    if not os.path.isdir(temp_save_file_folder):
        os.makedirs(temp_save_file_folder)

    try:
        dataset_object = Movielens1MReader()

        dataSplitter = DataSplitter_leave_k_out(dataset_object, k_out_value=2)

        dataSplitter.load_data()
        URM_train, URM_validation, URM_test = dataSplitter.get_holdout_split()

        write_log_string(log_file, "On Recommender {}\n".format(recommender_class))



        recommender_object = recommender_class(URM_train)

        if isinstance(recommender_object, Incremental_Training_Early_Stopping):
            fit_params = {"epochs": 15}
        else:
            fit_params = {}

        recommender_object.fit(**fit_params)

        write_log_string(log_file, "Fit OK, ")



        evaluator = EvaluatorHoldout(URM_test, [5], exclude_seen=True)
        _, results_run_string = evaluator.evaluateRecommender(recommender_object)

        write_log_string(log_file, "EvaluatorHoldout OK, ")



        evaluator = EvaluatorNegativeItemSample(URM_test, URM_train, [5], exclude_seen=True)
        _, _ = evaluator.evaluateRecommender(recommender_object)

        write_log_string(log_file, "EvaluatorNegativeItemSample OK, ")



        recommender_object.saveModel(temp_save_file_folder, file_name="temp_model")

        write_log_string(log_file, "saveModel OK, ")



        recommender_object = recommender_class(URM_train)
        recommender_object.loadModel(temp_save_file_folder, file_name="temp_model")

        evaluator = EvaluatorHoldout(URM_test, [5], exclude_seen=True)
        _, results_run_string_2 = evaluator.evaluateRecommender(recommender_object)

        write_log_string(log_file, "loadModel OK, ")



        shutil.rmtree(temp_save_file_folder, ignore_errors=True)

        write_log_string(log_file, " PASS\n")
        write_log_string(log_file, results_run_string + "\n\n")



    except Exception as e:

        print("On Recommender {} Exception {}".format(recommender_class, str(e)))
        log_file.write("On Recommender {} Exception {}\n\n\n".format(recommender_class, str(e)))
        log_file.flush()

        traceback.print_exc()
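
The helper write_log_string is not defined in this snippet. A plausible minimal implementation, assuming it should append to the log file and flush immediately so progress survives a crash:

def write_log_string(log_file, string):
    log_file.write(string)
    log_file.flush()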
Example #5
def read_data_split_and_search_CMN(dataset_name):

    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    if dataset_name == "citeulike":
        dataset = CiteULikeReader()

    elif dataset_name == "epinions":
        dataset = EpinionsReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)


    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()



    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)



    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    metric_to_optimize = "HIT_RATE"


    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative])


    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])



    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")



    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[5])
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[5, 10])


    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = output_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       n_cases = 35)





    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()


    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(recommender_class, str(e)))
            traceback.print_exc()



    ################################################################################################
    ###### CMN




    try:


        temp_file_folder = output_folder_path + "{}_log/".format(ALGORITHM_NAME)

        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }

        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50



        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }
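        # Early stopping: validate every 5 epochs and stop after 5 consecutive
        # validation rounds without improvement of the target metric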


        parameterSearch = SearchSingleCase(CMN_RecommenderWrapper,
                                           evaluator_validation=evaluator_validation,
                                           evaluator_test=evaluator_test)
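        # SearchSingleCase evaluates a single fixed hyperparameter configuration,
        # reusing the same search-and-logging machinery as the baseline tuning above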

        recommender_parameters = SearchInputRecommenderParameters(
                                            CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
                                            FIT_KEYWORD_ARGS = CMN_earlystopping_parameters)

        parameterSearch.search(recommender_parameters,
                               fit_parameters_values=CMN_article_parameters,
                               output_folder_path = output_folder_path,
                               output_file_name_root = CMN_RecommenderWrapper.RECOMMENDER_NAME)




    except Exception as e:

        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper, str(e)))
        traceback.print_exc()





    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr)>=1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)


    print_time_statistics_latex_table(result_folder_path = output_folder_path,
                                      dataset_name = dataset_name,
                                      results_file_prefix_name = ALGORITHM_NAME,
                                      other_algorithm_list = [CMN_RecommenderWrapper],
                                      ICM_names_to_report_list = [],
                                      n_validation_users = n_validation_users,
                                      n_test_users = n_test_users,
                                      n_decimals = 2)


    print_results_latex_table(result_folder_path = output_folder_path,
                              results_file_prefix_name = ALGORITHM_NAME,
                              dataset_name = dataset_name,
                              metrics_to_report_list = ["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list = [5, 10],
                              ICM_names_to_report_list = [],
                              other_algorithm_list = [CMN_RecommenderWrapper])
Example #6
def read_data_split_and_search_MCRec(dataset_name):

    from Conferences.KDD.MCRec_our_interface.Movielens100K.Movielens100KReader import Movielens100KReader
    from Conferences.KDD.MCRec_our_interface.LastFM.LastFMReader import LastFMReader
    from Conferences.KDD.MCRec_our_interface.Yelp.YelpReader import YelpReader

    if dataset_name == "movielens100k":
        dataset = Movielens100KReader()

    elif dataset_name == "yelp":
        dataset = YelpReader()

    elif dataset_name == "lastfm":
        dataset = LastFMReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    if dataset_name == "movielens100k":
        URM_train += URM_validation
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10],
                                                           exclude_seen=False)
    else:
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10])

    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=[10])

    collaborative_algorithm_list = [
        Random, TopPop, UserKNNCFRecommender, ItemKNNCFRecommender,
        P3alphaRecommender, RP3betaRecommender, PureSVDRecommender
    ]

    metric_to_optimize = "PRECISION"

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        n_cases=35)

    # pool = PoolWithSubprocess(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)

    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Content Baselines

    ICM_dictionary = dataset.ICM_dict

    ICM_name_list = ICM_dictionary.keys()

    for ICM_name in ICM_name_list:

        try:

            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Content(
                ItemKNNCBFRecommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object.copy(),
                n_cases=35)

        except Exception as e:

            print("On CBF recommender for ICM {} Exception {}".format(
                ICM_name, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Hybrid

    for ICM_name in ICM_name_list:

        try:

            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Hybrid(
                ItemKNN_CFCBF_Hybrid_Recommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object,
                allow_weighting=True,
                n_cases=35)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### MCRec

    if dataset_name == "movielens100k":

        # Since I am using the original data reader, the contents of URM_validation
        # are seen items, therefore I have to set up another evaluator which does not
        # exclude them
        # evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[10], exclude_seen=False)

        MCRec_article_parameters = {
            "epochs": 100,
            "latent_dim": 128,
            "reg_latent": 0,
            "layers": [512, 256, 128, 64],
            "reg_layes": [0, 0, 0, 0],
            "learning_rate": 1e-3,
            "batch_size": 256,
            "num_negatives": 4,
        }

        MCRec_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            MCRecML100k_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=MCRec_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=MCRec_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=MCRecML100k_RecommenderWrapper.
            RECOMMENDER_NAME)

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    ICM_names_to_report_list = ["ICM_genre"]

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper],
        ICM_names_to_report_list=ICM_names_to_report_list,
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(
        result_folder_path=output_folder_path,
        results_file_prefix_name=ALGORITHM_NAME,
        dataset_name=dataset_name,
        metrics_to_report_list=["PRECISION", "RECALL", "NDCG"],
        cutoffs_to_report_list=[10],
        ICM_names_to_report_list=ICM_names_to_report_list,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper])
Example #7
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):

    from Conferences.WWW.NeuMF_our_interface.Movielens1M.Movielens1MReader import Movielens1MReader
    from Conferences.WWW.NeuMF_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "movielens1m":
        dataset = Movielens1MReader(result_folder_path)

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader(result_folder_path)

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"], result_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([
        URM_train + URM_validation + URM_test, URM_train + URM_validation,
        URM_test
    ], ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "HIT_RATE"
    n_cases = 50
    n_random_starts = 15

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        try:

            if dataset_name == "movielens1m":
                num_factors = 64
            elif dataset_name == "pinterest":
                num_factors = 16

            neuMF_article_hyperparameters = {
                "epochs": 100,
                "epochs_gmf": 100,
                "epochs_mlp": 100,
                "batch_size": 256,
                "num_factors": num_factors,
                "layers": [num_factors * 4, num_factors * 2, num_factors],
                "reg_mf": 0.0,
                "reg_layers": [0, 0, 0],
                "num_negatives": 4,
                "learning_rate": 1e-3,
                "learning_rate_pretrain": 1e-3,
                "learner": "sgd",
                "learner_pretrain": "adam",
                "pretrain": True
            }

            neuMF_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "evaluator_object": evaluator_validation,
                "lower_validations_allowed": 5,
                "validation_metric": metric_to_optimize
            }

            parameterSearch = SearchSingleCase(
                NeuMF_RecommenderWrapper,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
                FIT_KEYWORD_ARGS=neuMF_earlystopping_hyperparameters)

            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train + URM_validation

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=neuMF_article_hyperparameters,
                output_folder_path=result_folder_path,
                resume_from_saved=True,
                output_file_name_root=NeuMF_RecommenderWrapper.RECOMMENDER_NAME
            )

        except Exception as e:

            print("On recommender {} Exception {}".format(
                NeuMF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[NeuMF_RecommenderWrapper],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=[1, 5, 10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
Example #8
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):

    result_folder_path = "result_experiments/IJCAI/CoupledCF_{}/".format(
        dataset_name)

    #Logger(path=result_folder_path, name_file='CoupledCF_' + dataset_name)

    if dataset_name.startswith("movielens1m"):

        if dataset_name.endswith("_original"):
            dataset = Movielens1MReader(result_folder_path, type='original')
        elif dataset_name.endswith("_ours"):
            dataset = Movielens1MReader(result_folder_path, type='ours')
        else:
            print("Dataset name not supported, current is {}".format(
                dataset_name))
            return

        UCM_to_report = ["UCM_all"]
        ICM_to_report = ["ICM_all"]

        UCM_CoupledCF = dataset.ICM_DICT["UCM_all"]
        ICM_CoupledCF = dataset.ICM_DICT["ICM_all"]

    elif dataset_name.startswith("tafeng"):

        if dataset_name.endswith("_original"):
            dataset = TafengReader(result_folder_path, type='original')
        elif dataset_name.endswith("_ours"):
            dataset = TafengReader(result_folder_path, type='ours')
        else:
            print("Dataset name not supported, current is {}".format(
                dataset_name))
            return

        UCM_to_report = ["UCM_all"]
        ICM_to_report = ["ICM_original"]

        UCM_CoupledCF = dataset.ICM_DICT["UCM_all"]
        ICM_CoupledCF = dataset.ICM_DICT["ICM_original"]

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    UCM_dict = {
        UCM_name: UCM_object
        for (UCM_name, UCM_object) in dataset.ICM_DICT.items()
        if "UCM" in UCM_name
    }
    ICM_dict = {
        UCM_name: UCM_object
        for (UCM_name, UCM_object) in dataset.ICM_DICT.items()
        if "ICM" in UCM_name
    }
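    # Note: dataset.ICM_DICT holds both user and item feature matrices; the two
    # comprehensions above split them by the "UCM"/"ICM" prefix in their names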

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Matrices are 1-indexed, so remove first row
    print_negative_items_stats(URM_train[1:], URM_validation[1:], URM_test[1:],
                               URM_test_negative[1:])
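    # (user and item ids are 1-indexed, so row 0 is an empty dummy row)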

    # Ensure IMPLICIT data
    from Utils.assertions_on_data_for_experiments import assert_implicit_data, assert_disjoint_matrices

    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    cutoff_list_validation = [5]
    cutoff_list_test = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation, URM_test_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=cutoff_list_test)

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

        ###############################################################################################
        ##### Item Content Baselines

        for ICM_name, ICM_object in ICM_dict.items():

            try:

                runParameterSearch_Content(
                    ItemKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

                runParameterSearch_Hybrid(
                    ItemKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On CBF recommender for ICM {} Exception {}".format(
                    ICM_name, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### User Content Baselines

        for UCM_name, UCM_object in UCM_dict.items():

            try:

                runParameterSearch_Content(
                    UserKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=UCM_name,
                    ICM_object=UCM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

                runParameterSearch_Hybrid(
                    UserKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=UCM_name,
                    ICM_object=UCM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On CBF recommender for UCM {} Exception {}".format(
                    UCM_name, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        model_name = dataset.DATASET_NAME

        earlystopping_hyperparameters = {
            'validation_every_n': 5,
            'stop_on_validation': True,
            'lower_validations_allowed': 5,
            'evaluator_object': evaluator_validation,
            'validation_metric': metric_to_optimize
        }

        if 'tafeng' in dataset_name:
            model_number = 3
            article_hyperparameters = {
                'learning_rate': 0.005,
                'epochs': 100,
                'n_negative_sample': 4,
                'temp_file_folder': None,
                'dataset_name': model_name,
                'number_model': model_number,
                'verbose': 0,
                'plot_model': False,
            }
        else:
            # movielens1m and other datasets
            model_number = 3
            article_hyperparameters = {
                'learning_rate': 0.001,
                'epochs': 100,
                'n_negative_sample': 4,
                'temp_file_folder': None,
                'dataset_name': model_name,
                'number_model': model_number,
                'verbose': 0,
                'plot_model': False,
            }

        parameterSearch = SearchSingleCase(
            DeepCF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=DeepCF_RecommenderWrapper.RECOMMENDER_NAME)

        if 'tafeng' in dataset_name:
            # tafeng model has a different structure
            model_number = 2
            article_hyperparameters = {
                'learning_rate': 0.005,
                'epochs': 100,
                'n_negative_sample': 4,
                'temp_file_folder': None,
                'dataset_name': "Tafeng",
                'number_model': model_number,
                'verbose': 0,
                'plot_model': False,
            }
        else:
            # movielens1m uses this structure with model 2
            model_number = 2
            article_hyperparameters = {
                'learning_rate': 0.001,
                'epochs': 100,
                'n_negative_sample': 4,
                'temp_file_folder': None,
                'dataset_name': "Movielens1M",
                'number_model': model_number,
                'verbose': 0,
                'plot_model': False,
            }

        parameterSearch = SearchSingleCase(
            CoupledCF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[
                URM_train, UCM_CoupledCF, ICM_CoupledCF
            ],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=CoupledCF_RecommenderWrapper.RECOMMENDER_NAME
        )

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[
                DeepCF_RecommenderWrapper, CoupledCF_RecommenderWrapper
            ],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=ICM_to_report,
            UCM_names_list=UCM_to_report)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=[1, 5, 10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name +
            "{}_latex_results.txt".format("beyond_accuracy_metrics"),
            metrics_list=[
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=[5],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1",
                "HIT_RATE", "ARHR_ALL_HITS", "NOVELTY",
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=[5],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
Example #9
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    # Ensure both experiments use the same data
    dataset_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME,
        dataset_name.replace("_remove_cold_items", ""))

    if not os.path.exists(dataset_folder_path):
        os.makedirs(dataset_folder_path)

    if 'amazon_music' in dataset_name:
        dataset = AmazonMusicReader(dataset_folder_path)

    elif 'movielens1m_ours' in dataset_name:
        dataset = Movielens1MReader(dataset_folder_path, type="ours")

    elif 'movielens1m_original' in dataset_name:
        dataset = Movielens1MReader(dataset_folder_path, type="original")

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT matrices
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])
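    # assert_implicit_data is expected to verify that all stored values are 1,
    # and assert_disjoint_matrices that no (user, item) pair appears in more
    # than one split; conceptually (a sketch, not the repository's code):
    #   (URM_a.astype(bool).multiply(URM_b.astype(bool))).nnz == 0 for each pair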

    cold_items_statistics(URM_train, URM_validation, URM_test,
                          URM_test_negative)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"], result_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([
        URM_train + URM_validation + URM_test, URM_train + URM_validation,
        URM_test
    ], ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    cutoff_list_validation = [10]
    cutoff_list_test = [5, 10, 20]

    if "_remove_cold_items" in dataset_name:
        ignore_items_validation = get_cold_items(URM_train)
        ignore_items_test = get_cold_items(URM_train + URM_validation)
    else:
        ignore_items_validation = None
        ignore_items_test = None
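    # get_cold_items presumably returns the ids of items without training
    # interactions, e.g. (a sketch under that assumption):
    #   item_interactions = np.ediff1d(sps.csc_matrix(URM).indptr)
    #   cold_items = np.arange(URM.shape[1])[item_interactions == 0]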

    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation,
        URM_test_negative,
        cutoff_list=cutoff_list_validation,
        ignore_items=ignore_items_validation)
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=cutoff_list_test,
        ignore_items=ignore_items_test)
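    # EvaluatorNegativeItemSample ranks, for each user, only the positive test
    # items plus the sampled negatives in URM_test_negative, instead of the
    # whole item catalogue.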

    # The Evaluator automatically skips users with no test interactions.
    # In this case we need the evaluations with and without cold items to be comparable,
    # so we ensure that the same users are included in the evaluation in both cases.
    evaluator_validation.users_to_evaluate = np.arange(URM_train.shape[0])
    evaluator_test.users_to_evaluate = np.arange(URM_train.shape[0])
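    # Forcing every user id into users_to_evaluate keeps the metric
    # denominators identical across the two runs, at the cost of also
    # averaging over users that cannot contribute any hit.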

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        earlystopping_hyperparameters = {
            'validation_every_n': 5,
            'stop_on_validation': True,
            'lower_validations_allowed': 5,
            'evaluator_object': evaluator_validation,
            'validation_metric': metric_to_optimize,
        }
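        # With these values the validation metric is checked every 5 epochs and
        # training stops after 5 consecutive checks without improvement (as the
        # parameter names suggest), i.e. after at most 25 epochs of no progress.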

        num_factors = 64

        article_hyperparameters = {
            'epochs': 500,
            'learning_rate': 0.001,
            'batch_size': 256,
            'num_negatives': 4,
            'layers': (num_factors * 4, num_factors * 2, num_factors),
            'regularization_layers': (0, 0, 0),
            'learner': 'adam',
            'verbose': False,
        }
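        # With num_factors = 64 the MLP tower is (256, 128, 64), the usual
        # halving pyramid of NCF-style architectures.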

        parameterSearch = SearchSingleCase(
            DELF_MLP_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=DELF_MLP_RecommenderWrapper.RECOMMENDER_NAME)

        parameterSearch = SearchSingleCase(
            DELF_EF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=DELF_EF_RecommenderWrapper.RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[
                DELF_MLP_RecommenderWrapper, DELF_EF_RecommenderWrapper
            ],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=cutoff_list_test,
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1",
                "HIT_RATE", "ARHR_ALL_HITS", "NOVELTY",
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
Exemple #10
0
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "gowalla":
        dataset = GowallaReader(result_folder_path)

    elif dataset_name == "yelp":
        dataset = YelpReader(result_folder_path)

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    print_negative_items_stats(URM_train, URM_validation, URM_test,
                               URM_test_negative)

    # Ensure IMPLICIT data
    from Utils.assertions_on_data_for_experiments import assert_implicit_data, assert_disjoint_matrices

    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    # URM_test_negative overlaps with both train and test,
    # so it is excluded from the disjointness check
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    cutoff_list_validation = [10]
    cutoff_list_test = [5, 10, 20]

    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation, URM_test_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=cutoff_list_test)

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        # An empty matrix is passed as URM_negative for the training samples
        article_hyperparameters = {
            "batch_size": 512,
            "epochs": 1500,
            "epochs_MFBPR": 500,
            "embedding_size": 64,
            "hidden_size": 128,
            "negative_sample_per_positive": 1,
            "negative_instances_per_positive": 4,
            "regularization_users_items": 0.01,
            "regularization_weights": 10,
            "regularization_filter_weights": 1,
            "learning_rate_embeddings": 0.05,
            "learning_rate_CNN": 0.05,
            "channel_size": [32, 32, 32, 32, 32, 32],
            "dropout": 0.0,
            "epoch_verbose": 1,
        }
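        # channel_size presumably lists the feature maps of each convolutional
        # layer, i.e. six layers of 32 channels applied to the outer-product
        # interaction map.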

        earlystopping_hyperparameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "lower_validations_allowed": 5,
            "evaluator_object": evaluator_validation,
            "validation_metric": metric_to_optimize,
            "epochs_min": 150
        }

        parameterSearch = SearchSingleCase(
            ConvNCF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=ConvNCF_RecommenderWrapper.RECOMMENDER_NAME)

        # Remember to close the global session, since ConvNCF uses global variables
        ConvNCF.close_session(verbose=True)

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[ConvNCF_RecommenderWrapper],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=cutoff_list_test,
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1",
                "HIT_RATE", "ARHR_ALL_HITS", "NOVELTY",
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=cutoff_list_validation,
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
Exemple #11
0
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):
    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "delicious-hetrec2011":
        dataset = DeliciousHetrec2011Reader(result_folder_path)

    elif dataset_name == "delicious-hetrec2011-cold-users":
        dataset = DeliciousHetrec2011ColdUsersReader(result_folder_path)

    elif dataset_name == "delicious-hetrec2011-cold-items":
        dataset = DeliciousHetrec2011ColdItemsReader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011":
        dataset = LastFMHetrec2011Reader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011-cold-users":
        dataset = LastFMHetrec2011ColdUsersReader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011-cold-items":
        dataset = LastFMHetrec2011ColdItemsReader(result_folder_path)

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_negative = dataset.URM_DICT["URM_negative"].copy()
    UCM_train = dataset.UCM_DICT["UCM"].copy()
    ICM_train = dataset.ICM_DICT["ICM"].copy()

    if dataset_name == "delicious-hetrec2011" or dataset_name == "lastfm-hetrec2011":
        URM_train_last_test = URM_train + URM_validation

        # Ensure IMPLICIT data and disjoint test-train split
        assert_implicit_data([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    else:
        URM_train_last_test = URM_train

        # Ensure IMPLICIT data and disjoint test-train split
        assert_implicit_data([URM_train, URM_test])
        assert_disjoint_matrices([URM_train, URM_test])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    metric_to_optimize = "MAP"
    cutoff_list_validation = [5, 10, 20]
    cutoff_list_test = [5, 10, 20]

    n_cases = 50
    n_random_starts = 15

    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation, URM_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_negative,
                                                 cutoff_list=cutoff_list_test)

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:
        article_hyperparameters = {
            "pretrain_samples": 3,
            "pretrain_batch_size": 200,
            "pretrain_iterations": 5,
            "embed_len": 128,
            "topK": 10,
            "fliter_theta": 16,
            "aggre_theta": 64,
            "batch_size": 400,
            "samples": 3,
            "margin": 20,
            "epochs": 30,
            "iter_without_att": 5,
            "directed": False,
        }

        # Do not modify the early stopping hyperparameters
        earlystopping_hyperparameters = {
            "validation_every_n": 5,
            "stop_on_validation": False,
            "lower_validations_allowed": 5,
            "evaluator_object": evaluator_validation,
            "validation_metric": metric_to_optimize,
        }

        # This is a simpler version of the tuning code reported below; it uses SearchSingleCase directly.
        # You may use it for quicker testing:
        # recommender_instance = HERSWrapper(URM_train, UCM_train, ICM_train)
        #
        # recommender_instance.fit(**article_hyperparameters,
        #                          **earlystopping_hyperparameters)
        #
        # evaluator_test.evaluateRecommender(recommender_instance)

        # Fit the DL model, select the optimal number of epochs and save the result
        parameterSearch = SearchSingleCase(
            HERSWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train, UCM_train, ICM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        if dataset_name == "delicious-hetrec2011" or dataset_name == "lastfm-hetrec2011":
            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train_last_test

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=article_hyperparameters,
                output_folder_path=result_folder_path,
                output_file_name_root=HERSWrapper.RECOMMENDER_NAME)
        else:
            parameterSearch.search(
                recommender_input_args,
                fit_hyperparameters_values=article_hyperparameters,
                output_folder_path=result_folder_path,
                output_file_name_root=HERSWrapper.RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      BASELINE ALGORITHMS - Nothing should be modified below this point
    ######

    if flag_baselines_tune:

        ################################################################################################
        ###### Collaborative Baselines

        collaborative_algorithm_list = [
            Random,
            TopPop,
            ItemKNNCFRecommender,
            PureSVDRecommender,
            SLIM_BPR_Cython,
        ]

        # Run hyperparameter tuning of the baselines
        # and check that the results are reasonable and comparable to those reported in the paper
        runParameterSearch_Collaborative_partial = partial(
            runParameterSearch_Collaborative,
            URM_train=URM_train,
            URM_train_last_test=URM_train_last_test,
            metric_to_optimize=metric_to_optimize,
            evaluator_validation_earlystopping=evaluator_validation,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            parallelizeKNN=False,
            allow_weighting=True,
            n_cases=n_cases,
            n_random_starts=n_random_starts)

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Content Baselines

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Content(
                    ItemKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train_last_test,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On CBF recommender for ICM {} Exception {}".format(
                    ICM_name, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Hybrid

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Hybrid(
                    ItemKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train_last_test,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On recommender {} Exception {}".format(
                    ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:
        n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

        print_time_statistics_latex_table(
            result_folder_path=result_folder_path,
            dataset_name=dataset_name,
            algorithm_name=ALGORITHM_NAME,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list,
            n_validation_users=n_validation_users,
            n_test_users=n_test_users,
            n_decimals=2)

        print_results_latex_table(
            result_folder_path=result_folder_path,
            algorithm_name=ALGORITHM_NAME,
            file_name_suffix="article_metrics_",
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=cutoff_list_test,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list)

        print_results_latex_table(
            result_folder_path=result_folder_path,
            algorithm_name=ALGORITHM_NAME,
            file_name_suffix="all_metrics_",
            dataset_name=dataset_name,
            metrics_to_report_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_to_report_list=cutoff_list_validation,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list)
Exemple #12
0
print('number_of_users:', number_of_users)
print('number_of_items:', number_of_items)

# test(URM_train, 267)

cutoff = 5

print_statistics(URM_train)

CMN_wrapper_train = CMN_RecommenderWrapper(URM_train)
user_KNNCF_Recommender = UserKNNCFRecommender(URM_train)
item_KNNCF_Recommender = ItemKNNCFRecommender(URM_train)
rp3_beta_Recommender = RP3betaRecommender(URM_train)

evaluator_negative_item_sample = EvaluatorNegativeItemSample(
    URM_test, URM_test_negative, cutoff_list=[5, 10])

CMN_wrapper_train.loadModel('result_experiments/SIGIR/CMN_pinterest/',
                            'CMN_RecommenderWrapper_best_model')
d, s = evaluator_negative_item_sample.evaluateRecommender(CMN_wrapper_train)
print('CMN_wrapper_train')
print(s)

user_KNNCF_Recommender.loadModel('result_experiments/SIGIR/CMN_pinterest/',
                                 'UserKNNCFRecommender_cosine_best_model')
d, s = evaluator_negative_item_sample.evaluateRecommender(
    user_KNNCF_Recommender)
print('user_KNNCF_Recommender')
print(s)

item_KNNCF_Recommender.loadModel('result_experiments/SIGIR/CMN_pinterest/',
                                 'ItemKNNCFRecommender_cosine_best_model')
# (model file name above assumed by analogy with the UserKNNCF block)
d, s = evaluator_negative_item_sample.evaluateRecommender(
    item_KNNCF_Recommender)
print('item_KNNCF_Recommender')
print(s)
Exemple #13
0
def read_data_split_and_search_NeuCF(dataset_name):
    from Conferences.WWW.NeuMF_our_interface.Movielens1M.Movielens1MReader import Movielens1MReader
    from Conferences.WWW.NeuMF_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader

    if dataset_name == "movielens1m":
        dataset = Movielens1MReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random, TopPop, UserKNNCFRecommender, ItemKNNCFRecommender,
        P3alphaRecommender, RP3betaRecommender, SLIMElasticNetRecommender
    ]

    metric_to_optimize = "HIT_RATE"

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### NeuMF

    try:

        if dataset_name == "movielens1m":
            num_factors = 64
        elif dataset_name == "pinterest":
            num_factors = 16

        neuMF_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "epochs_mlp": 100,
            "batch_size": 256,
            "num_factors": num_factors,
            "layers": [num_factors * 4, num_factors * 2, num_factors],
            "reg_mf": 0.0,
            "reg_layers": [0, 0, 0],
            "num_negatives": 4,
            "learning_rate": 1e-3,
            "learning_rate_pretrain": 1e-3,
            "learner": "sgd",
            "learner_pretrain": "adam",
            "pretrain": True
        }
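        # "pretrain": True mirrors the protocol of the original paper: the GMF
        # and MLP components are pretrained with "adam" and the fused NeuMF
        # model is then trained with plain "sgd".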

        neuMF_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            NeuMF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=neuMF_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=neuMF_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=NeuMF_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(NeuMF_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[NeuMF_RecommenderWrapper],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(result_folder_path=output_folder_path,
                              results_file_prefix_name=ALGORITHM_NAME,
                              dataset_name=dataset_name,
                              metrics_to_report_list=["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list=[1, 5, 10],
                              other_algorithm_list=[NeuMF_RecommenderWrapper])
Exemple #14
0
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune = False,
                               flag_DL_article_default = False,
                               flag_DL_tune = False,
                               flag_print_results = False):


    result_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)


    if dataset_name == 'amazon_music_original':
        dataset = AmazonMusicReader(result_folder_path, original = True)

    elif dataset_name == 'amazon_music_ours':
        dataset = AmazonMusicReader(result_folder_path, original = False)

    elif dataset_name == 'amazon_movie':
        dataset = AmazonMovieReader(result_folder_path)

    elif dataset_name == 'movielens100k':
        dataset = Movielens100KReader(result_folder_path)

    elif dataset_name == 'movielens1m':
        dataset = Movielens1MReader(result_folder_path)

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return


    print('Current dataset is: {}'.format(dataset_name))



    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()


    # Ensure DISJOINT sets. Do not ensure IMPLICIT data because the algorithm needs explicit data
    assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])

    cold_items_statistics(URM_train, URM_validation, URM_test, URM_test_negative)

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"],
                         result_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation + URM_test, URM_train + URM_validation, URM_test],
                               ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string + "popularity_statistics")


    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
        ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    cutoff_list_validation = [10]
    cutoff_list_test = [5, 10, 20]

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=cutoff_list_test)


    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       URM_train_last_test = URM_train + URM_validation,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = result_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       resume_from_saved = True,
                                                       n_cases = n_cases,
                                                       n_random_starts = n_random_starts)



    if flag_baselines_tune:
        
        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(recommender_class, str(e)))
                traceback.print_exc()


    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    """
    NOTICE: We did not upload the source code of DMF as it was not publicly available and the original
            authors did not respond to our request to add it to this repository
    """

    if flag_DL_article_default:

        if dataset_name in ['amazon_music_original', 'amazon_music_ours']:
            last_layer_size = 128
        else:
            last_layer_size = 64

        article_hyperparameters = {'epochs': 300,
                              'learning_rate': 0.0001,
                              'batch_size': 256,
                              'num_negatives': 7,   # As reported in the "Detailed implementation" section of the original paper
                              'last_layer_size': last_layer_size,
                              }

        earlystopping_hyperparameters = {'validation_every_n': 5,
                                    'stop_on_validation': True,
                                    'lower_validations_allowed': 5,
                                    'evaluator_object': evaluator_validation,
                                    'validation_metric': metric_to_optimize,
                                    }

        #
        # try:
        #
        #
        #     parameterSearch = SearchSingleCase(DMF_NCE_RecommenderWrapper,
        #                                        evaluator_validation=evaluator_validation,
        #                                        evaluator_test=evaluator_test)
        #
        #     recommender_input_args = SearchInputRecommenderArgs(
        #                                         CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
        #                                         FIT_KEYWORD_ARGS = earlystopping_hyperparameters)
        #
        #     recommender_input_args_last_test = recommender_input_args.copy()
        #     recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation
        #
        #     parameterSearch.search(recommender_input_args,
        #                            recommender_input_args_last_test = recommender_input_args_last_test,
        #                            fit_hyperparameters_values = article_hyperparameters,
        #                            output_folder_path = result_folder_path,
        #                            resume_from_saved = True,
        #                            output_file_name_root = DMF_NCE_RecommenderWrapper.RECOMMENDER_NAME)
        #
        #
        #
        # except Exception as e:
        #
        #     print("On recommender {} Exception {}".format(DMF_NCE_RecommenderWrapper, str(e)))
        #     traceback.print_exc()
        #
        #
        #
        # try:
        #
        #
        #     parameterSearch = SearchSingleCase(DMF_BCE_RecommenderWrapper,
        #                                        evaluator_validation=evaluator_validation,
        #                                        evaluator_test=evaluator_test)
        #
        #     recommender_input_args = SearchInputRecommenderArgs(
        #                                         CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
        #                                         FIT_KEYWORD_ARGS = earlystopping_hyperparameters)
        #
        #     recommender_input_args_last_test = recommender_input_args.copy()
        #     recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation
        #
        #     parameterSearch.search(recommender_input_args,
        #                            recommender_input_args_last_test = recommender_input_args_last_test,
        #                            fit_hyperparameters_values = article_hyperparameters,
        #                            output_folder_path = result_folder_path,
        #                            resume_from_saved = True,
        #                            output_file_name_root = DMF_BCE_RecommenderWrapper.RECOMMENDER_NAME)
        #
        #
        # except Exception as e:
        #
        #     print("On recommender {} Exception {}".format(DMF_BCE_RecommenderWrapper, str(e)))
        #     traceback.print_exc()



    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME, dataset_name)

        result_loader = ResultFolderLoader(result_folder_path,
                                         base_algorithm_list = None,
                                         other_algorithm_list = [DMF_NCE_RecommenderWrapper, DMF_BCE_RecommenderWrapper],
                                         KNN_similarity_list = KNN_similarity_to_report_list,
                                         ICM_names_list = None,
                                         UCM_names_list = None)


        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("article_metrics"),
                                           metrics_list = ["HIT_RATE", "NDCG"],
                                           cutoffs_list = cutoff_list_validation,
                                           table_title = None,
                                           highlight_best = True)

        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("all_metrics"),
                                           metrics_list = ["PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1", "HIT_RATE", "ARHR_ALL_HITS",
                                                           "NOVELTY", "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"],
                                           cutoffs_list = [10],
                                           table_title = None,
                                           highlight_best = True)

        result_loader.generate_latex_time_statistics(file_name + "{}_latex_results.txt".format("time"),
                                           n_evaluation_users=n_test_users,
                                           table_title = None)
Exemple #15
0
def read_data_split_and_search_CMN(dataset_name):
    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    if dataset_name == "citeulike":
        dataset = CiteULikeReader()

    elif dataset_name == "epinions":
        dataset = EpinionsReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    test_mode = False
    limit = False
    if limit:
        p = 700
        URM_train = URM_train[:p, :]
        URM_validation = URM_validation[:p, :]
        URM_test = URM_test[:p, :]
        URM_test_negative = URM_test_negative[:p, :]
        '''
        user: 3
        is_relevant_current_cutoff: [ True  True  True False False]
        recommended_items_current_cutoff: [  65   86   68 3671 1341]
        Warning! is_relevant_current_cutoff.sum()>1: 3
        relevant_items: [65 68 81 86]
        relevant_items_rating: [1. 1. 1. 1.]
        items_to_compute: 
        [  42   62   65   68   81   86  148  218  559  662  776  792 1164 1341
         1418 1491 1593 1603 1617 1697 2140 2251 2446 2517 2566 2643 2719 2769
         2771 3081 3133 3161 3188 3268 3409 3666 3671 3845 3864 3897 3984 4272
         4327 4329 4431 4519 4565 4568 4718 4812 4915 5096 5128 5137 5141 5184
         5217 5241 5371 5394 5415 5492 5521 5775 5798 5830 5831 5931 6005 6281
         6375 6558 6638 6644 6661 6705 6881 6898 6939 6970 7010 7018 7147 7224
         7327 7404 7453 7466 7475 7561 7764 8064 8102 8222 8368 8530 8957 9101
         9322 9368 9619 9782 9832]
        '''
        print('USER 3')

        print('test ', URM_test[3])
        print('train ', URM_train[3])
        print('valid ', URM_validation[3])
        print('neg ', URM_test_negative[3])

        # During execution an HR > 1 had been observed. The reason is that it
        # was computed on the validation set, which for each user contains
        # more than one relevant item (not just one).
        # The final HR will be lower than or equal to 1 because it is
        # computed on the test set.
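        # Toy illustration with the dump above: is_relevant = [True, True,
        # True, False, False] gives is_relevant.sum() = 3, so an uncapped
        # per-user "hit count" can exceed 1, while the usual hit rate caps it
        # as int(is_relevant.any()) = 1.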

    popularity = get_popularity(URM_train)

    min_value = np.min(popularity)
    max_value = np.max(popularity)
    gap = max_value - min_value

    popularity = (popularity - min_value) / gap
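    # Min-max normalization: popularity is rescaled into [0, 1] via
    # (x - min) / (max - min).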

    print('Luciano > min:', min_value)
    print('Luciano > max:', max_value)
    print('Luciano > normalized popularity:', popularity)

    set_parameters(popularity=popularity,
                   loss_alpha=200,
                   loss_beta=0.02,
                   loss_scale=1,
                   loss_percentile=get_percentile(popularity, 45),
                   metrics_alpha=100,
                   metrics_beta=0.03,
                   metrics_gamma=5,
                   metrics_scale=1 / 15,
                   metrics_percentile=0.45,
                   new_loss=False)

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    # metric_to_optimize = "WEIGHTED_HIT_RATE"
    metric_to_optimize = "HIT_RATE"
    # metric_to_optimize = "CUSTOM_HIT_RATE"

    print('metric_to_optimize:', metric_to_optimize)

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test, URM_test_negative])

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[5])
    if not test_mode:
        evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                     URM_test_negative,
                                                     cutoff_list=[5, 10])
    else:
        evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                     URM_test_negative,
                                                     cutoff_list=[5])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:
            if not test_mode:
                runParameterSearch_Collaborative_partial(recommender_class)
            else:
                print('skipping', recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### CMN
    '''
    Parameters from original paper:
    {
      "batch_size": 128,
      "decay_rate": 0.9,
      "embed_size": 50,
      "filename": "data/pinterest.npz",
      "grad_clip": 5.0,
      "hops": 2,
      "item_count": "9916",
      "l2": 0.1,
      "learning_rate": 0.001,
      "logdir": "result/004/",
      "max_neighbors": 1586,
      "neg_count": 4,
      "optimizer": "rmsprop",
      "optimizer_params": "{'momentum': 0.9, 'decay': 0.9}",
      "pretrain": "pretrain/pinterest_e50.npz",
      "save_directory": "result/004/",
      "tol": 1e-05,
      "user_count": "55187"
    }
    '''

    try:

        temp_file_folder = output_folder_path + "{}_log/".format(
            ALGORITHM_NAME)

        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }

        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 128
            # CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50

        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            CMN_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=CMN_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=CMN_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=CMN_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[CMN_RecommenderWrapper],
        ICM_names_to_report_list=[],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)
    if not test_mode:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5, 10],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])
    else:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])