def read_data_split_and_search_MCRec(dataset_name):

    from Conferences.KDD.MCRec_our_interface.Movielens100K.Movielens100KReader import Movielens100KReader
    from Conferences.KDD.MCRec_our_interface.LastFM.LastFMReader import LastFMReader
    from Conferences.KDD.MCRec_our_interface.Yelp.YelpReader import YelpReader

    if dataset_name == "movielens100k":
        dataset = Movielens100KReader()

    elif dataset_name == "yelp":
        dataset = YelpReader()

    elif dataset_name == "lastfm":
        dataset = LastFMReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])
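    # Hedged sketch (illustration only, not the repo's implementation): the two
    # assertions above can be read as the following checks on scipy.sparse
    # matrices. The _sketch_* helpers are hypothetical names, never called here.
    def _sketch_assert_implicit(URM_list):
        for URM in URM_list:
            # implicit feedback: every stored interaction has value 1
            assert np.all(URM.data == 1.0), "URM contains non-binary values"

    def _sketch_assert_disjoint(URM_list):
        for i, URM_a in enumerate(URM_list):
            for URM_b in URM_list[i + 1:]:
                # the element-wise product is nonzero only on shared entries
                assert URM_a.multiply(URM_b).nnz == 0, "URM splits overlap"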

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    if dataset_name == "movielens100k":
        URM_train += URM_validation
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10],
                                                           exclude_seen=False)
    else:
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10])

    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=[10])
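    # EvaluatorNegativeItemSample ranks each user's held-out positives against a
    # fixed sample of negative items instead of the full catalog. A hedged
    # sketch of hit rate at a cutoff under this protocol (_sketch_hit_rate_at_k
    # is a hypothetical helper; `scores` maps item id -> predicted score):
    def _sketch_hit_rate_at_k(scores, positive_items, negative_items, k=10):
        candidates = list(positive_items) + list(negative_items)
        ranked = sorted(candidates, key=lambda item: -scores[item])
        return int(any(item in positive_items for item in ranked[:k]))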

    collaborative_algorithm_list = [
        Random, TopPop, UserKNNCFRecommender, ItemKNNCFRecommender,
        P3alphaRecommender, RP3betaRecommender, PureSVDRecommender
    ]

    metric_to_optimize = "PRECISION"

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        n_cases=35)

    # pool = PoolWithSubprocess(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)

    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Content Baselines

    ICM_dictionary = dataset.ICM_dict

    ICM_name_list = ICM_dictionary.keys()

    for ICM_name in ICM_name_list:

        try:

            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Content(
                ItemKNNCBFRecommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object.copy(),
                n_cases=35)

        except Exception as e:

            print("On CBF recommender for ICM {} Exception {}".format(
                ICM_name, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Hybrid

    for ICM_name in ICM_name_list:

        try:

            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Hybrid(
                ItemKNN_CFCBF_Hybrid_Recommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object,
                allow_weighting=True,
                n_cases=35)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### MCRec

    if dataset_name == "movielens100k":

        # Since the original data reader is used, the contents of URM_validation are seen items,
        # so the validation evaluator defined above was created with exclude_seen=False
        # evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[10], exclude_seen=False)

        MCRec_article_parameters = {
            "epochs": 100,
            "latent_dim": 128,
            "reg_latent": 0,
            "layers": [512, 256, 128, 64],
            "reg_layes": [0, 0, 0, 0],
            "learning_rate": 1e-3,
            "batch_size": 256,
            "num_negatives": 4,
        }

        MCRec_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }
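        # Hedged sketch (not the framework's actual code): the early-stopping
        # keys above typically drive a training loop of this shape.
        # `_sketch_early_stopping` is a hypothetical helper, never called here.
        def _sketch_early_stopping(train_one_epoch, evaluate, epochs,
                                   validation_every_n, lower_validations_allowed):
            best_value, worse_count = None, 0
            for epoch in range(epochs):
                train_one_epoch()
                if (epoch + 1) % validation_every_n == 0:
                    value = evaluate()  # e.g. PRECISION on evaluator_validation
                    if best_value is None or value > best_value:
                        best_value, worse_count = value, 0
                    else:
                        worse_count += 1
                        if worse_count >= lower_validations_allowed:
                            break  # the stop_on_validation behaviour
            return best_value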

        parameterSearch = SearchSingleCase(
            MCRecML100k_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=MCRec_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=MCRec_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=MCRecML100k_RecommenderWrapper.RECOMMENDER_NAME)

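    # np.ediff1d(indptr) gives the number of stored entries per CSR row, so this
    # counts the users that have at least one interaction in each split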
    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    ICM_names_to_report_list = ["ICM_genre"]

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper],
        ICM_names_to_report_list=ICM_names_to_report_list,
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(
        result_folder_path=output_folder_path,
        results_file_prefix_name=ALGORITHM_NAME,
        dataset_name=dataset_name,
        metrics_to_report_list=["PRECISION", "RECALL", "NDCG"],
        cutoffs_to_report_list=[10],
        ICM_names_to_report_list=ICM_names_to_report_list,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper])
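
# Hypothetical usage (assumes CONFERENCE_NAME, ALGORITHM_NAME and the helper
# imports are defined at module level, as in the original run scripts):
#
#     for dataset_name in ["movielens100k", "lastfm", "yelp"]:
#         read_data_split_and_search_MCRec(dataset_name)
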
def read_data_split_and_search_SpectralCF(dataset_name, cold_start=False,
                                          cold_items=None, isKNN_multiprocess=True, isKNN_tune=True,
                                          isSpectralCF_train_default=True, isSpectralCF_tune=True, print_results=True):


    if dataset_name == "movielens1m_original":
        assert(cold_start is not True)
        dataset = Movielens1MReader(type="original")

    elif dataset_name == "movielens1m_ours":
        dataset = Movielens1MReader(type="ours", cold_start=cold_start, cold_items=cold_items)

    elif dataset_name == "hetrec":
        assert (cold_start is not True)
        dataset = MovielensHetrec2011Reader()

    elif dataset_name == "amazon_instant_video":
        assert (cold_start is not True)
        dataset = AmazonInstantVideoReader()


    if not cold_start:
        output_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)
    else:
        output_folder_path = "result_experiments/{}/{}_cold_{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, cold_items, dataset_name)


    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])


    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")


    metric_to_optimize = "RECALL"

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    if not cold_start:
        cutoff_list_validation = [50]
        cutoff_list_test = [20, 30, 40, 50, 60, 70, 80, 90, 100]
    else:
        cutoff_list_validation = [20]
        cutoff_list_test = [20]

    evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=cutoff_list_test)
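    # Unlike the negative-item-sample protocol used elsewhere, EvaluatorHoldout
    # ranks the held-out interactions against the full item catalog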

    ################################################################################################
    ###### KNN CF

    if isKNN_tune:
        collaborative_algorithm_list = [
            Random,
            TopPop,
            UserKNNCFRecommender,
            ItemKNNCFRecommender,
            P3alphaRecommender,
            RP3betaRecommender,
        ]

        runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                           URM_train = URM_train,
                                                           metric_to_optimize = metric_to_optimize,
                                                           evaluator_validation_earlystopping = evaluator_validation,
                                                           evaluator_validation = evaluator_validation,
                                                           evaluator_test = evaluator_test,
                                                           output_folder_path = output_folder_path,
                                                           parallelizeKNN = False,
                                                           allow_weighting = True,
                                                           n_cases = 35)
        if isKNN_multiprocess:
            pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
            resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)

            pool.close()
            pool.join()

        else:
            for recommender_class in collaborative_algorithm_list:
                try:
                    runParameterSearch_Collaborative_partial(recommender_class)
                except Exception as e:
                    print("On recommender {} Exception {}".format(recommender_class, str(e)))
                    traceback.print_exc()



    ################################################################################################
    ###### SpectralCF

    if isSpectralCF_train_default:
        try:

            spectralCF_article_parameters = {
                "epochs": 1000,
                "batch_size": 1024,
                "embedding_size": 16,
                "decay": 0.001,
                "k": 3,
                "learning_rate": 1e-3,
            }

            spectralCF_earlystopping_parameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 20,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
            }

            parameterSearch = SearchSingleCase(SpectralCF_RecommenderWrapper,
                                               evaluator_validation=evaluator_validation,
                                               evaluator_test=evaluator_test)

            recommender_parameters = SearchInputRecommenderParameters(
                                                CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
                                                FIT_KEYWORD_ARGS = spectralCF_earlystopping_parameters)

            parameterSearch.search(recommender_parameters,
                                   fit_parameters_values = spectralCF_article_parameters,
                                   output_folder_path = output_folder_path,
                                   output_file_name_root = SpectralCF_RecommenderWrapper.RECOMMENDER_NAME + "_article_default")

        except Exception as e:

            print("On recommender {} Exception {}".format(SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()


    elif isSpectralCF_tune:

        try:

            spectralCF_earlystopping_parameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 20,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
                "epochs": 2000
            }

            runParameterSearch_SpectralCF(SpectralCF_RecommenderWrapper,
                                             URM_train = URM_train,
                                             earlystopping_parameters = spectralCF_earlystopping_parameters,
                                             metric_to_optimize = metric_to_optimize,
                                             evaluator_validation = evaluator_validation,
                                             evaluator_test = evaluator_test,
                                             output_folder_path = output_folder_path,
                                             n_cases = 35,
                                             output_file_name_root = SpectralCF_RecommenderWrapper.RECOMMENDER_NAME)



        except Exception as e:

            print("On recommender {} Exception {}".format(SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### print results

    if print_results:

        n_validation_users = np.sum(np.ediff1d(URM_validation.indptr)>=1)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)

        if not cold_start:
            results_file_root_name = ALGORITHM_NAME
        else:
            results_file_root_name = "{}_cold_{}".format(ALGORITHM_NAME, cold_items)



        print_time_statistics_latex_table(result_folder_path = output_folder_path,
                                          dataset_name = dataset_name,
                                          results_file_prefix_name = results_file_root_name,
                                          other_algorithm_list = [SpectralCF_RecommenderWrapper],
                                          n_validation_users = n_validation_users,
                                          n_test_users = n_test_users,
                                          n_decimals = 2)

        if cold_start:
            cutoffs_to_report_list = [20]
        else:
            cutoffs_to_report_list = [20, 40, 60, 80, 100]

        print_results_latex_table(result_folder_path = output_folder_path,
                                  results_file_prefix_name = results_file_root_name,
                                  dataset_name = dataset_name,
                                  metrics_to_report_list = ["RECALL", "MAP"],
                                  cutoffs_to_report_list = cutoffs_to_report_list,
                                  other_algorithm_list = [SpectralCF_RecommenderWrapper])
def read_data_split_and_search_CMN(dataset_name):

    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    if dataset_name == "citeulike":
        dataset = CiteULikeReader()

    elif dataset_name == "epinions":
        dataset = EpinionsReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)


    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()



    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)



    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    metric_to_optimize = "HIT_RATE"


    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative])


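    # Inferred from the branching below: the negative item lists shipped with
    # citeulike and pinterest may overlap some splits, so only weaker
    # disjointness checks can be asserted for those datasets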
    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])



    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")



    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[5])
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[5, 10])


    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = output_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       n_cases = 35)





    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()


    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(recommender_class, str(e)))
            traceback.print_exc()



    ################################################################################################
    ###### CMN




    try:


        temp_file_folder = output_folder_path + "{}_log/".format(ALGORITHM_NAME)

        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }

        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50



        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }


        parameterSearch = SearchSingleCase(CMN_RecommenderWrapper,
                                           evaluator_validation=evaluator_validation,
                                           evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
                                            CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
                                            FIT_KEYWORD_ARGS = CMN_earlystopping_parameters)

        parameterSearch.search(recommender_parameters,
                               fit_parameters_values=CMN_article_parameters,
                               output_folder_path = output_folder_path,
                               output_file_name_root = CMN_RecommenderWrapper.RECOMMENDER_NAME)




    except Exception as e:

        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper, str(e)))
        traceback.print_exc()





    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr)>=1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)


    print_time_statistics_latex_table(result_folder_path = output_folder_path,
                                      dataset_name = dataset_name,
                                      results_file_prefix_name = ALGORITHM_NAME,
                                      other_algorithm_list = [CMN_RecommenderWrapper],
                                      ICM_names_to_report_list = [],
                                      n_validation_users = n_validation_users,
                                      n_test_users = n_test_users,
                                      n_decimals = 2)


    print_results_latex_table(result_folder_path = output_folder_path,
                              results_file_prefix_name = ALGORITHM_NAME,
                              dataset_name = dataset_name,
                              metrics_to_report_list = ["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list = [5, 10],
                              ICM_names_to_report_list = [],
                              other_algorithm_list = [CMN_RecommenderWrapper])
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):
    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "delicious-hetrec2011":
        dataset = DeliciousHetrec2011Reader(result_folder_path)

    elif dataset_name == "delicious-hetrec2011-cold-users":
        dataset = DeliciousHetrec2011ColdUsersReader(result_folder_path)

    elif dataset_name == "delicious-hetrec2011-cold-items":
        dataset = DeliciousHetrec2011ColdItemsReader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011":
        dataset = LastFMHetrec2011Reader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011-cold-users":
        dataset = LastFMHetrec2011ColdUsersReader(result_folder_path)

    elif dataset_name == "lastfm-hetrec2011-cold-items":
        dataset = LastFMHetrec2011ColdItemsReader(result_folder_path)

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_negative = dataset.URM_DICT["URM_negative"].copy()
    UCM_train = dataset.UCM_DICT["UCM"].copy()
    ICM_train = dataset.ICM_DICT["ICM"].copy()

    if dataset_name == "delicious-hetrec2011" or dataset_name == "lastfm-hetrec2011":
        URM_train_last_test = URM_train + URM_validation

        # Ensure IMPLICIT data and disjoint test-train split
        assert_implicit_data([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    else:
        URM_train_last_test = URM_train

        # Ensure IMPLICIT data and disjoint test-train split
        assert_implicit_data([URM_train, URM_test])
        assert_disjoint_matrices([URM_train, URM_test])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    metric_to_optimize = "MAP"
    cutoff_list_validation = [5, 10, 20]
    cutoff_list_test = [5, 10, 20]

    n_cases = 50
    n_random_starts = 15

    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation, URM_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_negative,
                                                 cutoff_list=cutoff_list_test)

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:
        article_hyperparameters = {
            "pretrain_samples": 3,
            "pretrain_batch_size": 200,
            "pretrain_iterations": 5,
            "embed_len": 128,
            "topK": 10,
            "fliter_theta": 16,
            "aggre_theta": 64,
            "batch_size": 400,
            "samples": 3,
            "margin": 20,
            "epochs": 30,
            "iter_without_att": 5,
            "directed": False,
        }

        # Do not modify earlystopping
        earlystopping_hyperparameters = {
            "validation_every_n": 5,
            "stop_on_validation": False,
            "lower_validations_allowed": 5,
            "evaluator_object": evaluator_validation,
            "validation_metric": metric_to_optimize,
        }
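        # With stop_on_validation=False the framework presumably only tracks the
        # best validation epoch and never interrupts training early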

        # This is a simplified version of the tuning code reported below; it uses SearchSingleCase
        # You may use this for simpler testing
        # recommender_instance = HERSWrapper(URM_train, UCM_train, ICM_train)
        #
        # recommender_instance.fit(**article_hyperparameters,
        #                          **earlystopping_hyperparameters)
        #
        # evaluator_test.evaluateRecommender(recommender_instance)

        # Fit the DL model, select the optimal number of epochs and save the result
        parameterSearch = SearchSingleCase(
            HERSWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train, UCM_train, ICM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        if dataset_name == "delicious-hetrec2011" or dataset_name == "lastfm-hetrec2011":
            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train_last_test

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=recommender_input_args_last_test,
                fit_hyperparameters_values=article_hyperparameters,
                output_folder_path=result_folder_path,
                output_file_name_root=HERSWrapper.RECOMMENDER_NAME)
        else:
            parameterSearch.search(
                recommender_input_args,
                fit_hyperparameters_values=article_hyperparameters,
                output_folder_path=result_folder_path,
                output_file_name_root=HERSWrapper.RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      BASELINE ALGORITHMS - Nothing should be modified below this point
    ######

    if flag_baselines_tune:

        ################################################################################################
        ###### Collaborative Baselines

        collaborative_algorithm_list = [
            Random,
            TopPop,
            ItemKNNCFRecommender,
            PureSVDRecommender,
            SLIM_BPR_Cython,
        ]

        # Run hyperparameter tuning of the baselines
        # See if the results are reasonable and comparable to baselines reported in the paper
        runParameterSearch_Collaborative_partial = partial(
            runParameterSearch_Collaborative,
            URM_train=URM_train,
            URM_train_last_test=URM_train_last_test,
            metric_to_optimize=metric_to_optimize,
            evaluator_validation_earlystopping=evaluator_validation,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            parallelizeKNN=False,
            allow_weighting=True,
            n_cases=n_cases,
            n_random_starts=n_random_starts)

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Content Baselines

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Content(
                    ItemKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train_last_test,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On CBF recommender for ICM {} Exception {}".format(
                    ICM_name, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Hybrid

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:

                runParameterSearch_Hybrid(
                    ItemKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train_last_test,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:

                print("On recommender {} Exception {}".format(
                    ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:
        n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

        print_time_statistics_latex_table(
            result_folder_path=result_folder_path,
            dataset_name=dataset_name,
            algorithm_name=ALGORITHM_NAME,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list,
            n_validation_users=n_validation_users,
            n_test_users=n_test_users,
            n_decimals=2)

        print_results_latex_table(
            result_folder_path=result_folder_path,
            algorithm_name=ALGORITHM_NAME,
            file_name_suffix="article_metrics_",
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=cutoff_list_test,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list)

        print_results_latex_table(
            result_folder_path=result_folder_path,
            algorithm_name=ALGORITHM_NAME,
            file_name_suffix="all_metrics_",
            dataset_name=dataset_name,
            metrics_to_report_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_to_report_list=cutoff_list_validation,
            other_algorithm_list=[HERSWrapper],
            KNN_similarity_to_report_list=KNN_similarity_to_report_list)
def read_data_split_and_search_CollaborativeVAE(dataset_variant,
                                                train_interactions):

    from Conferences.KDD.CollaborativeVAE_our_interface.Citeulike.CiteulikeReader import CiteulikeReader

    dataset = CiteulikeReader(dataset_variant=dataset_variant,
                              train_interactions=train_interactions)

    output_folder_path = "result_experiments/{}/{}_citeulike_{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_variant, train_interactions)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    metric_to_optimize = "RECALL"

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[150])
    evaluator_test = EvaluatorHoldout(
        URM_test, cutoff_list=[50, 100, 150, 200, 250, 300])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Content Baselines

    ICM_title_abstract = dataset.ICM_title_abstract.copy()

    try:

        runParameterSearch_Content(ItemKNNCBFRecommender,
                                   URM_train=URM_train,
                                   metric_to_optimize=metric_to_optimize,
                                   evaluator_validation=evaluator_validation,
                                   evaluator_test=evaluator_test,
                                   output_folder_path=output_folder_path,
                                   parallelizeKNN=False,
                                   ICM_name="ICM_title_abstract",
                                   ICM_object=ICM_title_abstract,
                                   allow_weighting=False,
                                   n_cases=35)

    except Exception as e:

        print("On recommender {} Exception {}".format(ItemKNNCBFRecommender,
                                                      str(e)))
        traceback.print_exc()

    ################################################################################################
    ###### Hybrid

    try:

        runParameterSearch_Hybrid(ItemKNN_CFCBF_Hybrid_Recommender,
                                  URM_train=URM_train,
                                  metric_to_optimize=metric_to_optimize,
                                  evaluator_validation=evaluator_validation,
                                  evaluator_test=evaluator_test,
                                  output_folder_path=output_folder_path,
                                  parallelizeKNN=False,
                                  ICM_name="ICM_title_abstract",
                                  ICM_object=ICM_title_abstract,
                                  allow_weighting=True,
                                  n_cases=35)

    except Exception as e:

        print("On recommender {} Exception {}".format(
            ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
        traceback.print_exc()

    ################################################################################################
    ###### CollaborativeVAE

    try:

        temp_file_folder = output_folder_path + "{}_log/".format(
            ALGORITHM_NAME)

        cvae_recommender_article_parameters = {
            "epochs": 200,
            "learning_rate_vae": 1e-2,
            "learning_rate_cvae": 1e-3,
            "num_factors": 50,
            "dimensions_vae": [200, 100],
            "epochs_vae": [50, 50],
            "batch_size": 128,
            "lambda_u": 0.1,
            "lambda_v": 10,
            "lambda_r": 1,
            "a": 1,
            "b": 0.01,
            "M": 300,
            "temp_file_folder": temp_file_folder
        }

        cvae_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            CollaborativeVAE_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train, ICM_title_abstract],
            FIT_KEYWORD_ARGS=cvae_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=cvae_recommender_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=CollaborativeVAE_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(
            CollaborativeVAE_RecommenderWrapper, str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    ICM_names_to_report_list = ["ICM_title_abstract"]
    dataset_name = "{}_{}".format(dataset_variant, train_interactions)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[CollaborativeVAE_RecommenderWrapper],
        ICM_names_to_report_list=ICM_names_to_report_list,
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(
        result_folder_path=output_folder_path,
        results_file_prefix_name=ALGORITHM_NAME,
        dataset_name=dataset_name,
        metrics_to_report_list=["RECALL"],
        cutoffs_to_report_list=[50, 100, 150, 200, 250, 300],
        ICM_names_to_report_list=ICM_names_to_report_list,
        other_algorithm_list=[CollaborativeVAE_RecommenderWrapper])
def read_data_split_and_search_NeuCF(dataset_name):
    from Conferences.WWW.NeuMF_our_interface.Movielens1M.Movielens1MReader import Movielens1MReader
    from Conferences.WWW.NeuMF_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader

    if dataset_name == "movielens1m":
        dataset = Movielens1MReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random, TopPop, UserKNNCFRecommender, ItemKNNCFRecommender,
        P3alphaRecommender, RP3betaRecommender, SLIMElasticNetRecommender
    ]

    metric_to_optimize = "HIT_RATE"

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
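    # Cutoffs 1..10 reproduce the leave-one-out evaluation of the NeuMF paper,
    # which reports HR@K and NDCG@K for every K up to 10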

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### NeuMF

    try:

        if dataset_name == "movielens1m":
            num_factors = 64
        elif dataset_name == "pinterest":
            num_factors = 16

        neuMF_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "epochs_mlp": 100,
            "batch_size": 256,
            "num_factors": num_factors,
            "layers": [num_factors * 4, num_factors * 2, num_factors],
            "reg_mf": 0.0,
            "reg_layers": [0, 0, 0],
            "num_negatives": 4,
            "learning_rate": 1e-3,
            "learning_rate_pretrain": 1e-3,
            "learner": "sgd",
            "learner_pretrain": "adam",
            "pretrain": True
        }

        neuMF_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            NeuMF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=neuMF_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=neuMF_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=NeuMF_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(NeuMF_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[NeuMF_RecommenderWrapper],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(result_folder_path=output_folder_path,
                              results_file_prefix_name=ALGORITHM_NAME,
                              dataset_name=dataset_name,
                              metrics_to_report_list=["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list=[1, 5, 10],
                              other_algorithm_list=[NeuMF_RecommenderWrapper])
def read_data_split_and_search_MultiVAE(dataset_name):

    from Conferences.WWW.MultiVAE_our_interface.Movielens20M.Movielens20MReader import Movielens20MReader
    from Conferences.WWW.MultiVAE_our_interface.NetflixPrize.NetflixPrizeReader import NetflixPrizeReader

    split_type = "cold_user"

    if dataset_name == "movielens20m":
        dataset = Movielens20MReader(split_type=split_type)

    elif dataset_name == "netflixPrize":
        dataset = NetflixPrizeReader()

    output_folder_path = "result_experiments/{}/{}_{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name, split_type)

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    metric_to_optimize = "NDCG"

    if split_type == "cold_user":

        collaborative_algorithm_list = [
            Random,
            TopPop,
            ItemKNNCFRecommender,
            P3alphaRecommender,
            RP3betaRecommender,
        ]

        URM_train = dataset.URM_train.copy()
        URM_train_all = dataset.URM_train_all.copy()
        URM_validation = dataset.URM_validation.copy()
        URM_test = dataset.URM_test.copy()

        # Ensure IMPLICIT data and DISJOINT sets
        assert_implicit_data(
            [URM_train, URM_train_all, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train_all, URM_validation, URM_test])

        from Base.Evaluation.Evaluator import EvaluatorHoldout

        evaluator_validation = EvaluatorHoldout(URM_validation,
                                                cutoff_list=[100])
        evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[20, 50, 100])

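        # Hedged reading: the wrapper lets the evaluators build each cold user's
        # profile from URM_train_all (train plus fold-in interactions) before
        # ranking, as a cold-user split requires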
        evaluator_validation = EvaluatorUserSubsetWrapper(
            evaluator_validation, URM_train_all)
        evaluator_test = EvaluatorUserSubsetWrapper(evaluator_test,
                                                    URM_train_all)

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:

            runParameterSearch_Collaborative_partial(recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### MultiVAE

    try:

        output_root_path_MultiVAE = output_folder_path + "{}_log/".format(
            ALGORITHM_NAME)

        if dataset_name == "movielens20m":
            epochs = 100

        elif dataset_name == "netflixPrize":
            epochs = 200

        multiVAE_article_parameters = {
            "epochs": epochs,
            "batch_size": 500,
            "total_anneal_steps": 200000,
            "p_dims": None,
        }

        multiVAE_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize,
            "temp_file_folder": output_root_path_MultiVAE
        }

        parameterSearch = SearchSingleCase(
            MultiVAE_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=multiVAE_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=multiVAE_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=MultiVAE_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(
            MultiVAE_RecommenderWrapper, str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[MultiVAE_RecommenderWrapper],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(
        result_folder_path=output_folder_path,
        results_file_prefix_name=ALGORITHM_NAME,
        dataset_name=dataset_name,
        metrics_to_report_list=["RECALL", "NDCG"],
        cutoffs_to_report_list=[20, 50, 100],
        other_algorithm_list=[MultiVAE_RecommenderWrapper])
def read_data_split_and_search_CMN(dataset_name):
    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    if dataset_name == "citeulike":
        dataset = CiteULikeReader()

    elif dataset_name == "epinions":
        dataset = EpinionsReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    test_mode = False
    limit = False
    if limit:
        p = 700
        URM_train = URM_train[:p, :]
        URM_validation = URM_validation[:p, :]
        URM_test = URM_test[:p, :]
        URM_test_negative = URM_test_negative[:p, :]
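        # Row slicing a CSR matrix keeps the full item dimension, so the
        # matrices above are restricted to the first p users only.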
        '''
        Example evaluator debug output recorded for user 3 with the limit enabled:
        user: 3
        is_relevant_current_cutoff: [ True  True  True False False]
        recommended_items_current_cutoff: [  65   86   68 3671 1341]
        Warning! is_relevant_current_cutoff.sum()>1: 3
        relevant_items: [65 68 81 86]
        relevant_items_rating: [1. 1. 1. 1.]
        items_to_compute: 
        [  42   62   65   68   81   86  148  218  559  662  776  792 1164 1341
         1418 1491 1593 1603 1617 1697 2140 2251 2446 2517 2566 2643 2719 2769
         2771 3081 3133 3161 3188 3268 3409 3666 3671 3845 3864 3897 3984 4272
         4327 4329 4431 4519 4565 4568 4718 4812 4915 5096 5128 5137 5141 5184
         5217 5241 5371 5394 5415 5492 5521 5775 5798 5830 5831 5931 6005 6281
         6375 6558 6638 6644 6661 6705 6881 6898 6939 6970 7010 7018 7147 7224
         7327 7404 7453 7466 7475 7561 7764 8064 8102 8222 8368 8530 8957 9101
         9322 9368 9619 9782 9832]
        '''
        print('USER 3')

        print('test ', URM_test[3])
        print('train ', URM_train[3])
        print('valid ', URM_validation[3])
        print('neg ', URM_test_negative[3])

        # During execution an HR > 1 had been observed. The reason is that it was
        # computed on the validation set, which contains more than one preferred
        # item per user (not just one).
        # The final HR will be <= 1 because it is computed on the test set.

    popularity = get_popularity(URM_train)

    min_value = np.min(popularity)
    max_value = np.max(popularity)
    gap = max_value - min_value

    popularity = (popularity - min_value) / gap

    print('Luciano > min:', min_value)
    print('Luciano > max:', max_value)
    print('Luciano > normalized popularity:', popularity)
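    # Sanity check: after min-max scaling the popularity values must lie in [0, 1]
    # (this assumes gap > 0, i.e. not every item has the same popularity).
    assert 0.0 <= popularity.min() and popularity.max() <= 1.0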

    set_parameters(popularity=popularity,
                   loss_alpha=200,
                   loss_beta=0.02,
                   loss_scale=1,
                   loss_percentile=get_percentile(popularity, 45),
                   metrics_alpha=100,
                   metrics_beta=0.03,
                   metrics_gamma=5,
                   metrics_scale=1 / 15,
                   metrics_percentile=0.45,
                   new_loss=False)
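    # Minimal sketch (assumption, for reference only) of the two helpers used
    # above; the actual implementations live in the project's own modules:
    #
    #     def get_popularity(URM):
    #         # interactions per item = column-wise sum of the URM
    #         return np.asarray(URM.sum(axis=0)).ravel()
    #
    #     def get_percentile(values, k):
    #         # k-th percentile of the popularity distribution
    #         return np.percentile(values, k)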

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    # metric_to_optimize = "WEIGHTED_HIT_RATE"
    metric_to_optimize = "HIT_RATE"
    # metric_to_optimize = "CUSTOM_HIT_RATE"

    print('metric_to_optimize:', metric_to_optimize)

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test, URM_test_negative])
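    # citeulike: negatives are only guaranteed disjoint from the test set;
    # pinterest: negatives are only guaranteed disjoint from train and validation;
    # all other datasets: all four matrices must be mutually disjoint.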

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"], output_folder_path +
                         algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[5])
    if not test_mode:
        evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                     URM_test_negative,
                                                     cutoff_list=[5, 10])
    else:
        evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                     URM_test_negative,
                                                     cutoff_list=[5])
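    # Validation only needs the cutoff being optimized; the test evaluator also
    # computes cutoff 10, which is reported in the result tables further below.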

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:
            if not test_mode:
                runParameterSearch_Collaborative_partial(recommender_class)
            else:
                print('skipping', recommender_class)

        except Exception as e:

            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### CMN
    '''
    Parameters from original paper:
    {
      "batch_size": 128,
      "decay_rate": 0.9,
      "embed_size": 50,
      "filename": "data/pinterest.npz",
      "grad_clip": 5.0,
      "hops": 2,
      "item_count": "9916",
      "l2": 0.1,
      "learning_rate": 0.001,
      "logdir": "result/004/",
      "max_neighbors": 1586,
      "neg_count": 4,
      "optimizer": "rmsprop",
      "optimizer_params": "{'momentum': 0.9, 'decay': 0.9}",
      "pretrain": "pretrain/pinterest_e50.npz",
      "save_directory": "result/004/",
      "tol": 1e-05,
      "user_count": "55187"
    }
    '''

    try:

        temp_file_folder = output_folder_path + "{}_log/".format(
            ALGORITHM_NAME)

        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }
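        # Note: the parameter dump above (for Pinterest) lists "hops": 2,
        # whereas hops=3 is used here.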

        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 128
            # CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50

        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            CMN_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=CMN_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=CMN_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=CMN_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[CMN_RecommenderWrapper],
        ICM_names_to_report_list=[],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)
    if not test_mode:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5, 10],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])
    else:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])
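

# Usage sketch (hypothetical, not part of the original script): drive the CMN
# experiment over the three datasets handled above. The dataset names come from
# the branches of read_data_split_and_search_CMN; nothing else is assumed.
if __name__ == '__main__':

    for dataset_name in ["citeulike", "epinions", "pinterest"]:
        read_data_split_and_search_CMN(dataset_name)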