def read_data_split_and_search(dataset_name, flag_baselines_tune=False, flag_DL_article_default=False, flag_DL_tune=False, flag_print_results=False):
    """
    Load the MCRec data split and, depending on the flags: tune the baseline
    recommenders, fit the MCRec wrapper with the hyperparameters of the
    original article, and/or print the collected results as latex tables.

    :param dataset_name: dataset to load; only "movielens100k" is handled here
    :param flag_baselines_tune: if True, run hyperparameter search for the
        collaborative, content-based and hybrid baselines
    :param flag_DL_article_default: if True, fit MCRec with the article's
        default hyperparameters (movielens100k only)
    :param flag_DL_tune: accepted for interface uniformity with sibling
        functions; not read in this function's visible body
    :param flag_print_results: if True, generate latex result tables and
        time statistics from the saved result files
    """

    # Local import keeps the heavyweight dataset reader out of module import time
    from Conferences.KDD.MCRec_our_interface.Movielens100K.Movielens100KReader import Movielens100KReader

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "movielens100k":
        dataset = Movielens100KReader(result_folder_path)
    # NOTE(review): no else branch — an unsupported dataset_name fails with
    # NameError on the next line instead of a clear error message

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    # Popularity-bias diagnostics comparing training (train+validation) and test data
    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         result_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               result_folder_path + algorithm_dataset_string + "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample
    # Evaluation ranks each positive item against the fixed negative item sample
    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[10])

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "PRECISION"
    n_cases = 50            # hyperparameter configurations explored per algorithm
    n_random_starts = 15    # random configurations before the model-based search

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                # Best-effort: a failing baseline must not abort the whole run
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Content Baselines

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:
                runParameterSearch_Content(
                    ItemKNNCBFRecommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:
                print("On CBF recommender for ICM {} Exception {}".format(
                    ICM_name, str(e)))
                traceback.print_exc()

        ################################################################################################
        ###### Hybrid

        for ICM_name, ICM_object in dataset.ICM_DICT.items():

            try:
                runParameterSearch_Hybrid(
                    ItemKNN_CFCBF_Hybrid_Recommender,
                    URM_train=URM_train,
                    URM_train_last_test=URM_train + URM_validation,
                    metric_to_optimize=metric_to_optimize,
                    evaluator_validation=evaluator_validation,
                    evaluator_test=evaluator_test,
                    output_folder_path=result_folder_path,
                    parallelizeKNN=False,
                    allow_weighting=True,
                    resume_from_saved=True,
                    ICM_name=ICM_name,
                    ICM_object=ICM_object.copy(),
                    n_cases=n_cases,
                    n_random_starts=n_random_starts)

            except Exception as e:
                print("On recommender {} Exception {}".format(
                    ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        if dataset_name == "movielens100k":

            """
            The code provided by the original authors of MCRec can be used only for the original data.
            Here I am passing to the Wrapper the URM_train matrix that is only required for its shape,
            the train will be done using the preprocessed data the original authors provided
            """

            from Conferences.KDD.MCRec_github.code.Dataset import Dataset
            original_dataset_reader = Dataset(
                'Conferences/KDD/MCRec_github/data/' + 'ml-100k')

            MCRec_article_hyperparameters = {
                "epochs": 200,
                "latent_dim": 128,
                "reg_latent": 0,
                "layers": [512, 256, 128, 64],
                # NOTE(review): "reg_layes" looks like a typo of "reg_layers",
                # but it must match the key the wrapper's fit expects — verify
                # against the wrapper before renaming
                "reg_layes": [0, 0, 0, 0],
                "learning_rate": 1e-3,
                "batch_size": 256,
                "num_negatives": 4,
            }

            MCRec_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "evaluator_object": evaluator_validation,
                "lower_validations_allowed": 5,
                "validation_metric": metric_to_optimize
            }

            # SearchSingleCase fits exactly one configuration (the article's defaults)
            parameterSearch = SearchSingleCase(
                MCRecML100k_RecommenderWrapper,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                CONSTRUCTOR_POSITIONAL_ARGS=[
                    URM_train, original_dataset_reader
                ],
                FIT_KEYWORD_ARGS=MCRec_earlystopping_hyperparameters)

            # Final model is retrained on train+validation for the last test evaluation
            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train + URM_validation

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=MCRec_article_hyperparameters,
                output_folder_path=result_folder_path,
                resume_from_saved=True,
                output_file_name_root=MCRecML100k_RecommenderWrapper.
                RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        # Users with at least one test interaction (the evaluator skips the rest)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        ICM_names_to_report_list = list(dataset.ICM_DICT.keys())

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[MCRecML100k_RecommenderWrapper],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=ICM_names_to_report_list,
            UCM_names_list=None)

        # Table restricted to the metrics reported in the original article
        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["PRECISION", "RECALL", "NDCG"],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        # Table with the full set of accuracy and beyond-accuracy metrics
        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
def read_data_split_and_search_CMN(dataset_name):
    """
    Load the CMN data split for the given dataset, tune a set of collaborative
    baselines, fit the CMN wrapper with the article's hyperparameters, and
    print the results as latex tables.

    :param dataset_name: one of "citeulike", "epinions", "pinterest"
    """

    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    if dataset_name == "citeulike":
        dataset = CiteULikeReader()

    elif dataset_name == "epinions":
        dataset = EpinionsReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()
    # NOTE(review): an unrecognized dataset_name leaves `dataset` unbound and
    # fails with NameError below

    output_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    metric_to_optimize = "HIT_RATE"

    # Ensure IMPLICIT data and DISJOINT sets.
    assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative])

    # The negative item sample overlaps the split differently per dataset,
    # hence the dataset-specific disjointness checks below
    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample
    # Validation uses cutoff 5 only; test reports both 5 and 10
    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[5])
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[5, 10])

    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = output_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       n_cases = 35)

    # Multiprocessing variant kept for reference; the sequential loop below is used instead
    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:
            runParameterSearch_Collaborative_partial(recommender_class)
        except Exception as e:
            # Best-effort: a failing baseline must not abort the whole run
            print("On recommender {} Exception {}".format(recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### CMN

    try:

        temp_file_folder = output_folder_path + "{}_log/".format(ALGORITHM_NAME)

        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }

        # Batch size and embedding size are dataset-specific in the original article
        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50

        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        # SearchSingleCase fits exactly one configuration (the article's defaults)
        parameterSearch = SearchSingleCase(CMN_RecommenderWrapper,
                                           evaluator_validation=evaluator_validation,
                                           evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
            FIT_KEYWORD_ARGS = CMN_earlystopping_parameters)

        parameterSearch.search(recommender_parameters,
                               fit_parameters_values=CMN_article_parameters,
                               output_folder_path = output_folder_path,
                               output_file_name_root = CMN_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:

        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper, str(e)))
        traceback.print_exc()

    # Users with at least one interaction in the corresponding URM
    # (the evaluator automatically skips the others)
    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr)>=1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)

    print_time_statistics_latex_table(result_folder_path = output_folder_path,
                                      dataset_name = dataset_name,
                                      results_file_prefix_name = ALGORITHM_NAME,
                                      other_algorithm_list = [CMN_RecommenderWrapper],
                                      ICM_names_to_report_list = [],
                                      n_validation_users = n_validation_users,
                                      n_test_users = n_test_users,
                                      n_decimals = 2)

    print_results_latex_table(result_folder_path = output_folder_path,
                              results_file_prefix_name = ALGORITHM_NAME,
                              dataset_name = dataset_name,
                              metrics_to_report_list = ["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list = [5, 10],
                              ICM_names_to_report_list = [],
                              other_algorithm_list = [CMN_RecommenderWrapper])
def read_data_split_and_search_MCRec(dataset_name):
    """
    Load the MCRec data split for the given dataset, tune collaborative,
    content-based and hybrid baselines, fit the MCRec wrapper with the
    article's default hyperparameters (movielens100k only), and print the
    results as latex tables.

    :param dataset_name: one of "movielens100k", "yelp", "lastfm"
    """

    from Conferences.KDD.MCRec_our_interface.Movielens100K.Movielens100KReader import Movielens100KReader
    from Conferences.KDD.MCRec_our_interface.LastFM.LastFMReader import LastFMReader
    from Conferences.KDD.MCRec_our_interface.Yelp.YelpReader import YelpReader

    if dataset_name == "movielens100k":
        dataset = Movielens100KReader()

    elif dataset_name == "yelp":
        dataset = YelpReader()

    elif dataset_name == "lastfm":
        dataset = LastFMReader()
    # NOTE(review): an unrecognized dataset_name leaves `dataset` unbound and
    # fails with NameError below

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    if dataset_name == "movielens100k":
        # The original MCRec movielens100k data trains on train+validation,
        # so validation interactions are "seen" by the model and must NOT be
        # excluded at evaluation time (exclude_seen=False).
        # NOTE: this mutates URM_train for the rest of the function.
        URM_train += URM_validation
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10],
                                                           exclude_seen=False)
    else:
        evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                           URM_test_negative,
                                                           cutoff_list=[10])

    evaluator_test = EvaluatorNegativeItemSample(URM_test,
                                                 URM_test_negative,
                                                 cutoff_list=[10])

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender
    ]

    metric_to_optimize = "PRECISION"

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        n_cases=35)

    # Multiprocessing variant kept for reference; the sequential loop below is used instead
    # pool = PoolWithSubprocess(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:

        try:
            runParameterSearch_Collaborative_partial(recommender_class)
        except Exception as e:
            # Best-effort: a failing baseline must not abort the whole run
            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Content Baselines

    ICM_dictionary = dataset.ICM_dict
    ICM_name_list = ICM_dictionary.keys()

    for ICM_name in ICM_name_list:

        try:
            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Content(
                ItemKNNCBFRecommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object.copy(),
                n_cases=35)

        except Exception as e:
            print("On CBF recommender for ICM {} Exception {}".format(
                ICM_name, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### Hybrid

    for ICM_name in ICM_name_list:

        try:
            ICM_object = ICM_dictionary[ICM_name]

            runParameterSearch_Hybrid(
                ItemKNN_CFCBF_Hybrid_Recommender,
                URM_train=URM_train,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                parallelizeKNN=False,
                ICM_name=ICM_name,
                ICM_object=ICM_object,
                allow_weighting=True,
                n_cases=35)

        except Exception as e:
            print("On recommender {} Exception {}".format(
                ItemKNN_CFCBF_Hybrid_Recommender, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### MCRec

    if dataset_name == "movielens100k":

        # Since I am using the original Data reader, the content of URM_validation are seen items, therefore I have to set another
        # evaluator which does not exclude them
        # evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[10], exclude_seen=False)

        MCRec_article_parameters = {
            "epochs": 100,
            "latent_dim": 128,
            "reg_latent": 0,
            "layers": [512, 256, 128, 64],
            # NOTE(review): "reg_layes" looks like a typo of "reg_layers",
            # kept as-is since it must match the wrapper's expected key
            "reg_layes": [0, 0, 0, 0],
            "learning_rate": 1e-3,
            "batch_size": 256,
            "num_negatives": 4,
        }

        MCRec_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        # SearchSingleCase fits exactly one configuration (the article's defaults)
        parameterSearch = SearchSingleCase(
            MCRecML100k_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=MCRec_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=MCRec_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=MCRecML100k_RecommenderWrapper.
            RECOMMENDER_NAME)

    # Users with at least one interaction in the corresponding URM
    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    # Only the genre ICM is included in the report tables
    ICM_names_to_report_list = ["ICM_genre"]

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper],
        ICM_names_to_report_list=ICM_names_to_report_list,
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(
        result_folder_path=output_folder_path,
        results_file_prefix_name=ALGORITHM_NAME,
        dataset_name=dataset_name,
        metrics_to_report_list=["PRECISION", "RECALL", "NDCG"],
        cutoffs_to_report_list=[10],
        ICM_names_to_report_list=ICM_names_to_report_list,
        other_algorithm_list=[MCRecML100k_RecommenderWrapper])
def read_data_split_and_search_SpectralCF(dataset_name, cold_start=False, cold_items=None,
                                          isKNN_multiprocess=True, isKNN_tune=True,
                                          isSpectralCF_train_default=True, isSpectralCF_tune=True,
                                          print_results=True):
    """
    Load the SpectralCF data split for the given dataset and, depending on the
    flags: tune KNN-based collaborative baselines, fit SpectralCF with the
    article's default hyperparameters or tune it, and print result tables.

    :param dataset_name: one of "movielens1m_original", "movielens1m_ours",
        "hetrec", "amazon_instant_video"
    :param cold_start: enable the cold-start experiment; only supported by
        "movielens1m_ours" (asserted for the other datasets)
    :param cold_items: cold-item threshold forwarded to the reader; also used
        in output folder and file names for cold-start runs
    :param isKNN_multiprocess: tune KNN baselines via a multiprocessing pool
        instead of a sequential loop
    :param isKNN_tune: run the KNN baseline tuning at all
    :param isSpectralCF_train_default: fit SpectralCF with the article's
        default hyperparameters; takes precedence over isSpectralCF_tune
    :param isSpectralCF_tune: tune SpectralCF (only if the default-train
        branch is disabled)
    :param print_results: generate latex time-statistics and result tables
    """

    if dataset_name == "movielens1m_original":
        # Cold start experiments only exist for the "ours" split
        assert(cold_start is not True)
        dataset = Movielens1MReader(type="original")

    elif dataset_name == "movielens1m_ours":
        dataset = Movielens1MReader(type="ours", cold_start=cold_start, cold_items=cold_items)

    elif dataset_name == "hetrec":
        assert (cold_start is not True)
        dataset = MovielensHetrec2011Reader()

    elif dataset_name == "amazon_instant_video":
        assert (cold_start is not True)
        dataset = AmazonInstantVideoReader()

    # Cold-start runs get their own folder so they do not overwrite warm results
    if not cold_start:
        output_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)
    else:
        output_folder_path = "result_experiments/{}/{}_cold_{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, cold_items, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")

    metric_to_optimize = "RECALL"

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    # Warm runs report a wide cutoff range; cold-start runs only cutoff 20
    if not cold_start:
        cutoff_list_validation = [50]
        cutoff_list_test = [20, 30, 40, 50, 60, 70, 80, 90, 100]
    else:
        cutoff_list_validation = [20]
        cutoff_list_test = [20]

    evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=cutoff_list_test)

    ################################################################################################
    ###### KNN CF

    if isKNN_tune:

        collaborative_algorithm_list = [
            Random,
            TopPop,
            UserKNNCFRecommender,
            ItemKNNCFRecommender,
            P3alphaRecommender,
            RP3betaRecommender,
        ]

        runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                           URM_train = URM_train,
                                                           metric_to_optimize = metric_to_optimize,
                                                           evaluator_validation_earlystopping = evaluator_validation,
                                                           evaluator_validation = evaluator_validation,
                                                           evaluator_test = evaluator_test,
                                                           output_folder_path = output_folder_path,
                                                           parallelizeKNN = False,
                                                           allow_weighting = True,
                                                           n_cases = 35)

        if isKNN_multiprocess:
            # One tuning task per process; maxtasksperchild=1 releases memory between tasks
            pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
            resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)

            pool.close()
            pool.join()

        else:
            for recommender_class in collaborative_algorithm_list:
                try:
                    runParameterSearch_Collaborative_partial(recommender_class)
                except Exception as e:
                    # Best-effort: a failing baseline must not abort the whole run
                    print("On recommender {} Exception {}".format(recommender_class, str(e)))
                    traceback.print_exc()

    ################################################################################################
    ###### SpectralCF

    if isSpectralCF_train_default:

        try:

            spectralCF_article_parameters = {
                "epochs": 1000,
                "batch_size": 1024,
                "embedding_size": 16,
                "decay": 0.001,
                "k": 3,
                "learning_rate": 1e-3,
            }

            spectralCF_earlystopping_parameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 20,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
            }

            # SearchSingleCase fits exactly one configuration (the article's defaults)
            parameterSearch = SearchSingleCase(SpectralCF_RecommenderWrapper,
                                               evaluator_validation=evaluator_validation,
                                               evaluator_test=evaluator_test)

            recommender_parameters = SearchInputRecommenderParameters(
                CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
                FIT_KEYWORD_ARGS = spectralCF_earlystopping_parameters)

            parameterSearch.search(recommender_parameters,
                                   fit_parameters_values = spectralCF_article_parameters,
                                   output_folder_path = output_folder_path,
                                   output_file_name_root = SpectralCF_RecommenderWrapper.RECOMMENDER_NAME + "_article_default")

        except Exception as e:

            print("On recommender {} Exception {}".format(SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    elif isSpectralCF_tune:
        # Only reached when the default-train branch is disabled

        try:

            spectralCF_earlystopping_parameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 20,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
                "epochs": 2000
            }

            runParameterSearch_SpectralCF(SpectralCF_RecommenderWrapper,
                                          URM_train = URM_train,
                                          earlystopping_parameters = spectralCF_earlystopping_parameters,
                                          metric_to_optimize = metric_to_optimize,
                                          evaluator_validation = evaluator_validation,
                                          evaluator_test = evaluator_test,
                                          output_folder_path = output_folder_path,
                                          n_cases = 35,
                                          output_file_name_root = SpectralCF_RecommenderWrapper.RECOMMENDER_NAME)

        except Exception as e:

            print("On recommender {} Exception {}".format(SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### print results

    if print_results:

        # Users with at least one interaction in the corresponding URM
        n_validation_users = np.sum(np.ediff1d(URM_validation.indptr)>=1)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)

        # Cold-start result files carry the cold_items threshold in their prefix
        if not cold_start:
            results_file_root_name = ALGORITHM_NAME
        else:
            results_file_root_name = "{}_cold_{}".format(ALGORITHM_NAME, cold_items)

        print_time_statistics_latex_table(result_folder_path = output_folder_path,
                                          dataset_name = dataset_name,
                                          results_file_prefix_name = results_file_root_name,
                                          other_algorithm_list = [SpectralCF_RecommenderWrapper],
                                          n_validation_users = n_validation_users,
                                          n_test_users = n_test_users,
                                          n_decimals = 2)

        if cold_start:
            cutoffs_to_report_list = [20]
        else:
            cutoffs_to_report_list = [20, 40, 60, 80, 100]

        print_results_latex_table(result_folder_path = output_folder_path,
                                  results_file_prefix_name = results_file_root_name,
                                  dataset_name = dataset_name,
                                  metrics_to_report_list = ["RECALL", "MAP"],
                                  cutoffs_to_report_list = cutoffs_to_report_list,
                                  other_algorithm_list = [SpectralCF_RecommenderWrapper])
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False, flag_DL_tune=False,
                               flag_print_results=False):
    """
    Load the NeuMF data split and, depending on the flags: tune the baseline
    recommenders, fit the NeuMF wrapper with the article's default
    hyperparameters, and/or print the collected results as latex tables.

    :param dataset_name: one of "movielens1m", "pinterest"
    :param flag_baselines_tune: if True, run hyperparameter search for the
        collaborative baselines
    :param flag_DL_article_default: if True, fit NeuMF with the article's
        default hyperparameters
    :param flag_DL_tune: accepted for interface uniformity with sibling
        functions; not read in this function's visible body
    :param flag_print_results: if True, generate latex result tables and
        time statistics from the saved result files
    """

    # Local imports keep the heavyweight dataset readers out of module import time
    from Conferences.WWW.NeuMF_our_interface.Movielens1M.Movielens1MReader import Movielens1MReader
    from Conferences.WWW.NeuMF_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "movielens1m":
        dataset = Movielens1MReader(result_folder_path)

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader(result_folder_path)
    # NOTE(review): an unrecognized dataset_name leaves `dataset` unbound and
    # fails with NameError below

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT sets.
    # URM_test and URM_test_negative are checked separately against the rest
    # because they may share entries with each other.
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"],
                         result_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([
        URM_train + URM_validation + URM_test, URM_train + URM_validation,
        URM_test
    ], ["Full data", "Training data", "Test data"],
        result_folder_path + algorithm_dataset_string + "popularity_statistics")

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "HIT_RATE"
    n_cases = 50            # hyperparameter configurations explored per algorithm
    n_random_starts = 15    # random configurations before the model-based search

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample
    # Validation uses cutoff 10 only; test reports all cutoffs from 1 to 10
    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test, URM_test_negative, cutoff_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                # Best-effort: a failing baseline must not abort the whole run
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        try:

            # Embedding size is dataset-specific in the original article
            if dataset_name == "movielens1m":
                num_factors = 64
            elif dataset_name == "pinterest":
                num_factors = 16

            neuMF_article_hyperparameters = {
                "epochs": 100,
                "epochs_gmf": 100,
                "epochs_mlp": 100,
                "batch_size": 256,
                "num_factors": num_factors,
                # MLP tower narrows down to the embedding size
                "layers": [num_factors * 4, num_factors * 2, num_factors],
                "reg_mf": 0.0,
                "reg_layers": [0, 0, 0],
                "num_negatives": 4,
                "learning_rate": 1e-3,
                "learning_rate_pretrain": 1e-3,
                "learner": "sgd",
                "learner_pretrain": "adam",
                "pretrain": True
            }

            neuMF_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "evaluator_object": evaluator_validation,
                "lower_validations_allowed": 5,
                "validation_metric": metric_to_optimize
            }

            # SearchSingleCase fits exactly one configuration (the article's defaults)
            parameterSearch = SearchSingleCase(
                NeuMF_RecommenderWrapper,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
                FIT_KEYWORD_ARGS=neuMF_earlystopping_hyperparameters)

            # Final model is retrained on train+validation for the last test evaluation
            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train + URM_validation

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=neuMF_article_hyperparameters,
                output_folder_path=result_folder_path,
                resume_from_saved=True,
                output_file_name_root=NeuMF_RecommenderWrapper.RECOMMENDER_NAME
            )

        except Exception as e:

            print("On recommender {} Exception {}".format(
                NeuMF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        # Users with at least one test interaction (the evaluator skips the rest)
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[NeuMF_RecommenderWrapper],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        # Table restricted to the metrics reported in the original article
        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=[1, 5, 10],
            table_title=None,
            highlight_best=True)

        # Table with the full set of accuracy and beyond-accuracy metrics
        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):
    """Run the full experiment pipeline for one dataset: load the data split,
    optionally tune the collaborative baselines, fit the DELF wrappers with
    the article hyperparameters, and generate the LaTeX result tables.

    :param dataset_name: dataset identifier; supported values contain
        "amazon_music", "movielens1m_ours" or "movielens1m_original",
        optionally with a "_remove_cold_items" suffix.
    :param flag_baselines_tune: if True, run the hyperparameter search of the
        collaborative baseline recommenders.
    :param flag_DL_article_default: if True, fit DELF_MLP and DELF_EF with the
        hyperparameters reported in the original article.
    :param flag_DL_tune: accepted for signature consistency with the other
        experiment scripts; not referenced in this function body.
    :param flag_print_results: if True, generate the LaTeX result tables.
    """

    result_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    # Ensure both experiments use the same data: the dataset is always read
    # from the folder of the base dataset name, with the
    # "_remove_cold_items" suffix stripped.
    dataset_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME,
        dataset_name.replace("_remove_cold_items", ""))

    if not os.path.exists(dataset_folder_path):
        os.makedirs(dataset_folder_path)

    if 'amazon_music' in dataset_name:
        dataset = AmazonMusicReader(dataset_folder_path)

    elif 'movielens1m_ours' in dataset_name:
        dataset = Movielens1MReader(dataset_folder_path, type="ours")

    elif 'movielens1m_original' in dataset_name:
        dataset = Movielens1MReader(dataset_folder_path, type="original")

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure IMPLICIT data and DISJOINT matrices
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    cold_items_statistics(URM_train, URM_validation, URM_test,
                          URM_test_negative)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"],
                         result_folder_path + algorithm_dataset_string +
                         "popularity_plot")

    save_popularity_statistics([
        URM_train + URM_validation + URM_test, URM_train + URM_validation,
        URM_test
    ], ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    cutoff_list_validation = [10]
    cutoff_list_test = [5, 10, 20]

    # In the "_remove_cold_items" variant, items with no training
    # interactions are excluded from the evaluation.
    if "_remove_cold_items" in dataset_name:
        ignore_items_validation = get_cold_items(URM_train)
        ignore_items_test = get_cold_items(URM_train + URM_validation)
    else:
        ignore_items_validation = None
        ignore_items_test = None

    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation,
        URM_test_negative,
        cutoff_list=cutoff_list_validation,
        ignore_items=ignore_items_validation)
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=cutoff_list_test,
        ignore_items=ignore_items_test)

    # The Evaluator automatically skips users with no test interactions;
    # in this case we need the evaluation done with and without cold items
    # to be comparable. So we ensure the users that are included in the
    # evaluation are the same in both cases.
    evaluator_validation.users_to_evaluate = np.arange(URM_train.shape[0])
    evaluator_test.users_to_evaluate = np.arange(URM_train.shape[0])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        earlystopping_hyperparameters = {
            'validation_every_n': 5,
            'stop_on_validation': True,
            'lower_validations_allowed': 5,
            'evaluator_object': evaluator_validation,
            'validation_metric': metric_to_optimize,
        }

        num_factors = 64

        # Hyperparameters reported in the original article
        article_hyperparameters = {
            'epochs': 500,
            'learning_rate': 0.001,
            'batch_size': 256,
            'num_negatives': 4,
            'layers': (num_factors * 4, num_factors * 2, num_factors),
            'regularization_layers': (0, 0, 0),
            'learner': 'adam',
            'verbose': False,
        }

        # DELF MLP variant
        parameterSearch = SearchSingleCase(
            DELF_MLP_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        # The "last test" model is trained on train + validation data
        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=DELF_MLP_RecommenderWrapper.RECOMMENDER_NAME)

        # DELF EF variant, same hyperparameters as the MLP one
        parameterSearch = SearchSingleCase(
            DELF_EF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_input_args = SearchInputRecommenderArgs(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=earlystopping_hyperparameters)

        recommender_input_args_last_test = recommender_input_args.copy()
        recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
            0] = URM_train + URM_validation

        parameterSearch.search(
            recommender_input_args,
            recommender_input_args_last_test=recommender_input_args_last_test,
            fit_hyperparameters_values=article_hyperparameters,
            output_folder_path=result_folder_path,
            resume_from_saved=True,
            output_file_name_root=DELF_EF_RecommenderWrapper.RECOMMENDER_NAME)

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        # Count users with at least one test interaction
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME,
                                          dataset_name)

        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=[
                DELF_MLP_RecommenderWrapper, DELF_EF_RecommenderWrapper
            ],
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["HIT_RATE", "NDCG"],
            cutoffs_list=cutoff_list_test,
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1",
                "HIT_RATE", "ARHR_ALL_HITS", "NOVELTY",
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=[10],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
def read_data_split_and_search(dataset_name,
                               cold_start=False,
                               cold_items=None,
                               flag_baselines_tune=False,
                               flag_DL_article_default=False,
                               flag_DL_tune=False,
                               flag_print_results=False):
    """Run the SpectralCF experiment pipeline for one dataset: load the data
    split, optionally tune the collaborative baselines, fit or tune
    SpectralCF, and generate the LaTeX result tables.

    :param dataset_name: one of "movielens1m_original", "movielens1m_ours",
        "hetrec", "amazon_instant_video". Any other value prints a message
        and returns.
    :param cold_start: if True, use the cold-start split (only supported by
        "movielens1m_ours").
    :param cold_items: number of cold items for the cold-start split.
    :param flag_baselines_tune: if True, run the hyperparameter search of the
        collaborative baseline recommenders.
    :param flag_DL_article_default: if True, fit SpectralCF with the
        hyperparameters reported in the original article.
    :param flag_DL_tune: if True, run the hyperparameter search on SpectralCF.
    :param flag_print_results: if True, generate the LaTeX result tables.
    """

    if not cold_start:
        result_folder_path = "result_experiments/{}/{}_{}/".format(
            CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)
    else:
        result_folder_path = "result_experiments/{}/{}_cold_{}_{}/".format(
            CONFERENCE_NAME, ALGORITHM_NAME, cold_items, dataset_name)

    # Cold start is only supported by the "movielens1m_ours" reader
    if dataset_name == "movielens1m_original":
        assert (cold_start is not True)
        dataset = Movielens1MReader(result_folder_path, type="original")

    elif dataset_name == "movielens1m_ours":
        dataset = Movielens1MReader(result_folder_path,
                                    type="ours",
                                    cold_start=cold_start,
                                    cold_items=cold_items)

    elif dataset_name == "hetrec":
        assert (cold_start is not True)
        dataset = MovielensHetrec2011Reader(result_folder_path)

    elif dataset_name == "amazon_instant_video":
        assert (cold_start is not True)
        dataset = AmazonInstantVideoReader(result_folder_path)

    else:
        # Guard added for consistency with the other experiment scripts:
        # previously an unsupported name caused a NameError on "dataset".
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Train data", "Test data"],
                         result_folder_path + algorithm_dataset_string +
                         "popularity_plot")

    save_popularity_statistics([
        URM_train + URM_validation + URM_test, URM_train + URM_validation,
        URM_test
    ], ["URM_all", "URM train", "URM test"],
                               result_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    metric_to_optimize = "RECALL"
    n_cases = 50
    n_random_starts = 15

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    if not cold_start:
        cutoff_list_validation = [50]
        cutoff_list_test = [20, 30, 40, 50, 60, 70, 80, 90, 100]
    else:
        cutoff_list_validation = [20]
        cutoff_list_test = [20]

    evaluator_validation = EvaluatorHoldout(URM_validation,
                                            cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=cutoff_list_test)

    ################################################################################################
    ###### KNN CF

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        URM_train_last_test=URM_train + URM_validation,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=result_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(
                    recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        try:

            # Hyperparameters reported in the original article
            spectralCF_article_hyperparameters = {
                "epochs": 1000,
                "batch_size": 1024,
                "embedding_size": 16,
                "decay": 0.001,
                "k": 3,
                "learning_rate": 1e-3,
            }

            spectralCF_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 5,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
            }

            parameterSearch = SearchSingleCase(
                SpectralCF_RecommenderWrapper,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
                FIT_KEYWORD_ARGS=spectralCF_earlystopping_hyperparameters)

            # The "last test" model is trained on train + validation data
            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[
                0] = URM_train + URM_validation

            parameterSearch.search(
                recommender_input_args,
                recommender_input_args_last_test=
                recommender_input_args_last_test,
                fit_hyperparameters_values=spectralCF_article_hyperparameters,
                output_folder_path=result_folder_path,
                resume_from_saved=True,
                output_file_name_root=SpectralCF_RecommenderWrapper.
                RECOMMENDER_NAME + "_article_default")

        except Exception as e:
            print("On recommender {} Exception {}".format(
                SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    if flag_DL_tune:

        try:

            spectralCF_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "lower_validations_allowed": 5,
                "evaluator_object": evaluator_validation,
                "validation_metric": metric_to_optimize,
                "epochs_min": 400,
                "epochs": 2000
            }

            runParameterSearch_SpectralCF(
                SpectralCF_RecommenderWrapper,
                URM_train=URM_train,
                URM_train_last_test=URM_train + URM_validation,
                earlystopping_hyperparameters=
                spectralCF_earlystopping_hyperparameters,
                metric_to_optimize=metric_to_optimize,
                evaluator_validation=evaluator_validation,
                evaluator_test=evaluator_test,
                output_folder_path=result_folder_path,
                n_cases=n_cases,
                n_random_starts=n_random_starts,
                output_file_name_root=SpectralCF_RecommenderWrapper.
                RECOMMENDER_NAME)

        except Exception as e:
            print("On recommender {} Exception {}".format(
                SpectralCF_RecommenderWrapper, str(e)))
            traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        # Count users with at least one test interaction
        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

        file_name = "{}..//{}_{}_".format(
            result_folder_path,
            ALGORITHM_NAME if not cold_start else "{}_cold_{}".format(
                ALGORITHM_NAME, cold_items), dataset_name)

        if cold_start:
            cutoffs_to_report_list = [20]
        else:
            cutoffs_to_report_list = [20, 40, 60, 80, 100]

        # NOTE(review): other_algorithm_list is not defined in this function,
        # it is presumably a module-level name — verify at module scope.
        result_loader = ResultFolderLoader(
            result_folder_path,
            base_algorithm_list=None,
            other_algorithm_list=other_algorithm_list,
            KNN_similarity_list=KNN_similarity_to_report_list,
            ICM_names_list=None,
            UCM_names_list=None)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("article_metrics"),
            metrics_list=["RECALL", "MAP"],
            cutoffs_list=cutoffs_to_report_list,
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name +
            "{}_latex_results.txt".format("beyond_accuracy_metrics"),
            metrics_list=[
                "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL",
                "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"
            ],
            cutoffs_list=[50],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_results(
            file_name + "{}_latex_results.txt".format("all_metrics"),
            metrics_list=[
                "PRECISION", "RECALL", "MAP", "MRR", "NDCG", "F1", "HIT_RATE",
                "ARHR", "NOVELTY", "DIVERSITY_MEAN_INTER_LIST",
                "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI",
                "SHANNON_ENTROPY"
            ],
            cutoffs_list=[50],
            table_title=None,
            highlight_best=True)

        result_loader.generate_latex_time_statistics(
            file_name + "{}_latex_results.txt".format("time"),
            n_evaluation_users=n_test_users,
            table_title=None)
def read_data_split_and_search_NeuCF(dataset_name):
    """Run the NeuMF experiment pipeline for one dataset: load the data
    split, tune the collaborative baselines, fit NeuMF with the article
    hyperparameters, and print the LaTeX time/result tables.

    :param dataset_name: one of "movielens1m", "pinterest". Any other value
        prints a message and returns.
    """

    from Conferences.WWW.NeuMF_our_interface.Movielens1M.Movielens1MReader import Movielens1MReader
    from Conferences.WWW.NeuMF_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader

    if dataset_name == "movielens1m":
        dataset = Movielens1MReader()

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    else:
        # Guard added for consistency with the other experiment scripts:
        # previously an unsupported name caused a NameError on "dataset"
        # (and on "num_factors" further below).
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test])
    assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string +
                         "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string +
                               "popularity_statistics")

    collaborative_algorithm_list = [
        Random, TopPop, UserKNNCFRecommender, ItemKNNCFRecommender,
        P3alphaRecommender, RP3betaRecommender, SLIMElasticNetRecommender
    ]

    metric_to_optimize = "HIT_RATE"

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation,
                                                       URM_test_negative,
                                                       cutoff_list=[10])
    evaluator_test = EvaluatorNegativeItemSample(
        URM_test,
        URM_test_negative,
        cutoff_list=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    for recommender_class in collaborative_algorithm_list:
        try:
            runParameterSearch_Collaborative_partial(recommender_class)
        except Exception as e:
            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### NeuMF

    try:

        # Embedding size used in the original article for each dataset
        if dataset_name == "movielens1m":
            num_factors = 64
        elif dataset_name == "pinterest":
            num_factors = 16

        neuMF_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "epochs_mlp": 100,
            "batch_size": 256,
            "num_factors": num_factors,
            "layers": [num_factors * 4, num_factors * 2, num_factors],
            "reg_mf": 0.0,
            "reg_layers": [0, 0, 0],
            "num_negatives": 4,
            "learning_rate": 1e-3,
            "learning_rate_pretrain": 1e-3,
            "learner": "sgd",
            "learner_pretrain": "adam",
            "pretrain": True
        }

        neuMF_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        parameterSearch = SearchSingleCase(
            NeuMF_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=neuMF_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=neuMF_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=NeuMF_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:
        print("On recommender {} Exception {}".format(NeuMF_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    # Count users with at least one validation/test interaction
    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[NeuMF_RecommenderWrapper],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    print_results_latex_table(result_folder_path=output_folder_path,
                              results_file_prefix_name=ALGORITHM_NAME,
                              dataset_name=dataset_name,
                              metrics_to_report_list=["HIT_RATE", "NDCG"],
                              cutoffs_to_report_list=[1, 5, 10],
                              other_algorithm_list=[NeuMF_RecommenderWrapper])
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune = False,
                               flag_DL_article_default = False,
                               flag_DL_tune = False,
                               flag_print_results = False):
    """Run the DMF experiment pipeline for one dataset: load the data split,
    optionally tune the collaborative baselines, and generate the LaTeX
    result tables. The DMF fitting itself is commented out because its source
    code is not included in the repository (see NOTICE below).

    :param dataset_name: one of "amazon_music_original", "amazon_music_ours",
        "amazon_movie", "movielens100k", "movielens1m". Any other value
        prints a message and returns.
    :param flag_baselines_tune: if True, run the hyperparameter search of the
        collaborative baseline recommenders.
    :param flag_DL_article_default: if True, build the article
        hyperparameter dictionaries (the DMF runs themselves are commented
        out, see below).
    :param flag_DL_tune: accepted for signature consistency with the other
        experiment scripts; not referenced in this function body.
    :param flag_print_results: if True, generate the LaTeX result tables.
    """

    result_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == 'amazon_music_original':
        dataset = AmazonMusicReader(result_folder_path, original = True)

    elif dataset_name == 'amazon_music_ours':
        dataset = AmazonMusicReader(result_folder_path, original = False)

    elif dataset_name == 'amazon_movie':
        dataset = AmazonMovieReader(result_folder_path)

    elif dataset_name == 'movielens100k':
        dataset = Movielens100KReader(result_folder_path)

    elif dataset_name == 'movielens1m':
        dataset = Movielens1MReader(result_folder_path)

    else:
        print("Dataset name not supported, current is {}".format(dataset_name))
        return

    print ('Current dataset is: {}'.format(dataset_name))

    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()

    # Ensure DISJOINT sets. Do not ensure IMPLICIT data because the algorithm
    # needs explicit data
    assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])

    cold_items_statistics(URM_train, URM_validation, URM_test, URM_test_negative)

    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"],
                         result_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation + URM_test, URM_train + URM_validation, URM_test],
                               ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string + "popularity_statistics")

    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
    ]

    metric_to_optimize = "NDCG"
    n_cases = 50
    n_random_starts = 15

    cutoff_list_validation = [10]
    cutoff_list_test = [5, 10, 20]

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=cutoff_list_validation)
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=cutoff_list_test)

    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       URM_train_last_test = URM_train + URM_validation,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = result_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       resume_from_saved = True,
                                                       n_cases = n_cases,
                                                       n_random_starts = n_random_starts)

    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(recommender_class, str(e)))
                traceback.print_exc()

    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    """
    NOTICE: We did not upload the source code of DMF as it was not publicly available 
    and the original authors did not respond to our request to add it to this repository
    """

    if flag_DL_article_default:

        # Larger last layer for the Amazon Music variants, as in the article
        if dataset_name in ['amazon_music_original', 'amazon_music_ours']:
            last_layer_size = 128
        else:
            last_layer_size = 64

        article_hyperparameters = {'epochs': 300,
                                   'learning_rate': 0.0001,
                                   'batch_size': 256,
                                   'num_negatives': 7,    # As reported in the "Detailed implementation" section of the original paper
                                   'last_layer_size': last_layer_size,
                                   }

        # Retained for the commented-out DMF runs below
        earlystopping_hyperparameters = {'validation_every_n': 5,
                                         'stop_on_validation': True,
                                         'lower_validations_allowed': 5,
                                         'evaluator_object': evaluator_validation,
                                         'validation_metric': metric_to_optimize,
                                         }

        #
        # try:
        #
        #
        #     parameterSearch = SearchSingleCase(DMF_NCE_RecommenderWrapper,
        #                                        evaluator_validation=evaluator_validation,
        #                                        evaluator_test=evaluator_test)
        #
        #     recommender_input_args = SearchInputRecommenderArgs(
        #                                         CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
        #                                         FIT_KEYWORD_ARGS = earlystopping_hyperparameters)
        #
        #     recommender_input_args_last_test = recommender_input_args.copy()
        #     recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation
        #
        #     parameterSearch.search(recommender_input_args,
        #                            recommender_input_args_last_test = recommender_input_args_last_test,
        #                            fit_hyperparameters_values = article_hyperparameters,
        #                            output_folder_path = result_folder_path,
        #                            resume_from_saved = True,
        #                            output_file_name_root = DMF_NCE_RecommenderWrapper.RECOMMENDER_NAME)
        #
        #
        #
        # except Exception as e:
        #
        #     print("On recommender {} Exception {}".format(DMF_NCE_RecommenderWrapper, str(e)))
        #     traceback.print_exc()
        #
        #
        #
        # try:
        #
        #
        #     parameterSearch = SearchSingleCase(DMF_BCE_RecommenderWrapper,
        #                                        evaluator_validation=evaluator_validation,
        #                                        evaluator_test=evaluator_test)
        #
        #     recommender_input_args = SearchInputRecommenderArgs(
        #                                         CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
        #                                         FIT_KEYWORD_ARGS = earlystopping_hyperparameters)
        #
        #     recommender_input_args_last_test = recommender_input_args.copy()
        #     recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation
        #
        #     parameterSearch.search(recommender_input_args,
        #                            recommender_input_args_last_test = recommender_input_args_last_test,
        #                            fit_hyperparameters_values = article_hyperparameters,
        #                            output_folder_path = result_folder_path,
        #                            resume_from_saved = True,
        #                            output_file_name_root = DMF_BCE_RecommenderWrapper.RECOMMENDER_NAME)
        #
        #
        # except Exception as e:
        #
        #     print("On recommender {} Exception {}".format(DMF_BCE_RecommenderWrapper, str(e)))
        #     traceback.print_exc()

    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        # Count users with at least one test interaction
        n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME, dataset_name)

        result_loader = ResultFolderLoader(result_folder_path,
                                           base_algorithm_list = None,
                                           other_algorithm_list = [DMF_NCE_RecommenderWrapper, DMF_BCE_RecommenderWrapper],
                                           KNN_similarity_list = KNN_similarity_to_report_list,
                                           ICM_names_list = None,
                                           UCM_names_list = None)

        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("article_metrics"),
                                             metrics_list = ["HIT_RATE", "NDCG"],
                                             cutoffs_list = cutoff_list_validation,
                                             table_title = None,
                                             highlight_best = True)

        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("all_metrics"),
                                             metrics_list = ["PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1", "HIT_RATE", "ARHR_ALL_HITS",
                                                             "NOVELTY", "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"],
                                             cutoffs_list = [10],
                                             table_title = None,
                                             highlight_best = True)

        result_loader.generate_latex_time_statistics(file_name + "{}_latex_results.txt".format("time"),
                                                     n_evaluation_users=n_test_users,
                                                     table_title = None)
def read_data_split_and_search_CMN(dataset_name):
    """Run the full CMN reproducibility experiment for one dataset.

    Loads the pre-made train/validation/test/negative split for *dataset_name*
    ("citeulike", "epinions" or "pinterest"), tunes a list of collaborative
    baselines, then evaluates the CMN wrapper with the article's
    hyperparameters, and finally prints LaTeX result tables.

    Parameters
    ----------
    dataset_name : str
        One of "citeulike", "epinions", "pinterest". Any other value leaves
        ``dataset`` unbound and raises NameError below — no explicit guard.

    Side effects: creates ``result_experiments/...`` folders, writes search
    logs and result files, mutates global loss/metric parameters via
    ``set_parameters``, and prints extensively to stdout.
    """
    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader

    # Select the dataset reader; each reader builds/loads its own split.
    if dataset_name == "citeulike":
        dataset = CiteULikeReader()
    elif dataset_name == "epinions":
        dataset = EpinionsReader()
    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader()

    output_folder_path = "result_experiments/{}/{}_{}/".format(
        CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    # Work on copies so the reader's cached matrices are never mutated.
    URM_train = dataset.URM_train.copy()
    URM_validation = dataset.URM_validation.copy()
    URM_test = dataset.URM_test.copy()
    URM_test_negative = dataset.URM_test_negative.copy()

    # Debug switches: test_mode skips baseline tuning and shrinks the cutoff
    # list; limit truncates all matrices to the first p users for quick runs.
    test_mode = False
    limit = False

    if limit:
        p = 700
        URM_train = URM_train[:p, :]
        URM_validation = URM_validation[:p, :]
        URM_test = URM_test[:p, :]
        URM_test_negative = URM_test_negative[:p, :]

    # Sample evaluator trace kept as reference for the HR>1 investigation below.
    '''
    user: 3
    is_relevant_current_cutoff: [ True True True False False]
    recommended_items_current_cutoff: [ 65 86 68 3671 1341]
    Warning! is_relevant_current_cutoff.sum()>1: 3
    relevant_items: [65 68 81 86]
    relevant_items_rating: [1. 1. 1. 1.]
    items_to_compute: [ 42 62 65 68 81 86 148 218 559 662 776 792 1164 1341
    1418 1491 1593 1603 1617 1697 2140 2251 2446 2517 2566 2643 2719 2769
    2771 3081 3133 3161 3188 3268 3409 3666 3671 3845 3864 3897 3984 4272
    4327 4329 4431 4519 4565 4568 4718 4812 4915 5096 5128 5137 5141 5184
    5217 5241 5371 5394 5415 5492 5521 5775 5798 5830 5831 5931 6005 6281
    6375 6558 6638 6644 6661 6705 6881 6898 6939 6970 7010 7018 7147 7224
    7327 7404 7453 7466 7475 7561 7764 8064 8102 8222 8368 8530 8957 9101
    9322 9368 9619 9782 9832]
    '''

    print('USER 3')
    print('test ', URM_test[3])
    print('train ', URM_train[3])
    print('valid ', URM_validation[3])
    print('neg ', URM_test_negative[3])

    # During execution an HR > 1 had been observed. The reason is that it was
    # being computed on the validation set (which for each user contains more
    # than one preferred item, not just one).
    # In the end the HR will be <= 1 because it is computed on the test set.

    # Normalize item popularity to [0, 1] via min-max scaling.
    popularity = get_popularity(URM_train)

    min_value = np.min(popularity)
    max_value = np.max(popularity)
    gap = max_value - min_value
    popularity = (popularity - min_value) / gap

    print('Luciano > min:', min_value)
    print('Luciano > max:', max_value)
    print('Luciano > normalized popularity:', popularity)

    # Configure the popularity-aware loss/metric globals used elsewhere in the
    # project. NOTE(review): constants appear hand-tuned — no grounding here.
    set_parameters(
        popularity=popularity,
        loss_alpha=200,
        loss_beta=0.02,
        loss_scale=1,
        loss_percentile=get_percentile(popularity, 45),
        metrics_alpha=100,
        metrics_beta=0.03,
        metrics_gamma=5,
        metrics_scale=1 / 15,
        metrics_percentile=0.45,
        new_loss=False)

    # If directory does not exist, create
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    # Baselines to tune before the CMN comparison.
    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
    ]

    # metric_to_optimize = "WEIGHTED_HIT_RATE"
    metric_to_optimize = "HIT_RATE"
    # metric_to_optimize = "CUSTOM_HIT_RATE"

    print('metric_to_optimize:', metric_to_optimize)

    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data(
        [URM_train, URM_validation, URM_test, URM_test_negative])

    # Disjointness guarantees differ per dataset: for citeulike the negatives
    # may overlap train/validation; for pinterest they may overlap the test
    # set; only in the general case are all four matrices pairwise disjoint.
    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices(
            [URM_train, URM_validation, URM_test, URM_test_negative])

    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["URM train", "URM test"],
                         output_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation, URM_test],
                               ["URM train", "URM test"],
                               output_folder_path + algorithm_dataset_string + "popularity_statistics")

    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    # Validation always uses cutoff 5; the test evaluator also reports
    # cutoff 10 unless test_mode is on.
    evaluator_validation = EvaluatorNegativeItemSample(
        URM_validation, URM_test_negative, cutoff_list=[5])
    if not test_mode:
        evaluator_test = EvaluatorNegativeItemSample(
            URM_test, URM_test_negative, cutoff_list=[5, 10])
    else:
        evaluator_test = EvaluatorNegativeItemSample(
            URM_test, URM_test_negative, cutoff_list=[5])

    runParameterSearch_Collaborative_partial = partial(
        runParameterSearch_Collaborative,
        URM_train=URM_train,
        metric_to_optimize=metric_to_optimize,
        evaluator_validation_earlystopping=evaluator_validation,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test,
        output_folder_path=output_folder_path,
        parallelizeKNN=False,
        allow_weighting=True,
        n_cases=35)

    # pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    # resultList = pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
    #
    # pool.close()
    # pool.join()

    # Tune each baseline sequentially; a failure in one recommender is logged
    # and does not abort the rest of the experiment.
    for recommender_class in collaborative_algorithm_list:
        try:
            if not test_mode:
                runParameterSearch_Collaborative_partial(recommender_class)
            else:
                print('skipping', recommender_class)
        except Exception as e:
            print("On recommender {} Exception {}".format(
                recommender_class, str(e)))
            traceback.print_exc()

    ################################################################################################
    ###### CMN

    '''
    Parameters from original paper:
    {
      "batch_size": 128,
      "decay_rate": 0.9,
      "embed_size": 50,
      "filename": "data/pinterest.npz",
      "grad_clip": 5.0,
      "hops": 2,
      "item_count": "9916",
      "l2": 0.1,
      "learning_rate": 0.001,
      "logdir": "result/004/",
      "max_neighbors": 1586,
      "neg_count": 4,
      "optimizer": "rmsprop",
      "optimizer_params": "{'momentum': 0.9, 'decay': 0.9}",
      "pretrain": "pretrain/pinterest_e50.npz",
      "save_directory": "result/004/",
      "tol": 1e-05,
      "user_count": "55187"
    }
    '''

    try:
        temp_file_folder = output_folder_path + "{}_log/".format(
            ALGORITHM_NAME)

        # Article hyperparameters, shared by all datasets; the per-dataset
        # batch/embedding sizes are filled in below.
        CMN_article_parameters = {
            "epochs": 100,
            "epochs_gmf": 100,
            "hops": 3,
            "neg_samples": 4,
            "reg_l2_cmn": 1e-1,
            "reg_l2_gmf": 1e-4,
            "pretrain": True,
            "learning_rate": 1e-3,
            "verbose": False,
            "temp_file_folder": temp_file_folder
        }

        if dataset_name == "citeulike":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 50

        elif dataset_name == "epinions":
            CMN_article_parameters["batch_size"] = 128
            CMN_article_parameters["embed_size"] = 40

        elif dataset_name == "pinterest":
            CMN_article_parameters["batch_size"] = 128
            # CMN_article_parameters["batch_size"] = 256
            CMN_article_parameters["embed_size"] = 50

        # Early stopping: check validation every 5 epochs, stop after 5
        # consecutive non-improving validations on metric_to_optimize.
        CMN_earlystopping_parameters = {
            "validation_every_n": 5,
            "stop_on_validation": True,
            "evaluator_object": evaluator_validation,
            "lower_validations_allowed": 5,
            "validation_metric": metric_to_optimize
        }

        # Single evaluation with fixed hyperparameters (no search space).
        parameterSearch = SearchSingleCase(
            CMN_RecommenderWrapper,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        recommender_parameters = SearchInputRecommenderParameters(
            CONSTRUCTOR_POSITIONAL_ARGS=[URM_train],
            FIT_KEYWORD_ARGS=CMN_earlystopping_parameters)

        parameterSearch.search(
            recommender_parameters,
            fit_parameters_values=CMN_article_parameters,
            output_folder_path=output_folder_path,
            output_file_name_root=CMN_RecommenderWrapper.RECOMMENDER_NAME)

    except Exception as e:
        print("On recommender {} Exception {}".format(CMN_RecommenderWrapper,
                                                      str(e)))
        traceback.print_exc()

    # Count users that have at least one interaction (non-empty CSR rows).
    n_validation_users = np.sum(np.ediff1d(URM_validation.indptr) >= 1)
    n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)

    print_time_statistics_latex_table(
        result_folder_path=output_folder_path,
        dataset_name=dataset_name,
        results_file_prefix_name=ALGORITHM_NAME,
        other_algorithm_list=[CMN_RecommenderWrapper],
        ICM_names_to_report_list=[],
        n_validation_users=n_validation_users,
        n_test_users=n_test_users,
        n_decimals=2)

    # Report HIT_RATE and NDCG; test_mode restricts the cutoff list to [5].
    if not test_mode:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5, 10],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])
    else:
        print_results_latex_table(
            result_folder_path=output_folder_path,
            results_file_prefix_name=ALGORITHM_NAME,
            dataset_name=dataset_name,
            metrics_to_report_list=["HIT_RATE", "NDCG"],
            cutoffs_to_report_list=[5],
            ICM_names_to_report_list=[],
            other_algorithm_list=[CMN_RecommenderWrapper])