Beispiel #1
0
    def fit(self,
            threshold_sim=0.5,
            threshold_count=5,
            topK=50,
            shrink=100,
            similarity='cosine',
            normalize=True,
            feature_weighting="none",
            **similarity_args):
        """Fit a user-KNN model, augment the URM with virtual nearest
        neighbours, then refit a user-KNN model on the augmented matrix.

        The final similarity and the augmented URM replace ``self.W_sparse``
        and ``self.URM_train``.
        """
        knn_kwargs = dict(topK=topK,
                          shrink=shrink,
                          similarity=similarity,
                          normalize=normalize,
                          feature_weighting=feature_weighting,
                          **similarity_args)

        # First pass: user-user similarity on the original interactions.
        base_model = UserKNNCFRecommender(self.URM_train)
        base_model.fit(**knn_kwargs)

        # Augment the URM using the similarity just computed.
        augmented_URM = VirtualNearestNeighbor.newAugmentUMR(
            URM_train=self.URM_train,
            W_sparse=base_model.W_sparse,
            threshold_interactions=threshold_count,
            threshold_similarity=threshold_sim)

        # Second pass: refit with the same hyper-parameters on the
        # augmented matrix.
        refit_model = UserKNNCFRecommender(augmented_URM)
        refit_model.fit(**knn_kwargs)

        self.W_sparse = refit_model.W_sparse
        self.URM_train = augmented_URM
def trial_boost_user(data):
    """Blend a user-CF similarity with an age-based user similarity and
    evaluate the mixture for several alpha values very close to 1."""
    cf_rec = UserKNNCFRecommender(data.urm_train)
    age_rec = NewUserKNNAgeRecommender(data)
    blended = UserKNNCFRecommender(data.urm_train)

    cf_rec.fit(topK=500, shrink=1000, similarity='cosine', feature_weighting='TF-IDF')
    age_rec.set_similarity([1])
    age_rec.fit(topK=0)

    for alpha in (0.999990, 0.999995, 0.999999):
        print("alpha:" + str(alpha))
        # Convex combination of the two similarity matrices.
        blended.W_sparse = alpha * cf_rec.W_sparse + (1 - alpha) * age_rec.W_sparse
        total_evaluation(blended)
    def __init__(self, urm_train):
        """Fit the three base models of the hybrid (RP3beta, UserKNN-CF,
        ItemKNN-CF) on a CSR copy of the training URM."""
        super(HybridNorm3Recommender, self).__init__(urm_train)

        urm_train = check_matrix(urm_train.copy(), 'csr')
        self.num_users = urm_train.shape[0]

        graph_rec = RP3betaRecommender(urm_train)
        graph_rec.fit(topK=16,
                      alpha=0.03374950051351756,
                      beta=0.24087176329409027,
                      normalize_similarity=True)

        user_rec = UserKNNCFRecommender(urm_train)
        user_rec.fit(shrink=2, topK=600, normalize=True)

        item_rec = ItemKNNCFRecommender(urm_train)
        item_rec.fit(topK=5,
                     shrink=500,
                     feature_weighting='BM25',
                     similarity='tversky',
                     normalize=False,
                     tversky_alpha=0.0,
                     tversky_beta=1.0)

        self.recommender_1 = graph_rec
        self.recommender_2 = item_rec
        self.recommender_3 = user_rec
    def __init__(self, urm_train, eurm=False):
        """Fit the five base models of the hybrid; when ``eurm`` is True,
        also build seven row-L2-normalised score matrices (two loaded from
        disk, five computed by the fitted models)."""
        super(HybridNorm2Recommender, self).__init__(urm_train)
        self.data_folder = Path(__file__).parent.parent.absolute()
        self.eurm = eurm
        self.num_users = urm_train.shape[0]

        urm_train = check_matrix(urm_train.copy(), 'csr')

        item_cf = ItemKNNCFRecommender(urm_train)
        item_cf.fit(topK=5, shrink=500, feature_weighting='BM25', similarity='tversky', normalize=False, tversky_alpha=0.0, tversky_beta=1.0)

        user_cf = UserKNNCFRecommender(urm_train)
        user_cf.fit(shrink=2, topK=600, normalize=True)

        graph_rec = RP3betaRecommender(urm_train)
        graph_rec.fit(topK=16, alpha=0.03374950051351756, beta=0.24087176329409027, normalize_similarity=True)

        slim_rec = SLIM_BPR_Cython(urm_train)
        slim_rec.fit(lambda_i=0.0926694015, lambda_j=0.001697250, learning_rate=0.002391, epochs=65, topK=200)

        als_rec = ALSRecommender(urm_train)
        als_rec.fit(alpha=5, iterations=40, reg=0.3)

        self.recommender_2 = item_cf
        self.recommender_3 = user_cf
        self.recommender_4 = graph_rec
        self.recommender_5 = slim_rec
        self.recommender_6 = als_rec

        if self.eurm:
            all_users = np.arange(self.num_users)

            # Matrices 1 and 7 come precomputed from disk; the others are
            # produced by the fitted models above.
            # NOTE(review): recommender_6 exposes _compute_item_score rather
            # than _compute_item_matrix_score, mirroring the original code.
            raw_scores = [
                sps.load_npz(self.data_folder / 'Data/icm_sparse.npz'),
                self.recommender_2._compute_item_matrix_score(all_users),
                self.recommender_3._compute_item_matrix_score(all_users),
                self.recommender_4._compute_item_matrix_score(all_users),
                self.recommender_5._compute_item_matrix_score(all_users),
                self.recommender_6._compute_item_score(all_users),
                sps.load_npz(self.data_folder / 'Data/ucm_sparse.npz'),
            ]

            # Row-wise L2 normalisation makes the score scales comparable.
            for position, matrix in enumerate(raw_scores, start=1):
                setattr(self, 'score_matrix_{}'.format(position),
                        normalize(matrix, norm='l2', axis=1))
Beispiel #5
0
 def __init__(self, data: DataObject):
     """Fit four base models and wrap the first three in a
     Hybrid1CXAlphaRecommender targeted at one user segment."""
     super(Hybrid102AlphaRecommender, self).__init__(data.urm_train)
     self.data = data

     self.rec1 = UserKNNCFRecommender(data.urm_train)
     self.rec1.fit(topK=1000, shrink=4500, similarity="cosine", feature_weighting="TF-IDF")

     self.rec2 = ItemKNNCFRecommender(data.urm_train)
     self.rec2.fit(topK=2000, shrink=800, similarity="cosine", feature_weighting="TF-IDF")

     self.rec3 = SLIM_BPR_Cython(data.urm_train)
     self.rec3.fit(epochs=120, topK=800, lambda_i=0.1, lambda_j=0.1, learning_rate=0.0001)

     # NOTE(review): rec4 is fitted but not handed to the hybrid below —
     # this mirrors the original behaviour; confirm it is intentional.
     self.rec4 = RP3betaRecommender(data.urm_train)
     self.rec4.fit(topK=30, alpha=0.21, beta=0.25)

     # Segment of users this hybrid is tuned for.
     self.target_users = data.urm_train_users_by_type[2][1]
     self.hybrid_rec = Hybrid1CXAlphaRecommender(data,
                                                 recommenders=[self.rec1, self.rec2, self.rec3],
                                                 recommended_users=self.target_users,
                                                 max_cutoff=30)
Beispiel #6
0
    def __init__(self, urm_train):
        """Fit the six base models of the hybrid (HybridGen2, ItemKNN-CF,
        UserKNN-CF, RP3beta, SLIM-BPR, ALS) on a CSR copy of the URM."""
        super(HybridNorm1Recommender, self).__init__(urm_train)
        self.num_users = urm_train.shape[0]
        urm_train = check_matrix(urm_train.copy(), 'csr')

        hybrid_gen = HybridGen2Recommender(urm_train)
        hybrid_gen.fit()

        item_cf = ItemKNNCFRecommender(urm_train)
        item_cf.fit(topK=5, shrink=500,
                    feature_weighting='BM25', similarity='tversky',
                    normalize=False, tversky_alpha=0.0, tversky_beta=1.0)

        user_cf = UserKNNCFRecommender(urm_train)
        user_cf.fit(topK=697, shrink=1000,
                    feature_weighting='TF-IDF', similarity='tversky',
                    normalize=False, tversky_alpha=1.0, tversky_beta=1.0)

        graph_rec = RP3betaRecommender(urm_train)
        graph_rec.fit(topK=16, alpha=0.03374950051351756,
                      beta=0.24087176329409027, normalize_similarity=True)

        slim_rec = SLIM_BPR_Cython(urm_train)
        slim_rec.fit(lambda_i=0.0926694015, lambda_j=0.001697250,
                     learning_rate=0.002391, epochs=65, topK=200)

        als_rec = ALSRecommender(urm_train)
        als_rec.fit(alpha=5, iterations=40, reg=0.3)

        self.recommender_1 = hybrid_gen
        self.recommender_2 = item_cf
        self.recommender_3 = user_cf
        self.recommender_4 = graph_rec
        self.recommender_5 = slim_rec
        self.recommender_6 = als_rec
Beispiel #7
0
    def eval(self, *args):
        """Evaluate one hyper-parameter configuration across all datasets.

        ``args[0]`` is the optimizer's parameter tuple:
        (topK, shrink, similarity, normalize, feature_weighting).
        Fits one UserKNNCFRecommender per dataset in parallel, averages the
        per-type MAP, appends a row to ``self.filename_csv`` and returns the
        negated MAP of ``self.type_of_user`` (negated because the optimizer
        minimizes).
        """
        # Input parameters (single tuple supplied by the search framework).
        topK = args[0][0]
        shrink = args[0][1]
        similarity = args[0][2]
        normalize = args[0][3]
        feature_weighting = args[0][4]

        # Text used for csv file
        input_as_string = f"topK={topK} - shrink={shrink} - similarity={similarity} - normalize={normalize} - " \
                          f"feature_weighting={feature_weighting}"
        # NOTE(review): the label says "Item CF" but the recommenders built
        # below are UserKNNCFRecommender — confirm which name the CSV rows
        # should carry.
        recommender_name = "Item CF"

        # Creating the recommenders (parallel fit and evaluation)
        recs = [
            UserKNNCFRecommender(data.urm_train) for data in self.dataset_list
        ]
        pairs = zip(recs, self.dataset_list)
        results = Parallel(n_jobs=parallelism)(
            delayed(parallel_fit_and_eval_job)
            (rec, data, topK, shrink, similarity, normalize, feature_weighting)
            for rec, data in pairs)

        # Computing the average MAP (per user type, across datasets)
        map_per_type = np.array(results).mean(axis=0)

        # Storing the information on file; the context manager guarantees
        # the handle is flushed and closed even if the write fails.
        map_as_string = " ".join([str(x) + "," for x in map_per_type])
        with open(self.filename_csv, "a+") as f:
            f.write(f"{recommender_name}, {input_as_string}, {map_as_string}\n")

        # The MAP value that should be optimized
        optimized_map = map_per_type[self.type_of_user]

        # Printing stuffs
        current_time = time.time()
        print(
            f"run : {self.counter} - computed in {current_time - self.timer} seconds"
        )
        print(f"\tparameters : {input_as_string}")
        print(f"\tmap : {optimized_map}\n")
        self.counter += 1
        self.timer = current_time

        return -optimized_map
Beispiel #8
0
    def __init__(self,
                 URM_train,
                 ICM_train,
                 submission=False,
                 verbose=True,
                 seed=1205):
        """Prepare the three base recommenders of the linear hybrid with
        their tuned hyper-parameters (seed 1205). Nothing is fitted here;
        fitting happens later with the stored parameter dicts."""
        super(LinearHybrid002, self).__init__(URM_train, verbose=verbose)
        self.URM_train = URM_train
        self.ICM_train = ICM_train

        # Tuned with seed 1205.
        self.__rec1 = UserKNNCFRecommender(URM_train, verbose=False)
        self.__rec1_params = dict(topK=190,
                                  shrink=0,
                                  similarity='cosine',
                                  normalize=True)

        self.__rec2 = ItemKNNCFRecommender(URM_train, verbose=False)
        self.__rec2_params = dict(topK=100,
                                  shrink=1000,
                                  similarity='asymmetric',
                                  normalize=True,
                                  asymmetric_alpha=0.0)

        self.__rec3 = ItemKNNCBFRecommender(URM_train,
                                            ICM_train,
                                            verbose=False)
        self.__rec3_params = dict(topK=205,
                                  shrink=1000,
                                  similarity='cosine',
                                  normalize=True,
                                  feature_weighting='BM25')

        # Mixing coefficients, assigned at fit time.
        self.__a = self.__b = self.__c = None
        self.seed = seed
        self.__submission = submission
    def __init__(self,
                 URM_train,
                 ICM_train,
                 submission=False,
                 verbose=True,
                 seed=1205):
        """Prepare the three base recommenders of the linear hybrid
        (S-SLIM ElasticNet, ItemKNN-CBF, UserKNN-CF) with their tuned
        hyper-parameters. Nothing is fitted here."""
        super(LinearHybrid008, self).__init__(URM_train, verbose=verbose)
        self.URM_train = URM_train
        self.ICM_train = ICM_train

        self.__rec1 = SSLIMElasticNet(URM_train, ICM_train, verbose=False)
        self.__rec1_params = dict(beta=0.4849594591575789,
                                  topK=1000,
                                  l1_ratio=1e-05,
                                  alpha=0.001)

        self.__rec2 = ItemKNNCBFRecommender(URM_train,
                                            ICM_train,
                                            verbose=False)
        self.__rec2_params = dict(topK=65,
                                  shrink=0,
                                  similarity='dice',
                                  normalize=True)

        self.__rec3 = UserKNNCFRecommender(URM_train, verbose=False)
        self.__rec3_params = dict(topK=190,
                                  shrink=0,
                                  similarity='cosine',
                                  normalize=True)

        # Mixing coefficients, assigned at fit time.
        self.__a = self.__b = self.__c = None
        self.seed = seed
        self.__submission = submission
Beispiel #10
0
    def __init__(self, URM_train, ICM_all, verbose=True):
        """Map user-segment ranges to dedicated recommenders.

        Each segmentation entry is ``((start, end), recommender, fit_kwargs)``;
        ``end == -1`` marks the open-ended last range. Recommenders are only
        constructed here — fitting is tracked per range via ``__loaded``.
        """
        super(UserWiseHybrid001, self).__init__(URM_train, verbose=verbose)
        self.URM_train = URM_train
        self.ICM_all = ICM_all

        self.__recommender_segmentation = [
            ((0, 1), TopPop(URM_train), {}),
            ((1, 25), P3alphaRecommender(URM_train),
             {'topK': 729, 'alpha': 0.4104229220476686, 'normalize_similarity': False}),
            ((25, 50), RP3betaRecommender(URM_train),
             {'topK': 939, 'alpha': 0.6073516078011799, 'beta': 0.002238854541773972, 'normalize_similarity': False}),
            ((50, 100), UserKNNCFRecommender(URM_train),
             {'topK': 90, 'shrink': 77, 'similarity': 'cosine', 'normalize': True}),
            ((100, 200), RP3betaRecommender(URM_train),
             {'topK': 1000, 'alpha': 0.32110178834628456, 'beta': 0.0, 'normalize_similarity': True}),
            ((200, -1), SLIM_BPR_Cython(URM_train),
             {'topK': 120, 'epochs': 20, 'symmetric': True, 'sgd_mode': 'adam', 'lambda_i': 0.01, 'lambda_j': 1e-05, 'learning_rate': 0.0001}),
        ]

        # No segment has been fitted yet.
        self.__loaded = {f_range: False
                         for f_range, _, _ in self.__recommender_segmentation}
Beispiel #11
0
    URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage=0.90, seed=seed)

    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])

    sslim = SSLIMElasticNet(URM_train, ICM_all, verbose=False)
    sslim_params = {'beta': 0.4849594591575789, 'topK': 1000, 'l1_ratio': 1e-05, 'alpha': 0.001}
    try:
        sslim.load_model(f'stored_recommenders/seed_1205_S-SLIMElasticNet/', 'for_notebook_analysis')
        print(f"{sslim.RECOMMENDER_NAME} loaded.")
    except:
        print(f"Fitting {sslim.RECOMMENDER_NAME} ...")
        sslim.fit(**sslim_params)
        print(f"done.")
        sslim.save_model(f'stored_recommenders/seed_{str(seed)}_{sslim.RECOMMENDER_NAME}/', 'for_notebook_analysis')

    ucf = UserKNNCFRecommender(URM_train, verbose=False)
    ucf_params = {'topK': 190, 'shrink': 0, 'similarity': 'cosine', 'normalize': True}
    try:
        ucf.load_model(f'stored_recommenders/seed_{str(seed)}_{ucf.RECOMMENDER_NAME}/', 'for_notebook_analysis')
        print(f"{ucf.RECOMMENDER_NAME} loaded.")
    except:
        print(f"Fitting {ucf.RECOMMENDER_NAME} ...")
        ucf.fit(**ucf_params)
        print(f"done.")
        ucf.save_model(f'stored_recommenders/seed_{str(seed)}_{ucf.RECOMMENDER_NAME}/', 'for_notebook_analysis')

    icb = ItemKNNCBFRecommender(URM_train, ICM_all, verbose=False)

    icb_params = {'topK': 65, 'shrink': 0, 'similarity': 'dice', 'normalize': True}
    try:
        icb.load_model(f'stored_recommenders/seed_{str(seed)}_{icb.RECOMMENDER_NAME}/', 'for_notebook_analysis')
Beispiel #12
0
    def fit(self,
            topK=None,
            shrink=None,
            weights=None,
            pop=None,
            weights1=None,
            weights2=None,
            weights3=None,
            weights4=None,
            weights5=None,
            weights6=None,
            weights7=None,
            weights8=None,
            pop1=None,
            pop2=None,
            similarity='cosine',
            normalize=True,
            final_weights=None,
            final_weights1=None,
            final_weights2=None,
            old_similarity_matrix=None,
            epochs=1,
            top1=None,
            shrink1=None,
            force_compute_sim=False,
            weights_to_dweights=-1,
            **similarity_args):
        """Fit every recommender in ``self.recommender_list`` and merge
        their similarity matrices.

        Each recommender is fitted with the hyper-parameters matching its
        concrete class (taken from ``topK``/``shrink`` and from
        ``**similarity_args``). The resulting ``W_sparse`` matrices are
        weighted by ``self.weights`` and summed into two accumulators
        (shape 20635x20635 and 50446x50446); the merged recommenders are
        then removed from the list and replaced by one ItemKNNCFRecommender
        and one UserKNNCFRecommender carrying the pre-combined similarities.

        Per-recommender hyper-parameters (lambda_i/lambda_j, alphaP3,
        alphaRP3/betaRP, num_factors, ...) are indexed by per-class
        counters, so their lists must follow the order of
        ``self.recommender_list``.
        """

        # topK is None when testing a single recommender on a specific
        # interval: fall back to the scalar top1/shrink1 arguments.
        if topK is None:
            topK = [top1]
            shrink = [shrink1]

        # Assemble the weight list: either the explicit `weights` list or
        # the individual weights1..weights8 scalars (None entries dropped).
        if self.weights is None:
            if weights1 is not None:
                weights = [
                    weights1, weights2, weights3, weights4, weights5, weights6,
                    weights7, weights8
                ]
                weights = [x for x in weights if x is not None]
            self.weights = weights

        # Same pattern for the popularity thresholds pop1/pop2.
        if self.pop is None:
            if pop is None:
                pop = [pop1, pop2]
                pop = [x for x in pop if x is not None]
            self.pop = pop

        # Optionally store the current weights into the per-interval
        # weight dictionary at the requested key.
        if weights_to_dweights != -1:
            self.d_weights[weights_to_dweights] = self.weights

        assert self.weights is not None, "Weights Are None!"

        assert len(self.recommender_list) == len(
            self.weights
        ), "Weights: {} and recommender list: {} have different lenghts".format(
            len(self.weights), len(self.recommender_list))

        assert len(topK) == len(shrink) == len(self.recommender_list), "Knns, Shrinks and recommender list have " \
                                                                       "different lenghts "

        self.normalize = normalize
        self.topK = topK
        self.shrink = shrink

        self.gradients = [0] * self.recommender_number
        self.MAE = 0
        # Per-class counters index into the per-class hyper-parameter
        # lists inside similarity_args.
        p3counter = 0
        rp3bcounter = 0
        slim_counter = 0
        factorCounter = 0

        # Fit each recommender, dispatching on its concrete class to pass
        # the hyper-parameters it understands.
        # NOTE: the loop variable `shrink` shadows the `shrink` list
        # parameter from here on.
        for knn, shrink, recommender in zip(topK, shrink,
                                            self.recommender_list):
            if recommender.__class__ is SLIM_BPR_Cython:
                if "lambda_i" in list(similarity_args.keys()
                                      ):  # lambda i and j provided in args
                    # Scalars are wrapped into one-element lists so that
                    # slim_counter can index them (mutates similarity_args).
                    if type(similarity_args["lambda_i"]) is not list:
                        similarity_args["lambda_i"] = [
                            similarity_args["lambda_i"]
                        ]
                        similarity_args["lambda_j"] = [
                            similarity_args["lambda_j"]
                        ]
                    recommender.fit(
                        old_similarity_matrix=old_similarity_matrix,
                        epochs=epochs,
                        force_compute_sim=force_compute_sim,
                        topK=knn,
                        lambda_i=similarity_args["lambda_i"][slim_counter],
                        lambda_j=similarity_args["lambda_j"][slim_counter])
                else:
                    recommender.fit(
                        old_similarity_matrix=old_similarity_matrix,
                        epochs=epochs,
                        force_compute_sim=force_compute_sim,
                        topK=knn)
                slim_counter += 1

            elif recommender.__class__ in [
                    MatrixFactorization_BPR_Cython,
                    MatrixFactorization_FunkSVD_Cython,
                    MatrixFactorization_AsySVD_Cython
            ]:
                recommender.fit(epochs=epochs,
                                force_compute_sim=force_compute_sim)

            elif recommender.__class__ in [SLIMElasticNetRecommender]:
                recommender.fit(topK=knn,
                                l1_ratio=similarity_args["l1_ratio"],
                                force_compute_sim=force_compute_sim)

            elif recommender.__class__ in [PureSVDRecommender]:
                recommender.fit(
                    num_factors=similarity_args["num_factors"][factorCounter],
                    force_compute_sim=force_compute_sim)
                factorCounter += 1

            elif recommender.__class__ in [P3alphaRecommender]:
                # Scalar alphaP3 is wrapped into a list, as for SLIM above.
                if type(similarity_args["alphaP3"]) is not list:
                    similarity_args["alphaP3"] = [similarity_args["alphaP3"]]
                recommender.fit(topK=knn,
                                alpha=similarity_args["alphaP3"][p3counter],
                                min_rating=0,
                                implicit=True,
                                normalize_similarity=True,
                                force_compute_sim=force_compute_sim)
                p3counter += 1

            elif recommender.__class__ in [RP3betaRecommender]:
                recommender.fit(alpha=similarity_args["alphaRP3"][rp3bcounter],
                                beta=similarity_args["betaRP"][rp3bcounter],
                                min_rating=0,
                                topK=knn,
                                implicit=True,
                                normalize_similarity=True,
                                force_compute_sim=force_compute_sim)
                rp3bcounter += 1

            elif recommender.__class__ in [ItemKNNCBFRecommender]:
                recommender.fit(knn,
                                shrink,
                                force_compute_sim=force_compute_sim,
                                feature_weighting_index=similarity_args[
                                    "feature_weighting_index"])

            else:  # ItemCF, UserCF, ItemCBF, UserCBF
                recommender.fit(knn,
                                shrink,
                                force_compute_sim=force_compute_sim)

        print("Recommender list before: {}".format(self.recommender_list))
        # Two empty accumulators for the weighted similarity sums; their
        # shapes decide which recommenders get merged into each.
        self.W_sparse20 = csr_matrix(([], ([], [])), shape=(20635, 20635))
        self.W_sparse50 = csr_matrix(([], ([], [])), shape=(50446, 50446))
        to_delete = []
        for index, recommender in enumerate(self.recommender_list):
            # EAFP shape probing: adding a W_sparse of the wrong shape
            # raises, which tells us the matrix belongs to the other
            # accumulator (or to neither).
            try:
                self.W_sparse20 += self.weights[index] * recommender.W_sparse
                to_delete.append(recommender)
                print("Recommender {} is summed in W_sparse20".format(
                    recommender))
                continue
            except:
                # the recommender doesn't have a W sparse matrix of that shape
                a = 1

            try:
                self.W_sparse50 += self.weights[index] * recommender.W_sparse
                to_delete.append(recommender)
                print("Recommender {} is summed in W_sparse50".format(
                    recommender))
                continue
            except:
                # the recommender doesn't have a W sparse matrix of that shape
                a = 1

        # remove recommenders that already has the similarity merged
        self.recommender_list = [
            x for x in self.recommender_list if x not in to_delete
        ]

        print("Recommender list after: {}".format(self.recommender_list))
        # Replace the merged recommenders with two KNN recommenders that
        # carry the pre-combined (already weighted) similarities.
        new_item_recommender = ItemKNNCFRecommender(self.URM_train)
        new_item_recommender.W_sparse = self.W_sparse20
        self.recommender_list.append(new_item_recommender)

        new_user_recommender = UserKNNCFRecommender(self.URM_train)
        new_user_recommender.W_sparse = self.W_sparse50
        self.recommender_list.append(new_user_recommender)
        print("Recommender list final: {}".format(self.recommender_list))

        if final_weights is None:
            self.final_weights = [final_weights1, final_weights2]
        else:
            self.final_weights = final_weights

        # NOTE(review): this assert checks the *argument* final_weights,
        # not self.final_weights — when final_weights is None (the
        # final_weights1/2 path above) len(None) raises TypeError.
        # Confirm whether it should read self.final_weights instead.
        assert len(final_weights) == len(
            self.recommender_list
        ), "Lunghezza di final weights e dei reccomender rimasti è diversa. Sono rimasti {} recommender con i final weight di lunghezza {}".format(
            len(self.recommender_list), len(final_weights))
def _load_or_fit_recommender(recommender, fit_params, folder_path, file_name):
    """Load a previously trained model from disk; on any failure, fit it
    with *fit_params* and save it for the next run."""
    try:
        recommender.load_model(folder_path, file_name)
        print(f"{recommender.RECOMMENDER_NAME} loaded.")
    except Exception:
        # Model not stored yet (or unreadable): train once and persist.
        print(f"Fitting {recommender.RECOMMENDER_NAME} ...")
        recommender.fit(**fit_params)
        print("done.")
        recommender.save_model(folder_path, file_name)


def read_data_split_and_search():
    """
    This function provides a simple example on how to tune parameters of a given algorithm

    The BayesianSearch object will save:
        - A .txt file with all the cases explored and the recommendation quality
        - A _best_model file which contains the trained model and can be loaded with recommender.load_model()
        - A _best_parameter file which contains a dictionary with all the fit parameters, it can be passed to recommender.fit(**_best_parameter)
        - A _best_result_validation file which contains a dictionary with the results of the best solution on the validation
        - A _best_result_test file which contains a dictionary with the results, on the test set, of the best solution chosen using the validation set
    """

    seed = 1205
    parser = DataParser()

    URM_all = parser.get_URM_all()
    ICM_obj = parser.get_ICM_all()

    # SPLIT TO GET TEST PARTITION
    URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage=0.90, seed=seed)

    # SPLIT TO GET THE HYBRID VALID PARTITION
    URM_train, URM_valid_hybrid = split_train_in_two_percentage_global_sample(URM_train, train_percentage=0.85,
                                                                              seed=seed)

    # Only the hybrid-combination search is active; individual algorithms
    # were explored in earlier runs.
    collaborative_algorithm_list = [
        HybridCombinationSearch
    ]

    content_algorithm_list = [
        # ItemKNNCBFRecommender
    ]

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    evaluator_valid_hybrid = EvaluatorHoldout(URM_valid_hybrid, cutoff_list=[10])
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])

    # All base models are cached under one folder, one file per model.
    store_folder = f'stored_recommenders/seed_{str(seed)}_hybrid_search/'

    rp3b = RP3betaRecommender(URM_train, verbose=False)
    _load_or_fit_recommender(rp3b,
                             {'topK': 1000, 'alpha': 0.38192761611274967, 'beta': 0.0,
                              'normalize_similarity': False},
                             store_folder, f'{rp3b.RECOMMENDER_NAME}_for_second_search')

    p3a = P3alphaRecommender(URM_train, verbose=False)
    _load_or_fit_recommender(p3a,
                             {'topK': 131, 'alpha': 0.33660811631883863, 'normalize_similarity': False},
                             store_folder, f'{p3a.RECOMMENDER_NAME}_for_second_search')

    icf = ItemKNNCFRecommender(URM_train, verbose=False)
    _load_or_fit_recommender(icf,
                             {'topK': 55, 'shrink': 1000, 'similarity': 'asymmetric', 'normalize': True,
                              'asymmetric_alpha': 0.0},
                             store_folder, f'{icf.RECOMMENDER_NAME}_for_second_search')

    ucf = UserKNNCFRecommender(URM_train, verbose=False)
    _load_or_fit_recommender(ucf,
                             {'topK': 190, 'shrink': 0, 'similarity': 'cosine', 'normalize': True},
                             store_folder, f'{ucf.RECOMMENDER_NAME}_for_second_search')

    # BUG FIX: the original printed icf's name while fitting icb; the
    # helper always reports the recommender actually being fitted.
    icb = ItemKNNCBFRecommender(URM_train, ICM_obj, verbose=False)
    _load_or_fit_recommender(icb,
                             {'topK': 65, 'shrink': 0, 'similarity': 'dice', 'normalize': True},
                             store_folder, f'{icb.RECOMMENDER_NAME}_for_second_search')

    sen = SLIMElasticNetRecommender(URM_train, verbose=False)
    _load_or_fit_recommender(sen,
                             {'topK': 992, 'l1_ratio': 0.004065081925341167, 'alpha': 0.003725005053334143},
                             store_folder, f'{sen.RECOMMENDER_NAME}_for_second_search')

    print("\nStart.")
    list_recommender = [icb, icf, ucf, p3a, rp3b, sen]
    combinations_already_seen = []

    # Search over every 3-way combination of the base recommenders that
    # has not been explored yet.
    for rec_perm in combinations(list_recommender, 3):

        if rec_perm not in combinations_already_seen:

            recommender_names = '_'.join([r.RECOMMENDER_NAME for r in rec_perm])
            output_folder_path = "result_experiments_v3/seed_" + str(
                seed) + '/linear_combination/' + recommender_names + '/'
            print(F"\nTESTING THE COMBO {recommender_names}")

            # If directory does not exist, create
            if not os.path.exists(output_folder_path):
                os.makedirs(output_folder_path)

            # TODO: setta I GIUSTI EVALUATOR QUI!!!!
            runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                               URM_train=URM_train,
                                                               ICM_train=ICM_obj,
                                                               metric_to_optimize="MAP",
                                                               n_cases=50,
                                                               n_random_starts=20,
                                                               evaluator_validation_earlystopping=evaluator_valid_hybrid,
                                                               evaluator_validation=evaluator_valid_hybrid,
                                                               output_folder_path=output_folder_path,
                                                               allow_weighting=False,
                                                               parallelizeKNN=False,
                                                               list_rec=rec_perm)
            pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
            pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)
Beispiel #14
0
    recommenderELASTIC = SLIMElasticNetRecommender(URM_train)
    # recommenderELASTIC.fit(topK=10)
    # recommenderELASTIC.save_model('model/', file_name='SLIM_ElasticNet')
    recommenderELASTIC.load_model('model/', file_name='SLIM_ElasticNet_train')

    # recommenderAlphaGRAPH = P3alphaRecommender(URM_train)
    # recommenderAlphaGRAPH.fit(topK=10, alpha=0.22, implicit=True, normalize_similarity=True)

    recommenderBetaGRAPH = RP3betaRecommender(URM_train)
    recommenderBetaGRAPH.fit(topK=10,
                             implicit=True,
                             normalize_similarity=True,
                             alpha=0.41,
                             beta=0.049)

    recommederUserKNN = UserKNNCFRecommender(URM_train)
    recommederUserKNN.fit(topK=10, shrink=15, similarity='jaccard')

    # Create BayesianSearch object
    recommender_class = ItemKNNScoresHybridRecommender

    parameterSearch = SearchBayesianSkopt(
        recommender_class,
        evaluator_validation=evaluator_validation,
        evaluator_test=evaluator_test)

    # weight_list = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.8, 0.9]
    weight_list = [
        0.1, 0.3, 0.5, 0.7, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 7, 8, 9,
        10
    ]
def read_data_split_and_search():
    """
    This function provides a simple example on how to tune parameters of a given algorithm

    The BayesianSearch object will save:
        - A .txt file with all the cases explored and the recommendation quality
        - A _best_model file which contains the trained model and can be loaded with recommender.load_model()
        - A _best_parameter file which contains a dictionary with all the fit parameters, it can be passed to recommender.fit(**_best_parameter)
        - A _best_result_validation file which contains a dictionary with the results of the best solution on the validation
        - A _best_result_test file which contains a dictionary with the results, on the test set, of the best solution chosen using the validation set

    Fix applied: the multiprocessing.Pool is now closed and joined after each
    map() call — previously a fresh pool was created on every loop iteration
    and never shut down, leaking worker processes.
    """

    seed = 1205
    parser = DataParser()

    URM_all = parser.get_URM_all()
    ICM_obj = parser.get_ICM_all()

    # SPLIT TO GET TEST PARTITION (90% train / 10% test)
    URM_train, URM_test = split_train_in_two_percentage_global_sample(
        URM_all, train_percentage=0.90, seed=seed)

    # SPLIT TO GET THE HYBRID VALID PARTITION (85% of the remaining train)
    URM_train, URM_valid_hybrid = split_train_in_two_percentage_global_sample(
        URM_train, train_percentage=0.85, seed=seed)

    # Keep only validation users whose train profile length is in [3, inf)
    URM_valid_hybrid = parser.filter_URM_test_by_range(URM_train,
                                                       URM_valid_hybrid,
                                                       (3, -1))

    collaborative_algorithm_list = [
        # EASE_R_Recommender
        # PipeHybrid001,
        # Random,
        # TopPop,
        # P3alphaRecommender,
        # RP3betaRecommender,
        # ItemKNNCFRecommender,
        # UserKNNCFRecommender,
        # MatrixFactorization_BPR_Cython,
        # MatrixFactorization_FunkSVD_Cython,
        # PureSVDRecommender,
        # NMFRecommender,
        # PureSVDItemRecommender
        # SLIM_BPR_Cython,
        # SLIMElasticNetRecommender
        # IALSRecommender
        # MF_MSE_PyTorch
        # MergedHybrid000
        # LinearHybrid002ggg
        HybridCombinationSearch
    ]

    content_algorithm_list = [
        # ItemKNNCBFRecommender
    ]

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    evaluator_valid_hybrid = EvaluatorHoldout(URM_valid_hybrid,
                                              cutoff_list=[10])
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
    """
    earlystopping_keywargs = {"validation_every_n": 5,
                              "stop_on_validation": True,
                              "evaluator_object": evaluator_valid_hybrid,
                              "lower_validations_allowed": 5,
                              "validation_metric": 'MAP',
                              }
    
    print('IALS training...')
    ials = IALSRecommender(URM_train, verbose=False)
    ials_params = {'num_factors': 83, 'confidence_scaling': 'linear', 'alpha': 28.4278070726612,
                   'epsilon': 1.0234211788885077, 'reg': 0.0027328110246575004, 'epochs': 20}
    ials.fit(**ials_params, **earlystopping_keywargs)
    print("Done")
    
    
    print("PureSVD training...")
    psvd = PureSVDRecommender(URM_train, verbose=False)
    psvd_params = {'num_factors': 711}
    psvd.fit(**psvd_params)
    print("Done")
    """
    # --- Pre-fit the base recommenders that the hybrid search will combine ---
    print("Rp3beta training...")
    rp3b = RP3betaRecommender(URM_train, verbose=False)
    rp3b_params = {
        'topK': 753,
        'alpha': 0.3873710051288722,
        'beta': 0.0,
        'normalize_similarity': False
    }
    rp3b.fit(**rp3b_params)
    print("Done")
    print("P3alpha training...")
    p3a = P3alphaRecommender(URM_train, verbose=False)
    p3a_params = {
        'topK': 438,
        'alpha': 0.41923120471415165,
        'normalize_similarity': False
    }
    p3a.fit(**p3a_params)
    print("Done")
    print("ItemKnnCF training...")
    icf = ItemKNNCFRecommender(URM_train, verbose=False)
    icf_params = {
        'topK': 565,
        'shrink': 554,
        'similarity': 'tversky',
        'normalize': True,
        'tversky_alpha': 1.9109121434662428,
        'tversky_beta': 1.7823834698905734
    }
    icf.fit(**icf_params)
    print("Done")
    print("UserKnnCF training...")
    ucf = UserKNNCFRecommender(URM_train, verbose=False)
    ucf_params = {
        'topK': 190,
        'shrink': 0,
        'similarity': 'cosine',
        'normalize': True
    }
    ucf.fit(**ucf_params)
    print("Done")
    print("ItemKnnCBF training...")
    icb = ItemKNNCBFRecommender(URM_train, ICM_obj, verbose=False)
    icb_params = {
        'topK': 205,
        'shrink': 1000,
        'similarity': 'cosine',
        'normalize': True,
        'feature_weighting': 'BM25'
    }
    icb.fit(**icb_params)
    print("Done")
    """
    print("SlimElasticNet training...")
    sen = SLIMElasticNetRecommender(URM_train, verbose=False)
    sen_params = {'topK': 954, 'l1_ratio': 3.87446082207643e-05, 'alpha': 0.07562657698792305}
    sen.fit(**sen_params)
    print("Done")
    """

    list_recommender = [icb, icf, ucf, p3a, rp3b]
    list_already_seen = []

    # Search over every 3-way combination of the pre-fitted recommenders.
    for rec_perm in combinations(list_recommender, 3):

        # NOTE: list_already_seen is never appended to, so this guard is
        # always True; it is kept only to preserve the original behavior.
        if rec_perm not in combinations(list_already_seen, 3):

            recommender_names = '_'.join(
                [r.RECOMMENDER_NAME for r in rec_perm])
            output_folder_path = "result_experiments_v3/seed_" + str(
                seed) + '_3--1' + '/' + recommender_names + '/'

            # If directory does not exist, create
            if not os.path.exists(output_folder_path):
                os.makedirs(output_folder_path)

            # TODO: setta I GIUSTI EVALUATOR QUI!!!!
            runParameterSearch_Collaborative_partial = partial(
                runParameterSearch_Collaborative,
                URM_train=URM_train,
                ICM_train=ICM_obj,
                metric_to_optimize="MAP",
                n_cases=50,
                n_random_starts=20,
                evaluator_validation_earlystopping=evaluator_valid_hybrid,
                evaluator_validation=evaluator_valid_hybrid,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                allow_weighting=False,
                # similarity_type_list = ["cosine", 'jaccard'],
                parallelizeKNN=False,
                list_rec=rec_perm)
            # Shut the pool down after each combination so worker processes
            # do not accumulate across loop iterations.
            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(),
                                        maxtasksperchild=1)
            try:
                pool.map(runParameterSearch_Collaborative_partial,
                         collaborative_algorithm_list)
            finally:
                pool.close()
                pool.join()
def runParameterSearch_Collaborative(recommender_class,
                                     URM_train,
                                     ICM_1,
                                     ICM_2,
                                     metric_to_optimize="PRECISION",
                                     evaluator_validation=None,
                                     evaluator_test=None,
                                     evaluator_validation_earlystopping=None,
                                     output_root_path="result_experiments/",
                                     parallelizeKNN=True,
                                     n_cases=100):
    """Run a BayesianSearch hyperparameter tuning pass for one recommender class.

    Dispatches on ``recommender_class`` identity: baselines (TopPop, Random)
    are fitted and evaluated directly; KNN recommenders loop over similarity
    types; every other supported class builds a ``recommenderDictionary``
    describing constructor args, fixed fit kwargs and the hyperparameter
    ranges, which is handed to ``parameterSearch.search`` at the end.

    Parameters
    ----------
    recommender_class : class to tune (identity-compared below).
    URM_train : training user-rating matrix, passed as constructor arg.
    ICM_1, ICM_2 : item-content matrices, used only by HybridRecommender.
    metric_to_optimize : metric name forwarded to the search / early stopping.
    evaluator_validation, evaluator_test : evaluators for the search object.
    evaluator_validation_earlystopping : evaluator for early-stopping fits.
    output_root_path : directory for result files (created if missing).
    parallelizeKNN : if True, KNN similarity types are tuned in a subprocess pool.
    n_cases : number of search cases.

    NOTE(review): if ``recommender_class`` matches none of the branches below,
    ``recommenderDictionary`` is unbound at the final ``search`` call; the
    resulting NameError is swallowed by the broad ``except`` and only logged.
    """
    from ParameterTuning.AbstractClassSearch import DictionaryKeys

    # If directory does not exist, create
    if not os.path.exists(output_root_path):
        os.makedirs(output_root_path)

    try:

        # Per-recommender prefix for all output artifacts.
        output_root_path_rec_name = output_root_path + recommender_class.RECOMMENDER_NAME

        parameterSearch = BayesianSearch(
            recommender_class,
            evaluator_validation=evaluator_validation,
            evaluator_test=evaluator_test)

        # Baselines have no hyperparameters: fit once, evaluate, dump, return.
        if recommender_class in [TopPop, Random]:
            recommender = recommender_class(URM_train)

            recommender.fit()

            output_file = open(
                output_root_path_rec_name + "_BayesianSearch.txt", "a")
            result_dict, result_baseline = evaluator_validation.evaluateRecommender(
                recommender)
            output_file.write(
                "ParameterSearch: Best result evaluated on URM_validation. Results: {}"
                .format(result_baseline))

            # NOTE(review): the file objects created inline by open(...) here
            # and below are never closed explicitly (handle leak).
            pickle.dump(
                result_dict.copy(),
                open(output_root_path_rec_name + "_best_result_validation",
                     "wb"),
                protocol=pickle.HIGHEST_PROTOCOL)

            result_dict, result_baseline = evaluator_test.evaluateRecommender(
                recommender)
            output_file.write(
                "ParameterSearch: Best result evaluated on URM_test. Results: {}"
                .format(result_baseline))

            pickle.dump(result_dict.copy(),
                        open(output_root_path_rec_name + "_best_result_test",
                             "wb"),
                        protocol=pickle.HIGHEST_PROTOCOL)

            output_file.close()

            return

        ##########################################################################################################

        # User-based KNN: tune each similarity type (currently only cosine).
        if recommender_class is UserKNNCFRecommender:

            similarity_type_list = ['cosine']

            run_KNNCFRecommender_on_similarity_type_partial = partial(
                run_KNNCFRecommender_on_similarity_type,
                parameterSearch=parameterSearch,
                URM_train=URM_train,
                n_cases=n_cases,
                output_root_path=output_root_path_rec_name,
                metric_to_optimize=metric_to_optimize)

            if parallelizeKNN:
                pool = PoolWithSubprocess(processes=int(2), maxtasksperchild=1)
                resultList = pool.map(
                    run_KNNCFRecommender_on_similarity_type_partial,
                    similarity_type_list)

            else:

                for similarity_type in similarity_type_list:
                    run_KNNCFRecommender_on_similarity_type_partial(
                        similarity_type)

            return

        ##########################################################################################################

        # Item-based KNN: same structure as the user-based branch above.
        if recommender_class is ItemKNNCFRecommender:

            similarity_type_list = ['cosine']

            run_KNNCFRecommender_on_similarity_type_partial = partial(
                run_KNNCFRecommender_on_similarity_type,
                parameterSearch=parameterSearch,
                URM_train=URM_train,
                n_cases=n_cases,
                output_root_path=output_root_path_rec_name,
                metric_to_optimize=metric_to_optimize)

            if parallelizeKNN:
                pool = PoolWithSubprocess(processes=int(2), maxtasksperchild=1)
                resultList = pool.map(
                    run_KNNCFRecommender_on_similarity_type_partial,
                    similarity_type_list)

            else:

                for similarity_type in similarity_type_list:
                    run_KNNCFRecommender_on_similarity_type_partial(
                        similarity_type)

            return

        ##########################################################################################################

        # if recommender_class is MultiThreadSLIM_RMSE:
        #
        #     hyperparamethers_range_dictionary = {}
        #     hyperparamethers_range_dictionary["topK"] = [50, 100]
        #     hyperparamethers_range_dictionary["l1_penalty"] = [1e-2, 1e-3, 1e-4]
        #     hyperparamethers_range_dictionary["l2_penalty"] = [1e-2, 1e-3, 1e-4]
        #
        #
        #     recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
        #                              DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
        #                              DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
        #                              DictionaryKeys.FIT_KEYWORD_ARGS: dict(),
        #                              DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary}
        #
        #

        ##########################################################################################################

        if recommender_class is P3alphaRecommender:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["topK"] = [
                5, 10, 20, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800
            ]
            # NOTE(review): range(0, 2) yields only the ints 0 and 1 —
            # presumably BayesianSearch treats this as a continuous interval;
            # verify against the search implementation.
            hyperparamethers_range_dictionary["alpha"] = range(0, 2)
            hyperparamethers_range_dictionary["normalize_similarity"] = [
                True, False
            ]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS:
                dict(),
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        # Weighted hybrid: pre-fits every component recommender and passes the
        # fitted instances as fixed fit kwargs; only the weights are searched.
        if recommender_class is HybridRecommender:

            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["w_itemcf"] = [
                x * 0.1 + 1 for x in range(0, 10)
            ]
            hyperparamethers_range_dictionary["w_usercf"] = [
                x * 0.1 for x in range(0, 10)
            ]
            hyperparamethers_range_dictionary["w_cbart"] = [
                x * 0.1 for x in range(0, 10)
            ]
            hyperparamethers_range_dictionary["w_cbalb"] = [
                x * 0.1 for x in range(0, 10)
            ]
            hyperparamethers_range_dictionary["w_slim"] = [
                x * 0.1 for x in range(0, 10)
            ]
            #hyperparamethers_range_dictionary["w_svd"] = [x * 0.05 for x in range(0, 20)]
            #hyperparamethers_range_dictionary["w_rp3"] = [x * 0.05 for x in range(0, 20)]

            item = ItemKNNCFRecommender(URM_train)

            user = UserKNNCFRecommender(URM_train)

            SLIM = MultiThreadSLIM_ElasticNet(URM_train=URM_train)

            item.fit(topK=800, shrink=10, similarity='cosine', normalize=True)

            user.fit(topK=70, shrink=22, similarity='cosine', normalize=True)

            SLIM.fit(l1_penalty=1e-05,
                     l2_penalty=0,
                     positive_only=True,
                     topK=150,
                     alpha=0.00415637376180466)

            # Content-based components: ICM_1 = artist features, ICM_2 = album
            # features, judging by the CBArt/CBAlb names — TODO confirm.
            CBArt = ItemKNNCBFRecommender(ICM=ICM_1, URM_train=URM_train)
            CBArt.fit(topK=160,
                      shrink=5,
                      similarity='cosine',
                      normalize=True,
                      feature_weighting="none")

            CBAlb = ItemKNNCBFRecommender(ICM=ICM_2, URM_train=URM_train)
            CBAlb.fit(topK=160,
                      shrink=5,
                      similarity='cosine',
                      normalize=True,
                      feature_weighting="none")

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS: {
                    "ICM_Art": ICM_1,
                    "ICM_Alb": ICM_2,
                    "item": item,
                    "user": user,
                    "SLIM": SLIM,
                    "CBArt": CBArt,
                    "CBAlb": CBAlb,
                },
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is RP3betaRecommender:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["topK"] = [
                5, 10, 20, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800
            ]
            hyperparamethers_range_dictionary["alpha"] = range(0, 2)
            hyperparamethers_range_dictionary["beta"] = range(0, 2)
            hyperparamethers_range_dictionary["normalize_similarity"] = [True]
            hyperparamethers_range_dictionary["implicit"] = [True]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS:
                dict(),
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is MatrixFactorization_FunkSVD_Cython:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["sgd_mode"] = ["adagrad", "adam"]
            # hyperparamethers_range_dictionary["epochs"] = [1, 5, 10, 20, 30, 50, 70, 90, 110]
            hyperparamethers_range_dictionary["num_factors"] = range(
                100, 1000, 20)
            hyperparamethers_range_dictionary["reg"] = [0.0, 1e-3, 1e-6, 1e-9]
            hyperparamethers_range_dictionary["learning_rate"] = [
                1e-2, 1e-3, 1e-4, 1e-5
            ]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                # NOTE(review): "lower_validatons_allowed" is misspelled here
                # and below — confirm the early-stopping code expects this
                # exact key before fixing it.
                DictionaryKeys.FIT_KEYWORD_ARGS: {
                    "validation_every_n": 5,
                    "stop_on_validation": True,
                    "evaluator_object": evaluator_validation_earlystopping,
                    "lower_validatons_allowed": 20,
                    "validation_metric": metric_to_optimize
                },
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is FunkSVD:
            hyperparamethers_range_dictionary = {}

            # hyperparamethers_range_dictionary["epochs"] = [1, 5, 10, 20, 30, 50, 70, 90, 110]
            hyperparamethers_range_dictionary["num_factors"] = range(
                100, 1000, 20)
            hyperparamethers_range_dictionary["reg"] = [
                0.0, 1e-03, 1e-06, 1e-09
            ]
            hyperparamethers_range_dictionary["learning_rate"] = [1e-02, 1e-03]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS:
                dict(),
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is MatrixFactorization_AsySVD_Cython:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["sgd_mode"] = ["adagrad", "adam"]
            # hyperparamethers_range_dictionary["epochs"] = [1, 5, 10, 20, 30, 50, 70, 90, 110]
            hyperparamethers_range_dictionary["num_factors"] = range(
                100, 500, 10)
            hyperparamethers_range_dictionary["batch_size"] = [
                100, 200, 300, 400
            ]
            hyperparamethers_range_dictionary["positive_reg"] = [
                0.0, 1e-3, 1e-6, 1e-9
            ]
            hyperparamethers_range_dictionary["negative_reg"] = [
                0.0, 1e-3, 1e-6, 1e-9
            ]
            hyperparamethers_range_dictionary["learning_rate"] = [
                1e-2, 1e-3, 1e-4, 1e-5
            ]
            hyperparamethers_range_dictionary["user_reg"] = [
                1e-3, 1e-4, 1e-5, 1e-6
            ]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {
                    'positive_threshold': 1
                },
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS: {
                    "validation_every_n": 5,
                    "stop_on_validation": True,
                    "evaluator_object": evaluator_validation_earlystopping,
                    "lower_validatons_allowed": 20,
                    "validation_metric": metric_to_optimize
                },
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is PureSVDRecommender:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["num_factors"] = list(
                range(0, 250, 5))

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        #########################################################################################################

        if recommender_class is SLIM_BPR_Cython:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["topK"] = [800, 900, 1000, 1200]
            # hyperparamethers_range_dictionary["epochs"] = [1, 5, 10, 20, 30, 50, 70, 90, 110]
            hyperparamethers_range_dictionary["sgd_mode"] = ["adagrad"]
            hyperparamethers_range_dictionary["lambda_i"] = [1e-6]
            hyperparamethers_range_dictionary["lambda_j"] = [1e-9]
            hyperparamethers_range_dictionary["learning_rate"] = [
                0.01, 0.001, 1e-4, 1e-5, 0.1
            ]

            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {
                    'train_with_sparse_weights': True,
                    'symmetric': True,
                    'positive_threshold': 1
                },
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS: {
                    "validation_every_n": 10,
                    "stop_on_validation": True,
                    "evaluator_object": evaluator_validation_earlystopping,
                    "lower_validatons_allowed": 3,
                    "validation_metric": metric_to_optimize
                },
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        ##########################################################################################################

        if recommender_class is MultiThreadSLIM_ElasticNet:
            hyperparamethers_range_dictionary = {}
            hyperparamethers_range_dictionary["topK"] = [
                3300, 4300, 5300, 6300, 7300
            ]
            hyperparamethers_range_dictionary["l1_penalty"] = [
                1e-5, 1e-6, 1e-4, 1e-3
            ]
            hyperparamethers_range_dictionary["l2_penalty"] = [1e-4]
            hyperparamethers_range_dictionary["alpha"] = range(0, 1)
            recommenderDictionary = {
                DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [URM_train],
                DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {},
                DictionaryKeys.FIT_POSITIONAL_ARGS:
                dict(),
                DictionaryKeys.FIT_KEYWORD_ARGS:
                dict(),
                DictionaryKeys.FIT_RANGE_KEYWORD_ARGS:
                hyperparamethers_range_dictionary
            }

        #########################################################################################################

        ## Final step, after the hyperparameter range has been defined for each type of algorithm
        best_parameters = parameterSearch.search(
            recommenderDictionary,
            n_cases=n_cases,
            output_root_path=output_root_path_rec_name,
            metric=metric_to_optimize)

    except Exception as e:

        # Broad catch by design: log the failure and keep the calling sweep
        # alive for the remaining recommender classes.
        print("On recommender {} Exception {}".format(recommender_class,
                                                      str(e)))
        traceback.print_exc()

        error_file = open(output_root_path + "ErrorLog.txt", "a")
        error_file.write("On recommender {} Exception {}\n".format(
            recommender_class, str(e)))
        error_file.close()
Beispiel #17
0
    def __init__(self,
                 URM_train,
                 ICM_train,
                 submission=False,
                 verbose=True,
                 seed=1205):
        """Build a user-profile-length-segmented hybrid.

        Fits a pool of base recommenders on ``URM_train`` with hand-tuned
        hyperparameters, then assembles one HiddenRecommender (a 3-way hybrid)
        per profile-length segment, each with its own alpha / l1_ratio mix.

        Parameters
        ----------
        URM_train : training user-rating matrix shared by all components.
        ICM_train : item-content matrix for the content-based component.
        submission : forwarded to each HiddenRecommender.
        verbose : forwarded to the base class and each HiddenRecommender.
        seed : forwarded to each HiddenRecommender.
        """
        super(UserWiseHybrid005, self).__init__(URM_train, verbose=verbose)
        # NOTE(review): 'sbpr' is constructed but its fit is commented out
        # below and it never appears in the segment table — dead weight.
        recommenders = {
            'rp3b': RP3betaRecommender(URM_train),
            'p3a': P3alphaRecommender(URM_train),
            'sen': SLIMElasticNetRecommender(URM_train),
            'sbpr': SLIM_BPR_Cython(URM_train),
            'icb': ItemKNNCBFRecommender(URM_train, ICM_train),
            'icf': ItemKNNCFRecommender(URM_train),
            'ucf': UserKNNCFRecommender(URM_train)
        }
        print("Fitting rp3b...")
        params = {
            'topK': 1000,
            'alpha': 0.38192761611274967,
            'beta': 0.0,
            'normalize_similarity': False
        }
        recommenders['rp3b'].fit(**params)
        print("done.")
        print("Fitting p3a...")
        params = {
            'topK': 131,
            'alpha': 0.33660811631883863,
            'normalize_similarity': False
        }
        recommenders['p3a'].fit(**params)
        print("done.")
        print("Fitting sen...")
        params = {
            'topK': 992,
            'l1_ratio': 0.004065081925341167,
            'alpha': 0.003725005053334143
        }
        recommenders['sen'].fit(**params)
        print("done.")
        #print("Fitting sbpr...")
        #params = {'topK': 979, 'epochs': 130, 'symmetric': False, 'sgd_mode': 'adam', 'lambda_i': 0.004947329669424629,
        #          'lambda_j': 1.1534760845071758e-05, 'learning_rate': 0.0001}
        #recommenders['sbpr'].fit(**params)
        # NOTE(review): stray "done." — the sbpr fit above is commented out,
        # so this print reports a step that never ran.
        print("done.")
        print("Fitting icb...")
        params = {
            'topK': 65,
            'shrink': 0,
            'similarity': 'dice',
            'normalize': True
        }
        recommenders['icb'].fit(**params)
        print("done.")
        print("Fitting icf...")
        params = {
            'topK': 55,
            'shrink': 1000,
            'similarity': 'asymmetric',
            'normalize': True,
            'asymmetric_alpha': 0.0
        }
        recommenders['icf'].fit(**params)
        print("done.")
        print("Fitting ucf...")
        params = {
            'topK': 190,
            'shrink': 0,
            'similarity': 'cosine',
            'normalize': True
        }
        recommenders['ucf'].fit(**params)
        print("done.")

        # Segment table: (profile-length range, hybrid recommender, mix params).
        # Ranges look half-open [lo, hi) with -1 as "no upper bound", and the
        # mix params as the hybrid's alpha / l1_ratio — TODO confirm against
        # how _get_recommender_by_profile_length (or equivalent) consumes this.
        self.__recommender_segmentation = [
            ((0, 3),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['rp3b'], recommenders['icf'],
                                   recommenders['icb']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.4577946628581237,
                                   'l1_ratio': 0.7434539743766688
                               }),
            ((3, 5),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['p3a'], recommenders['ucf'],
                                   recommenders['icb']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.3987236515679141,
                                   'l1_ratio': 0.15489605895390016
                               }),
            ((5, 10),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['rp3b'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 1.0,
                                   'l1_ratio': 0.3951763029766836
                               }),
            ((10, 17),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['p3a'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.9999772418587548,
                                   'l1_ratio': 0.28511052552468436
                               }),
            ((17, 30),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['icf'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.21686976560272436,
                                   'l1_ratio': 0.4598014054291886
                               }),
            ((30, 100),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['ucf'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.33535858857401674,
                                   'l1_ratio': 0.4046400351885727
                               }),
            ((100, 200),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['icf'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.21686976560272436,
                                   'l1_ratio': 0.4598014054291886
                               }),
            ((200, -1),
             HiddenRecommender(URM_train,
                               ICM_train, [
                                   recommenders['p3a'], recommenders['icb'],
                                   recommenders['sen']
                               ],
                               submission=submission,
                               verbose=verbose,
                               seed=seed), {
                                   'alpha': 0.9999772418587548,
                                   'l1_ratio': 0.28511052552468436
                               }),
        ]
from FeatureWeighting.User_CFW_D_Similarity_Linalg import User_CFW_D_Similarity_Linalg
from Utils.s_plus import dot_product
import scipy.sparse as sps
from Base.Evaluation.Evaluator import EvaluatorHoldout
from GraphBased.RP3betaRecommender import RP3betaRecommender
from ParameterTuning.SearchBayesianSkopt import SearchBayesianSkopt
from skopt.space import Real, Integer, Categorical
from ParameterTuning.SearchAbstractClass import SearchInputRecommenderArgs

# Script: learn per-feature weights for the user content matrix (UCM) so
# that the UCM-based similarity approximates a collaborative UserKNN
# similarity (CFW-D, solved via linear algebra).
data = DataManager()

# get_ucm() returns the individual UCMs plus their concatenation.
ucm_age, ucm_region, ucm_all = data.get_ucm()

# get_icm() returns the individual ICMs plus their concatenation (unused below).
icm_price, icm_asset, icm_sub, icm_all = data.get_icm()

# Collaborative user-user similarity used as the learning target.
recommender_4 = UserKNNCFRecommender(data.get_urm())
recommender_4.fit(shrink=2, topK=600, normalize=True)

W_sparse_CF = recommender_4.W_sparse

# Fit CFW-D: find feature weights D so that UCM * D * UCM^T ~= W_sparse_CF.
# UCM is copied because the model may modify it in place — TODO confirm.
cfw = User_CFW_D_Similarity_Linalg(URM_train=data.get_urm(),
                              UCM=ucm_all.copy(),
                              S_matrix_target=W_sparse_CF
                              )

cfw.fit(topK=1740, add_zeros_quota=0.3528735601555612, normalize_similarity=True)

# D_best holds the learned per-feature weights; apply them by scaling the
# UCM columns with a diagonal matrix.
weights = sps.diags(cfw.D_best)

ucm_weighted = ucm_all.dot(weights)
 def __init__(self,
              URM_train,
              ICM_train,
              submission=False,
              verbose=True,
              seed=1205):
     """
     User-wise hybrid: each user-profile-length range is served by its own
     HiddenRecommender built from three base recommenders with fixed,
     pre-tuned hyperparameters.

     :param URM_train: user-rating matrix used to train every base recommender
     :param ICM_train: item-content matrix for the content-based recommenders
     :param submission: forwarded to each HiddenRecommender
     :param verbose: forwarded to the base class and each HiddenRecommender
     :param seed: RNG seed forwarded to each HiddenRecommender
     """
     super(UserWiseHybrid004, self).__init__(URM_train, verbose=verbose)

     # Factory helpers: the original inlined these (recommender, params)
     # literals up to six times each. Every call returns a FRESH
     # recommender instance and a FRESH params dict, exactly like the
     # original literals, so no state is shared across segments.
     def _rp3b():
         return (RP3betaRecommender(URM_train), {
             'topK': 1000,
             'alpha': 0.38192761611274967,
             'beta': 0.0,
             'normalize_similarity': False
         })

     def _p3a():
         return (P3alphaRecommender(URM_train), {
             'topK': 131,
             'alpha': 0.33660811631883863,
             'normalize_similarity': False
         })

     def _icf():
         return (ItemKNNCFRecommender(URM_train), {
             'topK': 100,
             'shrink': 1000,
             'similarity': 'asymmetric',
             'normalize': True,
             'asymmetric_alpha': 0.0
         })

     def _ucf():
         return (UserKNNCFRecommender(URM_train), {
             'topK': 190,
             'shrink': 0,
             'similarity': 'cosine',
             'normalize': True
         })

     def _icb():
         return (ItemKNNCBFRecommender(URM_train, ICM_train), {
             'topK': 205,
             'shrink': 1000,
             'similarity': 'cosine',
             'normalize': True,
             'feature_weighting': 'BM25'
         })

     def _sen():
         return (SLIMElasticNetRecommender(URM_train), {
             'topK': 992,
             'l1_ratio': 0.004065081925341167,
             'alpha': 0.003725005053334143
         })

     def _segment(user_range, rec_param_pairs, merge_params):
         # One segmentation entry: (profile-length range, hybrid, merge params).
         return (user_range,
                 HiddenRecommender(URM_train,
                                   ICM_train,
                                   rec_param_pairs,
                                   submission=submission,
                                   verbose=verbose,
                                   seed=seed),
                 merge_params)

     # Ranges are (lower, upper) profile lengths; -1 means "no upper bound"
     # — presumably interpreted by get_recommender_by_profile_len; confirm.
     self.__recommender_segmentation = [
         _segment((0, 3), [_rp3b(), _icf(), _icb()], {
             'alpha': 0.40426999639005445,
             'l1_ratio': 1.0
         }),
         _segment((3, 5), [_icf(), _ucf(), _icb()], {
             'alpha': 0.767469300493861,
             'l1_ratio': 0.7325725081659069
         }),
         _segment((5, 10), [_rp3b(), _icf(), _icb()], {
             'alpha': 0.40426999639005445,
             'l1_ratio': 1.0
         }),
         _segment((10, 17), [_p3a(), _ucf(), _icb()], {
             'alpha': 0.37776131907747645,
             'l1_ratio': 0.44018901104481
         }),
         _segment((17, 100), [_icf(), _icb(), _sen()], {
             'alpha': 0.7783657178315921,
             'l1_ratio': 0.9570845000744118
         }),
         _segment((100, -1), [_p3a(), _ucf(), _icb()], {
             'alpha': 0.37776131907747645,
             'l1_ratio': 0.44018901104481
         }),
     ]
def read_data_split_and_search():
    """
    This function provides a simple example on how to tune parameters of a given algorithm

    The BayesianSearch object will save:
        - A .txt file with all the cases explored and the recommendation quality
        - A _best_model file which contains the trained model and can be loaded with recommender.load_model()
        - A _best_parameter file which contains a dictionary with all the fit parameters, it can be passed to recommender.fit(**_best_parameter)
        - A _best_result_validation file which contains a dictionary with the results of the best solution on the validation
        - A _best_result_test file which contains a dictionary with the results, on the test set, of the best solution chosen using the validation set
    """

    seed = 1205
    parser = DataParser()

    URM_all = parser.get_URM_all()
    ICM_obj = parser.get_ICM_all()

    # SPLIT TO GET TEST PARTITION
    URM_train, URM_test = split_train_in_two_percentage_global_sample(
        URM_all, train_percentage=0.90, seed=seed)

    # SPLIT TO GET THE HYBRID VALID PARTITION
    URM_train, URM_valid_hybrid = split_train_in_two_percentage_global_sample(
        URM_train, train_percentage=0.85, seed=seed)

    # SPLIT TO GET THE sub_rec VALID PARTITION
    URM_train_bis, URM_valid_sub = split_train_in_two_percentage_global_sample(
        URM_train, train_percentage=0.85, seed=seed)

    collaborative_algorithm_list = [
        #EASE_R_Recommender
        #PipeHybrid001,
        #Random,
        #TopPop,
        #P3alphaRecommender,
        #RP3betaRecommender,
        #ItemKNNCFRecommender,
        #UserKNNCFRecommender,
        #MatrixFactorization_BPR_Cython,
        #MatrixFactorization_FunkSVD_Cython,
        #PureSVDRecommender,
        #NMFRecommender,
        #PureSVDItemRecommender
        #SLIM_BPR_Cython,
        #SLIMElasticNetRecommender
        #IALSRecommender
        #MF_MSE_PyTorch
        #MergedHybrid000
        #LinearHybrid002ggg
        HybridCombinationSearch
    ]

    content_algorithm_list = [
        #ItemKNNCBFRecommender
    ]

    from Base.Evaluation.Evaluator import EvaluatorHoldout

    evaluator_valid_sub = EvaluatorHoldout(URM_valid_sub, cutoff_list=[10])
    evaluator_valid_hybrid = EvaluatorHoldout(URM_valid_hybrid,
                                              cutoff_list=[10])
    evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])
    """
        # TODO: setta I GIUSTI EVALUATOR QUI!!!!
    runParameterSearch_Content_partial = partial(runParameterSearch_Content,
                                                 URM_train=URM_train,
                                                 ICM_object=ICM_obj,
                                                 ICM_name='1BookFeatures',
                                                 n_cases = 50,
                                                 n_random_starts = 20,
                                                 evaluator_validation= evaluator_valid_sub,
                                                 evaluator_test = evaluator_valid_hybrid,
                                                 metric_to_optimize = "MAP",
                                                 output_folder_path=output_folder_path,
                                                 parallelizeKNN = False,
                                                 allow_weighting = True,
                                                 #similarity_type_list = ['cosine']
                                                 )
    pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)
    pool.map(runParameterSearch_Content_partial, content_algorithm_list)
    """
    # --- Train the base recommenders with pre-tuned hyperparameters ------
    print("Rp3beta training...")
    rp3b = RP3betaRecommender(URM_train, verbose=False)
    rp3b_params = {
        'topK': 1000,
        'alpha': 0.38192761611274967,
        'beta': 0.0,
        'normalize_similarity': False
    }
    rp3b.fit(**rp3b_params)
    print("Done")
    print("P3alpha training...")
    p3a = P3alphaRecommender(URM_train, verbose=False)
    p3a_params = {
        'topK': 131,
        'alpha': 0.33660811631883863,
        'normalize_similarity': False
    }
    p3a.fit(**p3a_params)
    print("Done")
    print("ItemKnnCF training...")
    icf = ItemKNNCFRecommender(URM_train, verbose=False)
    icf_params = {
        'topK': 100,
        'shrink': 1000,
        'similarity': 'asymmetric',
        'normalize': True,
        'asymmetric_alpha': 0.0
    }
    icf.fit(**icf_params)
    print("Done")
    print("UserKnnCF training...")
    ucf = UserKNNCFRecommender(URM_train, verbose=False)
    ucf_params = {
        'topK': 190,
        'shrink': 0,
        'similarity': 'cosine',
        'normalize': True
    }
    ucf.fit(**ucf_params)
    print("Done")
    print("ItemKnnCBF training...")
    icb = ItemKNNCBFRecommender(URM_train, ICM_obj, verbose=False)
    icb_params = {
        'topK': 205,
        'shrink': 1000,
        'similarity': 'cosine',
        'normalize': True,
        'feature_weighting': 'BM25'
    }
    icb.fit(**icb_params)
    print("Done")
    print("SlimBPR training...")
    sbpr = SLIM_BPR_Cython(URM_train, verbose=False)
    sbpr_params = {
        'topK': 979,
        'epochs': 130,
        'symmetric': False,
        'sgd_mode': 'adam',
        'lambda_i': 0.004947329669424629,
        'lambda_j': 1.1534760845071758e-05,
        'learning_rate': 0.0001
    }
    sbpr.fit(**sbpr_params)
    print("Done")
    print("SlimElasticNet training...")
    sen = SLIMElasticNetRecommender(URM_train, verbose=False)
    sen_params = {
        'topK': 992,
        'l1_ratio': 0.004065081925341167,
        'alpha': 0.003725005053334143
    }
    sen.fit(**sen_params)
    print("Done")

    list_recommender = [rp3b, p3a, icf, ucf, icb, sen, sbpr]
    list_already_seen = [rp3b, p3a, icf, ucf, icb]

    # Hoisted loop invariant: the original rebuilt this combinations
    # generator on every iteration just to test membership. A set gives
    # O(1) lookups (tuples of recommenders hash/compare by identity,
    # matching the original element-wise equality semantics).
    already_seen_triples = set(combinations(list_already_seen, 3))

    for rec_perm in combinations(list_recommender, 3):

        if rec_perm not in already_seen_triples:

            recommender_names = '_'.join(
                [r.RECOMMENDER_NAME for r in rec_perm])
            output_folder_path = "result_experiments_v3/seed_" + str(
                seed) + '/' + recommender_names + '/'

            # If directory does not exist, create
            if not os.path.exists(output_folder_path):
                os.makedirs(output_folder_path)

            # TODO: setta I GIUSTI EVALUATOR QUI!!!!
            runParameterSearch_Collaborative_partial = partial(
                runParameterSearch_Collaborative,
                URM_train=URM_train,
                ICM_train=ICM_obj,
                metric_to_optimize="MAP",
                n_cases=50,
                n_random_starts=20,
                evaluator_validation_earlystopping=evaluator_valid_hybrid,
                evaluator_validation=evaluator_valid_hybrid,
                evaluator_test=evaluator_test,
                output_folder_path=output_folder_path,
                allow_weighting=False,
                #similarity_type_list = ["cosine", 'jaccard'],
                parallelizeKNN=False,
                list_rec=rec_perm)
            # Context manager closes/joins the pool: the original leaked a
            # fresh worker pool on every iteration (never close()d).
            with multiprocessing.Pool(processes=int(
                    multiprocessing.cpu_count()),
                                      maxtasksperchild=1) as pool:
                pool.map(runParameterSearch_Collaborative_partial,
                         collaborative_algorithm_list)
Beispiel #21
0
    def __init__(self,
                 URM_train,
                 ICM_train,
                 submission=False,
                 verbose=True,
                 seed=1205):
        """
        User-wise hybrid: each user-profile-length range is served by a
        Hidden{Merged,Linear}Recommender built on three pre-fitted base
        recommenders. Base models are loaded from disk when a stored copy
        exists, otherwise fitted with pre-tuned parameters and saved.

        :param URM_train: user-rating matrix used by every base recommender
        :param ICM_train: item-content matrix for the content-based models
        :param submission: forwarded to each hidden hybrid recommender
        :param verbose: forwarded to the base class and the hybrids
        :param seed: RNG seed; also selects the stored-model folder
        """
        super(UserWiseHybrid009, self).__init__(URM_train, verbose=verbose)
        recommenders = {
            'rp3b': RP3betaRecommender(URM_train),
            'p3a': P3alphaRecommender(URM_train),
            'sen': SLIMElasticNetRecommender(URM_train),
            'sbpr': SLIM_BPR_Cython(URM_train),
            'icb': ItemKNNCBFRecommender(URM_train, ICM_train),
            'icf': ItemKNNCFRecommender(URM_train),
            'ucf': UserKNNCFRecommender(URM_train),
            'sslim': SSLIMElasticNet(URM_train, ICM_train)
        }

        # Pre-tuned fit parameters for each base recommender. Insertion
        # order below fixes the load/fit order (same as the original
        # straight-line code).
        fit_params = {
            'rp3b': {
                'topK': 1000,
                'alpha': 0.38192761611274967,
                'beta': 0.0,
                'normalize_similarity': False
            },
            'p3a': {
                'topK': 131,
                'alpha': 0.33660811631883863,
                'normalize_similarity': False
            },
            'sen': {
                'topK': 992,
                'l1_ratio': 0.004065081925341167,
                'alpha': 0.003725005053334143
            },
            'sbpr': {
                'topK': 979,
                'epochs': 130,
                'symmetric': False,
                'sgd_mode': 'adam',
                'lambda_i': 0.004947329669424629,
                'lambda_j': 1.1534760845071758e-05,
                'learning_rate': 0.0001
            },
            'icb': {
                'topK': 65,
                'shrink': 0,
                'similarity': 'dice',
                'normalize': True
            },
            'icf': {
                'topK': 55,
                'shrink': 1000,
                'similarity': 'asymmetric',
                'normalize': True,
                'asymmetric_alpha': 0.0
            },
            'ucf': {
                'topK': 190,
                'shrink': 0,
                'similarity': 'cosine',
                'normalize': True
            },
            'sslim': {
                'beta': 0.4849594591575789,
                'topK': 1000,
                'l1_ratio': 1e-05,
                'alpha': 0.001
            },
        }

        # Load-or-fit loop replaces eight copies of identical boilerplate.
        # The bare `except:` of the original is narrowed to `Exception` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        folder = f'stored_recommenders/seed_{str(seed)}_hybrid_sub/'
        for key, params in fit_params.items():
            rec = recommenders[key]
            file_name = f'{rec.RECOMMENDER_NAME}_for_sub'
            try:
                rec.load_model(folder, file_name)
                print(f"{rec.RECOMMENDER_NAME} loaded.")
            except Exception:
                print(f"Fitting {rec.RECOMMENDER_NAME} ...")
                rec.fit(**params)
                rec.save_model(folder, file_name)
                print(f"done.")

        # (range, hybrid, merge params); -1 means "no upper bound" —
        # presumably interpreted downstream when picking the segment; confirm.
        self.__recommender_segmentation = [
            ((0, 3),
             HiddenMergedRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['rp3b'], recommenders['icb'],
                     recommenders['icf']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.7276553525851246,
                     'l1_ratio': 0.6891035546237165,
                     'topK': 1000
                 }),
            ((3, 5),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['p3a'],
                     recommenders['icb']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.9847198829156348,
                     'l1_ratio': 0.9996962519963414
                 }),
            ((5, 10),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['icb'], recommenders['rp3b'],
                     recommenders['sen']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.9949623682515907,
                     'l1_ratio': 0.007879399002699851
                 }),
            ((10, 17),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['icb'],
                     recommenders['ucf']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.6461624491197696,
                     'l1_ratio': 0.7617220099582368
                 }),
            ((17, 30),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['p3a'],
                     recommenders['icb']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.8416340030829476,
                     'l1_ratio': 0.6651408407090509
                 }),
            ((30, 100),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['icb'],
                     recommenders['icf']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.996772013761913,
                     'l1_ratio': 0.7831508517025596
                 }),
            ((100, 200),
             HiddenLinearRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['rp3b'],
                     recommenders['icb']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.8416340030829476,
                     'l1_ratio': 0.6651408407090509
                 }),
            ((200, -1),
             HiddenMergedRecommender(
                 URM_train,
                 ICM_train, [
                     recommenders['sslim'], recommenders['p3a'],
                     recommenders['icb']
                 ],
                 submission=submission,
                 verbose=verbose,
                 seed=seed), {
                     'alpha': 0.859343616443417,
                     'l1_ratio': 0.8995038091652459,
                     'topK': 900
                 }),
        ]
Beispiel #22
0
from FeatureWeighting.User_CFW_D_Similarity_Linalg import User_CFW_D_Similarity_Linalg
from Hybrid.HybridNorm3Recommender import HybridNorm3Recommender
from MatrixFactorization.ALSRecommender import ALSRecommender
from MatrixFactorization.BPRRecommender import BPRRecommender
import similaripy as sim

# Script: compare a UserKNN-CF model trained on the full URM against one
# trained only on warm users; both are evaluated on the warm-user test set.
data = DataManager()
urm_train = data.get_urm()

# Two-stage leave-k-out split: full URM -> train/test, then train -> train/valid.
# NOTE(review): the first split re-reads data.get_urm(), so the urm_train
# assigned just above is immediately overwritten — confirm this is intended.
urm_train, urm_test = split_train_leave_k_out_user_wise(data.get_urm(),
                                                        temperature='normal')
urm_train, urm_valid = split_train_leave_k_out_user_wise(urm_train,
                                                         temperature='valid2')

# Restrict both matrices to "warm" users (presumably users with at least
# `threshold` interactions — verify against DataManager).
urm_train_warm = data.create_test_warm_users(urm_train, threshold=10)
urm_test_warm = data.create_test_warm_users(urm_test, threshold=10)

# Both models are evaluated on the warm-user test split at cutoff 10.
evaluator_test_warm = EvaluatorHoldout(urm_test_warm, cutoff_list=[10])

# Baseline: user-based CF fitted on the full training URM.
recommender = UserKNNCFRecommender(urm_train)
recommender.fit(shrink=2, topK=600, normalize=True)

# Variant: same model family fitted only on the warm-user training URM.
recommender_warm = UserKNNCFRecommender(urm_train_warm)
recommender_warm.fit(shrink=2, topK=500, normalize=True)

result, str_result = evaluator_test_warm.evaluateRecommender(recommender)
print('The Map of test of urm normal is : {}'.format(result[10]['MAP']))

result, str_result = evaluator_test_warm.evaluateRecommender(recommender_warm)
print('The Map of test of urm warm is : {}'.format(result[10]['MAP']))
Beispiel #23
0
# Script: evaluate CMN, UserKNN-CF, ItemKNN-CF and RP3beta under a
# negative-item-sample protocol, loading pre-trained model weights from disk.
print('URM_train.shape:', URM_train.shape)
print('URM_test.shape:', URM_test.shape)
print('URM_validation.shape:', URM_validation.shape)

print('number_of_users:', number_of_users)
print('number_of_items:', number_of_items)

# test(URM_train, 267)

cutoff = 5

print_statistics(URM_train)

# Instantiate the four models to compare; their weights are loaded below,
# not trained here.
CMN_wrapper_train = CMN_RecommenderWrapper(URM_train)
user_KNNCF_Recommender = UserKNNCFRecommender(URM_train)
item_KNNCF_Recommender = ItemKNNCFRecommender(URM_train)
rp3_beta_Recommender = RP3betaRecommender(URM_train)

# Ranks against sampled negative items, reporting cutoffs 5 and 10.
evaluator_negative_item_sample = EvaluatorNegativeItemSample(
    URM_test, URM_test_negative, cutoff_list=[5, 10])

CMN_wrapper_train.loadModel('result_experiments/SIGIR/CMN_pinterest/',
                            'CMN_RecommenderWrapper_best_model')
d, s = evaluator_negative_item_sample.evaluateRecommender(CMN_wrapper_train)
print('CMN_wrapper_train')
print(s)

user_KNNCF_Recommender.loadModel('result_experiments/SIGIR/CMN_pinterest/',
                                 'UserKNNCFRecommender_cosine_best_model')
# NOTE(review): this snippet is truncated mid-statement below.
d, s = evaluator_negative_item_sample.evaluateRecommender(
Beispiel #24
0
    def __init__(self,
                 URM_train,
                 ICM_train,
                 submission=False,
                 verbose=True,
                 seed=1205):
        """
        Build the hybrid: a UserKNN-CF first stage plus a merged
        (ItemKNN-CBF + RP3beta + SLIM-ElasticNet) second stage.

        :param URM_train: user-rating matrix used by every sub-recommender
        :param ICM_train: item-content matrix for the content-based component
        :param submission: if True, skip the on-disk model cache and always
            fit the sub-recommenders from scratch
        :param verbose: forwarded to the base recommender
        :param seed: namespaces the stored-model directory on disk
        """
        super(LinearOverMerged001, self).__init__(URM_train, verbose=verbose)
        self.URM_train = URM_train
        self.ICM_train = ICM_train
        self.__submission = submission
        self.__rec1 = UserKNNCFRecommender(URM_train, verbose=False)
        self.__rec1_params = {
            'topK': 190,
            'shrink': 0,
            'similarity': 'cosine',
            'normalize': True
        }
        self.seed = seed

        icb = ItemKNNCBFRecommender(URM_train, ICM_train, verbose=False)
        icb_params = {
            'topK': 65,
            'shrink': 0,
            'similarity': 'dice',
            'normalize': True
        }
        rp3b = RP3betaRecommender(URM_train, verbose=False)
        rp3b_params = {
            'topK': 1000,
            'alpha': 0.38192761611274967,
            'beta': 0.0,
            'normalize_similarity': False
        }
        sen = SLIMElasticNetRecommender(URM_train, verbose=False)
        sen_params = {
            'topK': 992,
            'l1_ratio': 0.004065081925341167,
            'alpha': 0.003725005053334143
        }

        def _load_or_fit(rec, params):
            # Reuse a model stored for this seed when possible; otherwise fit
            # it and store the result for next time.
            # BUGFIX: the original used three duplicated bare `except:` blocks,
            # which also swallowed KeyboardInterrupt/SystemExit; narrowed to
            # Exception and deduplicated into this helper.
            folder = f'stored_recommenders/seed_{str(self.seed)}_{rec.RECOMMENDER_NAME}/'
            file_name = f'best_for_{self.RECOMMENDER_NAME}'
            try:
                rec.load_model(folder, file_name)
                print(f"{rec.RECOMMENDER_NAME} loaded.")
            except Exception:
                print(f"Fitting {rec.RECOMMENDER_NAME} ...")
                rec.fit(**params)
                print("done.")
                rec.save_model(folder, file_name)

        if not self.__submission:
            _load_or_fit(icb, icb_params)
            _load_or_fit(rp3b, rp3b_params)
            _load_or_fit(sen, sen_params)
        else:
            # Submission mode: no cache, always fit fresh models.
            icb.fit(**icb_params)
            rp3b.fit(**rp3b_params)
            sen.fit(**sen_params)

        # Second stage merges the three fitted sub-recommenders.
        self.__rec2 = HiddenMergedRecommender(URM_train,
                                              ICM_train, [icb, rp3b, sen],
                                              verbose=False)
        self.__rec2_params = {
            'alpha': 0.6355738550417837,
            'l1_ratio': 0.6617849709204384,
            'topK': 538
        }

        # Mixing weights for the two stages; presumably set later (e.g. by
        # fit()) — not visible in this chunk.
        self.__a = self.__b = None
Beispiel #25
0
    def __init__(self, URM_train, ICM_train, submission=False, verbose=True, seed=1205):
        """
        Segment users by profile-length range and route each segment to a
        dedicated hidden hybrid built from a fixed trio of sub-recommenders.
        """
        super(UserWiseHybrid008, self).__init__(URM_train, verbose=verbose)
        recommenders = {
            'rp3b': RP3betaRecommender(URM_train),
            'p3a': P3alphaRecommender(URM_train),
            'sen': SLIMElasticNetRecommender(URM_train),
            'sbpr': SLIM_BPR_Cython(URM_train),
            'icb': ItemKNNCBFRecommender(URM_train, ICM_train),
            'icf': ItemKNNCFRecommender(URM_train),
            'ucf': UserKNNCFRecommender(URM_train)
        }

        # Hyper-parameters for every sub-recommender fitted here.
        # NOTE(review): 'rp3b' is instantiated but never fitted (its fit was
        # disabled upstream), and 'icf' is fitted but not used in any segment
        # below — confirm whether both are intentional.
        fit_specs = [
            ('p3a', {'topK': 131, 'alpha': 0.33660811631883863,
                     'normalize_similarity': False}),
            ('sen', {'topK': 992, 'l1_ratio': 0.004065081925341167,
                     'alpha': 0.003725005053334143}),
            ('sbpr', {'topK': 979, 'epochs': 130, 'symmetric': False,
                      'sgd_mode': 'adam', 'lambda_i': 0.004947329669424629,
                      'lambda_j': 1.1534760845071758e-05,
                      'learning_rate': 0.0001}),
            ('icb', {'topK': 65, 'shrink': 0, 'similarity': 'dice',
                     'normalize': True}),
            ('icf', {'topK': 55, 'shrink': 1000, 'similarity': 'asymmetric',
                     'normalize': True, 'asymmetric_alpha': 0.0}),
            ('ucf', {'topK': 190, 'shrink': 0, 'similarity': 'cosine',
                     'normalize': True}),
        ]
        for key, params in fit_specs:
            print("Fitting " + key + "...")
            recommenders[key].fit(**params)
            print("done.")

        def _segment_hybrid(names, weights):
            # One HiddenRecommender over the named trio, paired with its
            # linear-combination weights.
            hidden = HiddenRecommender(URM_train, ICM_train,
                                       [recommenders[n] for n in names],
                                       submission=submission,
                                       verbose=verbose,
                                       seed=seed)
            return hidden, weights

        # (profile-length bounds, hybrid recommender, mixing weights);
        # -1 presumably means "no upper bound" — verify against the dispatcher.
        segmentation = []
        for bounds, names, weights in [
            ((0, 6), ('p3a', 'ucf', 'icb'),
             {'alpha': 0.3987236515679141, 'l1_ratio': 0.15489605895390016}),
            ((6, 16), ('ucf', 'icb', 'sen'),
             {'alpha': 0.33535858857401674, 'l1_ratio': 0.4046400351885727}),
            ((16, -1), ('icb', 'sen', 'sbpr'),
             {'alpha': 0.7321778261479165, 'l1_ratio': 0.15333729621089734}),
        ]:
            hidden, params = _segment_hybrid(names, weights)
            segmentation.append((bounds, hidden, params))
        self.__recommender_segmentation = segmentation
Beispiel #26
0
 def __init__(self, URM_train, UCM, cold_users, warm_users):
     """Pair a CF model for warm users with a CBF fallback for cold users."""
     super(Hybrid002AlphaRecommender, self).__init__(URM_train)
     # Keep both user partitions; presumably the recommendation method
     # dispatches per user id — confirm against the rest of the class.
     self.cold_users = cold_users
     self.warm_users = warm_users
     # Warm users: collaborative filtering on the interaction matrix.
     self.warm_recommender = UserKNNCFRecommender(URM_train)
     # Cold users: content-based filtering on user features (UCM).
     self.cold_recommender = UserKNNCBFRecommender(UCM, URM_train)
Beispiel #27
0
# Script: fit several base recommenders for a hybrid optimization run and
# open an output log file named after the combined recommenders.
filename = P3alphaRecommender.RECOMMENDER_NAME \
           + ItemKNNCBFRecommender.RECOMMENDER_NAME \
           + ItemKNNCFRecommender.RECOMMENDER_NAME \
           + "hybrid_opt"

output_root_path += filename
# NOTE(review): file handle opened in append mode; it is presumably closed
# further down this script (not visible here) — confirm.
output_file = open(output_root_path, "a")

# Graph-based P3alpha model.
P3alpha = P3alphaRecommender(URM_train)
P3alpha.fit(topK=100, alpha=0.7905462550621185, implicit=True, normalize_similarity=True)
# print("-------------------")
# print("--P3alpha fitted---")
# print("-------------------")

# User-based collaborative KNN.
UserBased = UserKNNCFRecommender(URM_train)
UserBased.fit(topK=300, shrink=200)

# Item content-based KNN on the ICM.
ContentBased = ItemKNNCBFRecommender(ICM, URM_train)
ContentBased.fit(topK=50, shrink=100)
# print("-------------------")
# print("--KNNCBF fitted---")
# print("-------------------")
# Item-based collaborative KNN.
ItemKNNCF = ItemKNNCFRecommender(URM_train)
ItemKNNCF.fit(topK=300, shrink=100)
# print("-------------------")
# print("---KNNCF fitted----")
# print("-------------------")
# Truncated-SVD matrix factorization.
PureSVD = PureSVDRecommender(URM_train)
PureSVD.fit(num_factors=240)
# print("-------------------")
Beispiel #28
0
    "topK": 466,
    "shrink": 9,
    "similarity": "dice",
    "normalize": False
}

# Script: split users into 20 groups by profile length and compare SLIM-BPR,
# RP3beta and UserKNN-CF MAP per group.
URM_train = sps.csr_matrix(URM_train)
# Interactions per user = difference of consecutive CSR row pointers.
profile_length = np.ediff1d(URM_train.indptr)
block_size = int(len(profile_length) * 0.05)  # 5% of users per group
sorted_users = np.argsort(profile_length)

slim_model = SLIM_BPR_Cython(URM_train, recompile_cython=False)
slim_model.fit(**slim_best_parameters)
rp3_model = RP3betaRecommender(URM_train)
rp3_model.fit(**rp3_best_parameters)
userCF_model = UserKNNCFRecommender(URM_train)
userCF_model.fit(**userKNNCF_best_parameters)

MAP_slim_per_group = []
MAP_rp3_per_group = []
MAP_userCF_per_group = []
cutoff = 10

# NOTE(review): the four lines below recompute exactly the same values as at
# the top of this script; they are redundant.
URM_train = sps.csr_matrix(URM_train)
profile_length = np.ediff1d(URM_train.indptr)
block_size = int(len(profile_length) * 0.05)
sorted_users = np.argsort(profile_length)

# Per-group evaluation loop (body continues beyond this excerpt).
for group_id in range(0, 20):
    start_pos = group_id * block_size
    end_pos = min((group_id + 1) * block_size, len(profile_length))
Beispiel #29
0
    def __init__(self, urm_train, eurm=False):
        """
        Hybrid over seven sub-recommenders all fitted on ``urm_train``.

        :param urm_train: user-rating matrix (copied and converted to CSR)
        :param eurm: if True, precompute each sub-recommender's full
            user x item score matrix and max-normalize it row-wise
        """
        super(HybridNormOrigRecommender, self).__init__(urm_train)
        self.data_folder = Path(__file__).parent.parent.absolute()
        self.eurm = eurm
        self.num_users = urm_train.shape[0]

        data = DataManager()

        urm_train = check_matrix(urm_train.copy(), 'csr')
        icm_price, icm_asset, icm_sub, icm_all = data.get_icm()
        ucm_age, ucm_region, ucm_all = data.get_ucm()

        # 1) Item content-based KNN on the combined ICM.
        recommender_1 = ItemKNNCBFRecommender(urm_train, icm_all)
        recommender_1.fit(shrink=40, topK=20, feature_weighting='BM25')

        # 7) User content-based KNN on the combined UCM.
        recommender_7 = UserKNNCBFRecommender(urm_train, ucm_all)
        recommender_7.fit(shrink=1777,
                          topK=1998,
                          similarity='tversky',
                          feature_weighting='BM25',
                          tversky_alpha=0.1604953616,
                          tversky_beta=0.9862348646)

        # 2) Item-based collaborative KNN.
        recommender_2 = ItemKNNCFRecommender(urm_train)
        recommender_2.fit(topK=5,
                          shrink=500,
                          feature_weighting='BM25',
                          similarity='tversky',
                          normalize=False,
                          tversky_alpha=0.0,
                          tversky_beta=1.0)

        # 3) User-based collaborative KNN.
        recommender_3 = UserKNNCFRecommender(urm_train)
        recommender_3.fit(shrink=2, topK=600, normalize=True)

        # 4) RP3beta graph-based recommender.
        recommender_4 = RP3betaRecommender(urm_train)
        recommender_4.fit(topK=16,
                          alpha=0.03374950051351756,
                          beta=0.24087176329409027,
                          normalize_similarity=False)

        # 5) SLIM trained with BPR.
        recommender_5 = SLIM_BPR_Cython(urm_train)
        recommender_5.fit(lambda_i=0.0926694015,
                          lambda_j=0.001697250,
                          learning_rate=0.002391,
                          epochs=65,
                          topK=200)

        # 6) Implicit ALS matrix factorization.
        recommender_6 = ALSRecommender(urm_train)
        recommender_6.fit(alpha=5, iterations=40, reg=0.3)

        self.recommender_1 = recommender_1
        self.recommender_2 = recommender_2
        self.recommender_3 = recommender_3
        self.recommender_4 = recommender_4
        self.recommender_5 = recommender_5
        self.recommender_6 = recommender_6
        self.recommender_7 = recommender_7

        if self.eurm:
            # The content-based score matrix is expensive: cache it on disk.
            cache_path = self.data_folder / 'Data/uicm_orig_sparse.npz'
            if cache_path.is_file():
                print("Previous uicm_sparse found")
                # BUGFIX: the original loaded 'Data/uicm_sparse.npz' even
                # though both the existence check above and the save below use
                # 'Data/uicm_orig_sparse.npz'.
                self.score_matrix_1 = sps.load_npz(cache_path)
            else:
                print("uicm_sparse not found, create new one...")
                self.score_matrix_1 = self.recommender_1._compute_item_matrix_score(
                    np.arange(self.num_users))
                sps.save_npz(cache_path, self.score_matrix_1)

            self.score_matrix_2 = self.recommender_2._compute_item_matrix_score(
                np.arange(self.num_users))
            self.score_matrix_3 = self.recommender_3._compute_item_matrix_score(
                np.arange(self.num_users))
            self.score_matrix_4 = self.recommender_4._compute_item_matrix_score(
                np.arange(self.num_users))
            self.score_matrix_5 = self.recommender_5._compute_item_matrix_score(
                np.arange(self.num_users))
            self.score_matrix_6 = self.recommender_6._compute_item_score(
                np.arange(self.num_users))
            # NOTE(review): recommender_7's scores are never computed here —
            # confirm whether that is intentional.

            # Row-wise max normalization makes per-model scores comparable.
            # BUGFIX: score_matrix_1 was previously overwritten with the
            # normalized score_matrix_2 (copy-paste error), discarding the
            # content-based scores computed/loaded above.
            self.score_matrix_1 = normalize(self.score_matrix_1,
                                            norm='max',
                                            axis=1)
            self.score_matrix_2 = normalize(self.score_matrix_2,
                                            norm='max',
                                            axis=1)
            self.score_matrix_3 = normalize(self.score_matrix_3,
                                            norm='max',
                                            axis=1)
            self.score_matrix_4 = normalize(self.score_matrix_4,
                                            norm='max',
                                            axis=1)
            self.score_matrix_5 = normalize(self.score_matrix_5,
                                            norm='max',
                                            axis=1)
            self.score_matrix_6 = normalize(self.score_matrix_6,
                                            norm='max',
                                            axis=1)
# Script fragment: assemble several pre-trained / freshly-fitted recommenders
# on the full URM (names like recommenderELASTIC, URM_all, UCM_all are
# defined earlier, outside this excerpt).
# recommenderELASTIC.save_model('model/', file_name='SLIM_ElasticNet_max')
recommenderELASTIC.load_model('model/',
                              file_name='SLIM_ElasticNet_l1ratio_0_5')

# recommenderAlphaGRAPH = P3alphaRecommender(URM_all)
# recommenderAlphaGRAPH.fit(topK=10, alpha=0.41, implicit=True, normalize_similarity=True)

# RP3beta graph-based model fitted from scratch.
recommenderBetaGRAPH = RP3betaRecommender(URM_all)
recommenderBetaGRAPH.fit(topK=54,
                         implicit=True,
                         normalize_similarity=True,
                         alpha=1e-6,
                         beta=0.2,
                         min_rating=0)

# User-based collaborative KNN fitted from scratch.
recommenderUserKNN = UserKNNCFRecommender(URM_all)
recommenderUserKNN.fit(topK=550,
                       shrink=0,
                       similarity='jaccard',
                       normalize=True)

# IALS weights are reloaded from disk rather than re-trained.
recommenderIALS = IALSRecommender(URM_all)
# recommenderIALS.fit(epochs=200, alpha=1, reg=1e-4)
# recommenderIALS.save_model('model/', file_name='IALS_1_200_max')
recommenderIALS.load_model('model/', file_name='IALS_1_200_max')

# User content-based KNN on user features (UCM_all).
recommenderUserCBF = UserKNNCBFRecommender(URM_all, UCM_all)
recommenderUserCBF.fit(topK=3000,
                       shrink=10,
                       similarity='cosine',
                       normalize=True)