def __init__(self, data: DataObject):
     super(Hybrid100AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.rec1 = UserKNNCBFRecommender(data.ucm_all, data.urm_train)
     self.rec2 = MatrixFactorization_AsySVD_Cython(data.urm_train)
     self.random_seed = data.random_seed
     try:
         self.rec1.load_model(
             "stored_recommenders/user_cbf/",
             f"{self.random_seed}_topK=5000_shrink=5_feature_weighting=TF-IDF_similarity=euclidean"
         )
     except Exception:
         # No stored model for this seed and configuration: fit from scratch and cache it
         self.rec1.fit(topK=5000,
                       shrink=5,
                       feature_weighting="TF-IDF",
                       similarity="euclidean")
         self.rec1.save_model(
             "stored_recommenders/user_cbf/",
             f"{self.random_seed}_topK=5000_shrink=5_feature_weighting=TF-IDF_similarity=euclidean"
         )
     # Note: unlike rec1 there is no fit() fallback here; a pre-trained AsySVD model must already be stored
     self.rec2.load_model("Hybrid", "AsySVD")
     cold = data.ids_cold_user
     train_cold = data.urm_train_users_by_type[0][1]
     # if train_cold.shape[0] > 0:
     #     target_users = np.append(cold, train_cold)
     # else:
     #     target_users = cold
     target_users = data.ids_user
     self.hybrid_rec = Hybrid1CXAlphaRecommender(
         data,
         recommenders=[self.rec1, self.rec2],
         recommended_users=target_users,
         max_cutoff=30)
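
The try/except above is a simple load-or-fit cache: reuse a stored model when one exists for this random seed and configuration, otherwise fit and persist it. A minimal sketch of that idea as a standalone helper is shown below; the helper name is hypothetical, and the load_model/fit/save_model calls are only assumed to behave as they do in the snippet above.

import os


def load_or_fit(recommender, folder_path, file_name, **fit_kwargs):
    # Hypothetical helper mirroring the caching pattern above.
    # Assumes the recommender exposes load_model(folder, name), fit(**kwargs)
    # and save_model(folder, name), as the snippet does.
    try:
        recommender.load_model(folder_path, file_name)
    except Exception:
        # No cached model: fit from scratch and store it for later runs
        recommender.fit(**fit_kwargs)
        os.makedirs(folder_path, exist_ok=True)
        recommender.save_model(folder_path, file_name)
    return recommender
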
    def __init__(self,
                 data: DataObject,
                 k: int,
                 leave_k_out=0,
                 threshold=0,
                 probability=0.2):
        super(Hybrid400AlphaRecommender, self).__init__(data.urm_train)
        self.data = data
        self.max_cutoff = 30

        rec = ItemKNNCBFRecommender(data.urm_train, data.icm_all_augmented)
        rec.fit(topK=10)

        print(f"Not augmented {data.augmented_urm.nnz}")

        data.augmented_urm = augment_with_item_similarity_best_scores(
            data.augmented_urm,
            rec.W_sparse,
            500,
            value=0.3,
            remove_seen=False)
        # print(f"After User CBF {data.augmented_urm.nnz}")
        # data.augmented_urm = augment_with_best_recommended_items(data.augmented_urm, rec,
        #                                                          data.urm_train_users_by_type[1][1], 1, value=0.2)
        #
        print(f"After Item CBF {data.augmented_urm.nnz}")

        rec = Hybrid100AlphaRecommender(data)
        rec.fit()

        data.augmented_urm = augment_with_best_recommended_items(
            data.augmented_urm,
            rec,
            data.urm_train_users_by_type[0][1],
            1,
            value=1)
        # data.augmented_urm = augment_with_best_recommended_items(data.augmented_urm, rec,
        #                                                          data.ids_warm_train_users, 2, value=0.1)
        print(f"After User CBF {data.augmented_urm.nnz}")

        rec = None

        recs = Parallel(n_jobs=6)(delayed(
            par(data,
                leave_k_out=leave_k_out,
                threshold=threshold,
                probability=probability).split_and_fit)(i) for i in range(k))
        self.hybrid_rec = Hybrid1CXAlphaRecommender(
            data,
            recommenders=recs,
            recommended_users=data.ids_user,
            max_cutoff=self.max_cutoff)
        # Weights decay with the recommendation rank: square roots of a reversed Fibonacci sequence
        self.hybrid_rec.weights = np.array([
            np.sqrt(np.array(fib(30)[::-1])).astype(int).tolist()
            for _ in range(k)
        ])
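
The weights above rely on a fib helper that is not part of this excerpt. A minimal sketch, assuming fib(n) returns the first n Fibonacci numbers, so that reversing the list, taking square roots, and truncating to int produces weights that decay with the recommendation rank:

def fib(n):
    # Assumed behaviour: first n Fibonacci numbers, e.g. fib(6) -> [1, 1, 2, 3, 5, 8]
    sequence = []
    a, b = 1, 1
    for _ in range(n):
        sequence.append(a)
        a, b = b, a + b
    return sequence
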
 def __init__(self, data: DataObject):
     super(Hybrid109AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.rec1 = RP3betaRecommender(data.urm_train)
     self.rec2 = ItemKNNCFRecommender(data.urm_train)
     self.rec1.fit(topK=20, alpha=0.11, beta=0.18)
     self.rec2.fit(topK=18, shrink=850, similarity='jaccard', feature_weighting='BM25')
     cold = data.ids_cold_user
     self.target_users = data.urm_train_users_by_type[10][1]
     self.hybrid_rec = Hybrid1CXAlphaRecommender(data, recommenders=[self.rec1, self.rec2],
                                                 recommended_users=self.target_users, max_cutoff=30)
Example #4
 def __init__(self, data: DataObject):
     super(Hybrid104AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.rec1 = RP3betaRecommender(data.urm_train)
     self.rec1.fit(topK=26, alpha=0.25, beta=0.21)
     self.rec2 = ItemKNNCFRecommender(data.urm_train)
     self.rec2.fit(topK=10, shrink=1000, similarity="tanimoto", feature_weighting="BM25")
     target_users = data.urm_train_users_by_type[4][1]
     self.target_users = target_users
     self.hybrid_rec = Hybrid1CXAlphaRecommender(data, recommenders=[self.rec1, self.rec2],
                                                 recommended_users=target_users, max_cutoff=30)
 def __init__(self, data: DataObject, k: int, leave_k_out=0, threshold=0, probability=0.2):
     super(Hybrid500AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.max_cutoff = 30
     recs = Parallel(n_jobs=16)(
         delayed(par(data,
                     leave_k_out=leave_k_out,
                     threshold=threshold,
                     probability=probability).split_and_fit)
         (i)
         for i in range(k))
     self.hybrid_rec = Hybrid1CXAlphaRecommender(data, recommenders=recs,
                                                 recommended_users=data.ids_user, max_cutoff=self.max_cutoff)
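
Hybrid400 and Hybrid500 build their base recommenders the same way: each par(data, ...) instance is created in the parent process and its bound split_and_fit method is dispatched to a joblib worker, with the k results returned in submission order. A stripped-down sketch of the dispatch pattern, using a hypothetical worker class in place of par (whose definition is not part of this excerpt):

from joblib import Parallel, delayed


class SplitAndFitWorker:
    # Hypothetical stand-in for the par objects used above
    def __init__(self, data, leave_k_out=0, threshold=0, probability=0.2):
        self.data = data
        self.leave_k_out = leave_k_out
        self.threshold = threshold
        self.probability = probability

    def split_and_fit(self, seed):
        # The real implementation re-splits the URM and fits a recommender;
        # this sketch only returns the seed to show the data flow
        return seed


def build_base_recommenders(data, k, n_jobs=4):
    # One worker instance per run; Parallel preserves the order of the k results
    return Parallel(n_jobs=n_jobs)(
        delayed(SplitAndFitWorker(data).split_and_fit)(i) for i in range(k)
    )
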
Example #6
 def __init__(self, data: DataObject):
     super(Hybrid102AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.rec1 = UserKNNCFRecommender(data.urm_train)
     self.rec1.fit(topK=1000, shrink=4500, similarity="cosine", feature_weighting="TF-IDF")
     self.rec2 = ItemKNNCFRecommender(data.urm_train)
     self.rec2.fit(topK=2000, shrink=800, similarity="cosine", feature_weighting="TF-IDF")
     self.rec3 = SLIM_BPR_Cython(data.urm_train)
     self.rec3.fit(epochs=120, topK=800, lambda_i=0.1, lambda_j=0.1, learning_rate=0.0001)
     self.rec4 = RP3betaRecommender(data.urm_train)
     self.rec4.fit(topK=30, alpha=0.21, beta=0.25)
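     # Note: rec4 is fitted here but is not included in the recommender list passed to the hybrid below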
     target_users = data.urm_train_users_by_type[2][1]
     self.target_users = target_users
     self.hybrid_rec = Hybrid1CXAlphaRecommender(data, recommenders=[self.rec1, self.rec2, self.rec3],
                                                 recommended_users=target_users, max_cutoff=30)
Example #7
 def __init__(self, data: DataObject):
     super(Hybrid100AlphaRecommender, self).__init__(data.urm_train)
     self.data = data
     self.rec1 = UserKNNCBFRecommender(data.ucm_all, data.urm_train)
     self.rec2 = TopPop(data.urm_train)
     self.rec1.fit(topK=5000,
                   shrink=5,
                   feature_weighting="TF-IDF",
                   similarity="euclidean")
     self.rec2.fit()
     cold = data.ids_cold_user
     train_cold = data.urm_train_users_by_type[0][1]
     if train_cold.shape[0] > 0:
         target_users = np.append(cold, train_cold)
     else:
         target_users = cold
     self.target_users = target_users
     self.hybrid_rec = Hybrid1CXAlphaRecommender(
         data,
         recommenders=[self.rec1, self.rec2],
         recommended_users=target_users,
         max_cutoff=20)
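
For context, here is a minimal usage sketch for one of these wrappers. It assumes the class exposes the framework's usual recommend(user_id, cutoff=...) interface (the recommend method is not shown in this excerpt) and that data is an already-built DataObject.

recommender = Hybrid100AlphaRecommender(data)
recommender.fit()

# Produce top-10 lists for the cold users this hybrid was built to serve
for user_id in recommender.target_users[:10]:
    print(user_id, recommender.recommend(user_id, cutoff=10))
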
                 feature_weighting="TF-IDF",
                 similarity="cosine")
        base_recommenders.append([rec1, rec2, rec3, rec4, rec5])

        # TODO: Edit here
        # Select the type (group) of users on which to test the recommenders
        n, t_users, description = datas[j].urm_train_users_by_type[1]

        tested_users.append(t_users)

        # TODO: Edit here
        # This code assigns the weights automatically.
        # They can also be set manually, for example by reusing
        # the weights of a previously tuned recommender.
        cached_hybrid_rec = Hybrid1CXAlphaRecommender(datas[j],
                                                      base_recommenders[j],
                                                      tested_users[j],
                                                      max_cutoff=max_cutoff)
        # Edit the weights here if necessary.
        # The size of the weights matrix must be larger than the cutoff.
        cached_hybrid_rec.weights = np.array([
            [
                1.0, 0.7803233193098372, 0.4042650517804784,
                0.26444366709538053, 0.2752283977870088, 0.23751321242187357,
                0.24058696112550104, 0.23590434442820032, 0.1992990685621503,
                0.1878790301115923, 0.16690930495160153, 0.15325757938895435,
                0.1655181857400707, 0.16904924923310471, 0.13097329147255055,
                0.12288596504305309, 0.11857054225592471, 0.1219683401906944,
                0.12310761424074275, 0.12706253865684247, 0.10616907384825704,
                0.11435056385016795, 0.10762546479680782, 0.10853202794520854,
                0.10581279995920735, 0.030542092731989226, 0.02815866621379197,
                0.029606210545982038, 0.030081711307335356,