Example #1
    def evaluate_eurm(self, target_pids):
        assert self.mode == 'offline'
        eurm = sps.csr_matrix(self.eurm[target_pids])
        eurm = post.eurm_remove_seed(eurm, self.dr)
        rec_list = post.eurm_to_recommendation_list(eurm)
        res = self.ev.evaluate(rec_list, str(self), verbose=self.verbose_ev, return_result='all')
        return res
Example #2
def recsys(l):
    t1 = 0.25
    t2 = 0.65
    c = 0.4
    shrink = 50
    k = 200
    config = ('l=%.2f t1=%.2f t2=%.2f c=%.2f k=%d shrink=%d binary=False' %
              (l, t1, t2, c, k, shrink))
    #print(config)
    sim = ss.s_plus_similarity(urm.T,
                               urm,
                               k=k,
                               t1=t1,
                               t2=t2,
                               c=c,
                               l=l,
                               normalization=True,
                               shrink=shrink,
                               binary=False,
                               verbose=True)
    #Computing ratings and remove seed
    eurm = ss.dot_product(t_urm, sim.T, k=750)
    del sim
    eurm = eurm_remove_seed(eurm, dr)
    #evaluation
    res = ev.evaluate(eurm_to_recommendation_list(eurm), 'ciao', verbose=False)
    del eurm
    return res[0:3], config
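The recsys helpers in these examples read urm, t_urm, dr and ev from module scope. The sketch below is only an assumption about how that setup typically looks and how the function can be driven by a small grid over the tuned weight l: the construction of t_urm and the grid values are not taken from the snippet, and it presumes the module already holds the imports recsys uses internally (the ss similarity helpers, eurm_remove_seed, eurm_to_recommendation_list).

# Hypothetical driver for recsys() (assumed setup, not part of the original example)
from utils.datareader import Datareader
from utils.evaluator import Evaluator

dr = Datareader(mode='offline', only_load=True)
ev = Evaluator(dr)
urm = dr.get_urm()                 # full playlist x track interaction matrix
t_urm = urm[dr.get_test_pids()]    # assumed: rows of the test playlists, used to compute the ratings

for l in (0.2, 0.4, 0.6, 0.8):     # arbitrary candidate values for the tuned weight
    res, config = recsys(l)
    print(config, res)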
Example #3
    def fast_evaluate_eurm(self,
                           eurm,
                           target_pids=None,
                           name='fast_evaluation',
                           verbose=True,
                           do_plot=False,
                           show_plot=False,
                           save=False,
                           return_result='all'):
        """
        Directly evaluate a eurm of shape (10K, 2.2M), removing seed tracks and converting it
        into a recommendation list.
        """
        if target_pids is None:
            target_pids = self.datareader.get_test_pids()
        eurm = sparse.csr_matrix(eurm[target_pids])
        # eurm = post.eurm_remove_seed(eurm, datareader=self.datareader)
        # -> not needed: seed removal is now done inside eurm_to_recommendation_list (remove_seed=True)
        rec_list = post.eurm_to_recommendation_list(eurm,
                                                    verbose=False,
                                                    datareader=self.datareader,
                                                    remove_seed=True)
        result = self.evaluate(rec_list,
                               name,
                               verbose=verbose,
                               return_result=return_result,
                               do_plot=do_plot,
                               show_plot=show_plot,
                               save=save)
        return result
Example #4
def recsys(alpha, beta):
    k = 200
    shrink = 100
    config = ('alpha=%.2f beta=%.2f k=%d shrink=%d binary=False' %
              (alpha, beta, k, shrink))
    #print(config)
    sim = p3r3.p3alpha_rp3beta_similarity(p_iu,
                                          p_ui,
                                          pop,
                                          k=k,
                                          shrink=shrink,
                                          alpha=alpha,
                                          beta=beta,
                                          verbose=True,
                                          mode=1)
    #Computing ratings and remove seed
    eurm = ss.dot_product(t_urm, sim, k=750)
    del sim
    eurm = eurm_remove_seed(eurm, dr)
    #evaluation
    res = ev.evaluate(eurm_to_recommendation_list(eurm), 'ciao', verbose=False)
    del eurm
    return res[0:3], config
Example #5
    def fitnessFunction(self, individual):

        # Convert list into a numpy array
        individual = np.array(individual)

        # Make a copy of the UCM and filter it for each column
        if self.verbose:
            print('Filtering UCM...')
        start = time.time()
        UCM_filtered = self.UCM.copy()
        UCM_filtered = UCM_filtered.astype(np.float64)
        inplace_csr_column_scale(UCM_filtered, individual)
        if self.verbose:
            print('UCM filtered in', time.time() - start, 'sec')

        # Compute similarity
        if self.verbose:
            print('Computing similarity...')
        start = time.time()
        similarity = tversky_similarity(UCM_filtered, shrink=200, alpha=0.1,
                                        beta=1, target_items=self.test_playlists_indices,
                                        binary=False)
        similarity = similarity.tocsr()
        if self.verbose:
            print('Similarity computed in', time.time() - start, 'sec')

        # Compute eurm
        if self.verbose:
            print('Computing eurm...')
        start = time.time()
        eurm = dot_product(similarity, self.URM_train, k=500)
        if self.verbose:
            print('eurm computed in', time.time() - start, 'sec')
            print('Converting eurm in csr...')
        start = time.time()
        eurm = eurm.tocsr()
        eurm = eurm[self.test_playlists_indices, :]
        if self.verbose:
            print('eurm converted in', time.time() - start, 'sec')

        # Evaluate
        rec_list = eurm_to_recommendation_list(eurm)
        print('current', self.current)

        score_cat_1 = self.evaluator.evaluate_single_metric(rec_list, name='Genetic', metric='prec',
                                                            level='track', cat=1, verbose=False)
        score_cat_2 = self.evaluator.evaluate_single_metric(rec_list, name='Genetic', metric='prec',
                                                            level='track', cat=2, verbose=False)
        score = (score_cat_1 + score_cat_2) / 2

        self.current += 1

        if self.verbose:
            print(score)

        print("Numfeatures {}".format(np.sum(individual)))
        print('\n')

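        # NOTE: returned as a one-element tuple; evolutionary frameworks such as DEAP expect the fitness as an iterable of objective values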
        return score,
Example #6
def evaluate_shrinked(W_sparse, urm_shrinked, pids_shrinked):

    W_sparse = W_sparse[pids_shrinked]

    eurm = dot_product(W_sparse, urm_shrinked, k=750).tocsr()

    eurm = eurm_remove_seed(eurm=eurm)

    rec_list = eurm_to_recommendation_list(eurm)


    ev.evaluate(recommendation_list=rec_list,
                name="slim_structure_parametribase_BPR_epoca_0_noepoche",
                return_overall_mean=False,
                show_plot=False, do_plot=True)
Example #7
def recsys(shrink):
    config = ('alpha=0.4 k=200 shrink=%d binary=False' % (shrink))
    print(config)
    sim = ss.cosine_similarity(urm.T,
                               urm,
                               k=200,
                               alpha=0.4,
                               shrink=shrink,
                               binary=False,
                               verbose=True)
    #Computing ratings and remove seed
    eurm = ss.dot_product(t_urm, sim.T, k=750)
    del sim
    eurm = eurm_remove_seed(eurm, dr)
    #evaluation
    res = ev.evaluate(eurm_to_recommendation_list(eurm), 'ciao', verbose=False)
    del eurm
    return res[0:3], config
    def obiettivo(self, x):

        eurm = sum(x[i] * matrix
                   for i, matrix in enumerate(self.matrices_array))

        # real objective function
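        # (negated: the surrounding optimizer minimizes, so a lower value of ris means a better target metric)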
        ris = -self.ev.evaluate_single_metric(eurm_to_recommendation_list(
            eurm, cat=self.cat, remove_seed=False, verbose=False),
                                              verbose=False,
                                              cat=self.cat,
                                              name="ens" + str(self.cat),
                                              metric=self.target_metric,
                                              level='track')
        # memory variables
        if self.x0 is None:
            self.x0 = [x]
            self.y0 = [ris]
        else:
            self.x0.append(x)
            self.y0.append(ris)

        self.global_counter += 1
        if ris < self.best_score:
            print("[NEW BEST]")
            self.pretty_print(ris, x)
            self.best_score = ris
            self.best_params = x.copy()
            self.best_params_dict = dict(zip(self.matrices_names, x.copy()))
            b = [self.best_params_dict, ris]
            save_obj(b, "best/cat" + str(self.cat))
        elif self.verbose:
            self.pretty_print(ris, x)

        return ris
def recsys(shrink):
    alpha = 0.25
    beta = 0.65
    k = 200
    config = ('alpha=%.2f beta=%.2f k=%d shrink=%d binary=False' %
              (alpha, beta, k, shrink))
    #print(config)
    sim = ss.tversky_similarity(urm.T,
                                urm,
                                k=k,
                                alpha=alpha,
                                beta=beta,
                                shrink=shrink,
                                binary=False,
                                verbose=True)
    #Computing ratings and remove seed
    eurm = ss.dot_product(t_urm, sim.T, k=750)
    del sim
    eurm = eurm_remove_seed(eurm, dr)
    #evaluation
    res = ev.evaluate(eurm_to_recommendation_list(eurm), 'ciao', verbose=False)
    del eurm
    return res[0:3], config
    rprec = []
    for i in range(0, 10):
        print("[ Ensembling cat", i + 1, "]")
        rprec.append(ensembler(matrix[i], w[i], normalization_type="max"))
    res = sps.vstack(rprec).tocsr()

    import time
    name = "ensemble-" + mode + "-data-" + time.strftime(
        "%x") + "-" + time.strftime("%X")
    name = name.replace("/", "_")
    sps.save_npz("results/" + name + ".npz", res)

    print("[ Initizalizing Datereader ]")
    dr = Datareader(verbose=False, mode=mode, only_load="False")

    res = eurm_to_recommendation_list(res, datareader=dr)

    if mode == "offline":
        print("[ Initizalizing Evaluator ]")
        ev = Evaluator(dr)
        ev.evaluate(res, name="ens")

    if mode == "online":
        print("[ Initizalizing Submitter ]")
        sb = Submitter(dr)
        sb.submit(recommendation_list=res,
                  name=name,
                  track="main",
                  verify=True,
                  gzipped=False)
matrix = [a, b, c, d, e, f, g]

a = float(sys.argv[1])
b = float(sys.argv[2])
c = float(sys.argv[3])
d = float(sys.argv[4])
e = float(sys.argv[5])
f = float(sys.argv[6])
g = float(sys.argv[7])

res = ensembler(matrix, [a, b, c, d, e, f, g], normalization_type="max")

ev = Evaluator(dr)
ret = [
    -ev.evaluate_single_metric(eurm_to_recommendation_list(res, cat=cat),
                               cat=cat,
                               name="ens" + str(cat),
                               metric='prec',
                               level='track')
]

if os.path.isfile("best.npy"):
    best = np.load("best.npy")
    if ret[0] < best[-1].astype(float):
        b = sys.argv[1:]
        b.append(ret[0])
        np.save("best", b)
else:
    b = sys.argv[1:]
    b.append(ret[0])
    np.save("best", b)
# rp3b = sps.load_npz(ROOT_DIR + "/data/sub/EURM-rp3beta-online.npz")
# knn_c_i_al = sps.load_npz(ROOT_DIR + "/data/sub/KNN CONTENT ITEM-album-top_k=850-sm_type=cosine-shrink=100.npz")
# knn_c_i_ar = sps.load_npz(ROOT_DIR + "/data/sub/KNN CONTENT ITEM-artist-top_k=850-sm_type=cosine-shrink=100.npz")
nlp = sps.load_npz(ROOT_DIR + "/data/eurm_nlp_offline.npz")
# cf_u = sps.load_npz(ROOT_DIR + "/data/sub/eurm_cfu_online.npz")

eurm_ens = sps.load_npz(ROOT_DIR + "/data/ENSEMBLED.npz")

#matrix = [rp3b, knn_c_i_ar, knn_c_i_al, nlp, cf_u]

#eurm_ens = ensembler(matrix, [0.720, 0.113, 0.177, 0.194, 1.0], normalization_type="max")

# HOLEBOOST
hb = HoleBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l1_row)
eurm_ens = hb.boost_eurm(categories=[8, 10], k=300, gamma=5)

# NINEBOOST
nb = TailBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l2_row)
eurm_ens = nb.boost_eurm(last_tracks=10, k=100, gamma=0.01)

rec_list = eurm_to_recommendation_list(eurm_ens)
rec_list_nlp = eurm_to_recommendation_list(nlp)

indices = dr.get_test_pids_indices(cat=1)
for i in indices:
    rec_list[i] = rec_list_nlp[i]

# EVALUATION
ev.evaluate(rec_list, name='ens_with_cfu_nineboosted', show_plot=False)
        print(arg)
        best = list(arg[1:].astype(np.float))
        w.append(best)

    for i in tqdm(range(1,11)):
        if mode == "offline":

            CBF_ALBUM = sps.load_npz(mode+"/offline-cbf_item_album-cat"+str(i)+".npz")
            CBF_ARTISTA = sps.load_npz(mode+"/offline-cbf_item_artist-cat"+str(i)+".npz")
            NLP = norm_max_row(sps.load_npz(mode + "/nlp_eurm_offline_bm25-cat" + str(i) + ".npz"))
            RP3BETA = sps.load_npz(mode+"/offline-rp3beta-cat"+str(i)+".npz")
            CF_USER = sps.load_npz(mode + "/cfu_eurm-cat"+str(i)+".npz")
            SLIM = sps.load_npz(mode +"/slim_bpr_completo_test1-cat"+str(i)+".npz")
            CBF_USER_ARTIST = sps.load_npz(mode +"/eurm_cbfu_artists_offline-cat"+str(i)+".npz")


        matrix = [CBF_ALBUM, CBF_ARTISTA, NLP, RP3BETA, CF_USER, SLIM, CBF_USER_ARTIST]

        we = w[i-1]

        res.append(ensembler(matrix, we, normalization_type="lele"))

    ret = sps.vstack(res).tocsr()
    if mode == "offline":
        ev.evaluate(eurm_to_recommendation_list(ret), "best_test", verbose=True)

#    sps.save_npz("ensemble_per_cat_"+mode+"_new_data_28_maggio.npz", ret)
    if mode == "online":
        sb = Submitter(dr)
        sb.submit(recommendation_list=eurm_to_recommendation_list_submission(ret), name="best_test", track="main", verify=True, gzipped=False)
"""
Simple code to evaluate a recommender; you need a sparse eURM of shape (10K, 2.2M).
"""

from utils.datareader import Datareader
from utils.evaluator import Evaluator
import utils.post_processing as post
import scipy.sparse as sps

filename = "file.npz"
output_name = "matrix_factorization"

if __name__ == '__main__':

    dr = Datareader(mode="offline", only_load=True)
    ev = Evaluator(dr)
    pids = dr.get_test_playlists().transpose()[0]
    algorithm_eurm_full = sps.load_npz(filename)
    algorithm_eurm_small = algorithm_eurm_full[pids]

    ev.evaluate(post.eurm_to_recommendation_list(algorithm_eurm_small),
                name=output_name,
                verbose=True,
                show_plot=True,
                save=True)
                                 k=knn,
                                 verbose=1,
                                 binary=False)
        sim = sim.tocsr()

        # Prediction
        eurm = dot_product(sim, urm, k=topk)
        eurm = eurm.tocsr()
        eurm = eurm[test_pids, :]

        # Save eurm
        if save_eurm:
            sps.save_npz('eurm_' + name + '_' + mode + '.npz', eurm)

        # Evaluation
        ev.evaluate(recommendation_list=eurm_to_recommendation_list(
            eurm, datareader=dr),
                    name=complete_name)

    elif mode == "online":
        # Initialization
        dr = Datareader(verbose=False, mode=mode, only_load=True)
        test_pids = list(dr.get_test_pids())
        sb = Submitter(dr)
        urm = dr.get_urm()

        # UCM
        ucm_artists = dr.get_ucm_albums()
        ucm_artists = bm25_row(ucm_artists)

        # Do not train on challenge set
        ucm_artists_T = ucm_artists.copy()
from utils.post_processing import eurm_to_recommendation_list, eurm_remove_seed
from personal.Ervin.Word2Vec_recommender import W2VRecommender
from personal.Ervin.ItemRank import ItemRank
from personal.Ervin.tf_collaborative_user import TF_collaborative_user
from recommenders.knn_collaborative_item import Knn_collaborative_item


if __name__ == '__main__':
    dr = Datareader(only_load=True, mode='offline', test_num='1', verbose=False)
    pid = dr.get_test_playlists().transpose()[0]
    urm = dr.get_urm()
    urm.data = np.ones(len(urm.data))
    ev = Evaluator(dr)

    TFRec = Knn_collaborative_item()
    W2V = W2VRecommender()
    TFRec.fit(urm, pid)
    W2V.fit(urm, pid)

    TFRec.compute_model(verbose=True, top_k=850)
    TFRec.compute_rating(top_k=750, verbose=True, small=True)
    W2V.compute_model(verbose=True, size=50, window=None)
    W2V.compute_rating(verbose=True, small=True, top_k=750)
    TFRec.eurm = norm_l1_row(eurm_remove_seed(TFRec.eurm, dr))
    W2V.eurm = norm_l1_row(eurm_remove_seed(W2V.eurm, dr))

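    # convex blend of the two normalized eURMs: sweep the mixing weight alpha and evaluate each blend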
    for alpha in np.arange(0.9, 0, -0.1):
        print('[ Alpha = {:.1f}]'.format(alpha))
        eurm = alpha * TFRec.eurm + (1-alpha)*W2V.eurm
        ev.evaluate(recommendation_list=eurm_to_recommendation_list(eurm, remove_seed=False, datareader=dr),
                name="KNNItem_W2V"+str(alpha), old_mode=False, save=True)
Example #17
        #Computing similarity/model
        rec.compute_model(top_k=knn,
                          sm_type=tversky_similarity,
                          shrink=200,
                          alpha=0.1,
                          beta=1,
                          binary=True,
                          verbose=True)

        #Computing ratings
        rec.compute_rating(top_k=topk, verbose=True, small=True)

        #evaluation and saving
        sps.save_npz(complete_name + ".npz", rec.eurm)
        ev = Evaluator(dr)
        ev.evaluate(eurm_to_recommendation_list(rec.eurm), name=complete_name)

    elif mode == "online":
        """Submission"""
        #Data initialization
        dr = Datareader(verbose=True, mode=mode, only_load=False)

        #Recommender algorithm initialization
        rec = Knn_collabrative_user()

        #Getting data for the recommender algorithm
        urm = dr.get_urm()
        pid = dr.get_test_pids()

        #Fitting data
        rec.fit(urm, pid)
        warnings.warn(
            'This function still uses the old version of remove seed; it should be replaced soon by the one in the post_processing class'
        )

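        # mask the seed tracks: every (playlist, track) pair already present in the URM gets score -1 so it can never be recommended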
        self.urm = sps.csr_matrix(self.urm[self.pid])
        tmp = self.urm.tocoo()
        row = tmp.row
        col = tmp.col
        self.eurm = sps.lil_matrix(self.eurm)
        self.eurm[row, col] = -1
        self.eurm = sps.csr_matrix(self.eurm)

        return self.eurm


if __name__ == '__main__':
    from utils.datareader import Datareader
    dr = Datareader(verbose=False, mode="offline", only_load="False")

    rec = Top_pop()
    rec.fit(dr.get_urm(), dr.get_test_playlists().transpose()[0])
    eurm = rec.compute_rating().tocsr()
    sps.save_npz("top_pop online.npz", eurm.tocsr())
    exit()
    import utils.evaluator as ev
    from utils.post_processing import eurm_to_recommendation_list
    eva = ev.Evaluator(dr)

    eva.evaluate(eurm_to_recommendation_list(eurm), "cacca TOPTOP")
        # INJECTING URM POS with only last 25 songs
        rec.urm = dr.get_last_n_songs_urm(n=cut)

        #Computing ratings
        rec.compute_rating(top_k=topk, verbose=True, small=True)
        lastsongs_eurm = rec.eurm.copy()

        sps.save_npz(complete_name + "_LAST.npz", rec.eurm)

        eurm = norm_max_row(normal_eurm) + norm_max_row(lastsongs_eurm)

        sps.save_npz(complete_name + ".npz", rec.eurm)

        #evaluation
        ev = Evaluator(dr)
        ev.evaluate(eurm_to_recommendation_list(rec.eurm), name, verbose=True)

#TODO
    if mode == "online":

        ### Submission ###

        #Data initialization
        dr = Datareader(verbose=True, mode='online', only_load=True)

        #Recommender algorithm initialization
        rec = R_p_3_beta()

        #Submitter initialization
        sb = Submitter(dr)
        # Compute similarity (playlists x playlists)
        sim = tversky_similarity(ucm,
                                 ucm.T,
                                 k=knn,
                                 shrink=0,
                                 alpha=1,
                                 beta=0.1)
        sim = sim.tocsr()

        # Recommendation
        eurm = dot_product(sim, urm, k=topk)
        eurm = eurm.tocsr()
        eurm = eurm[test_pids, :]

        rec_list = eurm_to_recommendation_list(eurm, dr)

        if save_eurm:
            sps.save_npz(mode + "_" + name + ".npz", eurm, compressed=False)

        # Submission
        ev = Evaluator(dr)
        ev.evaluate(rec_list, name=name)

    elif mode == 'online':
        # Setup
        sb = Submitter(dr)
        urm = dr.get_urm()
        test_pids = dr.get_test_pids()

        # Init object
Example #21
matrix = [a, b, c, d, e, f, g, h]

a = float(sys.argv[1])
b = float(sys.argv[2])
c = float(sys.argv[3])
d = float(sys.argv[4])
e = float(sys.argv[5])
f = float(sys.argv[6])
g = float(sys.argv[7])
h = float(sys.argv[8])


res = ensembler(matrix, [a, b, c, d, e, f, g, h], normalization_type="max")

ev = Evaluator(dr)
ret = [-ev.evaluate_single_metric(eurm_to_recommendation_list(res, cat=cat), cat=cat, name="ens"+str(cat), metric='prec', level='track')]


if os.path.isfile("best.npy"):
    best = np.load("best.npy")
    if ret[0] < best[-1].astype(float):
        b = sys.argv[1:]
        b.append(ret[0])
        np.save("best", b)
else:
    b = sys.argv[1:]
    b.append(ret[0])
    np.save("best", b)

np.save("ret", ret)
eurm_knn_album = sps.load_npz(
    ROOT_DIR +
    "/data/offline/ENSEMBLE - KNN CONTENT ITEM - album - top_k = 100 - sm_type = cosine - shrink = 100.npz"
)
eurm_knn_artist = sps.load_npz(
    ROOT_DIR +
    "/data/offline/ENSEMBLE - KNN CONTENT ITEM - artist - top_k = 100 - sm_type = cosine - shrink = 100.npz"
)
eurm_rp3 = sps.load_npz(
    ROOT_DIR +
    "/data/offline/ENSEMBLE - RP3BETA - top_k=100 - shrink=100 - alpha=0.5 - beta=0.4.npz"
)
eurm_nlp = sps.load_npz(ROOT_DIR + "/data/eurm_nlp_offline.npz")

# Convert in rec_list
rec_list_rp3 = eurm_to_recommendation_list(eurm_rp3)
rec_list_knn_album = eurm_to_recommendation_list(eurm_knn_album)
rec_list_knn_artist = eurm_to_recommendation_list(eurm_knn_artist)
rec_list_nlp = eurm_to_recommendation_list(eurm_nlp)

# Round Robin
RR = RoundRobin(
    [rec_list_rp3, rec_list_knn_album, rec_list_knn_artist, rec_list_nlp],
    weights=None)
rec_list_rr = rec_list_rp3

for k in [20, 50, 70, 90, 100, 120, 150, 180, 200, 230]:
    for i in tqdm(range(1000, len(rec_list_rp3)), desc='Round Robin ' + str(k)):
        prediction = RR.rr_avg(playlist_index=i, rec_index=0, cut_off=k, K=k)
        #prediction = RR.rr_jmp(playlist_index=i, K=k)
Example #23
                                  URM_validation=None)

    cfw.fit()

    weights = sps.diags(cfw.D_best)

    sps.save_npz("ICM_fw_maurizio", weights)

    ICM_weighted = ICM.dot(weights)

    sps.save_npz("ICM_fw_maurizio", ICM_weighted)

    ######## NOI
    urm = dr.get_urm()
    pid = dr.get_test_pids()

    cbfi = Knn_content_item()
    cbfi.fit(urm, ICM_weighted, pid)

    cbfi.compute_model(top_k=knn,
                       sm_type=COSINE,
                       shrink=0,
                       binary=False,
                       verbose=True)
    cbfi.compute_rating(top_k=topk, verbose=True, small=True)

    sps.save_npz(complete_name + ".npz", cbfi.eurm)
    ev = Evaluator(dr)
    ev.evaluate(recommendation_list=eurm_to_recommendation_list(cbfi.eurm),
                name=complete_name)
def objective_function(x):
    global best_score, global_counter, best_params, start_time, x0,y0

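    # weighted linear blend of the candidate eURMs: x holds one weight per matrix in matrices_array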
    eurm = sum(x[i] * matrix for i, matrix in enumerate(matrices_array))

    # real objective function
    ris = -ev.evaluate_single_metric(eurm_to_recommendation_list(eurm, cat=cat, remove_seed=False, verbose=False),
                                     verbose=False,
                                     cat=cat,
                                     name="ens" + str(cat),
                                     metric=target_metric,
                                     level='track')

    if x0 is None:
        x0 = [x]
        y0 = [ris]
    else:
        x0.append(x)
        y0.append(ris)


    global_counter += 1
    if ris < best_score:
        best_score = ris
        best_params = x.copy()

        pretty_print(ris, x, start_time)

        best_params_dict = dict(zip(matrices_names+['norm'], x.copy()+[norm_name]))
        if not os.path.exists(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/'):
            os.mkdir(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/')
        dump_params_dict(your_dict=best_params_dict, name= "cat" + str(cat) + "_params_dict",
                 path=ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/')
    elif global_counter%25==0:
        pretty_print(ris, x, start_time)

    ## print and save memory x0,y0 every 25 calls
    if global_counter%25==0:
        if not os.path.exists(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/memory/'):
            os.mkdir(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/memory/')

        save_obj(x0, "cat" + str(cat) + "_x0_MEMORY",
                 path=ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/memory/')

        save_obj(y0, "cat" + str(cat) + "_y0_MEMORY",
                 path=ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/memory/')


    #### condition with no results
    if global_counter == 10 and best_score ==0:
        best_params_dict = dict(zip(matrices_names + ['norm'], [0.0 for a in range(len(x))] + [norm_name]))
        if not os.path.exists(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/'):
            os.mkdir(ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/')
        dump_params_dict(your_dict=best_params_dict, name="cat" + str(cat) + "_params_dict",
                         path=ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/')
        pretty_print(ris, x, start_time)
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        print("CAT"+str(cat)+" HAS NO lines, EXIT ")
        exit()

    start_time = time.time()
    return ris
import utils.pre_processing as pre
from boosts.hole_boost import HoleBoost
from scipy import sparse
from utils.datareader import Datareader
from utils.definitions import ROOT_DIR
from utils.evaluator import Evaluator
from utils.post_processing import eurm_to_recommendation_list

# Initialization
dr = Datareader(mode='offline', only_load=True)
ev = Evaluator(dr)

# Load matrices
eurm = sparse.load_npz(ROOT_DIR + '/data/eurm_rp3_offline.npz')
sim = sparse.load_npz(ROOT_DIR + '/data/sim_offline.npz')
print('Loaded')

# Normalization
eurm = pre.norm_l2_row(eurm)
sim = pre.norm_l2_row(sim)

# HoleBoost

h = HoleBoost(sim, eurm, dr)
eurm_b = h.boost_eurm(categories=[2, 3, 4, 5, 6, 7, 8, 9, 10], k=200, gamma=10)

#sparse.save_npz(ROOT_DIR + '/data/eurm_boosted_online.npz', eurm_b)
rec_list = eurm_to_recommendation_list(eurm_b)

# Evaluation
ev.evaluate(rec_list, name='rp3_l2_all_200_10', save=True, show_plot=False)
        best_params_dict = read_params_dict(name='cat' + str(cat) + '_params_dict',
                 path=ROOT_DIR + '/bayesian_scikit/' + configuration_name + '/best_params/')


        norm = best_params_dict['norm']
        del best_params_dict['norm']
        # scale each normalized eURM slice by its ensemble weight
        eurms_full = [value_from_bayesian * norms[norm](matrices_loaded[name][start_index:end_index])
                      for name, value_from_bayesian in best_params_dict.items()]
        # and sum them up
        eurms_cutted[cat-1] = sum(eurms_full)

        # adding to reclist
        rec_list[start_index:end_index] = eurm_to_recommendation_list(eurm=eurms_cutted[cat-1],
                                                                      cat=cat,
                                                                      verbose=False)[start_index:end_index]

    eurm = eurms_cutted[0]
    for i in range(1,10):
        eurm = sps.vstack([eurm, eurms_cutted[i]])


    sps.save_npz(file='../'+configuration_name+'/ensembled_'+configuration_name+'_'+mode, matrix=eurm)

    if mode=='offline':
        ev = Evaluator(dr)
        ev.evaluate(recommendation_list=rec_list, name=configuration_name)
    else:
        sb = Submitter(dr)
        sb.submit(recommendation_list=rec_list, name=configuration_name)
Example #27
    # TopPop Album
    album = artists_dic[track_ind]
    playlists = ucm_album.indices[ucm_album.indptr[album]:ucm_album.
                                  indptr[album + 1]]

    top = urm[playlists].sum(axis=0).A1.astype(np.int32)

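    # indices of the 501 most popular tracks among these playlists, most popular first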
    track_ind_rec = top.argsort()[-501:][::-1]

    eurm2[row, track_ind_rec] = top[track_ind_rec]

eurm1 = eurm1.tocsr()[pids_all]
eurm2 = eurm2.tocsr()[pids_all]

eurm1 = eurm_remove_seed(eurm1, dr)
eurm2 = eurm_remove_seed(eurm2, dr)

sps.save_npz("test1.npz", eurm1)

rec_list1 = eurm_to_recommendation_list(eurm1)
rec_list2 = eurm_to_recommendation_list(eurm2)
rec_list3 = append_rec_list(rec_list1 + rec_list2)

ev = Evaluator(dr)
ev.evaluate(rec_list1, name="enstest", level='track')
ev.evaluate(rec_list2, name="enstest", level='track')
ev.evaluate(rec_list3, name="enstest", level='track')

# rec.append(list(top_p))
Example #28
        self.eurm = self.eurm.T
        self.eurm.eliminate_zeros()

        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))

        return self.eurm.tocsr()


if __name__ == '__main__':
    dr = Datareader(only_load=True,
                    mode='offline',
                    test_num='1',
                    verbose=False)
    pid = dr.get_test_playlists().transpose()[0]
    urm = dr.get_urm()
    ev = Evaluator(dr)

    urm.data = np.ones(urm.data.shape[0])

    IR = ItemRank()
    IR.fit(urm, pid)

    IR.compute_model(verbose=True, top_k=850)
    IR.compute_rating(top_k=750, verbose=True, small=True, iter=2)

    ev.evaluate(recommendation_list=eurm_to_recommendation_list(
        IR.eurm, remove_seed=True, datareader=dr),
                name="ItemRank",
                old_mode=False)