def test_RANK_Spearman_different_list_sizes(self):
     """compute() must raise ValueError when the ground-truth and test
     rankings have different lengths."""
     gt_items = ['classical', 'instrumental', 'piano', 'baroque']
     test_items = ['classical', 'invented', 'baroque']
     metric = SpearmanRho()
     metric.load_ground_truth(gt_items)
     metric.load_test(test_items)
     # 4 GT items vs. 3 test items -> size mismatch must be rejected.
     assert_raises(ValueError, metric.compute)
Exemplo n.º 2
0
    def __init__(self):
        """Set up the rank-based metrics shared by the ranking tests."""
        super(TestRanking, self).__init__()
        # Rank-based metrics: KendallTau, SpearmanRho, MeanReciprocalRank,
        # ReciprocalRank
        self.kendall = KendallTau()
        self.spearman = SpearmanRho()
        self.kendall.load(self.GT_RANKING, self.TEST_RANKING)
        self.spearman.load(self.GT_RANKING, self.TEST_RANKING)

        # MRR accumulates one (ground truth, decision) pair per test decision.
        self.mrr = MeanReciprocalRank()
        for decision in self.TEST_DECISION:
            self.mrr.load(self.GT_DECISION, decision)
Exemplo n.º 3
0
 def test_RANK_Spearman_load_ground_truth(self):
     """load_ground_truth() should store every item of the GT ranking.

     BUG FIX: the original asserted the stored ground-truth length against
     ``len(self.TEST_DATA)`` even though it loaded ``self.GT_DATA`` — the
     test only passed if the two fixtures happened to be the same length.
     Compare against the list that was actually loaded.
     """
     spearman = SpearmanRho()
     spearman.load_ground_truth(self.GT_DATA)
     assert_equal(len(spearman.get_ground_truth()), len(self.GT_DATA))
Exemplo n.º 4
0
 def test_RANK_Spearman_load_test(self):
     """load_test() should retain every item of the supplied test ranking."""
     metric = SpearmanRho()
     metric.load_test(self.TEST_DATA)
     # The stored test list must be the same size as the one loaded.
     assert_equal(len(metric.get_test()), len(self.TEST_DATA))
Exemplo n.º 5
0
 def test_RANK_Spearman_compute_floats(self):
     """SpearmanRho over DATA_PRED evaluates to 0.947368 (~0.95)."""
     rho = SpearmanRho(self.DATA_PRED)
     assert_equal(rho.compute(), 0.947368)  # 0.95 when rounded to 2 dp?
Exemplo n.º 6
0
# Rating-scale bounds passed to predict() -- presumably used to clamp the
# predicted value into [MIN_RATING, MAX_RATING]; confirm against svd.predict.
MIN_RATING = 0.0
MAX_RATING = 5.0
# Sample (item, user) pair to inspect -- assumes both ids exist in the
# trained matrix; TODO confirm.
ITEMID = 1
USERID = 1
print svd.predict(ITEMID, USERID, MIN_RATING,
                  MAX_RATING)  # predicted rating value
print svd.get_matrix().value(ITEMID, USERID)  # real rating value

print ''
print 'GENERATING RECOMMENDATION'
# Top-5 recommendations for USERID, restricted to items the user has not
# rated yet (only_unknowns=True); is_row=False selects the user axis.
print svd.recommend(USERID, n=5, only_unknowns=True, is_row=False)

# Evaluation using prediction-based metrics: feed every (actual, predicted)
# rating pair from the test split into each metric accumulator.
rmse = RMSE()
mae = MAE()
spearman = SpearmanRho()
kendall = KendallTau()
#decision = PrecisionRecallF1()
for rating, item_id, user_id in test.get():
    try:
        pred_rating = svd.predict(item_id, user_id)
        rmse.add(rating, pred_rating)
        mae.add(rating, pred_rating)
        spearman.add(rating, pred_rating)
        kendall.add(rating, pred_rating)
    except KeyError:
        # Item/user unseen during training -- no prediction possible; skip.
        continue

print ''
print 'EVALUATION RESULT'
print 'RMSE=%s' % rmse.compute()