    def test_eval_score_needed_metrics_explicit_split(self):
        c = MetricCalculator(self.score_split_list)

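        # eval_metrics computes each score-prediction metric both system-wide and per user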
        system_res, each_user_res = c.eval_metrics([
            MAE(),
            MSE(),
            RMSE(),
        ])

        self.assertIsInstance(system_res, pd.DataFrame)
        self.assertIsInstance(each_user_res, pd.DataFrame)

    def test_pop_invalid_metric(self):
        recsys = ContentBasedRS(
            ClassifierRecommender({'Plot': 'tfidf'}, SkKNN(), threshold=3),
            self.ratings_original, movies_dir)

        # Try to compute score predictions with a pure ranking algorithm
        # (ClassifierRecommender can only produce rankings)
        metric_list = [MAE()]

        valid_metric = PredictionCalculator(self.split_list,
                                            recsys).calc_predictions(
                                                self.test_items_list,
                                                metric_list)
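        # The truth lists are class attributes of the metric base classes, filled by calc_predictions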
        score_truth = ScoresNeededMetric.score_truth_list
        rank_truth = RankingNeededMetric.rank_truth_list

        # The metric is excluded from the valid ones and nothing is calculated since
        # there aren't any others
        self.assertEqual(len(valid_metric), 0)
        self.assertEqual(len(score_truth), 0)
        self.assertEqual(len(rank_truth), 0)

        # Try to compute score predictions with a pure ranking algorithm, but this time
        # other types of metrics are also in the list
        metric_ranking = NDCG()
        metric_score = MAE()
        metric_list = [metric_score, metric_ranking]

        valid_metric = PredictionCalculator(self.split_list,
                                            recsys).calc_predictions(
                                                self.test_items_list,
                                                metric_list)
        score_truth = ScoresNeededMetric.score_truth_list
        rank_truth = RankingNeededMetric.rank_truth_list

        # The metric MAE is excluded from the valid ones but NDCG is valid so predictions
        # for that metric (RankingNeededMetric) are calculated
        self.assertIn(metric_ranking, valid_metric)
        self.assertNotIn(metric_score, valid_metric)

        self.assertEqual(len(score_truth), 0)
        self.assertGreater(len(rank_truth), 0)

    def test_all(self):
        ratings_filename = os.path.join(contents_path, '..', 'datasets',
                                        'examples', 'new_ratings.csv')

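        # Import the example ratings from the CSV file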
        ratings_frame = RatingsImporter(
            CSVFile(ratings_filename)).import_ratings()

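        # Content-based recommender: linear regression over tf-idf and embedding representations of the 'Plot' field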
        rs = ContentBasedRS(
            LinearPredictor(
                {"Plot": ['tfidf', 'embedding']},
                SkLinearRegression(),
            ), ratings_frame, items_dir)

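        # The catalog is the set of item ids (file names without extension) of the serialized '.xz' contents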
        catalog = {
            os.path.splitext(f)[0]
            for f in os.listdir(items_dir)
            if os.path.isfile(os.path.join(items_dir, f)) and f.endswith('xz')
        }

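        # Evaluate the recommender on K-fold splits using ranking, error, coverage and fairness metrics
        # with the TestItems methodology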
        em = EvalModel(rs,
                       KFoldPartitioning(),
                       metric_list=[
                           Precision(sys_average='micro'),
                           PrecisionAtK(1, sys_average='micro'),
                           RPrecision(),
                           Recall(),
                           RecallAtK(3),
                           FMeasure(1, sys_average='macro'),
                           FMeasureAtK(2, beta=1, sys_average='micro'),
                           NDCG(),
                           NDCGAtK(3),
                           MRR(),
                           MRRAtK(5),
                           Correlation('pearson', top_n=5),
                           Correlation('kendall', top_n=3),
                           Correlation('spearman', top_n=4),
                           MAE(),
                           MSE(),
                           RMSE(),
                           CatalogCoverage(catalog),
                           CatalogCoverage(catalog, k=2),
                           CatalogCoverage(catalog, top_n=3),
                           GiniIndex(),
                           GiniIndex(top_n=3),
                           DeltaGap({
                               'primo': 0.5,
                               'secondo': 0.5
                           })
                       ],
                       methodology=TestItemsMethodology())

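        # Run the whole evaluation pipeline and collect the results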
        result = em.fit()

    def test_calc_scores_content_based(self):
        recsys = ContentBasedRS(
            LinearPredictor({'Plot': 'tfidf'}, SkLinearRegression()),
            self.ratings_original, movies_dir)

        # We just need a Metric of the ScoresNeededMetric class to test
        metric_list = [MAE()]

        valid_metric = PredictionCalculator(self.split_list,
                                            recsys).calc_predictions(
                                                self.test_items_list,
                                                metric_list)
        score_truth = ScoresNeededMetric.score_truth_list

        # We expect this to be empty, since there are no RankingNeededMetric in the metric list
        rank_truth = RankingNeededMetric.rank_truth_list

        self.assertEqual(valid_metric, metric_list)
        self.assertGreater(len(score_truth), 0)
        self.assertEqual(len(rank_truth), 0)

    @classmethod
    def setUpClass(cls) -> None:
        cls.metric = MAE()