Code example #1
0
File: test_reco_binary.py  Project: fidelity/jurity
    def test_auc_extended(self):
        """AUC agrees between single-call scoring and batch-accumulated
        scoring with extended results (score plus support count)."""
        # Shared id columns for both frames: 4 users x 3 items each.
        users = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]
        items = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]

        labels = pd.DataFrame({
            Constants.user_id: users,
            Constants.item_id: items,
            'click': [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1],
        })
        scores = pd.DataFrame({
            Constants.user_id: users,
            Constants.item_id: items,
            'click': [0.3, 0.9, 0.4, 0.3, 0.2, 0.9,
                      0.2, 0.6, 0.9, 0.2, 0.7, 0.8],
        })

        # Immediate (single-call) AUC.
        immediate = BinaryRecoMetrics.AUC(click_column='click')
        self.assertEqual(0.78125, immediate.get_score(labels, scores))

        # Accumulated path: returns (batch score, extended results dict).
        accumulator = BinaryRecoMetrics.AUC(click_column='click')
        _, extended = accumulator.get_score(labels,
                                            scores,
                                            batch_accumulate=True,
                                            return_extended_results=True)
        self.assertEqual(0.78125, extended['auc'])
        self.assertEqual(12, extended['support'])
Code example #2
0
    def test_reco_quick_start_example(self):
        """Quick-start example: all binary and ranking reco metrics produce
        the documented scores on a tiny 4-user dataset.

        Fix: the CTR expectation (1/3) is not exactly representable as a
        binary float, so exact ``assertEqual`` against the repr literal is
        fragile; float-valued scores are now checked with
        ``assertAlmostEqual`` (default 7 decimal places).
        """
        # Data: one actual interaction and one prediction per user.
        actual = pd.DataFrame({
            "user_id": [1, 2, 3, 4],
            "item_id": [1, 2, 0, 3],
            "clicks": [0, 1, 0, 0]
        })
        predicted = pd.DataFrame({
            "user_id": [1, 2, 3, 4],
            "item_id": [1, 2, 2, 3],
            "clicks": [0.8, 0.7, 0.8, 0.7]
        })

        # Metrics
        auc = BinaryRecoMetrics.AUC(click_column="clicks")
        ctr = BinaryRecoMetrics.CTR(click_column="clicks")
        ncdg_k = RankingRecoMetrics.NDCG(click_column="clicks", k=3)
        precision_k = RankingRecoMetrics.Precision(click_column="clicks", k=2)
        recall_k = RankingRecoMetrics.Recall(click_column="clicks", k=2)
        map_k = RankingRecoMetrics.MAP(click_column="clicks", k=2)

        # Scores: tolerance-based comparison for the float-valued metrics.
        self.assertAlmostEqual(auc.get_score(actual, predicted), 0.25)
        self.assertAlmostEqual(ctr.get_score(actual, predicted), 1 / 3)
        self.assertAlmostEqual(ncdg_k.get_score(actual, predicted), 1)
        self.assertAlmostEqual(precision_k.get_score(actual, predicted), 1)
        self.assertAlmostEqual(recall_k.get_score(actual, predicted), 1)
        self.assertAlmostEqual(map_k.get_score(actual, predicted), 1)
Code example #3
0
File: test_reco_binary.py  Project: fidelity/jurity
    def test_auc_one_class(self):
        """AUC is NaN (undefined) when the actuals contain only one class:
        with no positive clicks there is no positive/negative split to rank."""
        auc_metric = BinaryRecoMetrics.AUC(click_column='click')

        # Ground truth: every click label is 0 — a single-class dataset.
        labels = pd.DataFrame({
            Constants.user_id: [1, 2, 3, 4],
            Constants.item_id: [1, 2, 0, 3],
            'click': [0, 0, 0, 0]
        })
        # Predictions still vary, but that cannot yield a defined AUC.
        scores = pd.DataFrame({
            Constants.user_id: [1, 2, 3, 4],
            Constants.item_id: [1, 2, 2, 3],
            'click': [0.1, 0.9, 0.1, 0.1]
        })

        result = auc_metric.get_score(labels, scores)
        self.assertTrue(np.isnan(result))