Example #1
 def test_kflk_perf_metric(self):
     np.random.seed(0)
     # raw scores for 4 stimuli x 10 subjects: unit-variance noise
     # around per-stimulus means 1, 2, 3, 4
     groundtruths = np.random.normal(0, 1.0, [4, 10]) + \
                    np.tile(np.array([1, 2, 3, 4]), [10, 1]).T
     # one objective score per stimulus
     predictions = [1, 2, 3, 4]
     metric = KflkPerfMetric(groundtruths, predictions)
     result = metric.evaluate()
     self.assertAlmostEqual(result['score'], 0.95, places=6)
     self.assertAlmostEqual(result['AUC_BW'], 0.9166666666666666, places=6)
     self.assertAlmostEqual(result['AUC_DS'], 0.95, places=6)
     self.assertAlmostEqual(result['CC_0'], 1.0, places=6)
     self.assertAlmostEqual(result['THR'], 3.0, places=6)
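Here groundtruths carries raw per-subject scores (4 stimuli x 10 subjects) while predictions carries one objective score per stimulus. The result keys follow the evaluation methodology of Krasula et al., whose initials "KFLK" presumably abbreviates: AUC_DS is the area under the ROC curve for the different-vs-similar task, AUC_BW the same for the better-vs-worse task, CC_0 the fraction of correct better/worse classifications at a zero decision threshold, and THR the decision threshold on predicted score differences; in this test 'score' coincides with AUC_DS.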
Example #2
 def test_kflk_metrics_performance(self):
     # load the Toyama dataset .mat file shipped with the repo's test resources
     mat_filepath = config.ROOT + '/python/test/resource/data_Toyama.mat'
     mat_dict = scipy.io.loadmat(mat_filepath)
     results = KflkPerfMetric._metrics_performance(mat_dict['objScoDif'], mat_dict['signif'])
     self.assertAlmostEqual(np.mean(results['AUC_DS']), 0.69767003960902052, places=6)
     self.assertAlmostEqual(np.mean(results['AUC_BW']), 0.94454700301894534, places=6)
     self.assertAlmostEqual(np.mean(results['CC_0']), 0.88105386206276415, places=6)
     self.assertAlmostEqual(np.mean(results['THR']), 6.2392849606450556, places=6)
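Unlike evaluate(), the private helper _metrics_performance consumes precomputed inputs: objScoDif (objective score differences per stimulus pair) and signif (a flag for whether each pair's subjective difference is statistically significant), here loaded from the Toyama dataset; the variable names suggest it mirrors the authors' reference MATLAB evaluation code.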
Example #3
    @classmethod
    def get_stats(cls, ys_label, ys_label_pred, ys_label_raw=None):

        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)

        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # Spearman rank-order correlation (SRCC)
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # Pearson linear correlation (PCC)
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # Kendall rank correlation
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        stats = {
            'RMSE': rmse,
            'SRCC': srcc,
            'PCC': pcc,
            'KENDALL': kendall,
            'ys_label': list(ys_label),
            'ys_label_pred': list(ys_label_pred)
        }

        if ys_label_raw is not None:
            # KFLK needs the raw per-subject scores, not the aggregate labels
            kflk = KflkPerfMetric(ys_label_raw, ys_label_pred) \
                .evaluate()['score']
            stats['KFLK'] = kflk

        return stats
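For context, here is a minimal usage sketch of get_stats. The owning class and import path are assumptions (vmaf's RegressorMixin is a plausible home, judging by the perf-metric classes used above), and all values are made up:

    import numpy as np
    # assumed import path; adjust to the actual project layout
    from vmaf.core.train_test_model import RegressorMixin

    np.random.seed(0)
    ys_label = [1.0, 2.0, 3.0, 4.0, 5.0]        # aggregate subjective scores
    ys_label_pred = [1.1, 1.9, 3.2, 3.8, 5.1]   # objective model predictions
    # raw per-subject scores (5 stimuli x 12 subjects), needed for KFLK
    ys_label_raw = np.random.normal(0, 1.0, [5, 12]) + \
                   np.tile(np.array(ys_label), [12, 1]).T
    stats = RegressorMixin.get_stats(ys_label, ys_label_pred,
                                     ys_label_raw=ys_label_raw)
    print(stats['SRCC'], stats['PCC'], stats['RMSE'], stats['KFLK'])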