Example 1
def test_respow_perf_metric2(self):
    np.random.seed(0)
    # 100 stimuli with true scores 0..99, each rated 30 times with sigma-10 noise
    groundtruths = np.random.normal(0, 10.0, [100, 30]) + np.tile(np.arange(100), [30, 1]).T
    predictions = np.arange(100)
    metric = ResolvingPowerPerfMetric(groundtruths, predictions)
    result = metric.evaluate()
    self.assertAlmostEqual(result['score'], 9.0014569671225111, places=6)
Example 2
def test_respow_perf_metric(self):
    np.random.seed(0)
    # 4 stimuli with true scores 1..4, each rated 10 times with sigma-1 noise
    groundtruths = np.random.normal(0, 1.0, [4, 10]) + np.tile(np.array([1, 2, 3, 4]), [10, 1]).T
    predictions = [1, 2, 3, 4]
    metric = ResolvingPowerPerfMetric(groundtruths, predictions)
    result = metric.evaluate()
    self.assertAlmostEqual(result['resolving_power_95perc'], 1.2176359647113211, places=6)
    self.assertAlmostEqual(result['score'], 1.2176359647113211, places=6)
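Both tests exercise the same two-step API: construct the metric from a 2-D array of raw ground-truth scores (one row of rater scores per stimulus) and a vector of predictions, then call evaluate() and read the 'score' key. Below is a minimal standalone sketch of the same call outside a test class; the vmaf.core.perf_metric import path is an assumption (it matches the VMAF project layout but is not shown in the snippets here):

import numpy as np
# Assumed import path (VMAF project layout); adjust to your installation.
from vmaf.core.perf_metric import ResolvingPowerPerfMetric

np.random.seed(0)
# 4 stimuli with true scores 1..4, each rated 10 times with sigma-1 noise
groundtruths = np.random.normal(0, 1.0, [4, 10]) + np.tile(np.array([1, 2, 3, 4]), [10, 1]).T
predictions = [1, 2, 3, 4]

result = ResolvingPowerPerfMetric(groundtruths, predictions).evaluate()
print(result['score'])  # resolving power at the 95% significance level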
Example 3
    @classmethod
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):

        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)

        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # spearman
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # pearson
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # kendall
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        stats = {'RMSE': rmse,
                 'SRCC': srcc,
                 'PCC': pcc,
                 'KENDALL': kendall,
                 'ys_label': list(ys_label),
                 'ys_label_pred': list(ys_label_pred)}

        ys_label_raw = kwargs.get('ys_label_raw')

        if ys_label_raw is not None:
            try:
                # AUC
                auc = AucPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate()['score']
                stats['AUC'] = auc
            except TypeError:  # AUC does not work with a dictionary-style dataset
                stats['AUC'] = float('nan')

            try:
                # ResPow
                respow = ResolvingPowerPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate()['score']
                stats['ResPow'] = respow
            except TypeError:  # ResPow does not work with a dictionary-style dataset
                stats['ResPow'] = float('nan')

        if 'ys_label_stddev' in kwargs and kwargs['ys_label_stddev'] is not None:
            stats['ys_label_stddev'] = kwargs['ys_label_stddev']

        return stats
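A usage sketch for get_stats follows. In the VMAF codebase a method with this signature is defined as a classmethod on RegressorMixin; the import below assumes that layout, so treat the host class and path as assumptions rather than something established by the snippet above:

# Assumed host class and import path (VMAF project layout).
from vmaf.core.train_test_model import RegressorMixin

ys_label = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]        # ground-truth (e.g. mean opinion) scores
ys_label_pred = [1.2, 1.8, 3.1, 3.9, 5.2, 5.8]   # model predictions

stats = RegressorMixin.get_stats(ys_label, ys_label_pred)
print(stats['RMSE'], stats['SRCC'], stats['PCC'], stats['KENDALL'])

Passing ys_label_raw= (per-stimulus raw score arrays) additionally populates stats['AUC'] and stats['ResPow'], each falling back to float('nan') when the underlying metric raises TypeError.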