Example 1
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):

        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)

        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # spearman
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # pearson
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # kendall
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        stats = {'RMSE': rmse,
                 'SRCC': srcc,
                 'PCC': pcc,
                 'KENDALL': kendall,
                 'ys_label': list(ys_label),
                 'ys_label_pred': list(ys_label_pred)}

        ys_label_raw = kwargs.get('ys_label_raw')

        if ys_label_raw is not None:
            try:
                # AUC
                auc = AucPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate()['score']
                stats['AUC'] = auc
            except TypeError:  # AUC would not work with dictionary-style dataset
                stats['AUC'] = float('nan')

            try:
                # ResPow
                respow = ResolvingPowerPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate()['score']
                stats['ResPow'] = respow
            except TypeError:  # ResPow would not work with dictionary-style dataset
                stats['ResPow'] = float('nan')

        if 'ys_label_stddev' in kwargs and kwargs['ys_label_stddev'] is not None:
            stats['ys_label_stddev'] = kwargs['ys_label_stddev']

        return stats
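
The metric helpers used above (RmsePerfMetric, SrccPerfMetric, PccPerfMetric, KendallPerfMetric, AucPerfMetric, ResolvingPowerPerfMetric) come from the surrounding project, not the standard library. For readers without that project installed, the four core statistics can be reproduced with numpy and scipy. The sketch below (get_stats_plain is an invented name) mirrors the dict that get_stats builds, but skips whatever fitted mapping enable_mapping=True applies, so its RMSE is the plain, unmapped value:

    import numpy as np
    from scipy import stats as sps

    def get_stats_plain(ys_label, ys_label_pred):
        # plain (unmapped) counterparts of the RMSE/SRCC/PCC/KENDALL entries above
        y = np.asarray(ys_label, dtype=float)
        p = np.asarray(ys_label_pred, dtype=float)
        return {
            'RMSE': float(np.sqrt(np.mean((y - p) ** 2))),
            'SRCC': float(sps.spearmanr(y, p)[0]),
            'PCC': float(sps.pearsonr(y, p)[0]),
            'KENDALL': float(sps.kendalltau(y, p)[0]),
            'ys_label': [float(v) for v in y],
            'ys_label_pred': [float(v) for v in p],
        }

    print(get_stats_plain([1, 2, 3, 4], [1, 2, 3, 5])['RMSE'])  # 0.5, matching Example 4 below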
Example 2
    def get_stats(cls, ys_label, ys_label_pred, **kwargs):

        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)

        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # spearman
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # pearson
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # kendall
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        stats = {
            'RMSE': rmse,
            'SRCC': srcc,
            'PCC': pcc,
            'KENDALL': kendall,
            'ys_label': list(ys_label),
            'ys_label_pred': list(ys_label_pred)
        }

        ys_label_raw = kwargs.get('ys_label_raw')

        if ys_label_raw is not None:
            try:
                # KFLK
                kflk = KflkPerfMetric(ys_label_raw, ys_label_pred) \
                    .evaluate()['score']
                stats['KFLK'] = kflk
            except TypeError:  # KFLK would not work with dictionary-style dataset
                stats['KFLK'] = float('nan')

        return stats
Example 3
    def get_stats(cls, ys_label, ys_label_pred, ys_label_raw=None):

        # cannot have None
        assert all(x is not None for x in ys_label)
        assert all(x is not None for x in ys_label_pred)

        # RMSE
        rmse = RmsePerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # spearman
        srcc = SrccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # pearson
        pcc = PccPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        # kendall
        kendall = KendallPerfMetric(ys_label, ys_label_pred) \
            .evaluate(enable_mapping=True)['score']

        stats = {
            'RMSE': rmse,
            'SRCC': srcc,
            'PCC': pcc,
            'KENDALL': kendall,
            'ys_label': list(ys_label),
            'ys_label_pred': list(ys_label_pred)
        }

        if ys_label_raw is not None:
            # KFLK
            kflk = KflkPerfMetric(ys_label_raw, ys_label_pred) \
                .evaluate()['score']
            stats['KFLK'] = kflk

        return stats
Example 4
    def test_rmse_perf_metric2(self):
        groundtruths = [1, 2, 3, 4]
        predictions = [1, 2, 3, 5]
        metric = RmsePerfMetric(groundtruths, predictions)
        result = metric.evaluate()
        self.assertAlmostEqual(result['score'], 0.5, places=6)
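
Only the last pair differs, so the expected score follows directly: RMSE = sqrt(((1-1)^2 + (2-2)^2 + (3-3)^2 + (4-5)^2) / 4) = sqrt(1/4) = 0.5.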
Example 5
    def test_rmse_perf_metric_enable_mapping(self):
        groundtruths = np.arange(0, 1, 0.0001)
        predictions = np.arange(0, 1, 0.0001)
        metric = RmsePerfMetric(groundtruths, predictions)
        result = metric.evaluate(enable_mapping=True)
        self.assertAlmostEqual(result['score'], 0.022753642178052261, places=6)
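
This expected value is worth pausing on: groundtruths and predictions are identical, yet the score is not 0. Plain RMSE on these inputs is exactly zero, so the nonzero residual must be introduced by enable_mapping=True, which evidently passes the predictions through a fitted mapping before scoring (the mapping itself is internal to the library and not shown on this page). A quick check of the unmapped baseline, assuming np is numpy as in the test above:

    import numpy as np

    labels = np.arange(0, 1, 0.0001)
    preds = np.arange(0, 1, 0.0001)
    # identical arrays: plain RMSE is exactly 0.0
    print(np.sqrt(np.mean((labels - preds) ** 2)))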