def get_stats(cls, ys_label, ys_label_pred, ys_label_raw=None):

    # cannot have None
    assert all(x is not None for x in ys_label)
    assert all(x is not None for x in ys_label_pred)

    # RMSE
    rmse = RmsePerfMetric(ys_label, ys_label_pred) \
        .evaluate(enable_mapping=True)['score']

    # spearman
    srcc = SrccPerfMetric(ys_label, ys_label_pred) \
        .evaluate(enable_mapping=True)['score']

    # pearson
    pcc = PccPerfMetric(ys_label, ys_label_pred) \
        .evaluate(enable_mapping=True)['score']

    # kendall
    kendall = KendallPerfMetric(ys_label, ys_label_pred) \
        .evaluate(enable_mapping=True)['score']

    stats = {
        'RMSE': rmse,
        'SRCC': srcc,
        'PCC': pcc,
        'KENDALL': kendall,
        'ys_label': list(ys_label),
        'ys_label_pred': list(ys_label_pred),
    }

    if ys_label_raw is not None:
        # KFLK
        kflk = KflkPerfMetric(ys_label_raw, ys_label_pred) \
            .evaluate()['score']
        stats['KFLK'] = kflk

    return stats
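# Usage sketch (illustrative, not from the original source). It assumes
# get_stats is exposed as a classmethod on a model class; the class name and
# import path below are assumptions and may differ in your checkout.
from vmaf.core.train_test_model import TrainTestModel  # assumed location

ys_label = [60.0, 70.0, 80.0, 90.0]        # subjective ground-truth scores
ys_label_pred = [58.0, 72.0, 79.0, 91.0]   # model predictions

stats = TrainTestModel.get_stats(ys_label, ys_label_pred)
print(stats['RMSE'], stats['SRCC'], stats['PCC'], stats['KENDALL'])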
def test_rmse_perf_metric2(self):
    groundtruths = [1, 2, 3, 4]
    predictions = [1, 2, 3, 5]
    metric = RmsePerfMetric(groundtruths, predictions)
    result = metric.evaluate()
    self.assertAlmostEqual(result['score'], 0.5, places=6)
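# Sanity check of the expected value above, independent of RmsePerfMetric:
# only the last pair differs, so
# RMSE = sqrt(((1-1)^2 + (2-2)^2 + (3-3)^2 + (4-5)^2) / 4) = sqrt(1/4) = 0.5
import numpy as np
assert np.isclose(
    np.sqrt(np.mean((np.array([1, 2, 3, 4]) - np.array([1, 2, 3, 5])) ** 2)),
    0.5)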
def test_rmse_perf_metric_enable_mapping(self):
    groundtruths = np.arange(0, 1, 0.0001)
    predictions = np.arange(0, 1, 0.0001)
    metric = RmsePerfMetric(groundtruths, predictions)
    result = metric.evaluate(enable_mapping=True)
    self.assertAlmostEqual(result['score'], 0.022753642178052261, places=6)
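# Companion sketch (not in the original suite): without enable_mapping,
# identical groundtruths and predictions give an RMSE of exactly 0, in
# contrast to the small nonzero score above; enable_mapping presumably fits
# a mapping from predictions to groundtruths before the RMSE is computed.
def test_rmse_perf_metric_no_mapping_identical(self):
    groundtruths = np.arange(0, 1, 0.0001)
    predictions = np.arange(0, 1, 0.0001)
    metric = RmsePerfMetric(groundtruths, predictions)
    result = metric.evaluate()
    self.assertAlmostEqual(result['score'], 0.0, places=6)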