def test_kendall_perf_metric_enable_mapping(self):
    """With enable_mapping=True, a rank-order-preserving prediction scores 1.0.

    [1, 2, 3, 5] has the same ordering as [1, 2, 3, 4], so the mapped
    Kendall correlation is perfect.
    """
    reference_values = [1, 2, 3, 4]
    predicted_values = [1, 2, 3, 5]
    perf = KendallPerfMetric(reference_values, predicted_values)
    outcome = perf.evaluate(enable_mapping=True)
    self.assertAlmostEqual(outcome['score'], 1.0, places=6)
def test_kendall_perf_metric2(self):
    """One swapped pair out of six comparable pairs yields tau of 2/3.

    Predictions [1, 2, 5, 3] invert exactly one pair relative to
    [1, 2, 3, 4], so the default evaluation reports ~0.6667.
    """
    reference_values = [1, 2, 3, 4]
    predicted_values = [1, 2, 5, 3]
    perf = KendallPerfMetric(reference_values, predicted_values)
    outcome = perf.evaluate()
    self.assertAlmostEqual(outcome['score'], 0.66666666666666663, places=6)