Example 1
 def test_rmse_perf_metric2(self):
     # Only the last pair differs (5 vs. 4), so the expected score is
     # RMSE = sqrt(1^2 / 4) = 0.5.
     groundtruths = [1, 2, 3, 4]
     predictions = [1, 2, 3, 5]
     metric = RmsePerfMetric(groundtruths, predictions)
     result = metric.evaluate()
     self.assertAlmostEqual(result['score'], 0.5, places=6)
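The expected value follows directly from the RMSE definition: only the last pair differs, by 1, so RMSE = sqrt(1/4) = 0.5. A minimal standalone check of the arithmetic, using only numpy and independent of RmsePerfMetric:

 import numpy as np

 groundtruths = np.array([1, 2, 3, 4])
 predictions = np.array([1, 2, 3, 5])

 # RMSE = sqrt(mean((prediction - groundtruth)^2))
 rmse = np.sqrt(np.mean((predictions - groundtruths) ** 2))
 assert abs(rmse - 0.5) < 1e-6  # matches the score asserted above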
Example 2
 def test_rmse_perf_metric_enable_mapping(self):
     # Groundtruths and predictions are identical, yet the score is
     # nonzero: with enable_mapping=True a fitted mapping is applied to
     # the predictions before the RMSE is computed, so a perfect raw
     # fit does not guarantee zero mapped error.
     groundtruths = np.arange(0, 1, 0.0001)
     predictions = np.arange(0, 1, 0.0001)
     metric = RmsePerfMetric(groundtruths, predictions)
     result = metric.evaluate(enable_mapping=True)
     self.assertAlmostEqual(result['score'], 0.022753642178052261, places=6)
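Both snippets are methods of a test class and rely on imports the excerpts omit. A minimal runnable wrapper, assuming RmsePerfMetric comes from the vmaf project's vmaf.core.perf_metric module (an assumed import path; adjust it to match your checkout) and using a hypothetical wrapper class name:

 import unittest

 import numpy as np

 # Assumed import path (vmaf project); verify against your installation.
 from vmaf.core.perf_metric import RmsePerfMetric


 class RmsePerfMetricTest(unittest.TestCase):  # hypothetical class name

     def test_rmse_perf_metric_enable_mapping(self):
         groundtruths = np.arange(0, 1, 0.0001)
         predictions = np.arange(0, 1, 0.0001)
         metric = RmsePerfMetric(groundtruths, predictions)
         result = metric.evaluate(enable_mapping=True)
         self.assertAlmostEqual(result['score'], 0.022753642178052261, places=6)


 if __name__ == '__main__':
     unittest.main()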