Example #1
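Both snippets below are unittest test methods, presumably taken from a TestCase subclass in the sureal test suite. They assume import numpy as np and an import of AucPerfMetric; in the sureal package this would likely be from sureal.perf_metric import AucPerfMetric, though the exact module path is an assumption here.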
    def test_auc_perf_metric(self):
        np.random.seed(1)
        # Synthetic subjective scores: 4 stimuli x 10 subjects, drawn around
        # true quality means 1, 2, 3, 4
        groundtruths = np.random.normal(0, 1.0, [4, 10]) + np.tile(np.array([1, 2, 3, 4]), [10, 1]).T
        # A single metric whose per-stimulus predictions match the true ordering
        predictions = [1, 2, 3, 4]
        metric = AucPerfMetric(groundtruths, predictions)
        result = metric.evaluate()
        self.assertAlmostEqual(result['score'], 0.9375, places=6)
        self.assertAlmostEqual(result['AUC_BW'], 0.9999999999999999, places=6)
        self.assertAlmostEqual(result['AUC_DS'], 0.9375, places=6)
        self.assertAlmostEqual(result['CC_0'], 1.0, places=6)
        self.assertAlmostEqual(result['THR'], 3.0, places=6)
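The result keys appear to follow the Krasula et al. model-evaluation methodology: AUC_BW is the area under the ROC curve for telling better/worse pairs apart, AUC_DS the AUC for telling different/similar pairs apart, CC_0 the fraction of pairs correctly classified at a zero threshold, and THR a decision threshold estimated from the data (its exact definition depends on the implementation). score is the headline figure of merit derived from these.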
Example #2
    def test_auc_perf_multiple_metrics(self):
        np.random.seed(1)
        # Same synthetic subjective data as above: 4 stimuli x 10 subjects
        groundtruths = np.random.normal(0, 1.0, [4, 10]) + np.tile(np.array([1, 2, 3, 4]), [10, 1]).T
        # Two competing metrics: the first perfectly ordered, the second scrambled
        predictions = [[1, 2, 3, 4], [3, 1, 2, 4]]
        metric = AucPerfMetric(groundtruths, predictions)
        result = metric.evaluate()
        # With multiple metrics, per-metric results are sequences indexed by metric
        self.assertAlmostEqual(result['score'][0], 0.9999999999999999, places=6)
        self.assertAlmostEqual(result['AUC_BW'][0], 0.9999999999999999, places=6)
        self.assertAlmostEqual(result['AUC_DS'][0], 0.9375, places=6)
        self.assertAlmostEqual(result['CC_0'][0], 1.0, places=6)
        self.assertAlmostEqual(result['THR'][0], 1.0, places=6)
        self.assertAlmostEqual(result['score'][1], 0.8125, places=6)
        self.assertAlmostEqual(result['AUC_BW'][1], 0.8125, places=6)
        self.assertAlmostEqual(result['AUC_DS'][1], 0.6250, places=6)
        self.assertAlmostEqual(result['CC_0'][1], 0.75, places=6)
        self.assertAlmostEqual(result['THR'][1], 2, places=6)
        # Pairwise significance matrices: entry [i, j] holds the p-value for
        # the difference between metrics i and j on each criterion
        self.assertAlmostEqual(result['pDS_DL'][0, 1], 0.02746864, places=6)
        self.assertAlmostEqual(result['pBW_DL'][0, 1], 0.06136883, places=6)
        self.assertAlmostEqual(result['pCC0_b'][0, 1], 0.03250944, places=6)
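Outside a test harness, the same API can be exercised directly. Below is a minimal standalone sketch, assuming numpy and the sureal package; the import path sureal.perf_metric and the names metric_a/metric_b are assumptions for illustration:

    import numpy as np
    from sureal.perf_metric import AucPerfMetric  # import path assumed

    # Subjective scores: 4 stimuli x 10 subjects, true quality means 1..4
    np.random.seed(1)
    groundtruths = (np.random.normal(0, 1.0, [4, 10])
                    + np.tile(np.array([1, 2, 3, 4]), [10, 1]).T)

    # Two competing metrics' per-stimulus predictions
    metric_a = [1, 2, 3, 4]   # agrees with the true ordering
    metric_b = [3, 1, 2, 4]   # partially scrambled

    result = AucPerfMetric(groundtruths, [metric_a, metric_b]).evaluate()

    for i, name in enumerate(['metric_a', 'metric_b']):
        print('%s: AUC_BW=%.4f AUC_DS=%.4f'
              % (name, result['AUC_BW'][i], result['AUC_DS'][i]))

    # p-value for the better/worse AUC difference between the two metrics;
    # a small value suggests the gap is statistically significant
    print('p(A vs B, BW):', result['pBW_DL'][0, 1])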