def test_auc_numpy(self):
    """AUC accepts numpy arrays; ties at 0.5 give 6/9 concordant pairs."""
    y_true = np.array([1, 1, 0, 1, 0, 0])
    y_score = np.array([1, 1, 1, 0.5, 0.5, 0.5])
    auc = BinaryClassificationMetrics.AUC()
    result = auc.get_score(y_true, y_score)
    self.assertEqual(result, 0.6666666666666667)
def test_auc(self):
    """A constant likelihood cannot rank any pair, so AUC is exactly 0.5."""
    labels = [1, 1, 0, 1, 0, 0]
    scores = [0.5] * len(labels)
    auc = BinaryClassificationMetrics.AUC()
    self.assertEqual(auc.get_score(labels, scores), 0.5)
def test_auc_non_zero_one_input(self):
    """get_score raises ValueError when labels are not drawn from {0, 1}."""
    # Fix: the original `actual` had 4 elements vs 6 likelihoods, so the
    # ValueError could have come from a length mismatch instead of the
    # invalid label values this test is meant to exercise. Lengths now match.
    actual = ['a', 'b', 'a', 'a', 'b', 'b']
    likelihoods = [0, 0, 0, 0.5, 0.5, 0.5]
    # Metric
    metric = BinaryClassificationMetrics.AUC()
    # Score: string labels must be rejected
    with self.assertRaises(ValueError):
        metric.get_score(actual, likelihoods)
def test_auc_likelihood_input(self):
    """get_score raises ValueError for a likelihood outside [0, 1]."""
    labels = [1, 1, 0, 1, 0, 0]
    scores = [100, 0, 0, 0.5, 0.5, 0.5]  # 100 is out of range
    auc = BinaryClassificationMetrics.AUC()
    with self.assertRaises(ValueError):
        auc.get_score(labels, scores)
def test_auc_pandas(self):
    """AUC accepts pandas Series; one positive vs five negatives gives 0.8."""
    y_true = pd.Series([0, 0, 0, 0, 0, 1])
    y_score = pd.Series([0, 0, 0, 0.5, 0.5, 0.5])
    auc = BinaryClassificationMetrics.AUC()
    result = auc.get_score(y_true, y_score)
    self.assertEqual(result, 0.8)