def test_cases_multilabel_1l_duplicate(self):
    """Verify ``pac_metric`` reference values on a ``num``-label multi-label task.

    NOTE(review): this method was originally a byte-for-byte duplicate of
    ``test_cases_multilabel_1l`` below; because both definitions shared one
    name, this first copy was shadowed at class-creation time and never ran
    (flake8 F811). Renamed so it executes — TODO: delete one of the copies.
    """
    num = 2

    def trim(matrix):
        # Keep only the first ``num`` columns; for a single label the
        # column is reshaped into an explicit (n_samples, 1) matrix.
        if num == 1:
            return np.array([matrix[:, 0]]).transpose()
        return matrix[:, 0:num]

    cases = []
    sol3 = trim(np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0]]))
    cases.append(('{} labels perfect'.format(num), sol3, sol3, 1.0))
    cases.append(('All wrong, in the multi-label sense',
                  sol3, 1 - sol3, -1.32491508679))

    pred = trim(np.array([[0.5, 0.5, 0.5], [0.5, 0.5, 0.5],
                          [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]))
    cases.append(('All equi proba: 0.5', sol3, pred, -0.162457543395))

    pred = trim(np.array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25],
                          [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]]))
    cases.append(('All equi proba, prior: 0.25', sol3, pred, 0.0))

    pred = trim(np.array([[0.2, 0.2, 0.2], [0.8, 0.8, 0.8],
                          [0.9, 0.9, 0.9], [0.7, 0.7, 0.7]]))
    cases.append(('Some proba', sol3, pred, -0.892199631436))
    # Same predictions as above, scored against the inverted solution.
    cases.append(('Invert both solution and prediction',
                  1 - sol3, pred, 0.5277086603))

    for testname, sol, pred, result in cases:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=MULTILABEL_CLASSIFICATION)
            # Very weak test
            self.assertAlmostEqual(bac, result, places=1)
def test_cases_multilabel_1l(self):
    """Spot-check ``pac_metric`` reference values on a ``num``-label task."""
    num = 2

    def narrow(matrix):
        # Restrict a 3-column fixture to its first ``num`` columns,
        # always returning a 2-D (n_samples, num) array.
        if num == 1:
            return np.array([matrix[:, 0]]).transpose()
        return matrix[:, 0:num]

    def rows(value_row):
        # A four-sample prediction matrix whose rows all equal value_row.
        return narrow(np.array([value_row] * 4))

    labels = narrow(np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0]]))
    some_proba = narrow(np.array([[0.2, 0.2, 0.2], [0.8, 0.8, 0.8],
                                  [0.9, 0.9, 0.9], [0.7, 0.7, 0.7]]))

    cases = [
        ('{} labels perfect'.format(num), labels, labels, 1.0),
        ('All wrong, in the multi-label sense',
         labels, 1 - labels, -1.32491508679),
        ('All equi proba: 0.5', labels, rows([0.5, 0.5, 0.5]),
         -0.162457543395),
        ('All equi proba, prior: 0.25', labels, rows([0.25, 0.25, 0.25]),
         0.0),
        ('Some proba', labels, some_proba, -0.892199631436),
        ('Invert both solution and prediction',
         1 - labels, some_proba, 0.5277086603),
    ]

    for testname, sol, pred, result in cases:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=MULTILABEL_CLASSIFICATION)
            # Very weak test
            self.assertAlmostEqual(bac, result, places=1)
def test_cases_binary_score_verification_duplicate(self):
    """Verify ``pac_metric`` reference values for binary classification.

    NOTE(review): this method was originally an exact duplicate of
    ``test_cases_binary_score_verification`` below; because both shared a
    name, this first copy was shadowed and never ran (flake8 F811).
    Renamed so it executes — TODO: remove one of the two copies.
    """
    cases = []
    sol = np.array([0, 0, 1, 1])
    pred = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    cases.append(('perfect', sol, pred, 1.0))
    cases.append(('anti-perfect', sol, 1 - pred, -1.0))
    uneven_proba = np.array(
        [[0.7, 0.3], [0.4, 0.6], [0.49, 0.51], [0.2, 0.8]])
    cases.append(('uneven proba', sol, uneven_proba, 0.162745170342))
    # Probabilities a vanishing eps away from 0.5, on the correct side.
    eps = 1.e-15
    ties = np.array([[0.5 + eps, 0.5 - eps],
                     [0.5 - eps, 0.5 + eps],
                     [0.5 + eps, 0.5 - eps],
                     [0.5 - eps, 0.5 + eps]])
    cases.append(('ties_broken', sol, ties, 0.0))
    ties = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])
    cases.append(('ties', sol, ties, 0.0))
    # The remaining cases use a three-sample solution.
    sol = np.array([0, 1, 1])
    pred = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]])
    cases.append(('even proba', sol, pred, -0.0618725166757))
    # Predicting the empirical class frequencies (the PAC prior) scores 0.
    _pred = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
    pred = np.array([sum(_pred) * 1. / len(_pred)] * len(_pred))
    cases.append(('correct PAC prior', sol, pred, 0.0))
    pred = np.array([[1., 1.], [1., 1.], [1., 1.]])
    cases.append(('all positive', sol, pred, -1.12374503314))
    pred = np.array([[0, 0], [0, 0], [0, 0]])
    cases.append(('all negative', sol, pred, -1.1237237959))

    for testname, sol, pred, result in cases:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=BINARY_CLASSIFICATION)
            # Very inaccurate!
            self.assertAlmostEqual(bac, result, places=1)
def test_cases_binary_score_verification(self):
    """Reference-value checks for ``pac_metric`` on binary tasks."""
    checks = []
    truth = np.array([0, 0, 1, 1])
    one_hot = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    checks.append(('perfect', truth, one_hot, 1.0))
    checks.append(('anti-perfect', truth, 1 - one_hot, -1.0))
    checks.append(('uneven proba', truth,
                   np.array([[0.7, 0.3], [0.4, 0.6],
                             [0.49, 0.51], [0.2, 0.8]]),
                   0.162745170342))
    # Winning column alternates by a vanishing margin around 0.5.
    eps = 1.e-15
    near_ties = np.array([[0.5 + eps, 0.5 - eps],
                          [0.5 - eps, 0.5 + eps]] * 2)
    checks.append(('ties_broken', truth, near_ties, 0.0))
    checks.append(('ties', truth, np.array([[0.5, 0.5]] * 4), 0.0))

    # Remaining cases: three samples, class prior 1/3 vs 2/3.
    truth = np.array([0, 1, 1])
    checks.append(('even proba', truth, np.array([[0.5, 0.5]] * 3),
                   -0.0618725166757))
    _pred = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
    prior_pred = np.array([sum(_pred) * 1. / len(_pred)] * len(_pred))
    checks.append(('correct PAC prior', truth, prior_pred, 0.0))
    checks.append(('all positive', truth, np.array([[1., 1.]] * 3),
                   -1.12374503314))
    checks.append(('all negative', truth, np.array([[0, 0]] * 3),
                   -1.1237237959))

    for testname, sol, pred, result in checks:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=BINARY_CLASSIFICATION)
            # Very inaccurate!
            self.assertAlmostEqual(bac, result, places=1)
def test_cases_multilabel_2(self):
    """Spot-check ``pac_metric`` on a three-label multi-label problem."""
    truth = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]])
    third = 1. / 3
    scenarios = [
        ('Three labels perfect', truth, truth, 1.0),
        ('Three classes all wrong, in the multi-label sense',
         truth, 1 - truth, -1.20548265539),
        ('Three classes equi proba (wrong test from StartingKit)',
         truth, np.array([[0, 0, 0]] * 4), -1.20522116785),
        ('Three classes equi proba',
         truth, np.array([[third, third, third]] * 4), -1.20522116785),
        ('Three classes some proba that do not add up',
         truth,
         np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1],
                   [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]),
         -0.249775129382),
        ('Three classes predict prior',
         truth, np.array([[0.25, 0.25, 0.5]] * 4), 0.0),
    ]

    for testname, sol, pred, result in scenarios:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            pac = pac_metric(sol, pred, task=MULTILABEL_CLASSIFICATION)
            # Another weak test: one specific value pair is exempted
            # from the assertion (guard carried over from the original).
            if pac != -1.1860048034278985 and result != -1.20522116785:
                self.assertAlmostEqual(pac, result, places=3)
def test_cases_multiclass_score_verification_duplicate(self):
    """Reference values for ``pac_metric`` on a 3-class multiclass task.

    NOTE(review): this method was originally an exact duplicate of
    ``test_cases_multiclass_score_verification`` below; sharing a name
    meant this first copy was shadowed and never ran (flake8 F811).
    Renamed so it executes — TODO: delete one of the two copies.
    """
    cases = []
    sol = np.array([0, 1, 0, 0])
    pred = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
    cases.append(('3 classes perfect', sol, pred, 1.0))
    pred = np.array([[0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]])
    cases.append(('all classes wrong', sol, pred, -1.32491508679))
    pred = np.array([[0., 0., 0.], [0., 0., 0.],
                     [0., 0., 0.], [0., 0., 0.]])
    cases.append(('equi proba (wrong test from the starting kit)',
                  sol, pred, -1.32491508679))
    pred = np.array([[1. / 3, 1. / 3, 1. / 3], [1. / 3, 1. / 3, 1. / 3],
                     [1. / 3, 1. / 3, 1. / 3], [1. / 3, 1. / 3, 1. / 3]])
    cases.append(('equi proba', sol, pred, -0.54994340656358087))
    pred = np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1],
                     [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]])
    cases.append(('sum(proba) < 1.0', sol, pred, -0.315724404334))
    # Predicting the empirical class frequencies should score ~0.
    pred = np.array([[0.75, 0.25, 0.], [0.75, 0.25, 0.],
                     [0.75, 0.25, 0.], [0.75, 0.25, 0.]])
    cases.append(('predict prior', sol, pred, 1.54870455579e-15))

    for testname, sol, pred, result in cases:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=MULTICLASS_CLASSIFICATION)
            # Weak test: one specific value pair is exempted from the
            # assertion — presumably a platform-dependent result; confirm.
            if bac != -1.3096137080181987 and result != -1.32470836935:
                self.assertAlmostEqual(bac, result, places=2)
def test_cases_multiclass_score_verification(self):
    """Spot-check ``pac_metric`` on a four-sample, three-class problem."""
    truth = np.array([0, 1, 0, 0])
    third = 1. / 3
    scenarios = [
        ('3 classes perfect', truth,
         np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]]), 1.0),
        ('all classes wrong', truth,
         np.array([[0, 1, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]),
         -1.32491508679),
        ('equi proba (wrong test from the starting kit)', truth,
         np.array([[0., 0., 0.]] * 4), -1.32491508679),
        ('equi proba', truth,
         np.array([[third, third, third]] * 4), -0.54994340656358087),
        ('sum(proba) < 1.0', truth,
         np.array([[0.2, 0, 0.5], [0.8, 0.4, 0.1],
                   [0.9, 0.1, 0.2], [0.7, 0.3, 0.3]]),
         -0.315724404334),
        ('predict prior', truth,
         np.array([[0.75, 0.25, 0.]] * 4), 1.54870455579e-15),
    ]

    for testname, sol, pred, result in scenarios:
        pred = pred.astype(np.float32)
        with self.subTest('%s' % testname):
            sol, pred = copy_and_preprocess_arrays(sol, pred)
            bac = pac_metric(sol, pred, task=MULTICLASS_CLASSIFICATION)
            # One specific value pair is exempted from the assertion
            # (guard carried over from the original test).
            if bac != -1.3096137080181987 and result != -1.32470836935:
                self.assertAlmostEqual(bac, result, places=2)