Example #1
 def test_LCS_Algorithm(self):
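     # LCS.LocalSearch_kappa_sum should match the average pairwise kappa
     # statistic, and LCS.Local_Search should keep exactly nb_pru members.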
     m, nb_cls = 30, 7
     y, yt = generate_simulated_data(m, 2, nb_cls)
     tem = LCS.LocalSearch_kappa_sum(yt, y)
     ans = 0.0
     for i in range(nb_cls - 1):
         for j in range(i + 1, nb_cls):
             ans += Kappa_Statistic_multiclass(yt[i], yt[j], y, m)[0]
     ans /= (nb_cls * (nb_cls - 1.) / 2.)
     # self.assertEqual(tem == ans, True)
     self.assertEqual(check_equal(tem, ans), True)
     nb_pru = 7
     yo, P = LCS.Local_Search(yt, y, nb_cls, nb_pru, 1e-6)
     self.assertEqual(sum(P) == nb_pru, True)
     self.assertEqual(len(yo) == nb_pru, True)
     #
     y, yt = generate_simulated_data(m, 4, nb_cls)
     tem = LCS.LocalSearch_kappa_sum(yt, y)
     ans = 0.0
     for i in range(nb_cls - 1):
         for j in range(i + 1, nb_cls):
             ans += Kappa_Statistic_multiclass(yt[i], yt[j], y, m)[0]
     ans /= (nb_cls * (nb_cls - 1.) / 2.)
     # self.assertEqual(tem == ans, True)
     self.assertEqual(abs(tem - ans) <= 1e-6, True)
     yo, P = LCS.Local_Search(yt, y, nb_cls, nb_pru, 1e-6)
     self.assertEqual(sum(P) == nb_pru, True)
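All of these snippets are unittest methods lifted from the same test module, so the module-level imports they rely on are not shown. Below is a minimal sketch of the assumed scaffolding; the class name and the import notes are assumptions for illustration, since the listing itself only confirms the pyensemble package name (see Example #19).

import unittest
import numpy as np

# Assumed context: the examples reference the module aliases LCS, GMM, OO, SEP,
# ES, DREP, KP, KL, KLplus, RE, PEP, PEPplus and helpers such as
# generate_simulated_data, negative_generate_simulate, check_equal, check_zero
# and the *_binary / *_multiclass diversity measures. Their exact import paths
# inside pyensemble are not shown in this listing.

class TestPruningAndDiversity(unittest.TestCase):  # hypothetical class name
    pass  # the test methods shown in the examples would live here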
Example #2
 def test_generate_simulated_data_negative(self):
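     # generate_simulated_data should produce the binary labels {-1, +1}
     # when nb_label is 1 or -1.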
     y, yt = generate_simulated_data(200, 1, 71)
     vY = np.unique(np.concatenate([[y], yt])).tolist()
     self.assertEqual(len(vY), 2)
     self.assertEqual(((-1 in vY) and (1 in vY)), True)
     y, yt = generate_simulated_data(200, -1, 71)
     vY = np.unique(np.concatenate([[y], yt])).tolist()
     self.assertEqual(len(vY), 2)
     self.assertEqual(((-1 in vY) and (1 in vY)), True)
Example #3
 def test_OO_pruning(self):
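     # Orientation_Ordering_Pruning keeps at most nb_cls members and returns
     # a flag value within [0, 180].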
     m, L, nb_cls, nb_pru = 30, 4, 7, 3
     y, yt = generate_simulated_data(m, L, nb_cls)
     yo, P, flag = OO.Orientation_Ordering_Pruning(yt, y)
     self.assertEqual(sum(P) <= nb_cls, True)
     self.assertEqual(0. <= flag <= 180., True)
     del nb_pru, yo
Example #4
 def test_SEP(self):
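     # PEP_SEP keeps at most nb_cls members and preserves the set of class labels.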
     m, nb_label, nb_cls = 30, 4, 7
     y, yt = generate_simulated_data(m, nb_label, nb_cls)
     nb_pru = 7
     yo, P = SEP.PEP_SEP(yt, y, nb_cls, nb_pru)
     self.assertEqual(sum(P) <= nb_cls, True)
     self.assertEqual(all(np.unique(yo) == np.unique(yt)), True)
Example #5
 def test_ES_pruning(self):
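     # Early_Stopping should select exactly H of the T ensemble members.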
     m, L, T, H = 30, 4, 7, 3
     _, yt = generate_simulated_data(m, L, T)
     # yo, P = ES.Early_Stopping(yt, T, H)
     _, P = ES.Early_Stopping(yt, T, H)
     self.assertEqual(sum(P), H)
     del L
Example #6
 def test_Entropy_SK(self):
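     # The two labelings from negative_generate_simulate should give the same
     # entropy value, and the multiclass entropy should be non-negative.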
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Entropy_sk_multiclass(yt1, y1, m, T)
     d2 = Entropy_sk_multiclass(yt2, y2, m, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Entropy_sk_multiclass(yt3, y3, m, T)
     self.assertEqual(d3 >= 0, True)
Example #7
 def test_Difficulty(self):
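     # The two labelings from negative_generate_simulate should give the same
     # difficulty value, and the multiclass difficulty should be non-negative.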
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Difficulty_multiclass(yt1, y1, T)
     d2 = Difficulty_multiclass(yt2, y2, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Difficulty_multiclass(yt3, y3, T)
     self.assertEqual(d3 >= 0, True)
Example #8
 def test_Interrater_agreement(self):
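     # The two labelings should give the same interrater agreement, and the
     # multiclass value should be a float.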
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Interrater_agreement_multiclass(yt1, y1, m, T)
     d2 = Interrater_agreement_multiclass(yt2, y2, m, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Interrater_agreement_multiclass(yt3, y3, m, T)
     self.assertEqual(isinstance(d3, float), True)
Example #9
 def test_Coincident_Failure(self):
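     # The two labelings should give the same coincident-failure value, and
     # the multiclass value should be non-negative.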
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Coincident_Failure_multiclass(yt1, y1, m, T)
     d2 = Coincident_Failure_multiclass(yt2, y2, m, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Coincident_Failure_multiclass(yt3, y3, m, T)
     self.assertEqual(d3 >= 0, True)
Example #10
 def test_Generalized_Diversity(self):
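     # The two labelings should give the same generalized diversity, and the
     # multiclass value should lie in [0, 1].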
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Generalized_Diversity_multiclass(yt1, y1, m, T)
     d2 = Generalized_Diversity_multiclass(yt2, y2, m, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Generalized_Diversity_multiclass(yt3, y3, m, T)
     self.assertEqual(d3 >= 0, True)
     self.assertEqual(d3 <= 1, True)
Example #11
 def test_PEP(self):
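     # PEP_VDS returns two lists of length nb_cls; PEP_PEP keeps at most
     # nb_cls members and preserves the set of class labels.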
     m, nb_label, nb_cls = 30, 4, 7
     y, yt = generate_simulated_data(m, nb_label, nb_cls)
     s = np.random.randint(2, size=nb_cls).tolist()
     Q, L = PEP.PEP_VDS(y, yt, nb_cls, s)
     yo, P = PEP.PEP_PEP(yt, y, nb_cls, 0.4)
     self.assertEqual(sum(P) <= nb_cls, True)
     self.assertEqual(all(np.unique(yo) == np.unique(yt)), True)
     self.assertEqual(len(Q) == nb_cls, True)
     self.assertEqual(len(L) == nb_cls, True)
Example #12
 def test_GMM_Algorithm(self):
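     # GMM.GMM_Kappa_sum should equal the sum of pairwise kappa values against
     # the first member, and GMM_Algorithm should keep exactly nb_pru members.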
     m, nb_cls = 30, 7
     y, yt = generate_simulated_data(m, 2, nb_cls)
     tem = GMM.GMM_Kappa_sum(yt[0], yt[1:], y)
     ans = [Kappa_Statistic_multiclass(yt[0], j, y, m)[0] for j in yt[1:]]
     ans = sum(ans)
     self.assertEqual(tem == ans, True)
     #
     nb_pru = 7
     yo, P = GMM.GMM_Algorithm(yt, y, nb_cls, nb_pru)
     self.assertEqual(sum(P) == nb_pru, True)
     self.assertEqual(len(yo) == nb_pru, True)
     #
     y, yt = generate_simulated_data(m, 4, nb_cls)
     tem = GMM.GMM_Kappa_sum(yt[0], yt[1:], y)
     ans = [Kappa_Statistic_multiclass(yt[0], j, y, m)[0] for j in yt[1:]]
     ans = sum(ans)
     # self.assertEqual(tem == ans, True)
     self.assertEqual(abs(tem - ans) <= 1e-6, True)
     yo, P = GMM.GMM_Algorithm(yt, y, nb_cls, nb_pru)
     self.assertEqual(sum(P) == nb_pru, True)
Example #13
 def test_KW_Variance(self):
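     # The Kohavi-Wolpert variance should be identical for both labelings and
     # lie within [0, 1/2].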
     m, T = 100, 21
     y1, yt1, y2, yt2 = negative_generate_simulate(m, T)
     d1 = Kohavi_Wolpert_Variance_multiclass(yt1, y1, m, T)
     d2 = Kohavi_Wolpert_Variance_multiclass(yt2, y2, m, T)
     self.assertEqual(d1, d2)
     y3, yt3 = generate_simulated_data(m, 7, T)
     d3 = Kohavi_Wolpert_Variance_multiclass(yt3, y3, m, T)
     # self.assertEqual(d3 >= 3./8, True)  # only holds in the binary case
     # Note: in general the Kohavi-Wolpert variance lies in [0, 1/2].
     self.assertEqual(d3 <= 1./2, True)
     self.assertEqual(d3 >= 0., True)
     self.assertEqual(0. < d1 <= 1./2, True)
     self.assertEqual(0. < d2 <= 1./2, True)
Example #14
 def test_Kappa_Statistic(self):
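     # Binary and multiclass kappa statistics should agree, and the multiclass
     # result should satisfy kappa = (t1 - t2) / (1 - t2).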
     m = 100
     y1, yt1, y2, yt2 = negative_generate_simulate(m, 2)
     ha1, hb1 = yt1
     ha2, hb2 = yt2
     d1 = Kappa_Statistic_binary(ha1, hb1, m)
     d2 = Kappa_Statistic_binary(ha2, hb2, m)
     self.assertEqual(d1, d2)
     d3 = Kappa_Statistic_multiclass(ha1, hb1, y1, m)
     d4 = Kappa_Statistic_multiclass(ha2, hb2, y2, m)
     self.assertEqual(all(np.array(d3) == np.array(d4)), True)
     self.assertEqual(d1, d3[0])
     self.assertEqual(d2, d4[0])
     y3, yt3 = generate_simulated_data(m, 7, 2)
     d3, t1, t2 = Kappa_Statistic_multiclass(yt3[0], yt3[1], y3, m)
     self.assertEqual((t1 - t2) / check_zero(1. - t2), d3)
Example #15
 def test_Double_Fault_Measure(self):
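     # Binary and multiclass double-fault measures should agree and lie in [0, 1].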
     m = 100
     y1, yt1, y2, yt2 = negative_generate_simulate(m, 2)
     ha1, hb1 = yt1
     ha2, hb2 = yt2
     d1 = Double_Fault_Measure_binary(ha1, hb1, y1, m)
     d2 = Double_Fault_Measure_binary(ha2, hb2, y2, m)
     self.assertEqual(d1, d2)
     d3 = Double_Fault_Measure_multiclass(ha1, hb1, y1, m)
     d4 = Double_Fault_Measure_multiclass(ha2, hb2, y2, m)
     self.assertEqual(d3, d4)
     self.assertEqual(d1, d3)
     self.assertEqual(d2, d4)
     y3, yt3 = generate_simulated_data(m, 7, 2)
     d3 = Double_Fault_Measure_multiclass(yt3[0], yt3[1], y3, m)
     self.assertEqual(0 <= d3 <= 1.0, True)
Example #16
 def test_Disagreement_Measure(self):
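     # Binary and multiclass disagreement measures should agree and lie in [0, 1].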
     m = 100
     _, yt1, _, yt2 = negative_generate_simulate(m, 2)
     ha1, hb1 = yt1
     ha2, hb2 = yt2
     d1 = Disagreement_Measure_binary(ha1, hb1, m)
     d2 = Disagreement_Measure_binary(ha2, hb2, m)
     self.assertEqual(d1, d2)
     d3 = Disagreement_Measure_multiclass(ha1, hb1, m)
     d4 = Disagreement_Measure_multiclass(ha2, hb2, m)
     self.assertEqual(d3, d4)
     self.assertEqual(d1, d3)
     self.assertEqual(d2, d4)
     m = 100
     _, yt3 = generate_simulated_data(m, 7, 2)
     d3 = Disagreement_Measure_multiclass(yt3[0], yt3[1], m)
     self.assertEqual(0 <= d3 <= 1, True)
Example #17
 def test_DREP(self):
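     # DREP_fxH should aggregate predictions into {-1, +1}; DREP_diff should
     # give the same value for both labelings and lie in [-1, 1]; DREP_Pruning
     # keeps at most nb_cls members.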
     m, nb_cls = 30, 7
     h, ht, y, yt = negative_generate_simulate(m, nb_cls)
     fens = DREP.DREP_fxH(yt)
     self.assertEqual(all(np.unique(fens) == np.array([-1, 1])), True)
     tem_h = DREP.DREP_diff(ht[0], ht[1])
     tem_y = DREP.DREP_diff(yt[0], yt[1])
     self.assertEqual(tem_h == tem_y, True)
     #
     yo, P = DREP.DREP_Pruning(yt, y, nb_cls, 0.4)
     self.assertEqual(len(yo) == sum(P), True)
     self.assertEqual(sum(P) <= nb_cls, True)
     h, ht = generate_simulated_data(m, 4, 2)
     tem_h = DREP.DREP_diff(ht[0], ht[1])
     self.assertEqual(-1. <= tem_h <= 1., True)
     #
     del h
Example #18
 def test_domination(self):
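     # Exercise PEP_flipping_uniformly, the bi-objective evaluation
     # (objective value in [0, 1], subset size up to nb_cls), and the
     # weak/strict Pareto dominance checks on small objective pairs.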
     m, nb_cls = 100, 7
     s = np.random.rand(nb_cls)
     sp = PEP_flipping_uniformly(s)
     ans = np.not_equal(sp, sp)
     ans = np.mean(ans) <= (1. / nb_cls)
     self.assertEqual(ans, True)
     #
     y, yt = generate_simulated_data(m, 2, nb_cls)
     ans = PEP_bi_objective(y, yt, s)
     fHs, s_ab = ans
     self.assertEqual(0. <= fHs <= 1., True)
     self.assertEqual(0 <= s_ab <= nb_cls, True)
     #
     g_s1, g_s2 = (0.1, 3), (0.2, 2)
     ans = PEP_weakly_dominate(g_s1, g_s2)
     self.assertEqual(ans, False)
     g_s1, g_s2 = (0.1, 3), (0.1, 4)
     ans = PEP_weakly_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
     g_s1, g_s2 = (0.1, 3), (0.3, 3)
     ans = PEP_weakly_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
     g_s1, g_s2 = (0.4, 3), (0.4, 3)
     ans = PEP_weakly_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
     #
     g_s1, g_s2 = (0.4, 3), (0.4, 3)
     ans = PEP_dominate(g_s1, g_s2)
     self.assertEqual(ans, False)
     g_s1, g_s2 = (0.2, 1), (0.4, 3)
     ans = PEP_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
     g_s1, g_s2 = (0.4, 2), (0.4, 3)
     ans = PEP_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
     g_s1, g_s2 = (0.3, 3), (0.4, 3)
     ans = PEP_dominate(g_s1, g_s2)
     self.assertEqual(ans, True)
Example #19
 def test_assume(self):
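     # PEP_Hs_x should match weighted_voting; the pairwise difference, the
     # individual errors, and the combined objective should all lie in [0, 1].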
     m, nb_cls = 100, 7
     y, yt = generate_simulated_data(m, 2, nb_cls)
     # weig = [0.1, 0.25, 0.2, 0.05, 0.15, 0.1, 0.15]
     weig = np.random.rand(nb_cls)
     weig /= np.sum(weig)
     weig = weig.tolist()
     #
     Hsx = PEP_Hs_x(y, yt, weig)
     from pyensemble.classify.voting import weighted_voting
     tem = weighted_voting(y, yt, weig)
     Hsx, tem = np.array(Hsx), np.array(tem)
     self.assertEqual(all(Hsx == tem), True)
     #
     diff = PEP_diff_hihj(yt[0], yt[1])
     self.assertEqual(0.0 <= diff <= 1.0, True)
     #
     err = PEP_err_hi(y, yt[0])
     self.assertEqual(0. <= err <= 1., True)
     err = PEP_err_hi(y, yt[1])
     self.assertEqual(0. <= err <= 1., True)
     #
     ans, _ = PEP_f_Hs(y, yt, weig)
     self.assertEqual(0. <= ans <= 1., True)
Example #20
 def case_generate_simulated_data_positive(self, m, L, T):
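     # Helper (not a test method): positive label counts should yield exactly
     # the labels 0 through L-1.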
     y, yt = generate_simulated_data(m, L, T)
     vY = np.unique(np.concatenate([[y], yt])).tolist()
     self.assertEqual(len(vY), L)
     self.assertEqual(((min(vY) == 0) and (max(vY) == L - 1)), True)
Example #21
 def test_KL_divergence_modify(self):
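     # KL_divergence_Pruning_modify should keep exactly nb_pru members.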
     m, L, nb_cls, nb_pru = 30, 4, 7, 3
     y, yt = generate_simulated_data(m, L, nb_cls)
     yo, P = KLplus.KL_divergence_Pruning_modify(yt, nb_cls, nb_pru)
     self.assertEqual(sum(P), nb_pru)
     del y, yo
Example #22
 def test_Kappa_pruning(self):
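     # Kappa_Pruning should keep exactly nb_pru members.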
     m, L, nb_cls, nb_pru = 30, 4, 7, 3
     y, yt = generate_simulated_data(m, L, nb_cls)
     _, P = KP.Kappa_Pruning(yt, y, nb_cls, nb_pru)
     self.assertEqual(sum(P), nb_pru)
Example #23
 def test_PEP_modify(self):
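     # PEP_PEP_modify keeps at most nb_cls members and preserves the set of
     # class labels.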
     m, nb_label, nb_cls = 30, 4, 7
     y, yt = generate_simulated_data(m, nb_label, nb_cls)
     yo, P = PEPplus.PEP_PEP_modify(yt, y, nb_cls, 0.4)
     self.assertEqual(sum(P) <= nb_cls, True)
     self.assertEqual(all(np.unique(yo) == np.unique(yt)), True)
Example #24
 def test_KL_divergence_pruning(self):
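     # KL_divergence_Pruning should keep exactly nb_pru members.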
     m, L, nb_cls, nb_pru = 30, 4, 7, 3
     _, yt = generate_simulated_data(m, L, nb_cls)
     _, P = KL.KL_divergence_Pruning(yt, nb_cls, nb_pru)
     self.assertEqual(sum(P), nb_pru)
Example #25
 def test_RE_pruning(self):
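     # Reduce_Error_Pruning should keep exactly nb_pru members.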
     m, L, nb_cls, nb_pru = 30, 4, 7, 3
     y, yt = generate_simulated_data(m, L, nb_cls)
     _, P = RE.Reduce_Error_Pruning(yt, y, nb_cls, nb_pru)
     self.assertEqual(sum(P), nb_pru)