Example #1
def init_profiles(self):
    # Initialize the model's profiles at random, bounded by the worst
    # and best performances observed in the sorted performance table
    b = self.model.categories_profiles.get_ordered_profiles()
    worst = self.pt_sorted.pt.get_worst(self.model.criteria)
    best = self.pt_sorted.pt.get_best(self.model.criteria)
    self.model.bpt = generate_random_profiles(b, self.model.criteria,
                                              worst=worst, best=best)
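
For reference, generate_random_profiles can also be exercised outside a class. A minimal standalone sketch, assuming these helpers all live in pymcda.generate (as the imports in Example #11 suggest) and keep the signatures used throughout this listing:

import random

from pymcda.generate import generate_criteria
from pymcda.generate import generate_categories
from pymcda.generate import generate_categories_profiles
from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table
from pymcda.generate import generate_random_profiles

random.seed(1)

# Three ordered categories are separated by two boundary profiles
criteria = generate_criteria(4)
categories = generate_categories(3)
cat_profiles = generate_categories_profiles(categories)
b = cat_profiles.get_ordered_profiles()

# Bound the random profiles by the worst/best performances observed
# in a random performance table
a = generate_alternatives(50)
pt = generate_random_performance_table(a, criteria)
worst = pt.get_worst(criteria)
best = pt.get_best(criteria)

bpt = generate_random_profiles(b, criteria, worst=worst, best=best)
print(bpt)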
Example #3
    def one_test2(self, seed, ncrit, ncat, na):
        # Generate a random MR-Sort model and assignment examples
        model = generate_random_mrsort_model(ncrit, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        # Copy the model and drop its criteria weights; the LP will
        # re-learn them from the assignment examples
        model2 = model.copy()
        model2.cvs = None

        lp_weights = LpMRSortWeights(model2, pt, aa)

        # Give both models the same random profiles and recompute the
        # reference assignments accordingly
        bids = model.categories_profiles.get_ordered_profiles()
        bpt = generate_random_profiles(bids, model.criteria)

        model.bpt = model2.bpt = bpt
        aa = model.pessimist(pt)

        lp_weights.aa_ori = aa
        lp_weights.update_linear_program()

        lp_weights.solve()

        # The re-learned weights should reproduce the reference
        # assignments exactly
        aa2 = model2.pessimist(pt)

        self.assertEqual(aa, aa2)
Example #5
def test_heur_mrsort_init_profiles(seed, na, nc, ncat, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()
    model3 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    # Compute assignments
    aa = model.pessimist(pt)

    # Initialize the second model with randomly generated profiles
    b = model.categories_profiles.get_ordered_profiles()
    model2.bpt = generate_random_profiles(b, model2.criteria)

    # Run the heuristic
    cats = model.categories_profiles.to_categories()
    pt_sorted = SortedPerformanceTable(pt)
    heur = HeurMRSortInitProfiles(model3, pt_sorted, aa)
    heur.solve()

    # Learn the weights and cut threshold
    cps = model.categories_profiles

    lp_weights = LpMRSortWeights(model2, pt, aa)
    lp_weights.solve()

    lp_weights = LpMRSortWeights(model3, pt, aa)
    lp_weights.solve()

    # Compute the classification accuracy
    aa2 = model2.pessimist(pt)
    aa3 = model3.pessimist(pt)

    ca2 = compute_ca(aa, aa2)
    ca3 = compute_ca(aa, aa3)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%g" % (seed, na, nc, ncat, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['pcerrors'] = pcerrors

    # Output params
    t['ca_rdom'] = ca2
    t['ca_heur'] = ca3

    return t
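
A hypothetical driver for the test above; the parameter values and the averaging are illustrative, not part of the original, and reading the results back assumes test_result supports item access:

results = []
for seed in range(10):
    t = test_heur_mrsort_init_profiles(seed, na=1000, nc=5, ncat=3,
                                       pcerrors=0)
    results.append((t['ca_rdom'], t['ca_heur']))

# Average classification accuracy of random vs. heuristic profiles
n = len(results)
print("mean CA, random profiles:    %g" % (sum(r[0] for r in results) / n))
print("mean CA, heuristic profiles: %g" % (sum(r[1] for r in results) / n))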
Example #7
def run_metaheuristic(pipe,
                      model,
                      pt,
                      aa,
                      algo,
                      n,
                      use_heur=False,
                      worst=None,
                      best=None):

    random.seed(0)
    pt_sorted = SortedPerformanceTable(pt)

    # Initialize the profiles, either with the heuristic or at random
    if use_heur is True:
        heur = HeurMRSortInitProfiles(model, pt_sorted, aa)
        heur.solve()
    else:
        model.bpt = generate_random_profiles(model.profiles,
                                             model.criteria,
                                             worst=worst,
                                             best=best)

    if algo == "Meta 3":
        meta = MetaMRSortProfiles3(model, pt_sorted, aa)
    elif algo == "Meta 4":
        meta = MetaMRSortProfiles4(model, pt_sorted, aa)
    else:
        print("Invalid algorithm %s" % algo)
        pipe.close()
        return

    # Send the initial model and its classification accuracy, then one
    # (model, fitness) pair per optimization round
    f = compute_ca(aa, meta.aa)

    pipe.send([model.copy(), f])

    for i in range(1, n + 1):
        meta.optimize()
        f = compute_ca(aa, meta.aa)

        pipe.send([model.copy(), f])

        # Stop early once the assignments are perfectly reproduced
        if f == 1:
            break

    pipe.close()
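
run_metaheuristic reports intermediate (model, fitness) pairs through a pipe and closes it when done. A sketch of the wiring with the standard-library multiprocessing module; the setup below is an assumption, not code from the original project, and model, pt and aa are assumed to be built as in the earlier examples:

from multiprocessing import Pipe, Process

recv_end, send_end = Pipe(duplex=False)
proc = Process(target=run_metaheuristic,
               args=(send_end, model, pt, aa, "Meta 4", 100),
               kwargs={'use_heur': True})
proc.start()
send_end.close()  # keep only the child's copy of the sending end

# Collect intermediate models until the child closes the pipe
best_model, best_f = None, -1
try:
    while True:
        m, f = recv_end.recv()
        if f > best_f:
            best_model, best_f = m, f
except EOFError:
    pass

proc.join()
print("best fitness: %g" % best_f)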
Example #9
    def one_test(self, seed, na, nc, ncat, max_loop, n):
        # Generate a random MR-Sort model and assignment examples
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        # Re-initialize a copy of the model with random profiles
        model2 = model.copy()
        bids = model2.categories_profiles.get_ordered_profiles()
        model2.bpt = generate_random_profiles(bids, model.criteria)

        pt_sorted = SortedPerformanceTable(pt)

        meta = MetaMRSortProfiles4(model2, pt_sorted, aa)

        # Optimize until the assignments are fully reproduced
        for i in range(1, max_loop + 1):
            ca = meta.optimize()
            if ca == 1:
                break

        aa2 = model2.pessimist(pt)

        # The metaheuristic is expected to converge in exactly n loops
        self.assertEqual(i, n)
        self.assertEqual(aa, aa2)
Example #11
    import random

    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_random_criteria_weights
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_categories_profiles
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.generate import generate_random_plinear_preference_function
    # flowsort itself is assumed to be imported elsewhere in the
    # original file

    random.seed(123)

    criteria = generate_criteria(5)
    crit_weights = generate_random_criteria_weights(criteria)
    categories = generate_categories(5)
    cat_profiles = generate_categories_profiles(categories)

    a = generate_alternatives(100)
    pt = generate_random_performance_table(a, criteria)
    ap_best = pt.get_best(criteria)
    ap_worst = pt.get_worst(criteria)

    b = cat_profiles.get_ordered_profiles()
    bpt = generate_random_profiles(b, criteria)
    pf = generate_random_plinear_preference_function(criteria, ap_worst,
                                                     ap_best)
    print(crit_weights)
    print(categories)
    print(cat_profiles)
    print(bpt)
    print(pf)

    model = flowsort(criteria, crit_weights, cat_profiles, bpt, pf)
    print(model.get_assignments(pt))
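
As a sanity check on the FlowSort result, the category-distribution idiom from Example #17 can be reused. A hypothetical follow-up, assuming get_assignments returns the same assignment collection type that pessimist does elsewhere in this listing:

    aa = model.get_assignments(pt)
    for cat in cat_profiles.get_ordered_categories():
        pc = len(aa.get_alternatives_in_category(cat)) / len(aa) * 100
        print("Percentage of alternatives in %s: %g %%" % (cat, pc))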
Example #13
    import random
    import sys
    import time

    from pymcda.electre_tri import ElectreTri
    from pymcda.types import AlternativePerformances
    from pymcda.types import PerformanceTable
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_random_criteria_values
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_categories_profiles
    from pymcda.generate import generate_random_profiles
    from pymcda.generate import generate_random_performance_table
    # QtGui, _MyGraphicsview, SortedPerformanceTable and
    # MetaMRSortVetoProfiles4 are assumed to come from imports
    # elsewhere in the original file

    a = generate_alternatives(2)
    c = generate_criteria(5)
    cv = generate_random_criteria_values(c, 1234)
    cv.normalize_sum_to_unity()

    worst = AlternativePerformances("worst", {crit.id: 0 for crit in c})
    best = AlternativePerformances("best", {crit.id: 1 for crit in c})
    pt = generate_random_performance_table(a, c)

    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)
    b = cps.get_ordered_profiles()
    bpt = generate_random_profiles(b, c)
    bpt['b2'].performances['c3'] = 0.2

    lbda = random.uniform(0.5, 1)

    model = ElectreTri(c, cv, bpt, lbda, cps)

    app = QtGui.QApplication(sys.argv)

    sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
    sizePolicy.setHorizontalStretch(1)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(sizePolicy.hasHeightForWidth())

    view = _MyGraphicsview()
    aa = model.pessimist(pt)

    worst = pt.get_worst(model.criteria)
    # b1 is assumed to be the lowest boundary profile; the original
    # snippet defined it before this point
    b1 = model.bpt['b1']
    best = b1

    print('Original model')
    print('==============')
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model.lbda)
    model.vpt.display(criterion_ids=cids)
    model.veto_weights.display(criterion_ids=cids)

    model2 = model.copy()
    vpt = generate_random_profiles(model.profiles, model.criteria, None, 3,
                                   worst, best)
    model2.veto = PerformanceTable([b1 - vpt[b1.id]])
    print('Original random profiles')
    print('========================')
    model2.vpt.display(criterion_ids=cids)

    pt_sorted = SortedPerformanceTable(pt)
    meta = MetaMRSortVetoProfiles4(model2, pt_sorted, aa)

    t1 = time.time()

    i = 0
    for i in range(0, 1000):
        f = meta.good / meta.na
        print('%d: fitness: %g' % (i, f))
#        model2.vpt.display(criterion_ids=cids)
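        # NOTE: the original snippet is truncated here; the lines below
        # are an assumed continuation modeled on the optimize() loops in
        # Examples #9 and #21 (MetaMRSortVetoProfiles4 is assumed to
        # expose the same optimize() protocol)
        if f == 1:
            break

        meta.optimize()

    t2 = time.time()
    print('computation time: %g seconds' % (t2 - t1))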
Example #15
def init_profiles(self):
    bpt = generate_random_profiles(self.model.profiles,
                                   self.model.criteria)
    self.model.bpt = bpt
    self.model.vpt = None

    import time
    import random
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.generate import generate_random_criteria_weights
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativePerformances
    from pymcda.types import PerformanceTable
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(7, 2, 5)
    worst = AlternativePerformances("worst", {c.id: 0 for c in model.criteria})
    best = AlternativePerformances("best", {c.id: 1 for c in model.criteria})

    # Add veto
    vpt = generate_random_profiles(model.profiles, model.criteria, None, 3,
                                   worst, model.bpt['b1'])
    model.veto = PerformanceTable([model.bpt['b1'] - vpt['b1']])
    model.veto_weights = generate_random_criteria_weights(model.criteria)
    model.veto_lbda = random.random()

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    nmeta = 20
    nloops = 10

    print('Original model')
    print('==============')
    cids = model.criteria.keys()
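
The veto above is stored as an offset below the boundary profile b1 (model.bpt['b1'] - vpt['b1']). A tiny illustration of that subtraction, with made-up values, assuming per-criterion arithmetic on AlternativePerformances as used above:

    # Boundary at 0.6/0.5, veto points at 0.2/0.1: the stored veto
    # offsets are 0.4 on both criteria (made-up values)
    b1 = AlternativePerformances('b1', {'c1': 0.6, 'c2': 0.5})
    v1 = AlternativePerformances('b1', {'c1': 0.2, 'c2': 0.1})
    print(b1 - v1)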
Example #17
def init_profiles(self):
    bpt = generate_random_profiles(self.model.profiles,
                                   self.model.criteria)
    self.model.bpt = bpt
    self.model.vpt = None

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    sorted_pt = SortedPerformanceTable(pt)

    aa = model.pessimist(pt)

    for cat in model.categories_profiles.get_ordered_categories():
        pc = len(aa.get_alternatives_in_category(cat)) / len(aa) * 100
        print("Percentage of alternatives in %s: %g %%" % (cat, pc))

    # Learn the weights with random generated profiles
    for i in range(10):
        model2 = model.copy()
        b = model.categories_profiles.get_ordered_profiles()
        model2.bpt = generate_random_profiles(b, model2.criteria)

        lp_weights = LpMRSortWeights(model2, pt, aa)
        lp_weights.solve()

        aa2 = model2.pessimist(pt)
        ca2 = compute_ca(aa, aa2)

        win2, loose2 = compute_winning_and_loosing_coalitions(
            model2.cv, model2.lbda)
        coal2_ni = list((set(winning) ^ set(win2)) & set(winning))
        coal2_add = list((set(winning) ^ set(win2)) & set(win2))

        print("Classification accuracy with random profiles: %g" % ca2)
        print("Coalitions: total: %d, common: %d, added: %d" % \
              (len(win2), (len(winning) - len(coal2_ni)), len(coal2_add)))
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model.lbda)
    print("number of possible coalitions: %d" %
          compute_number_of_winning_coalitions(model.cv, model.lbda))

    model2 = model.copy()
    model2.bpt['b1'].performances['c1'] = 0.880
    model.bpt['b1'].performances['c2'] = 0.880
    model.bpt['b1'].performances['c3'] = 0.880
    model.bpt['b1'].performances['c4'] = 0.880
    model2.bpt['b1'].performances['c5'] = 0.880
    model.bpt['b1'].performances['c6'] = 0.880
    model.bpt['b1'].performances['c7'] = 0.880
    model2.bpt['b1'].performances['c8'] = 0.880
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)
    print('Original random profiles')
    print('========================')
    model2.bpt.display(criterion_ids=cids)

    pt_sorted = SortedPerformanceTable(pt)
    meta = MetaMRSortProfiles5(model2, pt_sorted, aa)

    t1 = time.time()

    i = 0
    for i in range(0, 101):
        f = meta.good / meta.na
        print('%d: fitness: %g' % (i, f))
        model2.bpt.display(criterion_ids=cids)
        if f == 1:
            break
Example #21
def test_meta_electre_tri_profiles(seed, na, nc, ncat, na_gen, pcerrors,
                                   max_loops):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Initialize the model with random profiles
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Run the algorithm
    meta = algo(model2, pt_sorted, aa_err)

    ca2_iter = [1] * (max_loops + 1)
    aa2 = model2.pessimist(pt)
    ca2 = compute_ca(aa_err, aa2)
    ca2_best = ca2
    best_bpt = model2.bpt.copy()
    ca2_iter[0] = ca2
    nloops = 0

    for k in range(max_loops):
        if ca2_best == 1:
            break

        meta.optimize()
        nloops += 1

        aa2 = meta.aa
        ca2 = compute_ca(aa_err, aa2)

        ca2_iter[k + 1] = ca2

        if ca2 > ca2_best:
            ca2_best = ca2
            best_bpt = model2.bpt.copy()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    model2.bpt = best_bpt
    aa2 = model2.pessimist(pt)

    ok = ok_errors = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

    total = len(a)
    ca_best = ok / total
    ca_best_errors = ok_errors / total
    ca2_best_errors = ok2_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d" % (seed, na, nc, ncat, na_gen,
                    pcerrors, max_loops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops

    # Output params
    t['ca_best'] = ca_best
    t['ca_best_errors'] = ca_best_errors
    t['ca2_best'] = ca2_best
    t['ca2_best_errors'] = ca2_best_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
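
A hypothetical invocation of the test above; algo must be bound to one of the profile metaheuristics (it is read as a global inside the function), the parameter values are illustrative, and reading the results back assumes test_result supports item access:

algo = MetaMRSortProfiles4

t = test_meta_electre_tri_profiles(seed=1, na=500, nc=5, ncat=3,
                                   na_gen=1000, pcerrors=10, max_loops=30)
print("best CA on the learning set: %g (%d loops, %g s)"
      % (t['ca2_best'], t['nloops'], t['t_total']))
print("CA on the generalization set: %g" % t['ca_gen'])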
Example #22
    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(10, 3, 123)

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)

    errors = 0.0
    nlearn = 1.0

    model2 = model.copy()
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)

    a_learn = random.sample(a, int(nlearn * len(a)))
    aa_learn = AlternativesAssignments([aa[alt.id] for alt in a_learn])
    pt_learn = PerformanceTable([pt[alt.id] for alt in a_learn])

    aa_err = aa_learn.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           errors)

    print('Original model')
    print('==============')
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids,
                      alternative_ids=model.profiles)
    model.cv.display(criterion_ids=cids)