    model2.bpt.display(criterion_ids=cids)
    model2.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model2.lbda)
    model2.vpt.display(criterion_ids=cids)
    model2.veto_weights.display(criterion_ids=cids)
    print("veto lambda: %.7s" % model2.veto_lbda)

    aa2 = model2.get_assignments(pt)
    if aa2 != meta.aa:
        print('Error in classification accuracy computation!')

    total = len(a)
    nok = 0
    anok = []
    for alt in a:
        if aa(alt.id) != aa2(alt.id):
            anok.append(alt.id)
            nok += 1

    print("Good assignments: %3g %%" % (float(total - nok) / total * 100))
    print("Bad assignments : %3g %%" % (float(nok) / total * 100))

    if len(anok) > 0:
        print("Alternatives wrongly assigned:")
        print_pt_and_assignments(anok, model.criteria.keys(), [aa, aa2], pt)

    aps = [pt[aid] for aid in anok]
    display_electre_tri_models(
        [model, model2], [worst, worst], [best, best],
        [[ap for ap in model.vpt], [ap for ap in model2.vpt]])
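The loop above computes the classification accuracy (CA) by hand: the share of alternatives that both assignment objects place in the same category. A minimal equivalent helper, assuming (as in the snippet) that calling an assignment object with an alternative id returns its category; pymcda.utils ships compute_ca for the same purpose, as used in later examples. The helper name here is hypothetical:

    def classification_accuracy(a, aa, aa2):
        # Fraction of alternatives assigned identically by both models;
        # a is the alternative set, aa and aa2 the two assignments.
        ok = sum(1 for alt in a if aa(alt.id) == aa2(alt.id))
        return float(ok) / len(a)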
Example no. 2
    print(model2.veto_weights)
    # Display learned model parameters
    print('Learned model')
    print('=============')
    model2.bpt.display(criterion_ids=cids)
    model2.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model2.lbda)

    # Compute assignment with the learned model
    aa2 = model2.pessimist(pt)

    # Compute CA
    total = len(a)
    nok = 0
    anok = []
    for alt in a:
        if aa(alt.id) != aa2(alt.id):
            anok.append(alt.id)
            nok += 1

    print("Good assignments: %3g %%" % (float(total - nok) / total * 100))
    print("Bad assignments : %3g %%" % (float(nok) / total * 100))

    if len(anok) > 0:
        print("Alternatives wrongly assigned:")
        print_pt_and_assignments(anok, model.criteria.keys(), [aa, aa2],
                                 pt)

    # Display models
    display_electre_tri_models([model, model2], [worst, worst], [best, best])
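model2.pessimist(pt) applies MRSort's pessimistic majority rule. As a rough sketch of the idea only (not pymcda's actual implementation, and assuming all criteria are maximized and profiles are ordered from the lowest boundary up): an alternative is assigned above the highest boundary profile for which the weights of the criteria on which it performs at least as well sum to lambda or more.

    def mrsort_pessimist(perf, profiles, weights, lbda):
        # perf and each profile map a criterion id to a value; returns a
        # category index, 0 being the worst category. Illustrative only.
        cat = 0
        for h, profile in enumerate(profiles):
            support = sum(w for c, w in weights.items()
                          if perf[c] >= profile[c])
            if support >= lbda:
                cat = h + 1
        return cat

    # mrsort_pessimist({'c1': 8, 'c2': 6}, [{'c1': 5, 'c2': 5}],
    #                  {'c1': 0.5, 'c2': 0.5}, 0.6) -> 1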
Example no. 3
    anok = []
    for alt in a:
        if aa(alt.id) != aa_learned(alt.id):
            anok.append(alt)
            nok += 1

    print("Good assignments: %g %%" % (float(total-nok)/total*100))
    print("Bad assignments : %g %%" % (float(nok)/total*100))

    win1, loose1 = compute_winning_and_loosing_coalitions(model.cv,
                                                          model.lbda)
    win2, loose2 = compute_winning_and_loosing_coalitions(model2.cv,
                                                          model2.lbda)
    coali = list(set(win1) & set(win2))
    coal1e = list(set(win1) ^ set(coali))
    coal2e = list(set(win2) ^ set(coali))

    print("Number of coalitions original: %d"
          % len(win1))
    print("Number of coalitions learned: %d"
          % len(win2))
    print("Number of common coalitions: %d"
          % len(coali))
    print("Coalitions in original and not in learned: %s"
          % '; '.join(map(str, coal1e)))
    print("Coalitions in learned and not in original: %s"
          % '; '.join(map(str, coal2e)))

    display_electre_tri_models([model, model2],
                               [worst, worst], [best, best])
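Because coali is the intersection of win1 and win2, the symmetric differences above reduce to plain set differences: set(win1) ^ set(coali) is exactly the winning coalitions of the original model that the learned model lost, and symmetrically for coal2e. A small worked example with hypothetical coalitions of criterion ids:

    win1 = {frozenset(['c1', 'c2']), frozenset(['c1', 'c3'])}
    win2 = {frozenset(['c1', 'c2']), frozenset(['c2', 'c3'])}
    coali = win1 & win2                  # {frozenset({'c1', 'c2'})}
    assert win1 ^ coali == win1 - coali  # lost by the learned model
    assert win2 ^ coali == win2 - coali  # added by the learned model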
Example no. 4

        coal2_ni = list((set(winning) ^ set(win2)) & set(winning))
        coal2_add = list((set(winning) ^ set(win2)) & set(win2))

        print("Classification accuracy with random profiles: %g" % ca2)
        print("Coalitions: total: %d, common: %d, added: %d" % \
              (len(win2), (len(winning) - len(coal2_ni)), len(coal2_add)))

    # Learn the weights with profiles generated by the heuristic
    model3 = model.copy()
    heur = HeurMRSortInitProfiles(model3, sorted_pt, aa)
    heur.solve()

    lp_weights = LpMRSortWeights(model3, pt, aa)
    lp_weights.solve()

    aa3 = model3.pessimist(pt)
    ca3 = compute_ca(aa, aa3)

    win3, loose3 = compute_winning_and_loosing_coalitions(
        model3.cv, model3.lbda)
    coal3_ni = list((set(winning) ^ set(win3)) & set(winning))
    coal3_add = list((set(winning) ^ set(win3)) & set(win3))

    print("Classification accuracy with heuristic: %g" % ca3)
    print("Coalitions: total: %d, common: %d, added: %d" % \
          (len(win3), (len(winning) - len(coal3_ni)), len(coal3_add)))

    display_electre_tri_models([model], [pt.get_worst(model.criteria)],
                               [pt.get_best(model.criteria)],
                               [[ap for ap in model3.bpt]])
Example no. 5
    print("lambda: %.7s" % model2.lbda)
    model2.vpt.display(criterion_ids=cids)
    model2.veto_weights.display(criterion_ids=cids)
    print("veto lambda: %.7s" % model2.veto_lbda)

    aa2 = model2.get_assignments(pt)
    if aa2 != meta.aa:
        print('Error in classification accuracy computation!')

    total = len(a)
    nok = 0
    anok = []
    for alt in a:
        if aa(alt.id) != aa2(alt.id):
            anok.append(alt.id)
            nok += 1

    print("Good assignments: %3g %%" % (float(total-nok)/total*100))
    print("Bad assignments : %3g %%" % (float(nok)/total*100))

    if len(anok) > 0:
        print("Alternatives wrongly assigned:")
        print_pt_and_assignments(anok, model.criteria.keys(),
                                 [aa, aa2], pt)

    aps = [pt[aid] for aid in anok]
    display_electre_tri_models([model, model2],
                               [worst, worst], [best, best],
                               [[ap for ap in model.vpt],
                                [ap for ap in model2.vpt]])
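The veto parameters displayed above (vpt, veto_weights, veto_lbda) mirror the concordance test. A rough reading, offered as an assumption rather than pymcda's exact rule: an assignment is blocked at a boundary when the criteria on which the alternative falls strictly below the veto profile carry a combined veto weight of at least veto_lbda.

    def vetoed(perf, veto_profile, veto_weights, veto_lbda):
        # True when the criteria on which perf falls strictly below the
        # veto profile gather enough veto weight to block the assignment.
        against = sum(w for c, w in veto_weights.items()
                      if perf[c] < veto_profile[c])
        return against >= veto_lbda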
Example no. 6

    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
    model.cv.display()
    print("lambda: %.7s" % model.lbda)

    # Compute assignment with the learned model
    aa2 = model.pessimist(pt)

    # Compute CA
    total = len(aa)
    nok = 0
    anok = []
    for alt in aa:
        if aa(alt.id) != aa2(alt.id):
            anok.append(alt.id)
            nok += 1

    print("Good assignments: %3g %%" % (float(total - nok) / total * 100))
    print("Bad assignments : %3g %%" % (float(nok) / total * 100))

    if len(anok) > 0:
        print("Alternatives wrongly assigned:")
        print_pt_and_assignments(anok, model.criteria.keys(), [aa, aa2],
                                 pt)

    # Display models
    display_electre_tri_models([model], [worst], [best])
Example no. 7
    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
    model.cv.display()
    print("lambda: %.7s" % model.lbda)

    # Compute assignment with the learned model
    aa2 = model.pessimist(pt)

    # Compute CA
    total = len(aa)
    nok = 0
    anok = []
    for alt in aa:
        if aa(alt.id) != aa2(alt.id):
            anok.append(alt.id)
            nok += 1

    print("Good assignments: %3g %%" % (float(total-nok)/total*100))
    print("Bad assignments : %3g %%" % (float(nok)/total*100))

    if len(anok) > 0:
        print("Alternatives wrongly assigned:")
        print_pt_and_assignments(anok, model.criteria.keys(),
                                 [aa, aa2], pt)

    # Display models
    display_electre_tri_models([model], [worst], [best])
Example no. 8

    pt_learning_ok = []
    pt_learning_too_low = []
    pt_learning_too_high = []
    for a in a_learning:
        i1 = categories.index(aa_learning_m1[a].category_id)
        i2 = categories.index(aa_learning_m2[a].category_id)
        if i1 == i2:
            pt_learning_ok.append(pt_learning[a])
        elif i1 < i2:
            pt_learning_too_high.append(pt_learning[a])
        elif i1 > i2:
            pt_learning_too_low.append(pt_learning[a])

    a_test = aa_test_m1.keys()
    pt_test_ok = []
    pt_test_too_low = []
    pt_test_too_high = []
    for a in a_test:
        i1 = categories.index(aa_test_m1[a].category_id)
        i2 = categories.index(aa_test_m2[a].category_id)
        if i1 == i2:
            pt_test_ok.append(pt_test[a])
        elif i1 < i2:
            pt_test_too_high.append(pt_test[a])
        elif i1 > i2:
            pt_test_too_low.append(pt_test[a])

    display_electre_tri_models([m2, m2], [worst, worst], [best, best],
                               [m2.vpt, m2.vpt],
                               [pt_learning_too_low, pt_test_too_low],
                               None,
                               [pt_learning_too_high, pt_test_too_high])
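The learning-set and test-set partition loops above are identical up to the data they read. A hedged refactoring sketch (the helper name is hypothetical), assuming assignments expose category_id and that categories is ordered from worst to best:

    def split_by_agreement(alts, aa_m1, aa_m2, pt, categories):
        # Partition performance vectors by how model 2's assignment
        # compares with model 1's: same category, too high, or too low.
        ok, too_low, too_high = [], [], []
        for a in alts:
            i1 = categories.index(aa_m1[a].category_id)
            i2 = categories.index(aa_m2[a].category_id)
            if i1 == i2:
                ok.append(pt[a])
            elif i1 < i2:
                too_high.append(pt[a])
            else:
                too_low.append(pt[a])
        return ok, too_low, too_high

    # pt_test_ok, pt_test_too_low, pt_test_too_high = \
    #     split_by_agreement(a_test, aa_test_m1, aa_test_m2, pt_test, categories)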
Example no. 9
if __name__ == "__main__":
    m = generate_random_mrsort_model(5, 2, 123)

    winning, loosing = compute_winning_and_loosing_coalitions(m.cv, m.lbda)
    print("Number of winning coalitions: %d" % len(winning))
    print("List of coalitions:")
    display_coalitions(winning)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, m.criteria)

    aa = m.pessimist(pt)

    heur = HeurMRSortCoalitions(m.criteria, m.categories, pt, aa)
    aids = [aid[0] for aid in heur.sorted_extrem]
    display_electre_tri_models([m], [pt.get_worst(m.criteria)],
                               [pt.get_best(m.criteria)],
                               [[pt[aid] for aid in aids[:100]]])

    coal2 = heur.find_coalitions(100)
    print("List of coalitions found:")
    display_coalitions(coal2)

    coal_ni = list((set(winning) ^ set(coal2)) & set(winning))
    print("List of coalitions not identified (%d):" % len(coal_ni))
    display_coalitions(coal_ni)

    coal_add = list((set(winning) ^ set(coal2)) & set(coal2))
    print("List of coalitions added (%d):" % len(coal_add))
    display_coalitions(coal_add)
Example no. 10
#!/usr/bin/python
import sys
sys.path.append("..")
from pymcda.electre_tri import ElectreTri
from pymcda.ui.graphic import display_electre_tri_models
from datasets import ticino
from datasets import loulouka

if __name__ == "__main__":
    etri = ElectreTri(ticino.c, ticino.cv, ticino.ptb, ticino.lbda, ticino.cps)
    etri2 = ElectreTri(loulouka.c, loulouka.cv, loulouka.ptb, loulouka.lbda,
                       loulouka.cps)

    worst_ticino = ticino.pt.get_worst(ticino.c)
    worst_loulouka = loulouka.pt.get_worst(loulouka.c)

    best_ticino = ticino.pt.get_best(ticino.c)
    best_loulouka = loulouka.pt.get_best(loulouka.c)

    display_electre_tri_models([etri, etri2], [worst_ticino, worst_loulouka],
                               [best_ticino, best_loulouka])
Example no. 11
        coal2_ni = list((set(winning) ^ set(win2)) & set(winning))
        coal2_add = list((set(winning) ^ set(win2)) & set(win2))

        print("Classification accuracy with random profiles: %g" % ca2)
        print("Coalitions: total: %d, common: %d, added: %d" % \
              (len(win2), (len(winning) - len(coal2_ni)), len(coal2_add)))

    # Learn the weights with profiles generated by the heuristic
    model3 = model.copy()
    heur = HeurMRSortInitProfiles(model3, sorted_pt, aa)
    heur.solve()

    lp_weights = LpMRSortWeights(model3, pt, aa)
    lp_weights.solve()

    aa3 = model3.pessimist(pt)
    ca3 = compute_ca(aa, aa3)

    win3, loose3 = compute_winning_and_loosing_coalitions(model3.cv,
                                                          model3.lbda)
    coal3_ni = list((set(winning) ^ set(win3)) & set(winning))
    coal3_add = list((set(winning) ^ set(win3)) & set(win3))

    print("Classification accuracy with heuristic: %g" % ca3)
    print("Coalitions: total: %d, common: %d, added: %d" % \
          (len(win3), (len(winning) - len(coal3_ni)), len(coal3_add)))

    display_electre_tri_models([model], [pt.get_worst(model.criteria)],
                               [pt.get_best(model.criteria)],
                               [[ap for ap in model3.bpt]])