Exemplo n.º 1
0
    # NOTE(review): fragment — the enclosing function's header is outside this
    # view; names (model, model2, cids, pt, a, aa, worst, best, helpers) are
    # presumably bound by the missing part. TODO confirm against full source.
    # Display the learned MR-Sort/ELECTRE-TRI model: profiles, weights, lambda.
    model2.bpt.display(criterion_ids=cids)
    model2.cv.display(criterion_ids=cids)
    print("lambda\t%.7s" % model2.lbda)
    # Veto profiles/weights are optional; show them only when the model has them.
    if model2.vpt is not None:
        model2.vpt.display(criterion_ids=cids)
    if model2.veto_weights is not None:
        model2.veto_weights.display(criterion_ids=cids)
        print("veto_lambda\t%.7s" % model2.veto_lbda)

    # Re-assign all alternatives with the learned model and compare against the
    # reference assignments (aa) to measure how well they are restored.
    aa_learned = model2.get_assignments(pt)

    total = len(a)
    nok = 0    # count of mismatched assignments
    anok = []  # alternatives the two models assign differently
    for alt in a:
        if aa(alt.id) != aa_learned(alt.id):
            anok.append(alt)
            nok += 1

    print("Good assignments: %g %%" % (float(total - nok) / total * 100))
    print("Bad assignments : %g %%" % (float(nok) / total * 100))

    # Per-category breakdown of reference vs learned assignments.
    matrix = compute_confusion_matrix(aa, aa_learned, model.categories)
    print_confusion_matrix(matrix, model.categories)

    # Tag both models and render them side by side over the same value range.
    model.id = "original"
    model2.id = "learned"
    display_electre_tri_models(
        [model, model2], [worst, worst], [best, best],
        [[ap for ap in model.vpt], [ap for ap in model2.vpt]])
# NOTE(review): top-level script tail — table_auc_*, cmatrix_*, avg_*, stdev_ca_*
# and m are presumably accumulated earlier in the (unseen) part of the script.
# Standard deviation of AUC across the processed input files.
stdev_auc_learning = np.std(np.array(table_auc_learning), axis=0)
stdev_auc_test = np.std(np.array(table_auc_test), axis=0)

# One input file per command-line argument; average the summed confusion
# matrices over the number of files (in place, rounded to 2 decimals).
nfiles = len(sys.argv) - 1
cmatrix_learning.update(
    (x, round(y / float(nfiles), 2)) for x, y in cmatrix_learning.items())
cmatrix_test.update(
    (x, round(y / float(nfiles), 2)) for x, y in cmatrix_test.items())

# Report classification accuracy (CA) and AUC as mean +- standard deviation.
print("CA learning avg: %g +- %g" % (avg_ca_learning, stdev_ca_learning))
print("CA test avg: %g +- %g" % (avg_ca_test, stdev_ca_test))
print("AUC learning avg: %g +- %g" % (avg_auc_learning, stdev_auc_learning))
print("AUC test avg: %g +- %g" % (avg_auc_test, stdev_auc_test))

# Averaged (absolute-count) confusion matrices.
print("Confusion matrix learning set")
print_confusion_matrix(cmatrix_learning, m.categories)
print("Confusion matrix test set")
print_confusion_matrix(cmatrix_test, m.categories)

# Normalize each matrix to percentages of its own total and print again.
cmatrix_learning_total = sum(cmatrix_learning.values())
cmatrix_test_total = sum(cmatrix_test.values())
cmatrix_learning.update((x, round(100 * y / float(cmatrix_learning_total), 2))
                        for x, y in cmatrix_learning.items())
cmatrix_test.update((x, round(100 * y / float(cmatrix_test_total), 2))
                    for x, y in cmatrix_test.items())

print("Confusion matrix learning set")
print_confusion_matrix(cmatrix_learning, m.categories)
print("Confusion matrix test set")
print_confusion_matrix(cmatrix_test, m.categories)
Exemplo n.º 3
0
    # NOTE(review): fragment — the enclosing function's header is outside this
    # view; names (model, model2, cids, pt, a, aa, worst, best, helpers) are
    # presumably bound by the missing part. TODO confirm against full source.
    # Display the learned model: criteria weights, majority threshold, and the
    # optional veto profiles/weights.
    model2.cv.display(criterion_ids=cids)
    print("lambda\t%.7s" % model2.lbda)
    if model2.vpt is not None:
        model2.vpt.display(criterion_ids=cids)
    if model2.veto_weights is not None:
        model2.veto_weights.display(criterion_ids=cids)
        print("veto_lambda\t%.7s" % model2.veto_lbda)

    # Re-assign all alternatives with the learned model and compare against the
    # reference assignments (aa) to measure how well they are restored.
    aa_learned = model2.get_assignments(pt)

    total = len(a)
    nok = 0    # count of mismatched assignments
    anok = []  # alternatives the two models assign differently
    for alt in a:
        # FIX: the original used '<>', which is Python 2-only syntax and a
        # SyntaxError under Python 3; '!=' matches the rest of this file.
        if aa(alt.id) != aa_learned(alt.id):
            anok.append(alt)
            nok += 1

    print("Good assignments: %g %%" % (float(total - nok) / total * 100))
    print("Bad assignments : %g %%" % (float(nok) / total * 100))

    # Per-category breakdown of reference vs learned assignments.
    matrix = compute_confusion_matrix(aa, aa_learned, model.categories)
    print_confusion_matrix(matrix, model.categories)

    # Tag both models and render them side by side over the same value range.
    model.id = "original"
    model2.id = "learned"
    display_electre_tri_models([model, model2],
                               [worst, worst], [best, best],
                               [[ap for ap in model.vpt],
                                [ap for ap in model2.vpt]])
Exemplo n.º 4
0
# NOTE(review): top-level script tail — table_auc_*, cmatrix_*, avg_*, stdev_ca_*
# and m are presumably accumulated earlier in the (unseen) part of the script.
# Standard deviation of AUC across the processed input files.
stdev_auc_learning = np.std(np.array(table_auc_learning), axis=0)
stdev_auc_test = np.std(np.array(table_auc_test), axis=0)

# One input file per command-line argument; average the summed confusion
# matrices over the number of files (in place, rounded to 2 decimals).
nfiles = len(sys.argv) - 1
cmatrix_learning.update((x, round(y / float(nfiles), 2))
                        for x, y in cmatrix_learning.items())
cmatrix_test.update((x, round(y / float(nfiles), 2))
                    for x, y in cmatrix_test.items())

# Report classification accuracy (CA) and AUC as mean +- standard deviation.
print("CA learning avg: %g +- %g" % (avg_ca_learning, stdev_ca_learning))
print("CA test avg: %g +- %g" % (avg_ca_test, stdev_ca_test))
print("AUC learning avg: %g +- %g" % (avg_auc_learning, stdev_auc_learning))
print("AUC test avg: %g +- %g" % (avg_auc_test, stdev_auc_test))

# Averaged (absolute-count) confusion matrices.
print("Confusion matrix learning set")
print_confusion_matrix(cmatrix_learning, m.categories)
print("Confusion matrix test set")
print_confusion_matrix(cmatrix_test, m.categories)

# Normalize each matrix to percentages of its own total and print again.
cmatrix_learning_total = sum(cmatrix_learning.values())
cmatrix_test_total = sum(cmatrix_test.values())
cmatrix_learning.update((x, round(100 * y / float(cmatrix_learning_total), 2))
                        for x, y in cmatrix_learning.items())
cmatrix_test.update((x, round(100 * y / float(cmatrix_test_total), 2))
                    for x, y in cmatrix_test.items())

print("Confusion matrix learning set")
print_confusion_matrix(cmatrix_learning, m.categories)
print("Confusion matrix test set")
print_confusion_matrix(cmatrix_test, m.categories)
Exemplo n.º 5
0
    # NOTE(review): fragment — enclosing block (and the bindings of anok, data,
    # aa2) is outside this view. Prints the performance table and the two
    # assignment sets for the wrongly assigned alternatives.
    print_pt_and_assignments(anok.keys(), data.c.keys(), [data.aa, aa2],
                             data.pt)

# NOTE(review): top-level script tail — model, model_type, algo, data, aa2,
# t_total, ca, auc are presumably set earlier in the (unseen) part of the file.
# Print the learned model's parameters, depending on the model family.
print("Model parameters:")
cids = model.criteria.keys()
if model_type == 'mrsort':
    # MR-Sort: category profiles, criteria weights, majority threshold.
    print(model.bpt)
    print(model.cv)
    print("lambda: %.7s" % model.lbda)

    # Post-process weights/lambda with a linear program (meta_mrsort only).
    print("Weights and lambda optimization:")
    if algo == 'meta_mrsort':
        lp = LpMRSortPostWeights(model.cv, model.lbda)
        obj, model.cv, model.lbda = lp.solve()
    print(model.cv)
    print(model.lbda)

#    display_electre_tri_models([model], [worst], [best])
elif model_type == 'utadis':
    # UTADIS: per-criterion value functions and category utility thresholds.
    model.cfs.display(criterion_ids=cids)
    model.cat_values.display()
#    display_utadis_model(model.cfs)

# Overall run statistics: elapsed time, classification accuracy, AUC.
print("t:   %g" % t_total)
print("CA:  %g" % ca)
print("AUC: %g" % auc)

print("Confusion matrix:")
print_confusion_matrix(
    compute_confusion_matrix(data.aa, aa2, data.cats.get_ordered_categories()))
Exemplo n.º 6
0
    # NOTE(review): fragment — enclosing block (and the bindings of anok, data,
    # aa2) is outside this view. Lists alternatives whose learned assignment
    # differs from the reference one, with their performance-table rows.
    print("Alternatives wrongly assigned:")
    print_pt_and_assignments(anok.keys(), data.c.keys(), [data.aa, aa2], data.pt)

# NOTE(review): top-level script tail — model, model_type, algo, data, aa2,
# t_total, ca, auc are presumably set earlier in the (unseen) part of the file.
# Print the learned model's parameters, depending on the model family.
print("Model parameters:")
cids = model.criteria.keys()
if model_type == 'mrsort':
    # MR-Sort: category profiles, criteria weights, majority threshold.
    print(model.bpt)
    print(model.cv)
    print("lambda: %.7s" % model.lbda)

    # Post-process weights/lambda with a linear program (meta_mrsort only).
    print("Weights and lambda optimization:")
    if algo == 'meta_mrsort':
        lp = LpMRSortPostWeights(model.cv, model.lbda)
        obj, model.cv, model.lbda = lp.solve()
    print(model.cv)
    print(model.lbda)

#    display_electre_tri_models([model], [worst], [best])
elif model_type == 'utadis':
    # UTADIS: per-criterion value functions and category utility thresholds.
    model.cfs.display(criterion_ids = cids)
    model.cat_values.display()
#    display_utadis_model(model.cfs)

# Overall run statistics: elapsed time, classification accuracy, AUC.
print("t:   %g" % t_total)
print("CA:  %g" % ca)
print("AUC: %g" % auc)

print("Confusion matrix:")
print_confusion_matrix(compute_confusion_matrix(data.aa, aa2,
                                                data.cats.get_ordered_categories()))