    fauc = open('%s-auc.dat' % bname, 'w+')
    auc = m.auc(aa_learning, pt_learning)
    print("%.4f" % auc, end='', file=fauc)
    fauc.close()

    fmisclassified = open('%s-misclassified.dat' % bname, 'w+')
    print("{Alternative} ", file=fmisclassified, end='')
    print("{Original assignment} ", file=fmisclassified, end='')
    print("{Model assignment}", file=fmisclassified, end='')
    for c in criteria:
        print(" {%s}" % criteria_names[c], file=fmisclassified, end='')
    print("\n", file=fmisclassified, end='')

    misclassified_aids = []
    for aid in aa_learning.keys():
        aa1 = aa_learning[aid].category_id
        aa2 = aa_learned[aid].category_id
        if aa1 == aa2:
            continue

        misclassified_aids.append(aid)

    misclassified_aids.sort(key=lambda item: (len(item), item))

    for aid in misclassified_aids:
        ap = pt_learning[aid]
        aa1 = aa_learning[aid].category_id
        aa2 = aa_learned[aid].category_id
        print("%s " % aid, file=fmisclassified, end='')
        print("%s %s" % (aa1, aa2), file=fmisclassified, end='')
        # one column per criterion, matching the header written above
        for c in criteria:
            print(" %s" % ap.performances[c], file=fmisclassified, end='')
        print("\n", file=fmisclassified, end='')
    fmisclassified.close()
Example #4
File: mip_cmrsort.py, Project: oso/pymcda
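This example builds a toy performance table of six alternatives over four binary criteria, assigns them to two categories, and learns an MR-Sort model from those assignments by solving a mixed-integer program (MipCMRSort). The criteria `c` and category profiles `cps` passed to MRSort are presumably defined earlier in the file and are not part of this excerpt.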
    ap1 = AlternativePerformances('a1', {'c1': 1, 'c2': 1, 'c3': 0, 'c4': 0})
    ap2 = AlternativePerformances('a2', {'c1': 0, 'c2': 0, 'c3': 1, 'c4': 1})
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)
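    # NOTE: worst and best are computed but not used in this excerpt; the
    # full example presumably uses them to bound the learned profiles.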

    # Run the MIP
    mip = MipCMRSort(model, pt, aa)
    mip.solve()

    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
    model.cv.display()
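Note that the assignments are deliberately non-additive: cat1 is reached exactly by the coalitions {c1, c2} and {c3, c4}, while every mixed pair (c1/c3, c1/c4, c2/c3, c2/c4) stays in cat2. Additive weights would require w1 + w2 >= lambda and w3 + w4 >= lambda but also w1 + w3 < lambda and w2 + w4 < lambda, which is contradictory because both pairs of sums total w1 + w2 + w3 + w4. Capturing this interaction between criteria is presumably the point of using the capacitive MipCMRSort formulation here.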
Example #5
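This fragment is apparently the body of a benchmarking loop: for each run it records classification accuracy (ca_learning / ca_test) and AUC on the learning and test sets, counts how many learned models use a veto threshold (veto_lbda), and accumulates confusion matrices keyed by (original category, model category) pairs.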
    table_ca_learning.append(ca_learning)
    table_ca_test.append(ca_test)

    # Compute area under the curve
    auc_learning = m.auc(aa_learning, pt_learning)
    auc_test = m.auc(aa_test, pt_test)

    table_auc_learning.append(auc_learning)
    table_auc_test.append(auc_test)

    if m.veto_lbda is not None:
        nveto += 1

    # Compute confusion matrices
    for a in aa_learning.keys():
        key = (aa_learning[a].category_id, aa_learning_m2[a].category_id)
        if key in cmatrix_learning:
            cmatrix_learning[key] += 1
        else:
            cmatrix_learning[key] = 1

    for a in aa_test.keys():
        key = (aa_test[a].category_id, aa_test_m2[a].category_id)
        if key in cmatrix_test:
            cmatrix_test[key] += 1
        else:
            cmatrix_test[key] = 1

print("nveto: %d" % nveto)
avg_ca_learning = sum(table_ca_learning) / float(len(table_ca_learning))
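The if/else accumulation in the two confusion-matrix loops above can be written more compactly with a defaultdict; a minimal equivalent sketch for the learning set:

from collections import defaultdict

cmatrix_learning = defaultdict(int)
for a in aa_learning.keys():
    # count one occurrence of each (original, learned) category pair
    key = (aa_learning[a].category_id, aa_learning_m2[a].category_id)
    cmatrix_learning[key] += 1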
    matrix = compute_confusion_matrix(aa_test_m1, aa_test_m2, m2.categories)
    print_confusion_matrix(matrix, m2.categories)
    aids = [a.id for a in aa_test_m1
            if aa_test_m1[a.id].category_id != aa_test_m2[a.id].category_id]
    if len(aids) > 0:
        print("List of alternatives wrongly assigned:")
        print_pt_and_assignments(aids, None, [aa_test_m1, aa_test_m2],
                                 pt_test)

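# The branch below inspects the learned model when m2 is an MRSort: each
# learning alternative is classified as correctly assigned, assigned too high
# (m2 puts it in a better category than m1), or assigned too low, by
# comparing category indices.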
if isinstance(m2, MRSort):
    worst = AlternativePerformances('worst', {c.id: 0 for c in m2.criteria})
    best = AlternativePerformances('best', {c.id: 1 for c in m2.criteria})

    categories = m2.categories

    a_learning = aa_learning_m1.keys()
    pt_learning_ok = []
    pt_learning_too_low = []
    pt_learning_too_high = []
    for a in a_learning:
        i1 = categories.index(aa_learning_m1[a].category_id)
        i2 = categories.index(aa_learning_m2[a].category_id)
        if i1 == i2:
            pt_learning_ok.append(pt_learning[a])
        elif i1 < i2:
            pt_learning_too_high.append(pt_learning[a])
        elif i1 > i2:
            pt_learning_too_low.append(pt_learning[a])

    a_test = aa_test_m1.keys()
    pt_test_ok = []