Code example #1
File: test_mip_mrsort_veto.py  Project: oso/pymcda
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):
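    # Note: vetot, veto_func, indep_veto_weights, algo and directory are not
    # parameters of this function; they are free names expected to be defined
    # at module level elsewhere in the original test script.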

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(nc, ncat,
                            seed,
                            veto_func = veto_func,
                            veto_param = veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(nc, ncat,
                            seed,
                            veto_weights = indep_veto_weights,
                            veto_func = veto_func,
                            veto_param = veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
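    # model2 starts as an empty shell holding only the criteria and the
    # category profiles; the MIP below learns the remaining parameters
    # (weights, profiles, lambda and, in the VC variant, the veto structure).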
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = next(iter(w))  # dict views are not indexable in Python 3
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues([CriterionValue(c.id,
                                                             w[c.id])
                                              for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
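    # ok: alternatives that model2 assigns to the same category as the
    # original model; ok_errors: the subset of those whose learning example
    # had been corrupted; ok2_errors: corrupted examples whose corrupted
    # assignment model2 reproduces.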
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    cmatrix_gen = {}
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all information in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" % (seed, na, nc, ncat,
                    na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, a, a_gen, pt, pt_gen, aa, aa2,
                  aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
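The function above references several free names (vetot, veto_func, indep_veto_weights, algo, directory) and pymcda helpers that are defined elsewhere in the original test script. The driver below is only a hypothetical sketch of how it might be called; the concrete values are illustrative assumptions, not taken from the original file.

# Hypothetical driver for the test function above (illustrative values only).
vetot = 'binary'                # assumed veto type selector
veto_func = 1                   # assumed veto-function identifier
indep_veto_weights = False      # assumed flag for independent veto weights
algo = MipMRSortVC              # learn with the veto-coalition MIP variant
directory = '/tmp'              # where the XMCDA dump is written

t = test_mip_mrsort_vc(seed=0, na=100, nc=5, ncat=3, na_gen=1000,
                       veto_param=0.2, pcerrors=0)
print(t.test_name)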
Code example #2
    'c5': 11
})

a45 = AlternativePerformances('a45', {
    'c1': 7,
    'c2': 7,
    'c3': 11,
    'c4': 11,
    'c5': 11
})

pt = PerformanceTable([eval("a%d" % i) for i in range(1, 46)])
aa = model.pessimist(pt)
print(aa)

nveto = [model.count_veto_pessimist(eval("a%d" % i)) for i in range(1, 46)]
print("Number of veto effects: %d" % sum(nveto))

model2 = MRSort(c, None, None, None, cps, None, None, None)
#model2.veto_lbda = model.veto_lbda
#model2.veto_weights = model.veto_weights

mip = MipMRSortVC(model2, pt, aa)
#mip = MipMRSort(model2, pt, aa)
mip.solve()

print(model2.cv)
print(model2.bpt)
print(model2.lbda)
print(model2.veto)
print(model2.veto_lbda)
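As a quick check of the learned model, one could reassign the same performance table with model2 and compare against the original assignments. This is a minimal sketch, not part of the original file; it assumes the helpers compute_ca and compute_confusion_matrix used in code example #1 live in pymcda.utils.

from pymcda.utils import compute_ca, compute_confusion_matrix  # assumed path

aa2 = model2.pessimist(pt)
print("CA on the learning set: %g" % compute_ca(aa, aa2))
print(compute_confusion_matrix(aa, aa2, model.categories))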
Code example #4
File: test_veto.py  Project: oso/pymcda
a39 = AlternativePerformances('a39', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})

a40 = AlternativePerformances('a40', {'c1': 11, 'c2': 11, 'c3':  7, 'c4':  7, 'c5': 11})
a41 = AlternativePerformances('a41', {'c1': 11, 'c2':  7, 'c3': 11, 'c4':  7, 'c5': 11})
a42 = AlternativePerformances('a42', {'c1':  7, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})

a43 = AlternativePerformances('a43', {'c1': 11, 'c2':  7, 'c3':  7, 'c4': 11, 'c5': 11})
a44 = AlternativePerformances('a44', {'c1':  7, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})

a45 = AlternativePerformances('a45', {'c1':  7, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})

pt = PerformanceTable([eval("a%d" % i) for i in range(1, 46)])
aa = model.pessimist(pt)
print(aa)

nveto = [model.count_veto_pessimist(eval("a%d" % i)) for i in range(1, 46)]
print("Number of veto effects: %d" % sum(nveto))

model2 = MRSort(c, None, None, None, cps, None, None, None)
#model2.veto_lbda = model.veto_lbda
#model2.veto_weights = model.veto_weights

mip = MipMRSortVC(model2, pt, aa)
#mip = MipMRSort(model2, pt, aa)
mip.solve()

print(model2.cv)
print(model2.bpt)
print(model2.lbda)
print(model2.veto)
print(model2.veto_lbda)
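For symmetry with the veto count printed for the original model, one might also count the veto effects triggered by the learned model and check how many assignments the two models share. A minimal sketch, reusing only names that already appear in the snippet:

aa2 = model2.pessimist(pt)
nveto2 = [model2.count_veto_pessimist(eval("a%d" % i)) for i in range(1, 46)]
print("Number of veto effects (learned model): %d" % sum(nveto2))

same = sum(1 for i in range(1, 46) if aa("a%d" % i) == aa2("a%d" % i))
print("Identical assignments: %d / 45" % same)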