Example #1
    def test001(self):
        random.seed(1)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        bp = AlternativePerformances('b1', {'c1': 0.5, 'c2': 0.5,
                                            'c3': 0.5, 'c4': 0.5})
        bpt = PerformanceTable([bp])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

        for aa in aas:
            # Recompute by hand the weight of the coalition in favour of the
            # profile (use `cid` to avoid shadowing the criteria list `c`)
            w = 0
            perfs = pt[aa.id].performances
            for cid, val in perfs.items():
                if val >= bp.performances[cid]:
                    w += cv[cid].value

            if aa.category_id == 'cat2':
                self.assertLess(w, lbda)
            else:
                self.assertGreaterEqual(w, lbda)

    def test002(self):
        random.seed(2)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75,
            'c4': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25,
            'c4': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

        for aa in aas:
            w1 = w2 = 0
            perfs = pt[aa.id].performances
            for cid, val in perfs.items():
                if val >= bp1.performances[cid]:
                    w1 += cv[cid].value
                if val >= bp2.performances[cid]:
                    w2 += cv[cid].value

            if aa.category_id == 'cat3':
                self.assertLess(w1, lbda)
                self.assertLess(w2, lbda)
            elif aa.category_id == 'cat2':
                self.assertLess(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
            else:
                self.assertGreaterEqual(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
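
Both tests above re-derive the MRSort pessimistic (majority) rule by hand. As
a minimal standalone sketch of that concordance check (outranks is a
hypothetical helper, not part of the library):

def outranks(perfs, profile, weights, lbda):
    # Weighted coalition of criteria on which the alternative reaches the
    # profile; the alternative passes the profile when it meets lambda
    w = sum(weights[cid] for cid, val in perfs.items()
            if val >= profile[cid])
    return w >= lbda

# With the equal weights and the 0.5 profile of test001:
weights = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}
profile = {'c1': 0.5, 'c2': 0.5, 'c3': 0.5, 'c4': 0.5}
assert outranks({'c1': 0.6, 'c2': 0.6, 'c3': 0.1, 'c4': 0.1},
                profile, weights, 0.5)      # w = 0.5 >= 0.5
assert not outranks({'c1': 0.6, 'c2': 0.1, 'c3': 0.1, 'c4': 0.1},
                    profile, weights, 0.5)  # w = 0.25 < 0.5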
    fcoalitions = open('%s-wcoalitions.dat' % bname, 'w+')

    mwinning = compute_minimal_winning_coalitions(lp.fmins)
    for win in mwinning:
        win = sorted(win, key=criteria_order.index)
        names = ", ".join(criteria_names[c] for c in win)
        print('[%s]' % names, file=fcoalitions)

    fcoalitions.close()
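
For reference, a brute-force sketch of the minimal-winning-coalition notion
used above, assuming the usual semantics (a set of criteria whose weights
reach lambda, none of whose proper subsets does); the helper name is
hypothetical:

from itertools import combinations

def minimal_winning_coalitions_sketch(weights, lbda):
    winning = [set(s) for n in range(1, len(weights) + 1)
               for s in combinations(weights, n)
               if sum(weights[c] for c in s) >= lbda]
    return [s for s in winning if not any(t < s for t in winning)]

# minimal_winning_coalitions_sketch({'c1': 0.4, 'c2': 0.4, 'c3': 0.2}, 0.6)
# -> [{'c1', 'c2'}, {'c1', 'c3'}, {'c2', 'c3'}]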

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_learned = m.pessimist(pt_learning)

    fca = open('%s-ca_learning.dat' % bname, 'w+')
    ca = compute_ca(aa_learning, aa_learned)
    print("%.4f" % ca, end='', file=fca)
    fca.close()

    fauc = open('%s-auc_learning.dat' % bname, 'w+')
    auc = m.auc(aa_learning, pt_learning)
    print("%.4f" % auc, end='', file=fauc)
    fauc.close()

    ca_learning.append(ca)
    auc_learning.append(auc)

    pt_test = PerformanceTable().from_xmcda(root, 'test_set')
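
Throughout these examples compute_ca is taken to be the plain classification
accuracy: the fraction of alternatives assigned to the same category by both
assignment sets. A hedged sketch under that assumption (the container API is
assumed, not verified):

def compute_ca_sketch(aa1, aa2):
    if len(aa1) == 0:
        return 0
    same = sum(1 for a in aa1 if a.category_id == aa2[a.id].category_id)
    return same / len(aa1)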
Example #4
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)
    global aaa
    global allm
    global fct_ca
    global LOO

    # Separate learning data and test data
    if LOO:
        pt_learning, pt_test = data.pt.split_LOO(seed)
    else:
        pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())


    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed = seed * 100)

    for i in range(nloop):
        model, ca_learning, all_models = meta.optimize(nmeta, fct_ca)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)

    ca_learning = compute_ca(aa_learning, aa_learning2)
    ca_learning_good = compute_ca_good(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)

    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        ca_test_good = compute_ca_good(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        ca_test_good = 0
        auc_test = 0
        # Keep aa_test2 defined: it is dumped into `aaa` below even when the
        # test set is empty
        aa_test2 = AlternativesAssignments([])
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    ca_good = compute_ca_good(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['ca_learning_good'] = ca_learning_good
    t['ca_test_good'] = ca_test_good
    t['ca_all_good'] = ca_good
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    aaa[seed] = dict()
    aaa[seed]['id'] = seed
    aaa[seed]['learning_asgmt_id'] = [i.id for i in aa_learning]
    aaa[seed]['learning_asgmt'] = [i.category_id for i in aa_learning]
    aaa[seed]['learning_asgmt2'] = [i.category_id for i in aa_learning2]
    aaa[seed]['test_asgmt_id'] = [i.id for i in aa_test]
    aaa[seed]['test_asgmt'] = [i.category_id for i in aa_test]
    aaa[seed]['test_asgmt2'] = [i.category_id for i in aa_test2]
    aaa[seed]['criteria'] = list(model.criteria.keys())
    aaa[seed]['criteria_weights'] = [str(i.value) for i in model.cv.values()]
    aaa[seed]['profiles_values'] = [str(model.bpt['b1'].performances[i])
                                    for i in model.criteria.keys()]
    aaa[seed]['lambda'] = model.lbda


    allm[seed] = dict()
    allm[seed]['id'] = seed
    current_model = 0
    allm[seed]['mresults'] = dict()
    # Iterate over the remaining models, skipping the first (best) one,
    # which was already treated above
    for all_model in list(all_models)[1:]:
        current_model += 1
        allm[seed]['mresults'][current_model] = ["", ""]
        aa_learning2_allm = all_model.model.pessimist(pt_learning)
        ca_learning_allm = compute_ca(aa_learning, aa_learning2_allm)
        ca_learning_good_allm = compute_ca_good(aa_learning, aa_learning2_allm)
        auc_learning_allm = all_model.model.auc(aa_learning, pt_learning)
        # Compute CA of the test set
        if len(aa_test) > 0:
            aa_test2_allm = all_model.model.pessimist(pt_test)
            ca_test_allm = compute_ca(aa_test, aa_test2_allm)
            ca_test_good_allm = compute_ca_good(aa_test, aa_test2_allm)
            auc_test_allm = all_model.model.auc(aa_test, pt_test)
        else:
            ca_test_allm = 0
            ca_test_good_allm = 0
            auc_test_allm = 0
        # Compute CA of whole set
        aa2_allm = all_model.model.pessimist(data.pt)
        ca_allm = compute_ca(data.aa, aa2_allm)
        ca_good_allm = compute_ca_good(data.aa, aa2_allm)
        auc_allm = all_model.model.auc(data.aa, data.pt)
        header = ('na_learning,na_test,ca_learning,ca_test,ca_all,'
                  'ca_learning_good,ca_test_good,ca_all_good,'
                  'auc_learning,auc_test,auc_all')
        values = ','.join(str(x) for x in (
            len(aa_learning), len(aa_test), ca_learning_allm, ca_test_allm,
            ca_allm, ca_learning_good_allm, ca_test_good_allm, ca_good_allm,
            auc_learning_allm, auc_test_allm, auc_allm))
        allm[seed]['mresults'][current_model][0] = header
        allm[seed]['mresults'][current_model][1] = values

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
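
A note on the learning/test separation: split(2, [p, 100 - p]) is read here as
a random p% / (100 - p)% partition of the alternatives. A minimal sketch under
that assumption (split_ids is hypothetical, not the library API):

import random

def split_ids(ids, pclearning, seed):
    rng = random.Random(seed)
    ids = list(ids)
    rng.shuffle(ids)
    k = len(ids) * pclearning // 100
    return ids[:k], ids[k:]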
Example #5
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize ELECTRE-TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the linear program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # CA learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Example #6
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed = seed * 100)

    for i in range(nloop):
        model, ca_learning = meta.optimize(nmeta)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
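
The diff_* tables above rely on compute_confusion_matrix returning a mapping
from (true category, assigned category) pairs to counts, which is also the
shape of the empty OrderedDict fallback. A sketch under that assumption:

from collections import OrderedDict

def confusion_matrix_sketch(aa1, aa2, categories):
    m = OrderedDict(((c1, c2), 0) for c1 in categories for c2 in categories)
    for a in aa1:
        m[(a.category_id, aa2[a.id].category_id)] += 1
    return m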
Example #7
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
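
add_errors_in_assignments_proba is used above as a noise injector on the
learning assignments. A sketch of the assumed behaviour (flip each assignment
with probability proba to some other category and return the ids that
changed, which the tests count as na_err):

import random

def add_errors_sketch(assignments, categories, proba, rng=random):
    erroned = []
    for a in assignments:
        if rng.random() < proba:
            a.category_id = rng.choice(
                [c for c in categories if c != a.category_id])
            erroned.append(a.id)
    return erroned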
for i, xmcda in enumerate(xmcda_csets):
    result = {}
    fmins = CriteriaSets().from_xmcda(xmcda)
    result['fmins'] = fmins
    result['vector'] = "".join(map(str, sorted(len(fmin) for fmin in fmins)))
    print("\n%d. Fmin: %s" % (i + 1, ', '.join("%s" % f for f in fmins)))
    pt, aa = generate_binary_performance_table_and_assignments(criteria, cat,
                                                               fmins)
    aa.id = 'aa'
    a = Alternatives([Alternative(a.id) for a in aa])

    model = MRSort(criteria, None, bpt, None, cps)
    mip = MipMRSortWeights(model, pt, aa)
    obj = mip.solve()
    aa2 = model.pessimist(pt)
    aa2.id = 'aa_add'
    print("MipMRSortWeights: Objective: %d (/%d)" % (obj, len(aa)))
    anok = [a.id for a in aa if a.category_id != aa2[a.id].category_id]
    print("Alternative not restored: %s" % ','.join("%s" % a for a in anok))
    print(model.cv)
    print("lambda: %s" % model.lbda)
    result['obj_weights'] = obj

    mip = MipMRSortMobius(model, pt, aa)
    obj = mip.solve()
    aa3 = model.pessimist(pt)
    aa3.id = 'aa_capa'
    print("MipMRSortMobius:  Objective: %d (/%d)" % (obj, len(aa)))
    anok = [a.id for a in aa if a.category_id != aa3[a.id].category_id]
    print("Alternative not restored: %s" % ','.join("%s" % a for a in anok))
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(
            nc, ncat, seed, veto_func=veto_func, veto_param=veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(
            nc,
            ncat,
            seed,
            veto_weights=indep_veto_weights,
            veto_func=veto_func,
            veto_param=veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        # dict views are not indexable in Python 3; take the first key and
        # absorb the rounding remainder so the weights sum exactly to 1
        w1 = next(iter(w))
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues(
            [CriterionValue(c.id, w[c.id]) for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    # Pre-initialize: cmatrix_gen is written out unconditionally below
    cmatrix_gen = {}
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" %
                    (seed, na, nc, ncat, na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, a,
                  a_gen, pt, pt_gen, aa, aa2, aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
Example #11
a44 = AlternativePerformances('a44', {
    'c1': 7,
    'c2': 11,
    'c3': 7,
    'c4': 11,
    'c5': 11
})

a45 = AlternativePerformances('a45', {
    'c1': 7,
    'c2': 7,
    'c3': 11,
    'c4': 11,
    'c5': 11
})

pt = PerformanceTable([eval("a%d" % i) for i in range(1, 46)])
aa = model.pessimist(pt)
print(aa)

nveto = [model.count_veto_pessimist(eval("a%d" % i)) for i in range(1, 46)]
print("Number of veto effects: %d" % sum(nveto))

model2 = MRSort(c, None, None, None, cps, None, None, None)
#model2.veto_lbda = model.veto_lbda
#model2.veto_weights = model.veto_weights

mip = MipMRSortVC(model2, pt, aa)
#mip = MipMRSort(model2, pt, aa)
mip.solve()

print(model2.cv)
print(model2.bpt)
Example #13
    def test001(self):
        c = generate_criteria(5)
        w1 = CriterionValue('c1', 0.2)
        w2 = CriterionValue('c2', 0.2)
        w3 = CriterionValue('c3', 0.2)
        w4 = CriterionValue('c4', 0.2)
        w5 = CriterionValue('c5', 0.2)
        w = CriteriaValues([w1, w2, w3, w4, w5])

        b1 = AlternativePerformances('b1', {'c1': 10, 'c2': 10, 'c3': 10, 'c4': 10, 'c5': 10})
        bpt = PerformanceTable([b1])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        vb1 = AlternativePerformances('b1', {'c1': 2, 'c2': 2, 'c3': 2, 'c4': 2, 'c5': 2}, 'b1')
        v = PerformanceTable([vb1])
        vw = w.copy()

        a1 = AlternativePerformances('a1',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4':  9, 'c5': 11})
        a2 = AlternativePerformances('a2',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5':  9})
        a3 = AlternativePerformances('a3',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5': 11})
        a4 = AlternativePerformances('a4',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5':  9})
        a5 = AlternativePerformances('a5',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5': 11})
        a6 = AlternativePerformances('a6',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5':  9})
        a7 = AlternativePerformances('a7',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5': 11})
        a8 = AlternativePerformances('a8',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5':  9})
        a9 = AlternativePerformances('a9',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5': 11})
        a10 = AlternativePerformances('a10', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5':  9})
        a11 = AlternativePerformances('a11', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5': 11})
        a12 = AlternativePerformances('a12', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5':  9})
        a13 = AlternativePerformances('a13', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5': 11})
        a14 = AlternativePerformances('a14', {'c1':  9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  9})
        a15 = AlternativePerformances('a15', {'c1':  9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
        a16 = AlternativePerformances('a16', {'c1': 11, 'c2':  9, 'c3':  9, 'c4':  9, 'c5':  9})
        a17 = AlternativePerformances('a17', {'c1': 11, 'c2':  9, 'c3':  9, 'c4':  9, 'c5': 11})
        a18 = AlternativePerformances('a18', {'c1': 11, 'c2':  9, 'c3':  9, 'c4': 11, 'c5':  9})
        a19 = AlternativePerformances('a19', {'c1': 11, 'c2':  9, 'c3':  9, 'c4': 11, 'c5': 11})
        a20 = AlternativePerformances('a20', {'c1': 11, 'c2':  9, 'c3': 11, 'c4':  9, 'c5':  9})
        a21 = AlternativePerformances('a21', {'c1': 11, 'c2':  9, 'c3': 11, 'c4':  9, 'c5': 11})
        a22 = AlternativePerformances('a22', {'c1': 11, 'c2':  9, 'c3': 11, 'c4': 11, 'c5':  9})
        a23 = AlternativePerformances('a23', {'c1': 11, 'c2':  9, 'c3': 11, 'c4': 11, 'c5': 11})
        a24 = AlternativePerformances('a24', {'c1': 11, 'c2': 11, 'c3':  9, 'c4':  9, 'c5':  9})
        a25 = AlternativePerformances('a25', {'c1': 11, 'c2': 11, 'c3':  9, 'c4':  9, 'c5': 11})
        a26 = AlternativePerformances('a26', {'c1': 11, 'c2': 11, 'c3':  9, 'c4': 11, 'c5':  9})
        a27 = AlternativePerformances('a27', {'c1': 11, 'c2': 11, 'c3':  9, 'c4': 11, 'c5': 11})
        a28 = AlternativePerformances('a28', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  9, 'c5':  9})
        a29 = AlternativePerformances('a29', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  9, 'c5': 11})
        a30 = AlternativePerformances('a30', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  9})
        a31 = AlternativePerformances('a31', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})
        a32 = AlternativePerformances('a32', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})
        a33 = AlternativePerformances('a33', {'c1': 11, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})
        a34 = AlternativePerformances('a34', {'c1': 11, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})
        a35 = AlternativePerformances('a35', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
        a36 = AlternativePerformances('a36', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  7, 'c5':  7})
        a37 = AlternativePerformances('a37', {'c1': 11, 'c2': 11, 'c3':  7, 'c4': 11, 'c5':  7})
        a38 = AlternativePerformances('a38', {'c1': 11, 'c2':  7, 'c3': 11, 'c4': 11, 'c5':  7})
        a39 = AlternativePerformances('a39', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})
        a40 = AlternativePerformances('a40', {'c1': 11, 'c2': 11, 'c3':  7, 'c4':  7, 'c5': 11})
        a41 = AlternativePerformances('a41', {'c1': 11, 'c2':  7, 'c3': 11, 'c4':  7, 'c5': 11})
        a42 = AlternativePerformances('a42', {'c1':  7, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})
        a43 = AlternativePerformances('a43', {'c1': 11, 'c2':  7, 'c3':  7, 'c4': 11, 'c5': 11})
        a44 = AlternativePerformances('a44', {'c1':  7, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})
        a45 = AlternativePerformances('a45', {'c1':  7, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})
        pt = PerformanceTable([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
                               a12, a13, a14, a15, a16, a17, a18, a19, a20,
                               a21, a22, a23, a24, a25, a26, a27, a28, a29,
                               a30, a31, a32, a33, a34, a35, a36, a37, a38,
                               a39, a40, a41, a42, a43, a44, a45])

        ap1 = AlternativeAssignment('a1', 'cat2')
        ap2 = AlternativeAssignment('a2', 'cat2')
        ap3 = AlternativeAssignment('a3', 'cat2')
        ap4 = AlternativeAssignment('a4', 'cat2')
        ap5 = AlternativeAssignment('a5', 'cat2')
        ap6 = AlternativeAssignment('a6', 'cat2')
        ap7 = AlternativeAssignment('a7', 'cat1')
        ap8 = AlternativeAssignment('a8', 'cat2')
        ap9 = AlternativeAssignment('a9', 'cat2')
        ap10 = AlternativeAssignment('a10', 'cat2')
        ap11 = AlternativeAssignment('a11', 'cat1')
        ap12 = AlternativeAssignment('a12', 'cat2')
        ap13 = AlternativeAssignment('a13', 'cat1')
        ap14 = AlternativeAssignment('a14', 'cat1')
        ap15 = AlternativeAssignment('a15', 'cat1')
        ap16 = AlternativeAssignment('a16', 'cat2')
        ap17 = AlternativeAssignment('a17', 'cat2')
        ap18 = AlternativeAssignment('a18', 'cat2')
        ap19 = AlternativeAssignment('a19', 'cat1')
        ap20 = AlternativeAssignment('a20', 'cat2')
        ap21 = AlternativeAssignment('a21', 'cat1')
        ap22 = AlternativeAssignment('a22', 'cat1')
        ap23 = AlternativeAssignment('a23', 'cat1')
        ap24 = AlternativeAssignment('a24', 'cat2')
        ap25 = AlternativeAssignment('a25', 'cat1')
        ap26 = AlternativeAssignment('a26', 'cat1')
        ap27 = AlternativeAssignment('a27', 'cat1')
        ap28 = AlternativeAssignment('a28', 'cat1')
        ap29 = AlternativeAssignment('a29', 'cat1')
        ap30 = AlternativeAssignment('a30', 'cat1')
        ap31 = AlternativeAssignment('a31', 'cat1')
        ap32 = AlternativeAssignment('a32', 'cat1')
        ap33 = AlternativeAssignment('a33', 'cat1')
        ap34 = AlternativeAssignment('a34', 'cat1')
        ap35 = AlternativeAssignment('a35', 'cat1')
        ap36 = AlternativeAssignment('a36', 'cat2')
        ap37 = AlternativeAssignment('a37', 'cat2')
        ap38 = AlternativeAssignment('a38', 'cat2')
        ap39 = AlternativeAssignment('a39', 'cat2')
        ap40 = AlternativeAssignment('a40', 'cat2')
        ap41 = AlternativeAssignment('a41', 'cat2')
        ap42 = AlternativeAssignment('a42', 'cat2')
        ap43 = AlternativeAssignment('a43', 'cat2')
        ap44 = AlternativeAssignment('a44', 'cat2')
        ap45 = AlternativeAssignment('a45', 'cat2')
        aa = AlternativesAssignments([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8,
                                      ap9, ap10, ap11, ap12, ap13, ap14, ap15,
                                      ap16, ap17, ap18, ap19, ap20, ap21,
                                      ap22, ap23, ap24, ap25, ap26, ap27,
                                      ap28, ap29, ap30, ap31, ap32, ap33,
                                      ap34, ap35, ap36, ap37, ap38, ap39,
                                      ap40, ap41, ap42, ap43, ap44, ap45])

        model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
        aa2 = model.pessimist(pt)
        ok = compare_assignments(aa, aa2)
        self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                         "assigned")
    if is_bz2_file(f):
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root, 'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)

    table_ca_learning.append(ca_learning)
    table_ca_test.append(ca_test)

    # Compute area under the curve
    auc_learning = m.auc(aa_learning, pt_learning)
    auc_test = m.auc(aa_test, pt_test)

    table_auc_learning.append(auc_learning)
    table_auc_test.append(auc_test)
try:
    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
except:
    pt_learning = None

try:
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')
except:
    pt_test = None

aa_learning_m1, aa_learning_m2 = None, None
aa_test_m1, aa_test_m2 = None, None

if root.find("ElectreTri[@id='initial']") is not None:
    m1 = MRSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
    m1 = AVFSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.get_assignments(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.get_assignments(pt_test)
else:
    if root.find("alternativesAffectations[@id='learning_set']") is not None:
        aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
                                                              'learning_set')

    if root.find("alternativesAffectations[@id='test_set']") is not None:
        aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set')
    b1 = AlternativePerformances('b1', {'c%d' % (i + 1): 0.5
                                        for i in range(ncriteria)})
    model.bpt = PerformanceTable([b1])
    cat = generate_categories(2)
    model.categories_profiles = generate_categories_profiles(cat)
    model.lbda = 0.6
    vb1 = AlternativePerformances('b1', {'c%d' % (i + 1): random.uniform(0, 0.4)
                                         for i in range(ncriteria)})
    model.veto = PerformanceTable([vb1])
    model.veto_weights = model.cv.copy()
    model.veto_lbda = 0.4

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    worst = pt.get_worst(model.criteria)
    best = b1

    print('Original model')
    print('==============')
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model.lbda)
    model.veto.display(criterion_ids=cids)
    model.veto_weights.display(criterion_ids=cids)

    model2 = model.copy()
    # assumed completion: the excerpt is truncated mid-call
    vpt = generate_random_profiles(model.profiles, model.criteria, None, 3,
                                   worst, best)
Example #18
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):
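    # vetot, veto_func, indep_veto_weights, algo and directory are
    # module-level settings of the surrounding test script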

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(nc, ncat,
                            seed,
                            veto_func = veto_func,
                            veto_param = veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(nc, ncat,
                            seed,
                            veto_weights = indep_veto_weights,
                            veto_func = veto_func,
                            veto_param = veto_param)
    else:
        raise ValueError("invalid vetot: %s" % vetot)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
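    # nv_m1_learning: number of veto situations triggered by the original
    # model on the learning set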
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        # equal veto weights; the floating point remainder is folded into the
        # first criterion so the weights sum exactly to 1
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = next(iter(w))
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues([CriterionValue(c.id, w[c.id])
                                              for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine how many of the perturbed alternatives are badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    else:
        cmatrix_gen = {}
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" % (seed, na, nc, ncat,
                    na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, a, a_gen, pt, pt_gen, aa, aa2,
                  aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
Example #19
    mwinning = compute_minimal_winning_coalitions(lp.fmins)
    for win in mwinning:
        win = list(win)
        win.sort(key=criteria_order.index)
        buf = ""
        for c in win:
            buf += "%s, " % criteria_names[c]
        print('[%s]' % buf[:-2], file=fcoalitions)

    fcoalitions.close()

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_learned = m.pessimist(pt_learning)

    fca = open('%s-ca_learning.dat' % bname, 'w+')
    ca = compute_ca(aa_learning, aa_learned)
    print("%.4f" % ca, end = '', file=fca)
    fca.close()

    fauc = open('%s-auc_learning.dat' % bname, 'w+')
    auc = m.auc(aa_learning, pt_learning)
    print("%.4f" % auc, end = '', file=fauc)
    fauc.close()

    ca_learning.append(ca)
    auc_learning.append(auc)

    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    if is_bz2_file(f):
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root,
                                                   'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

#    # Remove alternatives that cannot be corrected with a veto rule
#    aa_learning_m2p = discard_undersorted_alternatives(m.categories,
#                                                      aa_learning,
#                                                      aa_learning_m2)
#    aa_learning_m2p = discard_alternatives_in_category(aa_learning_m2p,
#                                                      m.categories[0])

    # Run the metaheuristic
    meta = MetaMRSortVCPop3(10, m, SortedPerformanceTable(pt_learning),
                            aa_learning)
    nloops = 10
    nmeta = 20
    for i in range(nloops):
        # assumed loop body (the excerpt is truncated here), mirroring the
        # meta.optimize() loop of run_test below
        model, ca = meta.optimize(nmeta)
        if ca == 1:
            break

def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None
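    # weights (cvs), profiles (bpt) and threshold (lbda) are left unset: they
    # are learned by the metaheuristic below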

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels,
                       model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted,
                       aa_learning,
                       seed=seed * 100)
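    # optional meta_mrsort components, left commented out below (defaults
    # apply):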
    #lp_weights = lp_weights,
    #heur_profiles = heur_profiles,
    #lp_veto_weights = lp_veto_weights,
    #heur_veto_profiles = heur_veto_profiles,

    for i in range(nloop):
        model, ca_learning = meta.optimize(nmeta)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" %
                    (data.name, seed, nloop, nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, aa_learning,
                  aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
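
# A possible driver for run_test (a sketch, not part of the original script;
# `data` is assumed to be loaded elsewhere and to expose the fields a, c,
# cats, pt, aa and name used above):
def run_all_seeds(data, seeds, pclearning=50, nloop=10, nmodels=10, nmeta=20):
    # collect one test_result per seed
    return [run_test(s, data, pclearning, nloop, nmodels, nmeta)
            for s in seeds]
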
    fprofiles.close()

    fcoalitions = open('%s-wcoalitions.dat' % bname, 'w+')

    mwinning = compute_minimal_winning_coalitions(lp.fmins)
    for win in mwinning:
        win = list(win)
        win.sort(key=criteria_order.index)
        buf = ""
        for c in win:
            buf += "%s, " % criteria_names[c]
        print('[%s]' % buf[:-2], file=fcoalitions)

    fcoalitions.close()

    aa_learned = m.pessimist(pt_learning)

    fca = open('%s-ca.dat' % bname, 'w+')
    ca = compute_ca(aa_learning, aa_learned)
    print("%.4f" % ca, end='', file=fca)
    fca.close()

    fauc = open('%s-auc.dat' % bname, 'w+')
    auc = m.auc(aa_learning, pt_learning)
    print("%.4f" % auc, end='', file=fauc)
    fauc.close()

    fmisclassified = open('%s-misclassified.dat' % bname, 'w+')
    print("{Alternative} ", file=fmisclassified, end='')
    print("{Original assignment} ", file=fmisclassified, end='')
    print("{Model assignment}", file=fmisclassified, end='')
Example #23
a36 = AlternativePerformances('a36', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  7, 'c5':  7})
a37 = AlternativePerformances('a37', {'c1': 11, 'c2': 11, 'c3':  7, 'c4': 11, 'c5':  7})
a38 = AlternativePerformances('a38', {'c1': 11, 'c2':  7, 'c3': 11, 'c4': 11, 'c5':  7})
a39 = AlternativePerformances('a39', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})

a40 = AlternativePerformances('a40', {'c1': 11, 'c2': 11, 'c3':  7, 'c4':  7, 'c5': 11})
a41 = AlternativePerformances('a41', {'c1': 11, 'c2':  7, 'c3': 11, 'c4':  7, 'c5': 11})
a42 = AlternativePerformances('a42', {'c1':  7, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})

a43 = AlternativePerformances('a43', {'c1': 11, 'c2':  7, 'c3':  7, 'c4': 11, 'c5': 11})
a44 = AlternativePerformances('a44', {'c1':  7, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})

a45 = AlternativePerformances('a45', {'c1':  7, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})

# a1..a35 are defined in the part of this example omitted above
pt = PerformanceTable([eval("a%d" % i) for i in range(1, 46)])
aa = model.pessimist(pt)
print(aa)

nveto = [model.count_veto_pessimist(eval("a%d" % i)) for i in range(1, 46)]
print("Number of veto effects: %d" % sum(nveto))

model2 = MRSort(c, None, None, None, cps, None, None, None)
#model2.veto_lbda = model.veto_lbda
#model2.veto_weights = model.veto_weights

mip = MipMRSortVC(model2, pt, aa)
#mip = MipMRSort(model2, pt, aa)
mip.solve()
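# MipMRSortVC learns an MR-Sort model with a coalition veto from the
# assignment examples; the commented-out MipMRSort would learn a model
# without veto instead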

print(model2.cv)
print(model2.bpt)