Example #1
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

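        # Boundary profiles: b1 at 0.75 and b2 at 0.25 on every criterion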
        bp1 = AlternativePerformances('b1',
                                      {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
        bp2 = AlternativePerformances('b2',
                                      {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

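        # Majority (cut) threshold of the MR-Sort rule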
        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

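        # Re-learn the criteria values and the threshold from the assignments;
        # the test expects a zero objective and identical reassignments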
        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #2
    def sort_models(self, fct_ca=0, heuristic=False):
        cps = generate_categories_profiles(self.categories)
        # Rank the candidate models by the selected accuracy measure (fct_ca)
        if not heuristic:
            if fct_ca == 1:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca_good,
                                      reverse=True)
            elif fct_ca == 2:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca_good + k.ca,
                                      reverse=True)
            elif fct_ca == 3:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: 1000 * k.ca_good + k.ca,
                                      reverse=True)
            else:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca,
                                      reverse=True)
        else:
            for m in self.metas:
                # Score each candidate model with criterion c1 neutralized:
                # copy the model, zero c1's weight and its value on profile
                # b1, then recompute the accuracy on the original examples.
                modelh = MRSort(copy.deepcopy(m.model.criteria),
                                copy.deepcopy(m.model.cv),
                                copy.deepcopy(m.model.bpt),
                                copy.deepcopy(m.model.lbda), cps)

                # Total weight of the remaining criteria (renormalization of
                # the other weights is left disabled)
                wtotal = 1 - modelh.cv["c1"].value
                modelh.cv["c1"].value = 0
                modelh.bpt['b1'].performances["c1"] = 0

                aa_learned = modelh.get_assignments(self.pt_sorted.pt)
                cah = 0
                for alt in self.aa_ori:
                    if alt.category_id == aa_learned(alt.id):
                        cah += 1
                m.cah = cah / m.meta.na

            # Rank models by how much their accuracy drops when c1 is
            # ignored (largest drop first)
            metas_sorted = sorted(self.metas,
                                  key=lambda k: k.ca - k.cah,
                                  reverse=True)

        return metas_sorted
Example #3
    def test001(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1',
                                      {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
        bp2 = AlternativePerformances('b2',
                                      {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

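        # Eight alternatives that exercise different coalitions of criteria
        # above or below the two profiles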
        ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
        ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
        ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
        ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
        ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
        ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
        ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
        ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
        pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #4
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #5
    def test001(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
        ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
        ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
        ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
        ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
        ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
        ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
        ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
        pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])

        aa = model.get_assignments(pt)

        self.assertEqual(aa['a1'].category_id, "cat2")
        self.assertEqual(aa['a2'].category_id, "cat1")
        self.assertEqual(aa['a3'].category_id, "cat3")
        self.assertEqual(aa['a4'].category_id, "cat2")
        self.assertEqual(aa['a5'].category_id, "cat2")
        self.assertEqual(aa['a6'].category_id, "cat3")
        self.assertEqual(aa['a7'].category_id, "cat1")
        self.assertEqual(aa['a8'].category_id, "cat1")
Example #6
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

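    # Start from an empty model (criteria and category profiles only);
    # the MIP has to learn the weights, profiles and threshold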
    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

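    # ok: alternatives the learned model assigns like the original model;
    # ok_errors: perturbed alternatives among them; ok2_errors: perturbed
    # alternatives assigned to their erroneous example category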
    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in a test_result object
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
Example #7
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
    cvs = CriteriaValues([cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15,
                          cv23, cv24, cv25, cv34, cv35, cv45])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    print(model.lbda, model.cv)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

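    # Re-fit the criteria values and threshold on the model's own assignments;
    # any alternative whose category changes is printed below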
    lp = LpMRSortMobius(model, pt, aa)
    lp.solve()

    print(model.lbda, model.cv)

    aa2 = model.get_assignments(pt)

    for a in aa:
        a2 = aa2[a.id]
        if a.category_id != a2.category_id:
            print(a, a2)
Example #8
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in a test_result object
    t = test_result("%s-%d-%d-%d-%d-%g" % (seed, na, nc, ncat,
                    na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
Example #9
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
    cvs = CriteriaValues([cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15,
                          cv23, cv24, cv25, cv34, cv35, cv45])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    print(model.lbda, model.cv)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

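    # Forget the criteria values and threshold, then learn them back with the MIP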
    model.cv = None
    model.lbda = None
    mip = MipMRSortMobius(model, pt, aa)
    mip.solve()

    print(model.lbda, model.cv)

    aa2 = model.get_assignments(pt)

    for a in aa:
        a2 = aa2[a.id]
        if a.category_id != a2.category_id:
            print(a, a2)
Example #10
except:
    pt_test = None

aa_learning_m1, aa_learning_m2 = None, None
aa_test_m1, aa_test_m2 = None, None

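# Rebuild the initial model (MR-Sort or AVF-Sort) from the XMCDA tree, or fall
# back to the stored assignments, and recompute its assignments on both sets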
if root.find("ElectreTri[@id='initial']") is not None:
    m1 = MRSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
    m1 = AVFSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.get_assignments(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.get_assignments(pt_test)
else:
    if root.find("alternativesAffectations[@id='learning_set']") is not None:
        aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
                                                              'learning_set')

    if root.find("alternativesAffectations[@id='test_set']") is not None:
        aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set')

if root.find("ElectreTri[@id='learned']") is not None:
    m2 = MRSort().from_xmcda(root, 'learned')
    if pt_learning is not None:
        aa_learning_m2 = m2.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m2 = m2.pessimist(pt_test)
Example #11
    model = AVFSort(data.c, cvs, cfs, catv)
else:
    print("Invalid algorithm!")
    sys.exit(1)

t_total = time.time() - t1

model.id = 'learned'
data.pt.id = 'learning_set'
data.aa.id = 'learning_set'

dt = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
save_to_xmcda("%s/%s-all-%s-%s.bz2" % (DATADIR, algo, data.name, dt), data.aa,
              data.pt, model)

aa2 = model.get_assignments(data.pt)

ca = compute_ca(data.aa, aa2)
auc = model.auc(data.aa, data.pt)

anok = []
for a in data.a:
    if data.aa[a.id].category_id != aa2[a.id].category_id:
        anok.append(a)

if len(anok) > 0:
    print("Alternatives wrongly assigned:")
    print_pt_and_assignments([a.id for a in anok], data.c.keys(),
                             [data.aa, aa2], data.pt)

print("Model parameters:")
Example #12
    model = AVFSort(data.c, cvs, cfs, catv)
else:
    print("Invalid algorithm!")
    sys.exit(1)

t_total = time.time() - t1

model.id = 'learned'
data.pt.id = 'learning_set'
data.aa.id = 'learning_set'

dt = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
save_to_xmcda("%s/%s-all-%s-%s.bz2" % (DATADIR, algo, data.name, dt),
              data.aa, data.pt, model)

aa2 = model.get_assignments(data.pt)

ca = compute_ca(data.aa, aa2)
auc = model.auc(data.aa, data.pt)

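# Collect the alternatives whose learned assignment differs from the example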
anok = []
for a in data.a:
    if data.aa[a.id].category_id != aa2[a.id].category_id:
        anok.append(a)

if len(anok) > 0:
    print("Alternatives wrongly assigned:")
    print_pt_and_assignments([a.id for a in anok], data.c.keys(),
                             [data.aa, aa2], data.pt)

print("Model parameters:")
cids = model.criteria.keys()