Example #1
 def init_one_meta(self, seed):
     cps = generate_categories_profiles(self.categories)
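     # When duplication is off and a renewal method is selected, the first
     # nb_unk_criteria criteria get a random preference direction (+1 or -1).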
     if not self.duplication and self.renewal_method > 0:
         for crit in self.criteria.values():
             if int(crit.id[1:]) <= self.nb_unk_criteria:
                 tmp = random.random()
                 if tmp < 0.5:
                     crit.direction = -1
                 else:
                     crit.direction = 1
     model = MRSort(copy.deepcopy(self.criteria), None, None, None, cps)
     model.id = 'model_%d' % seed
     meta = MetaMRSortCV4MSJP(model,
                              self.pt_sorted,
                              self.aa_ori,
                              self.lp_weights,
                              self.heur_profiles,
                              self.lp_veto_weights,
                              self.heur_veto_profiles,
                              gamma=self.gamma,
                              renewal_method=self.renewal_method,
                              pretreatment_crit=self.pretreatment_crit,
                              duplication=self.duplication,
                              nb_unk_criteria=self.nb_unk_criteria)
     random.seed(seed)
     meta.random_state = random.getstate()
     meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
     return meta
Example #2
    def test001(self):
        random.seed(1)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        bp = AlternativePerformances('b1', {'c1': 0.5, 'c2': 0.5,
                                            'c3': 0.5, 'c4': 0.5})
        bpt = PerformanceTable([bp])
        lbda = 0.5

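        # MRSort arguments: criteria, criterion weights, profile performance
        # table, majority threshold (lambda) and category profiles.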
        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

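        # Re-check every assignment by hand: the summed weight of the criteria
        # meeting the profile must reach lambda for the upper category (cat1).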
        for aa in aas:
            w = 0
            perfs = pt[aa.id].performances
            for c, val in perfs.items():
                if val >= bp.performances[c]:
                    w += cv[c].value

            if aa.category_id == 'cat2':
                self.assertLess(w, lbda)
            else:
                self.assertGreaterEqual(w, lbda)
Example #3
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1',
                                      {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
        bp2 = AlternativePerformances('b2',
                                      {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
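        # CriteriaSet entries add pairwise (2-additive Mobius) interaction
        # terms on top of the individual weights; they may be negative.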
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

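        # A zero objective together with identical re-assignments means the
        # inferred weights and lambda restore every original assignment.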
        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #4
    def sort_models(self, fct_ca=0, heuristic=False):
        cps = generate_categories_profiles(self.categories)
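        # fct_ca selects the fitness used for ranking: 1 = CA on the "good"
        # category, 2 = ca_good + ca, 3 = 1000 * ca_good + ca, otherwise plain
        # CA; the heuristic branch instead scores each model by ca - cah.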
        if not heuristic:
            if fct_ca == 1:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca_good,
                                      reverse=True)
            elif fct_ca == 2:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca_good + k.ca,
                                      reverse=True)
            elif fct_ca == 3:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: 1000 * k.ca_good + k.ca,
                                      reverse=True)
            else:
                metas_sorted = sorted(self.metas,
                                      key=lambda k: k.ca,
                                      reverse=True)
        else:
            for m in self.metas:

                modelh = MRSort(copy.deepcopy(m.model.criteria),
                                copy.deepcopy(m.model.cv),
                                copy.deepcopy(m.model.bpt),
                                copy.deepcopy(m.model.lbda), cps)

                # Disable criterion c1: drop its weight and its profile value.
                # wtotal is kept for a (currently disabled) renormalization of
                # the remaining weights.
                wtotal = 1 - modelh.cv["c1"].value
                modelh.cv["c1"].value = 0
                modelh.bpt['b1'].performances["c1"] = 0

                aa_learned = modelh.get_assignments(self.pt_sorted.pt)
                cah = 0
                for alt in self.aa_ori:
                    if alt.category_id == aa_learned(alt.id):
                        cah += 1
                m.cah = cah / m.meta.na
            metas_sorted = sorted(self.metas,
                                  key=lambda k: ((k.ca) - k.cah),
                                  reverse=True)

        return metas_sorted
Example #5
 def init_one_meta(self, seed):
     cps = generate_categories_profiles(self.categories)
     model = MRSort(self.criteria, None, None, None, cps)
     model.id = 'model_%d' % seed
     meta = MetaMRSortCV4(model, self.pt_sorted, self.aa_ori,
                          self.lp_weights, self.heur_profiles,
                          self.lp_veto_weights, self.heur_veto_profiles)
     random.seed(seed)
     meta.random_state = random.getstate()
     meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
     return meta
    def test002(self):
        random.seed(2)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75,
            'c4': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25,
            'c4': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

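        # w1 and w2 are the coalition weights against the upper profile b1 and
        # the lower profile b2; together with lambda they determine which of
        # the three categories the model must return.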
        for aa in aas:
            w1 = w2 = 0
            perfs = pt[aa.id].performances
            for c, val in perfs.items():
                if val >= bp1.performances[c]:
                    w1 += cv[c].value
                if val >= bp2.performances[c]:
                    w2 += cv[c].value

            if aa.category_id == 'cat3':
                self.assertLess(w1, lbda)
                self.assertLess(w2, lbda)
            elif aa.category_id == 'cat2':
                self.assertLess(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
            else:
                self.assertGreaterEqual(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
Example #7
 def init_one_meta(self, seed):
     cps = generate_categories_profiles(self.categories)
     model = MRSort(self.criteria, None, None, None, cps)
     model.id = 'model_%d' % seed
     meta = MetaMRSortCV4(model, self.pt_sorted, self.aa_ori,
                          self.lp_weights,
                          self.heur_profiles,
                          self.lp_veto_weights,
                          self.heur_veto_profiles)
     random.seed(seed)
     meta.random_state = random.getstate()
     meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
     return meta
    def test001(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
        ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
        ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
        ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
        ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
        ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
        ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
        ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
        pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])

        aa = model.get_assignments(pt)

        self.assertEqual(aa['a1'].category_id, "cat2")
        self.assertEqual(aa['a2'].category_id, "cat1")
        self.assertEqual(aa['a3'].category_id, "cat3")
        self.assertEqual(aa['a4'].category_id, "cat2")
        self.assertEqual(aa['a5'].category_id, "cat2")
        self.assertEqual(aa['a6'].category_id, "cat3")
        self.assertEqual(aa['a7'].category_id, "cat1")
        self.assertEqual(aa['a8'].category_id, "cat1")
Example #9
    def test001(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
        bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        ap1 = AlternativePerformances('a1',
                                      {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
        ap2 = AlternativePerformances('a2',
                                      {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
        ap3 = AlternativePerformances('a3',
                                      {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
        ap4 = AlternativePerformances('a4',
                                      {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
        ap5 = AlternativePerformances('a5',
                                      {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
        ap6 = AlternativePerformances('a6',
                                      {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
        ap7 = AlternativePerformances('a7',
                                      {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
        ap8 = AlternativePerformances('a8',
                                      {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
        pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])

        aa = model.get_assignments(pt)

        self.assertEqual(aa['a1'].category_id, "cat2")
        self.assertEqual(aa['a2'].category_id, "cat1")
        self.assertEqual(aa['a3'].category_id, "cat3")
        self.assertEqual(aa['a4'].category_id, "cat2")
        self.assertEqual(aa['a5'].category_id, "cat2")
        self.assertEqual(aa['a6'].category_id, "cat3")
        self.assertEqual(aa['a7'].category_id, "cat1")
        self.assertEqual(aa['a8'].category_id, "cat1")
def generate_random_mrsort_choquet_model(ncrit,
                                         ncat,
                                         additivity=None,
                                         seed=None,
                                         k=3,
                                         worst=None,
                                         best=None,
                                         random_direction=False):
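    """Draw a random MRSort model whose criteria weights are Mobius
    interaction indices (Choquet-like aggregation)."""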
    if seed is not None:
        random.seed(int(seed))

    c = generate_criteria(ncrit, random_direction=random_direction)

    if worst is None:
        worst = generate_worst_ap(c)
    if best is None:
        best = generate_best_ap(c)

    cv = generate_random_mobius_indices(c, additivity, None, k)
    cat = generate_categories(ncat)
    cps = generate_categories_profiles(cat)
    b = cps.get_ordered_profiles()
    bpt = generate_random_profiles(b, c, None, k, worst, best)
    lbda = round(random.uniform(0.5, 1), k)

    return MRSort(c, cv, bpt, lbda, cps)
def generate_random_mrsort_model_msjp(ncrit, ncat, seed = None, k = 3,
                                 worst = None, best = None,
                                 random_direction = False, random_directions = None):
    if seed is not None:
        random.seed(int(seed))
    
    random.seed()

    c = generate_criteria_msjp(ncrit, random_direction = random_direction, random_directions = random_directions)
    random.seed()
    if worst is None:
        worst = generate_worst_ap(c)
    if best is None:
        best = generate_best_ap(c)

    random.seed()
    cv = generate_random_criteria_weights(c, None, k)
    random.seed()
    cat = generate_categories(ncat)
    random.seed()
    cps = generate_categories_profiles(cat)
    random.seed()
    b = cps.get_ordered_profiles()
    random.seed()
    bpt = generate_random_profiles(b, c, None, k, worst, best)
    random.seed()
    lbda = round(random.uniform(0.5, 1), k)

    return MRSort(c, cv, bpt, lbda, cps)
Example #12
def generate_random_mrsort_model_msjp(ncrit, ncat, seed = None, k = 3,
                                 worst = None, best = None,
                                 random_direction = False, random_directions = None, fixed_w1 = None, min_w = 0.05, prof_threshold = 0.05):
    if seed is not None:
        random.seed(int(seed))
    
    random.seed()

    c = generate_criteria_msjp(ncrit, random_direction = random_direction, random_directions = random_directions)
    random.seed()
    if worst is None:
        worst = generate_worst_ap(c)
    if best is None:
        best = generate_best_ap(c)

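    # Redraw the weights until every criterion weight is at least min_w.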
    no_min_w = True
    while no_min_w:
        random.seed()
        cv = generate_random_criteria_weights_msjp(c, None, k, fixed_w1)
        if [i for i in list(cv.values()) if i.value < min_w] == []:
            no_min_w = False

    random.seed()
    cat = generate_categories_msjp(ncat)
    random.seed()
    cps = generate_categories_profiles(cat)
    random.seed()
    b = cps.get_ordered_profiles()
    random.seed()
    bpt = generate_random_profiles(b, c, None, k, worst, best, prof_threshold = prof_threshold)
    random.seed()
    # To avoid a dictatorial criterion, lambda is drawn strictly above the
    # largest criterion weight.
    lbda = round(random.uniform(max(0.5, cv.max()) + 0.01, 1), k)

    return MRSort(c, cv, bpt, lbda, cps)
    def get_output_model(self, criteria):
        cat_profiles = self.parse_xmcda_file("cat_profiles.xml",
                                             "categoriesProfiles",
                                             CategoriesProfiles)
        compatible_alts = self.parse_xmcda_file("compatible_alts.xml",
                                                "alternatives", Alternatives)
        crit_weights = self.parse_xmcda_file("crit_weights.xml",
                                             "criteriaValues", CriteriaValues)
        profiles_perfs = self.parse_xmcda_file("profiles_perfs.xml",
                                               "performanceTable",
                                               PerformanceTable)
        lbda = self.parse_xmcda_file_lbda("lambda.xml")

        return MRSort(criteria, crit_weights, profiles_perfs, lbda,
                      cat_profiles)
Example #14
def parse_input_files(indir):
    criteria = parse_xmcda_file(indir + '/criteria.xml', "criteria", Criteria)
    alternatives = parse_xmcda_file(indir + '/alternatives.xml',
                                    "alternatives", Alternatives)
    categories = parse_xmcda_file(indir + '/categories.xml', "categories",
                                  Categories)
    pt = parse_xmcda_file(indir + '/perfs_table.xml', "performanceTable",
                          PerformanceTable)
    assignments = parse_xmcda_file(indir + '/assign.xml',
                                   "alternativesAffectations",
                                   AlternativesAssignments)

    # Optional parameters
    params = parse_xmcda_file(indir + '/params.xml', "methodParameters",
                              Parameters)

    # Partial inference
    categories_profiles = parse_xmcda_file(indir + '/cat_profiles.xml',
                                           "categoriesProfiles",
                                           CategoriesProfiles)
    bpt = parse_xmcda_file(indir + '/profiles_perfs.xml', "performanceTable",
                           PerformanceTable)
    criteria_values = parse_xmcda_file(indir + '/crit_weights.xml',
                                       "criteriaValues", CriteriaValues)
    if criteria_values:
        criteria_values.normalize_sum_to_unity()

    lbda = None
    solver = DEFAULT_SOLVER
    if params is not None:
        if 'lambda' in params:
            lbda = params['lambda'].value

        if 'solver' in params:
            solver = params['solver'].value

    if categories_profiles is None:
        categories_profiles = generate_categories_profiles(categories)

    if categories and criteria and pt:
        model = MRSort(criteria, criteria_values, bpt, lbda,
                       categories_profiles)
    else:
        model = None

    return solver, model, assignments, pt
Example #15
def parse_input_files(indir):
    criteria = parse_xmcda_file(indir + '/criteria.xml', "criteria", Criteria)
    alternatives = parse_xmcda_file(indir + '/alternatives.xml',
                                    "alternatives", Alternatives)
    categories = parse_xmcda_file(indir + '/categories.xml', "categories",
                                  Categories)
    pt = parse_xmcda_file(indir + '/perfs_table.xml', "performanceTable",
                          PerformanceTable)
    assignments = parse_xmcda_file(indir + '/assign.xml',
                                   "alternativesAffectations",
                                   AlternativesAssignments)
    meta_params = parse_xmcda_file(indir + '/params.xml', "methodParameters",
                                   Parameters)
    categories_profiles = generate_categories_profiles(categories)

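    # Only the structure (criteria and generated category profiles) is set up
    # here; weights, profiles and lambda are left to the learning algorithm.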
    if categories and criteria and pt:
        model = MRSort(criteria, None, None, None, categories_profiles)
    else:
        model = None

    return model, assignments, pt, meta_params
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #17
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(),
                          pt_sorted, aa_learning,
                          heur_init_profiles,
                          lp_weights,
                          heur_profiles)

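    # Each optimize() call runs nmeta metaheuristic iterations and returns the
    # current best model with its classification accuracy on the learning set.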
    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

    t_total = time.time() - t1

    aa_learning2 = compute_assignments_majority(meta.models, pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = compute_auc_majority(meta.models, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA of test setting
    if len(aa_test) > 0:
        aa_test2 = compute_assignments_majority(meta.models, pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = compute_auc_majority(meta.models, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = compute_assignments_majority(meta.models, data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = compute_auc_majority(meta.models, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  aa_learning, aa_test, pt_learning, pt_test, *meta.models)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Example #18
t1 = time.time()

if algo == 'meta_mrsort':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortWeights
    heur_profiles = MetaMRSortProfiles4
elif algo == 'meta_mrsortc':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortMobius
    heur_profiles = MetaMRSortProfilesChoquet
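    # meta_mrsortc learns Mobius/Choquet weights (LpMRSortMobius) and uses the
    # Choquet variant of the profile improvement heuristic.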

if algo == 'meta_mrsort' or algo == 'meta_mrsortc':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    pt_sorted = SortedPerformanceTable(data.pt)

    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(), pt_sorted,
                          data.aa, heur_init_profiles, lp_weights,
                          heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
 def test001(self):
     model = generate_random_mrsort_model(5, 3)
     mxmcda = model.to_xmcda()
     model2 = MRSort().from_xmcda(mxmcda)
     self.assertEqual(model, model2)
xmcda_models_toshow = []
xmcda_models = []
for f in sys.argv[1:]:
    if not os.path.isfile(f):
        xmcda_models_toshow.append(f)
        continue

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()

    xmcda_models = root.findall(".//ElectreTri")

    m = MRSort().from_xmcda(xmcda_models[0])

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')

    uniquevalues = pt_learning.get_unique_values()

    bname = os.path.basename(os.path.splitext(f.name)[0])
    fweights = open('%s-w.dat' % bname, 'w+')
    fprofiles = open('%s-p.dat' % bname, 'w+')

    print("Processing %s..." % bname)

    criteria = m.criteria.keys()
    for c in criteria:
        print("{%s} " % criteria_names[c], end='', file=fprofiles)
Example #21
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1)
    cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
    cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
    cvs = CriteriaValues([cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15,
                          cv23, cv24, cv25, cv34, cv35, cv45])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    print(model.lbda, model.cv)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    lp = LpMRSortMobius(model, pt, aa)
    lp.solve()

    print(model.lbda, model.cv)

    aa2 = model.get_assignments(pt)

    for a in aa:
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_categories_profiles
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import compute_number_of_winning_coalitions
    from pymcda.pt_sorted import SortedPerformanceTable
    from pymcda.ui.graphic import display_electre_tri_models
    from pymcda.electre_tri import MRSort
    from pymcda.types import CriterionValue, CriteriaValues
    from pymcda.types import AlternativePerformances, PerformanceTable
    from pymcda.types import AlternativeAssignment, AlternativesAssignments

    # Generate a random ELECTRE TRI BM model
    random.seed(127890123456789)
    ncriteria = 5
    model = MRSort()
    model.criteria = generate_criteria(ncriteria)
    model.cv = CriteriaValues([CriterionValue('c%d' % (i + 1), 0.2)
                               for i in range(ncriteria)])
    b1 = AlternativePerformances('b1', {'c%d' % (i + 1): 0.5
                                        for i in range(ncriteria)})
    model.bpt = PerformanceTable([b1])
    cat = generate_categories(2)
    model.categories_profiles = generate_categories_profiles(cat)
    model.lbda = 0.6
    vb1 = AlternativePerformances('b1', {'c%d' % (i + 1): random.uniform(0,0.4)
                                         for i in range(ncriteria)})
    model.veto = PerformanceTable([vb1])
    model.veto_weights = model.cv.copy()
    model.veto_lbda = 0.4
Example #23
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)
    global aaa
    global allm
    global fct_ca
    global LOO

    # Separate learning data and test data
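    # With LOO a single alternative is held out per seed; otherwise the table
    # is split pclearning% / (100 - pclearning)%.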
    if LOO:
        pt_learning, pt_test = data.pt.split_LOO(seed)
    else:
        pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed = seed * 100)

    for i in range(0, nloop):
        model, ca_learning, all_models = meta.optimize(nmeta, fct_ca)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    
    ca_learning = compute_ca(aa_learning, aa_learning2)
    ca_learning_good = compute_ca_good(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)

    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA of test setting
    
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        ca_test_good = compute_ca_good(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    ca_good = compute_ca_good(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca    
    t['ca_learning_good'] = ca_learning_good
    t['ca_test_good'] = ca_test_good
    t['ca_all_good'] = ca_good
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

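    # aaa collects, for this seed, the original and learned assignments of
    # both sets plus the learned weights, profile values and lambda.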
    aaa[seed]=dict()
    aaa[seed]['id'] = seed
    aaa[seed]['learning_asgmt_id'] = [i.id for i in aa_learning]
    aaa[seed]['learning_asgmt'] = [i.category_id for i in aa_learning]
    aaa[seed]['learning_asgmt2'] = [i.category_id for i in aa_learning2]        
    aaa[seed]['test_asgmt_id'] = [i.id for i in aa_test]
    aaa[seed]['test_asgmt'] = [i.category_id for i in aa_test]
    aaa[seed]['test_asgmt2'] = [i.category_id for i in aa_test2]
    aaa[seed]['criteria'] =  [i for i,j in model.criteria.items()]
    aaa[seed]['criteria_weights'] = [str(i.value) for i in model.cv.values()]
    aaa[seed]['profiles_values'] = [str(model.bpt['b1'].performances[i]) for i,j in model.criteria.items()]
    aaa[seed]['lambda'] = model.lbda


    allm[seed]=dict()
    allm[seed]['id'] = seed
    current_model = 0
    allm[seed]['mresults'] = dict()
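    # allm stores, for every remaining model of the returned population, its
    # CA/AUC scores on the learning set, the test set and the whole data set.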
    for all_model in list(all_models)[1:]:
        current_model += 1  # skipping the first model, already treated above
        allm[seed]['mresults'][current_model] = ["",""]
        aa_learning2_allm = all_model.model.pessimist(pt_learning)
        ca_learning_allm = compute_ca(aa_learning, aa_learning2_allm)
        ca_learning_good_allm = compute_ca_good(aa_learning, aa_learning2_allm)
        auc_learning_allm = all_model.model.auc(aa_learning, pt_learning)
        # Compute CA of test setting
        if len(aa_test) > 0:
            aa_test2_allm = all_model.model.pessimist(pt_test)
            ca_test_allm = compute_ca(aa_test, aa_test2_allm)
            ca_test_good_allm = compute_ca_good(aa_test, aa_test2_allm)
            auc_test_allm = all_model.model.auc(aa_test, pt_test)
        else:
            ca_test_allm = 0
            auc_test_allm = 0
            ncat_allm = len(data.cats)
        # Compute CA of whole set
        aa2_allm = all_model.model.pessimist(data.pt)
        ca_allm = compute_ca(data.aa, aa2_allm)
        ca_good_allm = compute_ca_good(data.aa, aa2_allm)
        auc_allm = all_model.model.auc(data.aa, data.pt)
        allm[seed]['mresults'][current_model][0] = 'na_learning,na_test,ca_learning,ca_test,ca_all,ca_learning_good,ca_test_good,ca_all_good,auc_learning,auc_test,auc_all'
        allm[seed]['mresults'][current_model][1] =  str(len(aa_learning)) + "," + str(len(aa_test)) + "," + str(ca_learning_allm) +  "," + str(ca_test_allm) +  "," + str(ca_allm) + "," + str(ca_learning_good_allm) + "," + str(ca_test_good_allm) + "," + str(ca_good_allm) +  "," + str(auc_learning_allm) + "," + str(auc_test_allm) +  "," + str(auc_allm)


    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
xmcda_csets = root.findall(".//criteriaSets")
f.close()

## Weights
w = CriteriaValues()
for c in criteria:
    w.append(CriterionValue(c.id, 0.2))

## Profiles and categories
bp1 = AlternativePerformances('b1', {c.id: 0.5 for c in criteria})
bpt = PerformanceTable([bp1])
cat = generate_categories(2, names = ['good', 'bad'])
cps = generate_categories_profiles(cat)

## Model
model = MRSort(c, w, bpt, 0.6, cps)

fmins = []
results = []
for i, xmcda in enumerate(xmcda_csets):
    result = {}
    fmins = CriteriaSets().from_xmcda(xmcda)
    result['fmins'] = fmins
    result['vector'] = "".join(map(str, sorted([len(fmin)
                                   for fmin in sorted(fmins, key = len)])))
    print("\n%d. Fmin: %s" % (i + 1, ', '.join("%s" % f for f in fmins)))
    pt, aa = generate_binary_performance_table_and_assignments(criteria, cat,
                                                               fmins)
    aa.id = 'aa'
    a = Alternatives([Alternative(a.id) for a in aa])
Example #25
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels,
                       model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted,
                       aa_learning,
                       seed=seed * 100)
    #lp_weights = lp_weights,
    #heur_profiles = heur_profiles,
    #lp_veto_weights = lp_veto_weights,
    #heur_veto_profiles = heur_veto_profiles,

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of test setting
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" %
                    (data.name, seed, nloop, nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, aa_learning,
                  aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Example #26
File: test_veto.py  Project: oso/pymcda
w5 = CriterionValue('c5', 0.2)
w = CriteriaValues([w1, w2, w3, w4, w5])

## Profiles and categories
b1 = AlternativePerformances('b1', {'c1': 10, 'c2': 10, 'c3': 10, 'c4': 10, 'c5': 10})
bpt = PerformanceTable([b1])

cat = generate_categories(2)
cps = generate_categories_profiles(cat)

## Veto thresholds
vb1 = AlternativePerformances('b1', {'c1': 2, 'c2': 2, 'c3': 2, 'c4': 2, 'c5': 2}, 'b1')
v = PerformanceTable([vb1])
vw = w.copy()

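# MRSort with veto: the last three arguments are the veto profile table, the
# veto weights and the veto threshold (veto lambda).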
model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)

# Alternatives
a1 = AlternativePerformances('a1',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4':  9, 'c5': 11})
a2 = AlternativePerformances('a2',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5':  9})
a3 = AlternativePerformances('a3',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5': 11})
a4 = AlternativePerformances('a4',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5':  9})
a5 = AlternativePerformances('a5',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5': 11})
a6 = AlternativePerformances('a6',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5':  9})
a7 = AlternativePerformances('a7',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5': 11})
a8 = AlternativePerformances('a8',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5':  9})
a9 = AlternativePerformances('a9',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5': 11})
a10 = AlternativePerformances('a10', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5':  9})
a11 = AlternativePerformances('a11', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5': 11})
a12 = AlternativePerformances('a12', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5':  9})
a13 = AlternativePerformances('a13', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5': 11})
Example #27
xmcda_models_toshow = []
xmcda_models = []
for f in sys.argv[1:]:
    if not os.path.isfile(f):
        xmcda_models_toshow.append(f)
        continue

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()

    xmcda_models = root.findall(".//ElectreTri")

    m = MRSort().from_xmcda(xmcda_models[0])

    bname = os.path.basename(os.path.splitext(f.name)[0])
    fweights = open('%s-w.dat' % bname, 'w+')
    fprofiles = open('%s-p.dat' % bname, 'w+')

    print("Processing %s..." % bname)

    ncriteria = len(m.criteria)
    for c in m.criteria.keys():
        criteria_names[c] = c
        criteria_worst[c] = 0
        criteria_best[c] = 1

    criteria_order = sorted(m.criteria.keys())
    criteria_discarded = {c: 0 for c in criteria_order}
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1)
    cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
    cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
    cvs = CriteriaValues([cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15,
                          cv23, cv24, cv25, cv34, cv35, cv45])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    print(model.lbda, model.cv)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    model.cv = None
    model.lbda = None
    mip = MipMRSortMobius(model, pt, aa)
    mip.solve()

    print(model.lbda, model.cv)

    aa2 = model.get_assignments(pt)
Example #29
    cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
    cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
    cvs = CriteriaValues([
        cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15, cv23, cv24, cv25,
        cv34, cv35, cv45
    ])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    print(model.lbda, model.cv)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    lp = LpMRSortMobius(model, pt, aa)
    lp.solve()

    print(model.lbda, model.cv)

    aa2 = model.get_assignments(pt)

    for a in aa:
Example #30
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(nc, ncat,
                            seed,
                            veto_func = veto_func,
                            veto_param = veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(nc, ncat,
                            seed,
                            veto_weights = indep_veto_weights,
                            veto_func = veto_func,
                            veto_param = veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = list(w.keys())[0]
        w[w1] += 1 - sum(w.values())
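        # Give the first criterion the rounding slack so the equal veto
        # weights sum exactly to 1.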
        model2.veto_weights = CriteriaValues([CriterionValue(c.id,
                                                             w[c.id])
                                              for c in model.criteria])
        model2.veto_lbda = min(w.values())
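        # With the veto threshold set to the smallest weight, a single
        # criterion below its veto profile is enough to trigger the veto.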

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" % (seed, na, nc, ncat,
                    na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, a, a_gen, pt, pt_gen, aa, aa2,
                  aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(
            nc, ncat, seed, veto_func=veto_func, veto_param=veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(
            nc,
            ncat,
            seed,
            veto_weights=indep_veto_weights,
            veto_func=veto_func,
            veto_param=veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = list(w.keys())[0]
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues(
            [CriterionValue(c.id, w[c.id]) for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" %
                    (seed, na, nc, ncat, na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, a,
                  a_gen, pt, pt_gen, aa, aa2, aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
示例#32
0
for f in sys.argv[1:]:
    if not os.path.isfile(f):
        xmcda_models_toshow.append(f)
        continue

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()

    xmcda_models += root.findall(".//ElectreTri")

models = []
for xmcda_model in xmcda_models:
    m = MRSort().from_xmcda(xmcda_model)

    if len(xmcda_models_toshow) > 0 and m.id not in xmcda_models_toshow:
        continue

    string = "Model '%s'" % m.id
    print(string)
    print('=' * len(string))

    print(m.bpt)
    print(m.cv)
    print("lambda: %s\n" % m.lbda)

    if m.veto is not None:
        print(m.veto)
    else:
示例#33
0
cat = generate_categories(2)
cps = generate_categories_profiles(cat)

## Veto thresholds
vb1 = AlternativePerformances('b1', {
    'c1': 2,
    'c2': 2,
    'c3': 2,
    'c4': 2,
    'c5': 2
}, 'b1')
v = PerformanceTable([vb1])
vw = w.copy()

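# c, w and bpt are defined in the part of this example that was cut off;
# 0.6 is the majority threshold (lambda) and the trailing 0.4 is the veto
# threshold (veto_lbda).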
model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)

# Alternatives
a1 = AlternativePerformances('a1', {
    'c1': 9,
    'c2': 9,
    'c3': 9,
    'c4': 9,
    'c5': 11
})
a2 = AlternativePerformances('a2', {
    'c1': 9,
    'c2': 9,
    'c3': 9,
    'c4': 11,
    'c5': 9
示例#34
0
table_auc_test = []
cmatrix_learning = {}
cmatrix_test = {}

DATADIR = os.getenv('DATADIR', '%s/pymcda-data' % os.path.expanduser('~'))
directory='%s/test-veto2' % (DATADIR)

for f in sys.argv[1:]:
    fname = os.path.splitext(os.path.basename(f))[0]

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root,
                                                  'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

#    # Remove alternatives that cannot be corrected with a veto rule
#    aa_learning_m2p = discard_undersorted_alternatives(m.categories,
#                                                      aa_learning,
示例#35
0
t1 = time.time()

if algo == 'meta_mrsort':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortWeights
    heur_profiles = MetaMRSortProfiles4
elif algo == 'meta_mrsortc':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortMobius
    heur_profiles = MetaMRSortProfilesChoquet

if algo == 'meta_mrsort' or algo == 'meta_mrsortc':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    pt_sorted = SortedPerformanceTable(data.pt)

    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(),
                          pt_sorted, data.aa,
                          heur_init_profiles,
                          lp_weights,
                          heur_profiles)

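    # Iterate the population metaheuristic: up to nloop rounds of nmeta inner
    # iterations each, stopping early once the learning set is perfectly
    # classified (ca_learning == 1).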
    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
示例#36
0
table_ca_learning = []
table_ca_test = []
table_auc_learning = []
table_auc_test = []
cmatrix_learning = {}
cmatrix_test = {}

nveto = 0
for f in sys.argv[1:]:
    f = os.path.abspath(f)
    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    # Leftover debugging breakpoint, disabled so the script can run unattended:
    # import pdb; pdb.set_trace()
    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root, 'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)

    table_ca_learning.append(ca_learning)
示例#37
0
table_ca_learning = []
table_ca_test = []
table_auc_learning = []
table_auc_test = []
cmatrix_learning = {}
cmatrix_test = {}

nveto = 0
for f in sys.argv[1:]:
    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root,
                                                  'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)
示例#38
0
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
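    # ca2_best: MIP objective normalised by the learning-set size (the
    # objective counts noisy assignments reproduced by the learned model).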
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Compare the learned model's assignments with the original ones, paying
    # special attention to the perturbed (erroneous) alternatives
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
示例#39
0
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Compare the learned model's assignments with the original ones, paying
    # special attention to the perturbed (erroneous) alternatives
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" % (seed, na, nc, ncat,
                    na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
    def test001(self):
        c = generate_criteria(5)
        w1 = CriterionValue('c1', 0.2)
        w2 = CriterionValue('c2', 0.2)
        w3 = CriterionValue('c3', 0.2)
        w4 = CriterionValue('c4', 0.2)
        w5 = CriterionValue('c5', 0.2)
        w = CriteriaValues([w1, w2, w3, w4, w5])

        b1 = AlternativePerformances('b1', {
            'c1': 10,
            'c2': 10,
            'c3': 10,
            'c4': 10,
            'c5': 10
        })
        bpt = PerformanceTable([b1])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        vb1 = AlternativePerformances('b1', {
            'c1': 2,
            'c2': 2,
            'c3': 2,
            'c4': 2,
            'c5': 2
        }, 'b1')
        v = PerformanceTable([vb1])
        vw = w.copy()

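        # Profiles sit at 10 on every criterion: a 9 fails a criterion, an 11
        # passes it, and a 7 drops low enough to count towards a veto.  The
        # model built at the end uses lambda = 0.6 and a veto threshold of 0.4.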
        a1 = AlternativePerformances('a1', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a2 = AlternativePerformances('a2', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a3 = AlternativePerformances('a3', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a4 = AlternativePerformances('a4', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a5 = AlternativePerformances('a5', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a6 = AlternativePerformances('a6', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a7 = AlternativePerformances('a7', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a8 = AlternativePerformances('a8', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a9 = AlternativePerformances('a9', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a10 = AlternativePerformances('a10', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a11 = AlternativePerformances('a11', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a12 = AlternativePerformances('a12', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a13 = AlternativePerformances('a13', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a14 = AlternativePerformances('a14', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a15 = AlternativePerformances('a15', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a16 = AlternativePerformances('a16', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a17 = AlternativePerformances('a17', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a18 = AlternativePerformances('a18', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a19 = AlternativePerformances('a19', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a20 = AlternativePerformances('a20', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a21 = AlternativePerformances('a21', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a22 = AlternativePerformances('a22', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a23 = AlternativePerformances('a23', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a24 = AlternativePerformances('a24', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a25 = AlternativePerformances('a25', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a26 = AlternativePerformances('a26', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a27 = AlternativePerformances('a27', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a28 = AlternativePerformances('a28', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a29 = AlternativePerformances('a29', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a30 = AlternativePerformances('a30', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a31 = AlternativePerformances('a31', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a32 = AlternativePerformances('a32', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a33 = AlternativePerformances('a33', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a34 = AlternativePerformances('a34', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a35 = AlternativePerformances('a35', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a36 = AlternativePerformances('a36', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 7
        })
        a37 = AlternativePerformances('a37', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 7
        })
        a38 = AlternativePerformances('a38', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a39 = AlternativePerformances('a39', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a40 = AlternativePerformances('a40', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 7,
            'c5': 11
        })
        a41 = AlternativePerformances('a41', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a42 = AlternativePerformances('a42', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a43 = AlternativePerformances('a43', {
            'c1': 11,
            'c2': 7,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a44 = AlternativePerformances('a44', {
            'c1': 7,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a45 = AlternativePerformances('a45', {
            'c1': 7,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        pt = PerformanceTable([
            a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
            a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28,
            a29, a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, a40, a41,
            a42, a43, a44, a45
        ])

        ap1 = AlternativeAssignment('a1', 'cat2')
        ap2 = AlternativeAssignment('a2', 'cat2')
        ap3 = AlternativeAssignment('a3', 'cat2')
        ap4 = AlternativeAssignment('a4', 'cat2')
        ap5 = AlternativeAssignment('a5', 'cat2')
        ap6 = AlternativeAssignment('a6', 'cat2')
        ap7 = AlternativeAssignment('a7', 'cat1')
        ap8 = AlternativeAssignment('a8', 'cat2')
        ap9 = AlternativeAssignment('a9', 'cat2')
        ap10 = AlternativeAssignment('a10', 'cat2')
        ap11 = AlternativeAssignment('a11', 'cat1')
        ap12 = AlternativeAssignment('a12', 'cat2')
        ap13 = AlternativeAssignment('a13', 'cat1')
        ap14 = AlternativeAssignment('a14', 'cat1')
        ap15 = AlternativeAssignment('a15', 'cat1')
        ap16 = AlternativeAssignment('a16', 'cat2')
        ap17 = AlternativeAssignment('a17', 'cat2')
        ap18 = AlternativeAssignment('a18', 'cat2')
        ap19 = AlternativeAssignment('a19', 'cat1')
        ap20 = AlternativeAssignment('a20', 'cat2')
        ap21 = AlternativeAssignment('a21', 'cat1')
        ap22 = AlternativeAssignment('a22', 'cat1')
        ap23 = AlternativeAssignment('a23', 'cat1')
        ap24 = AlternativeAssignment('a24', 'cat2')
        ap25 = AlternativeAssignment('a25', 'cat1')
        ap26 = AlternativeAssignment('a26', 'cat1')
        ap27 = AlternativeAssignment('a27', 'cat1')
        ap28 = AlternativeAssignment('a28', 'cat1')
        ap29 = AlternativeAssignment('a29', 'cat1')
        ap30 = AlternativeAssignment('a30', 'cat1')
        ap31 = AlternativeAssignment('a31', 'cat1')
        ap32 = AlternativeAssignment('a32', 'cat1')
        ap33 = AlternativeAssignment('a33', 'cat1')
        ap34 = AlternativeAssignment('a34', 'cat1')
        ap35 = AlternativeAssignment('a35', 'cat1')
        ap36 = AlternativeAssignment('a36', 'cat2')
        ap37 = AlternativeAssignment('a37', 'cat2')
        ap38 = AlternativeAssignment('a38', 'cat2')
        ap39 = AlternativeAssignment('a39', 'cat2')
        ap40 = AlternativeAssignment('a40', 'cat2')
        ap41 = AlternativeAssignment('a41', 'cat2')
        ap42 = AlternativeAssignment('a42', 'cat2')
        ap43 = AlternativeAssignment('a43', 'cat2')
        ap44 = AlternativeAssignment('a44', 'cat2')
        ap45 = AlternativeAssignment('a45', 'cat2')
        aa = AlternativesAssignments([
            ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8, ap9, ap10, ap11, ap12,
            ap13, ap14, ap15, ap16, ap17, ap18, ap19, ap20, ap21, ap22, ap23,
            ap24, ap25, ap26, ap27, ap28, ap29, ap30, ap31, ap32, ap33, ap34,
            ap35, ap36, ap37, ap38, ap39, ap40, ap41, ap42, ap43, ap44, ap45
        ])

        model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
        aa2 = model.pessimist(pt)
        ok = compare_assignments(aa, aa2)
        self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                         "assigned")
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())
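    # pclearning is the percentage of examples kept for learning; the rest
    # form the test set.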

    # Initialize ELECTRE-TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)
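    # Profiles, weights and the majority threshold are left unset (None); the
    # MIP below infers them from the learning assignments.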

    # Run the linear program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # CA learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
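        # Empty test set: report zero accuracy and AUC and an all-zero
        # confusion matrix.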
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, aa_learning,
                  aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
示例#42
0
    def test001(self):
        c = generate_criteria(5)
        w1 = CriterionValue('c1', 0.2)
        w2 = CriterionValue('c2', 0.2)
        w3 = CriterionValue('c3', 0.2)
        w4 = CriterionValue('c4', 0.2)
        w5 = CriterionValue('c5', 0.2)
        w = CriteriaValues([w1, w2, w3, w4, w5])

        b1 = AlternativePerformances('b1', {'c1': 10, 'c2': 10, 'c3': 10, 'c4': 10, 'c5': 10})
        bpt = PerformanceTable([b1])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        vb1 = AlternativePerformances('b1', {'c1': 2, 'c2': 2, 'c3': 2, 'c4': 2, 'c5': 2}, 'b1')
        v = PerformanceTable([vb1])
        vw = w.copy()

        a1 = AlternativePerformances('a1',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4':  9, 'c5': 11})
        a2 = AlternativePerformances('a2',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5':  9})
        a3 = AlternativePerformances('a3',   {'c1':  9, 'c2':  9, 'c3':  9, 'c4': 11, 'c5': 11})
        a4 = AlternativePerformances('a4',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5':  9})
        a5 = AlternativePerformances('a5',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4':  9, 'c5': 11})
        a6 = AlternativePerformances('a6',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5':  9})
        a7 = AlternativePerformances('a7',   {'c1':  9, 'c2':  9, 'c3': 11, 'c4': 11, 'c5': 11})
        a8 = AlternativePerformances('a8',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5':  9})
        a9 = AlternativePerformances('a9',   {'c1':  9, 'c2': 11, 'c3':  9, 'c4':  9, 'c5': 11})
        a10 = AlternativePerformances('a10', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5':  9})
        a11 = AlternativePerformances('a11', {'c1':  9, 'c2': 11, 'c3':  9, 'c4': 11, 'c5': 11})
        a12 = AlternativePerformances('a12', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5':  9})
        a13 = AlternativePerformances('a13', {'c1':  9, 'c2': 11, 'c3': 11, 'c4':  9, 'c5': 11})
        a14 = AlternativePerformances('a14', {'c1':  9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  9})
        a15 = AlternativePerformances('a15', {'c1':  9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
        a16 = AlternativePerformances('a16', {'c1': 11, 'c2':  9, 'c3':  9, 'c4':  9, 'c5':  9})
        a17 = AlternativePerformances('a17', {'c1': 11, 'c2':  9, 'c3':  9, 'c4':  9, 'c5': 11})
        a18 = AlternativePerformances('a18', {'c1': 11, 'c2':  9, 'c3':  9, 'c4': 11, 'c5':  9})
        a19 = AlternativePerformances('a19', {'c1': 11, 'c2':  9, 'c3':  9, 'c4': 11, 'c5': 11})
        a20 = AlternativePerformances('a20', {'c1': 11, 'c2':  9, 'c3': 11, 'c4':  9, 'c5':  9})
        a21 = AlternativePerformances('a21', {'c1': 11, 'c2':  9, 'c3': 11, 'c4':  9, 'c5': 11})
        a22 = AlternativePerformances('a22', {'c1': 11, 'c2':  9, 'c3': 11, 'c4': 11, 'c5':  9})
        a23 = AlternativePerformances('a23', {'c1': 11, 'c2':  9, 'c3': 11, 'c4': 11, 'c5': 11})
        a24 = AlternativePerformances('a24', {'c1': 11, 'c2': 11, 'c3':  9, 'c4':  9, 'c5':  9})
        a25 = AlternativePerformances('a25', {'c1': 11, 'c2': 11, 'c3':  9, 'c4':  9, 'c5': 11})
        a26 = AlternativePerformances('a26', {'c1': 11, 'c2': 11, 'c3':  9, 'c4': 11, 'c5':  9})
        a27 = AlternativePerformances('a27', {'c1': 11, 'c2': 11, 'c3':  9, 'c4': 11, 'c5': 11})
        a28 = AlternativePerformances('a28', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  9, 'c5':  9})
        a29 = AlternativePerformances('a29', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  9, 'c5': 11})
        a30 = AlternativePerformances('a30', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  9})
        a31 = AlternativePerformances('a31', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})
        a32 = AlternativePerformances('a32', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})
        a33 = AlternativePerformances('a33', {'c1': 11, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})
        a34 = AlternativePerformances('a34', {'c1': 11, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})
        a35 = AlternativePerformances('a35', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
        a36 = AlternativePerformances('a36', {'c1': 11, 'c2': 11, 'c3': 11, 'c4':  7, 'c5':  7})
        a37 = AlternativePerformances('a37', {'c1': 11, 'c2': 11, 'c3':  7, 'c4': 11, 'c5':  7})
        a38 = AlternativePerformances('a38', {'c1': 11, 'c2':  7, 'c3': 11, 'c4': 11, 'c5':  7})
        a39 = AlternativePerformances('a39', {'c1':  7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5':  7})
        a40 = AlternativePerformances('a40', {'c1': 11, 'c2': 11, 'c3':  7, 'c4':  7, 'c5': 11})
        a41 = AlternativePerformances('a41', {'c1': 11, 'c2':  7, 'c3': 11, 'c4':  7, 'c5': 11})
        a42 = AlternativePerformances('a42', {'c1':  7, 'c2': 11, 'c3': 11, 'c4':  7, 'c5': 11})
        a43 = AlternativePerformances('a43', {'c1': 11, 'c2':  7, 'c3':  7, 'c4': 11, 'c5': 11})
        a44 = AlternativePerformances('a44', {'c1':  7, 'c2': 11, 'c3':  7, 'c4': 11, 'c5': 11})
        a45 = AlternativePerformances('a45', {'c1':  7, 'c2':  7, 'c3': 11, 'c4': 11, 'c5': 11})
        pt = PerformanceTable([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
                               a12, a13, a14, a15, a16, a17, a18, a19, a20,
                               a21, a22, a23, a24, a25, a26, a27, a28, a29,
                               a30, a31, a32, a33, a34, a35, a36, a37, a38,
                               a39, a40, a41, a42, a43, a44, a45])

        ap1 = AlternativeAssignment('a1', 'cat2')
        ap2 = AlternativeAssignment('a2', 'cat2')
        ap3 = AlternativeAssignment('a3', 'cat2')
        ap4 = AlternativeAssignment('a4', 'cat2')
        ap5 = AlternativeAssignment('a5', 'cat2')
        ap6 = AlternativeAssignment('a6', 'cat2')
        ap7 = AlternativeAssignment('a7', 'cat1')
        ap8 = AlternativeAssignment('a8', 'cat2')
        ap9 = AlternativeAssignment('a9', 'cat2')
        ap10 = AlternativeAssignment('a10', 'cat2')
        ap11 = AlternativeAssignment('a11', 'cat1')
        ap12 = AlternativeAssignment('a12', 'cat2')
        ap13 = AlternativeAssignment('a13', 'cat1')
        ap14 = AlternativeAssignment('a14', 'cat1')
        ap15 = AlternativeAssignment('a15', 'cat1')
        ap16 = AlternativeAssignment('a16', 'cat2')
        ap17 = AlternativeAssignment('a17', 'cat2')
        ap18 = AlternativeAssignment('a18', 'cat2')
        ap19 = AlternativeAssignment('a19', 'cat1')
        ap20 = AlternativeAssignment('a20', 'cat2')
        ap21 = AlternativeAssignment('a21', 'cat1')
        ap22 = AlternativeAssignment('a22', 'cat1')
        ap23 = AlternativeAssignment('a23', 'cat1')
        ap24 = AlternativeAssignment('a24', 'cat2')
        ap25 = AlternativeAssignment('a25', 'cat1')
        ap26 = AlternativeAssignment('a26', 'cat1')
        ap27 = AlternativeAssignment('a27', 'cat1')
        ap28 = AlternativeAssignment('a28', 'cat1')
        ap29 = AlternativeAssignment('a29', 'cat1')
        ap30 = AlternativeAssignment('a30', 'cat1')
        ap31 = AlternativeAssignment('a31', 'cat1')
        ap32 = AlternativeAssignment('a32', 'cat1')
        ap33 = AlternativeAssignment('a33', 'cat1')
        ap34 = AlternativeAssignment('a34', 'cat1')
        ap35 = AlternativeAssignment('a35', 'cat1')
        ap36 = AlternativeAssignment('a36', 'cat2')
        ap37 = AlternativeAssignment('a37', 'cat2')
        ap38 = AlternativeAssignment('a38', 'cat2')
        ap39 = AlternativeAssignment('a39', 'cat2')
        ap40 = AlternativeAssignment('a40', 'cat2')
        ap41 = AlternativeAssignment('a41', 'cat2')
        ap42 = AlternativeAssignment('a42', 'cat2')
        ap43 = AlternativeAssignment('a43', 'cat2')
        ap44 = AlternativeAssignment('a44', 'cat2')
        ap45 = AlternativeAssignment('a45', 'cat2')
        aa = AlternativesAssignments([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8,
                                      ap9, ap10, ap11, ap12, ap13, ap14, ap15,
                                      ap16, ap17, ap18, ap19, ap20, ap21,
                                      ap22, ap23, ap24, ap25, ap26, ap27,
                                      ap28, ap29, ap30, ap31, ap32, ap33,
                                      ap34, ap35, ap36, ap37, ap38, ap39,
                                      ap40, ap41, ap42, ap43, ap44, ap45])

        model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
        aa2 = model.pessimist(pt)
        ok = compare_assignments(aa, aa2)
        self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                         "assigned")
示例#43
0
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)

    # Run the MIP
    mip = MipCMRSort(model, pt, aa)
    mip.solve()

    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
    model.cv.display()
    print("lambda: %.7s" % model.lbda)
示例#44
0
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize ELECTRE-TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the linear program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # CA learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
try:
    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
except Exception:
    pt_learning = None

try:
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')
except Exception:
    pt_test = None

aa_learning_m1, aa_learning_m2 = None, None
aa_test_m1, aa_test_m2 = None, None

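# Rebuild whichever model was saved under the id 'initial' (MRSort or AVFSort)
# and recompute its assignments on the learning and test sets when available.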
if root.find("ElectreTri[@id='initial']") is not None:
    m1 = MRSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
    m1 = AVFSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.get_assignments(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.get_assignments(pt_test)
else:
    if root.find("alternativesAffectations[@id='learning_set']") is not None:
        aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
                                                              'learning_set')