def test002(self):
    c = generate_criteria(3)
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)

    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
    bpt = PerformanceTable([bp1, bp2])

    cv1 = CriterionValue('c1', 0.2)
    cv2 = CriterionValue('c2', 0.2)
    cv3 = CriterionValue('c3', 0.2)
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
    cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    a = generate_alternatives(10000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    model2 = MRSort(c, None, bpt, None, cps)
    lp = LpMRSortMobius(model2, pt, aa)
    obj = lp.solve()
    aa2 = model2.get_assignments(pt)

    self.assertEqual(obj, 0)
    self.assertEqual(aa, aa2)
def test001(self):
    random.seed(1)
    c = generate_criteria(4)

    cv1 = CriterionValue('c1', 0.25)
    cv2 = CriterionValue('c2', 0.25)
    cv3 = CriterionValue('c3', 0.25)
    cv4 = CriterionValue('c4', 0.25)
    cv = CriteriaValues([cv1, cv2, cv3, cv4])

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)

    bp = AlternativePerformances('b1', {'c1': 0.5, 'c2': 0.5,
                                        'c3': 0.5, 'c4': 0.5})
    bpt = PerformanceTable([bp])

    lbda = 0.5

    etri = MRSort(c, cv, bpt, lbda, cps)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, c)
    aas = etri.pessimist(pt)

    for aa in aas:
        w = 0
        perfs = pt[aa.id].performances
        # Sum the weights of the criteria on which the alternative
        # reaches the profile; the loop variable is named cid so it
        # does not shadow the criteria 'c' defined above.
        for cid, val in perfs.items():
            if val >= bp.performances[cid]:
                w += cv[cid].value

        if aa.category_id == 'cat2':
            self.assertLess(w, lbda)
        else:
            self.assertGreaterEqual(w, lbda)
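# The assertions above restate the rule this test exercises: under MRSort
# with a single profile, an alternative reaches the upper category (cat1
# here) exactly when the summed weights of the criteria on which it
# matches the profile reach the majority threshold lambda. A minimal
# standalone sketch, using plain dicts instead of pymcda types; every
# name below is illustrative, not part of pymcda:

def mrsort_concordance(perfs, profile, weights):
    """Summed weight of the criteria on which perfs reaches profile."""
    return sum(w for cid, w in weights.items() if perfs[cid] >= profile[cid])

weights = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}
profile = {'c1': 0.5, 'c2': 0.5, 'c3': 0.5, 'c4': 0.5}
alt = {'c1': 0.6, 'c2': 0.4, 'c3': 0.7, 'c4': 0.55}

# Three criteria out of four support the profile: 0.75 >= lambda = 0.5,
# so this alternative would be assigned to the upper category.
assert mrsort_concordance(alt, profile, weights) >= 0.5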
def init_one_meta(self, seed):
    cps = generate_categories_profiles(self.categories)

    # When criteria are not duplicated and a renewal method is in use,
    # draw a random preference direction for each criterion whose
    # direction is unknown.
    if not self.duplication and self.renewal_method > 0:
        for crit in self.criteria.values():
            if int(crit.id[1:]) <= self.nb_unk_criteria:
                if random.random() < 0.5:
                    crit.direction = -1
                else:
                    crit.direction = 1

    model = MRSort(copy.deepcopy(self.criteria), None, None, None, cps)
    model.id = 'model_%d' % seed

    meta = MetaMRSortCV4MSJP(model, self.pt_sorted, self.aa_ori,
                             self.lp_weights, self.heur_profiles,
                             self.lp_veto_weights, self.heur_veto_profiles,
                             gamma=self.gamma,
                             renewal_method=self.renewal_method,
                             pretreatment_crit=self.pretreatment_crit,
                             duplication=self.duplication,
                             nb_unk_criteria=self.nb_unk_criteria)

    random.seed(seed)
    meta.random_state = random.getstate()
    meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
    return meta
def sort_models(self, fct_ca=0, heuristic=False):
    cps = generate_categories_profiles(self.categories)

    if not heuristic:
        # Rank the population of models according to the chosen
        # fitness function.
        if fct_ca == 1:
            metas_sorted = sorted(self.metas, key=lambda k: k.ca_good,
                                  reverse=True)
        elif fct_ca == 2:
            metas_sorted = sorted(self.metas,
                                  key=lambda k: k.ca_good + k.ca,
                                  reverse=True)
        elif fct_ca == 3:
            metas_sorted = sorted(self.metas,
                                  key=lambda k: 1000 * k.ca_good + k.ca,
                                  reverse=True)
        else:
            metas_sorted = sorted(self.metas, key=lambda k: k.ca,
                                  reverse=True)
    else:
        # Heuristic ranking: re-evaluate each model with criterion c1
        # neutralized (zero weight, zero profile value) and rank the
        # models by the accuracy they lose without it.
        for m in self.metas:
            modelh = MRSort(copy.deepcopy(m.model.criteria),
                            copy.deepcopy(m.model.cv),
                            copy.deepcopy(m.model.bpt),
                            copy.deepcopy(m.model.lbda), cps)

            wtotal = 1 - modelh.cv["c1"].value
            modelh.cv["c1"].value = 0
            # Renormalization of the remaining weights is disabled:
            # if wtotal != 0:
            #     for el in modelh.cv:
            #         el.value /= wtotal
            modelh.bpt['b1'].performances["c1"] = 0

            aa_learned = modelh.get_assignments(self.pt_sorted.pt)
            cah = 0
            for alt in self.aa_ori:
                if alt.category_id == aa_learned(alt.id):
                    cah += 1
            m.cah = cah / m.meta.na

        metas_sorted = sorted(self.metas, key=lambda k: k.ca - k.cah,
                              reverse=True)

    return metas_sorted
def init_one_meta(self, seed):
    cps = generate_categories_profiles(self.categories)
    model = self.model.copy()
    meta = MetaMRSortCV3(model, self.pt_sorted, self.aa_ori,
                         self.heur_init_profiles, self.lp_veto_weights,
                         self.heur_veto_profiles)
    random.seed(seed)
    meta.random_state = random.getstate()
    meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
    return meta
def init_one_meta(self, seed):
    cps = generate_categories_profiles(self.categories)
    model = MRSort(self.criteria, None, None, None, cps)
    meta = MetaMRSort3(model, self.pt_sorted, self.aa_ori,
                       self.heur_init_profiles, self.lp_weights,
                       self.heur_profiles)
    random.seed(seed)
    meta.random_state = random.getstate()
    meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
    return meta
def init_one_meta(self, seed):
    cps = generate_categories_profiles(self.categories)
    model = MRSort(self.criteria, None, None, None, cps)
    model.id = 'model_%d' % seed
    meta = MetaMRSortCV4(model, self.pt_sorted, self.aa_ori,
                         self.lp_weights, self.heur_profiles,
                         self.lp_veto_weights, self.heur_veto_profiles)
    random.seed(seed)
    meta.random_state = random.getstate()
    meta.auc = meta.model.auc(self.aa_ori, self.pt_sorted.pt)
    return meta
def test002(self):
    random.seed(2)
    c = generate_criteria(4)

    cv1 = CriterionValue('c1', 0.25)
    cv2 = CriterionValue('c2', 0.25)
    cv3 = CriterionValue('c3', 0.25)
    cv4 = CriterionValue('c4', 0.25)
    cv = CriteriaValues([cv1, cv2, cv3, cv4])

    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)

    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75,
                                         'c3': 0.75, 'c4': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25,
                                         'c3': 0.25, 'c4': 0.25})
    bpt = PerformanceTable([bp1, bp2])

    lbda = 0.5

    etri = MRSort(c, cv, bpt, lbda, cps)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, c)
    aas = etri.pessimist(pt)

    for aa in aas:
        w1 = w2 = 0
        perfs = pt[aa.id].performances
        # Weight of the coalition supporting each profile; the loop
        # variable is named cid to avoid shadowing the criteria 'c'.
        for cid, val in perfs.items():
            if val >= bp1.performances[cid]:
                w1 += cv[cid].value
            if val >= bp2.performances[cid]:
                w2 += cv[cid].value

        if aa.category_id == 'cat3':
            self.assertLess(w1, lbda)
            self.assertLess(w2, lbda)
        elif aa.category_id == 'cat2':
            self.assertLess(w1, lbda)
            self.assertGreaterEqual(w2, lbda)
        else:
            self.assertGreaterEqual(w1, lbda)
            self.assertGreaterEqual(w2, lbda)
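# With two profiles the pessimistic rule walks down from the highest
# profile: the alternative lands in the category just above the first
# profile it outranks, and in the worst category if it outranks none.
# A standalone sketch of that descent, using plain dicts with profiles
# and categories listed from best to worst; all names are illustrative:

def mrsort_pessimist(perfs, profiles, weights, lbda, categories):
    """categories has one more entry than profiles (best to worst)."""
    for i, profile in enumerate(profiles):
        support = sum(w for cid, w in weights.items()
                      if perfs[cid] >= profile[cid])
        if support >= lbda:
            return categories[i]
    return categories[-1]

weights = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}
b1 = {'c1': 0.75, 'c2': 0.75, 'c3': 0.75, 'c4': 0.75}
b2 = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}

# This alternative beats b2 on every criterion but b1 on only one,
# so it stops in the middle category.
assert mrsort_pessimist({'c1': 0.9, 'c2': 0.3, 'c3': 0.3, 'c4': 0.3},
                        [b1, b2], weights, 0.5,
                        ['cat1', 'cat2', 'cat3']) == 'cat2'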
def test001(self):
    c = generate_criteria(3)
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)

    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
    bpt = PerformanceTable([bp1, bp2])

    cv1 = CriterionValue('c1', 0.2)
    cv2 = CriterionValue('c2', 0.2)
    cv3 = CriterionValue('c3', 0.2)
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
    cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

    lbda = 0.6

    model = MRSort(c, cvs, bpt, lbda, cps)

    ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
    ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
    ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
    ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
    ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
    ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
    ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
    ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])

    aa = model.get_assignments(pt)

    self.assertEqual(aa['a1'].category_id, "cat2")
    self.assertEqual(aa['a2'].category_id, "cat1")
    self.assertEqual(aa['a3'].category_id, "cat3")
    self.assertEqual(aa['a4'].category_id, "cat2")
    self.assertEqual(aa['a5'].category_id, "cat2")
    self.assertEqual(aa['a6'].category_id, "cat3")
    self.assertEqual(aa['a7'].category_id, "cat1")
    self.assertEqual(aa['a8'].category_id, "cat1")
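# The expected categories follow from the 2-additive capacity encoded by
# the CriterionValue entries above: the weight of a coalition of criteria
# is the sum of the Moebius masses of its singletons and of every pair it
# contains. A standalone check of two of the assertions (the helper and
# its names are illustrative, not part of pymcda):

def coalition_weight(coalition, mobius):
    """Sum of the Moebius masses of all singletons and pairs in coalition."""
    singles = sum(mobius[(cid,)] for cid in coalition)
    pairs = sum(m for key, m in mobius.items()
                if len(key) == 2 and set(key) <= set(coalition))
    return singles + pairs

mobius = {('c1',): 0.2, ('c2',): 0.2, ('c3',): 0.2,
          ('c1', 'c2'): -0.1, ('c2', 'c3'): 0.2, ('c1', 'c3'): 0.3}

# a6 = (0.8, 0.8, 0.1) meets both profiles only on {c1, c2}:
# 0.2 + 0.2 - 0.1 = 0.3 < lambda = 0.6, hence cat3.
assert abs(coalition_weight(['c1', 'c2'], mobius) - 0.3) < 1e-9

# a7 = (0.8, 0.1, 0.8) meets both profiles on {c1, c3}:
# 0.2 + 0.2 + 0.3 = 0.7 >= 0.6, hence cat1.
assert abs(coalition_weight(['c1', 'c3'], mobius) - 0.7) < 1e-9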
def parse_input_files(indir):
    criteria = parse_xmcda_file(indir + '/criteria.xml', "criteria", Criteria)
    alternatives = parse_xmcda_file(indir + '/alternatives.xml',
                                    "alternatives", Alternatives)
    categories = parse_xmcda_file(indir + '/categories.xml', "categories",
                                  Categories)
    pt = parse_xmcda_file(indir + '/perfs_table.xml', "performanceTable",
                          PerformanceTable)
    assignments = parse_xmcda_file(indir + '/assign.xml',
                                   "alternativesAffectations",
                                   AlternativesAssignments)

    # Optional parameters
    params = parse_xmcda_file(indir + '/params.xml', "methodParameters",
                              Parameters)

    # Partial inference
    categories_profiles = parse_xmcda_file(indir + '/cat_profiles.xml',
                                           "categoriesProfiles",
                                           CategoriesProfiles)
    bpt = parse_xmcda_file(indir + '/profiles_perfs.xml', "performanceTable",
                           PerformanceTable)
    criteria_values = parse_xmcda_file(indir + '/crit_weights.xml',
                                       "criteriaValues", CriteriaValues)
    if criteria_values:
        criteria_values.normalize_sum_to_unity()

    lbda = None
    solver = DEFAULT_SOLVER
    if params is not None:
        if 'lambda' in params:
            lbda = params['lambda'].value
        if 'solver' in params:
            solver = params['solver'].value

    if categories_profiles is None:
        categories_profiles = generate_categories_profiles(categories)

    if categories and criteria and pt:
        model = MRSort(criteria, criteria_values, bpt, lbda,
                       categories_profiles)
    else:
        model = None

    return solver, model, assignments, pt
def parse_input_files(indir):
    criteria = parse_xmcda_file(indir + '/criteria.xml', "criteria", Criteria)
    alternatives = parse_xmcda_file(indir + '/alternatives.xml',
                                    "alternatives", Alternatives)
    categories = parse_xmcda_file(indir + '/categories.xml', "categories",
                                  Categories)
    pt = parse_xmcda_file(indir + '/perfs_table.xml', "performanceTable",
                          PerformanceTable)
    assignments = parse_xmcda_file(indir + '/assign.xml',
                                   "alternativesAffectations",
                                   AlternativesAssignments)
    meta_params = parse_xmcda_file(indir + '/params.xml', "methodParameters",
                                   Parameters)

    categories_profiles = generate_categories_profiles(categories)

    if categories and criteria and pt:
        model = MRSort(criteria, None, None, None, categories_profiles)
    else:
        model = None

    return model, assignments, pt, meta_params
def test001(self):
    c = generate_criteria(5)

    w1 = CriterionValue('c1', 0.2)
    w2 = CriterionValue('c2', 0.2)
    w3 = CriterionValue('c3', 0.2)
    w4 = CriterionValue('c4', 0.2)
    w5 = CriterionValue('c5', 0.2)
    w = CriteriaValues([w1, w2, w3, w4, w5])

    b1 = AlternativePerformances('b1', {'c1': 10, 'c2': 10, 'c3': 10,
                                        'c4': 10, 'c5': 10})
    bpt = PerformanceTable([b1])

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)

    vb1 = AlternativePerformances('b1', {'c1': 2, 'c2': 2, 'c3': 2,
                                         'c4': 2, 'c5': 2}, 'b1')
    v = PerformanceTable([vb1])
    vw = w.copy()

    a1 = AlternativePerformances('a1', {'c1': 9, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 11})
    a2 = AlternativePerformances('a2', {'c1': 9, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 9})
    a3 = AlternativePerformances('a3', {'c1': 9, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 11})
    a4 = AlternativePerformances('a4', {'c1': 9, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 9})
    a5 = AlternativePerformances('a5', {'c1': 9, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 11})
    a6 = AlternativePerformances('a6', {'c1': 9, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 9})
    a7 = AlternativePerformances('a7', {'c1': 9, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 11})
    a8 = AlternativePerformances('a8', {'c1': 9, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 9})
    a9 = AlternativePerformances('a9', {'c1': 9, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 11})
    a10 = AlternativePerformances('a10', {'c1': 9, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 9})
    a11 = AlternativePerformances('a11', {'c1': 9, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 11})
    a12 = AlternativePerformances('a12', {'c1': 9, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 9})
    a13 = AlternativePerformances('a13', {'c1': 9, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 11})
    a14 = AlternativePerformances('a14', {'c1': 9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 9})
    a15 = AlternativePerformances('a15', {'c1': 9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
    a16 = AlternativePerformances('a16', {'c1': 11, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 9})
    a17 = AlternativePerformances('a17', {'c1': 11, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 11})
    a18 = AlternativePerformances('a18', {'c1': 11, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 9})
    a19 = AlternativePerformances('a19', {'c1': 11, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 11})
    a20 = AlternativePerformances('a20', {'c1': 11, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 9})
    a21 = AlternativePerformances('a21', {'c1': 11, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 11})
    a22 = AlternativePerformances('a22', {'c1': 11, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 9})
    a23 = AlternativePerformances('a23', {'c1': 11, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 11})
    a24 = AlternativePerformances('a24', {'c1': 11, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 9})
    a25 = AlternativePerformances('a25', {'c1': 11, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 11})
    a26 = AlternativePerformances('a26', {'c1': 11, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 9})
    a27 = AlternativePerformances('a27', {'c1': 11, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 11})
    a28 = AlternativePerformances('a28', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 9})
    a29 = AlternativePerformances('a29', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 11})
    a30 = AlternativePerformances('a30', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 9})
    a31 = AlternativePerformances('a31', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 7})
    a32 = AlternativePerformances('a32', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 11})
    a33 = AlternativePerformances('a33', {'c1': 11, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 11})
    a34 = AlternativePerformances('a34', {'c1': 11, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 11})
    a35 = AlternativePerformances('a35', {'c1': 7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11})
    a36 = AlternativePerformances('a36', {'c1': 11, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 7})
    a37 = AlternativePerformances('a37', {'c1': 11, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 7})
    a38 = AlternativePerformances('a38', {'c1': 11, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 7})
    a39 = AlternativePerformances('a39', {'c1': 7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 7})
    a40 = AlternativePerformances('a40', {'c1': 11, 'c2': 11, 'c3': 7, 'c4': 7, 'c5': 11})
    a41 = AlternativePerformances('a41', {'c1': 11, 'c2': 7, 'c3': 11, 'c4': 7, 'c5': 11})
    a42 = AlternativePerformances('a42', {'c1': 7, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 11})
    a43 = AlternativePerformances('a43', {'c1': 11, 'c2': 7, 'c3': 7, 'c4': 11, 'c5': 11})
    a44 = AlternativePerformances('a44', {'c1': 7, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 11})
    a45 = AlternativePerformances('a45', {'c1': 7, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 11})
    pt = PerformanceTable([a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                           a11, a12, a13, a14, a15, a16, a17, a18, a19,
                           a20, a21, a22, a23, a24, a25, a26, a27, a28,
                           a29, a30, a31, a32, a33, a34, a35, a36, a37,
                           a38, a39, a40, a41, a42, a43, a44, a45])

    ap1 = AlternativeAssignment('a1', 'cat2')
    ap2 = AlternativeAssignment('a2', 'cat2')
    ap3 = AlternativeAssignment('a3', 'cat2')
    ap4 = AlternativeAssignment('a4', 'cat2')
    ap5 = AlternativeAssignment('a5', 'cat2')
    ap6 = AlternativeAssignment('a6', 'cat2')
    ap7 = AlternativeAssignment('a7', 'cat1')
    ap8 = AlternativeAssignment('a8', 'cat2')
    ap9 = AlternativeAssignment('a9', 'cat2')
    ap10 = AlternativeAssignment('a10', 'cat2')
    ap11 = AlternativeAssignment('a11', 'cat1')
    ap12 = AlternativeAssignment('a12', 'cat2')
    ap13 = AlternativeAssignment('a13', 'cat1')
    ap14 = AlternativeAssignment('a14', 'cat1')
    ap15 = AlternativeAssignment('a15', 'cat1')
    ap16 = AlternativeAssignment('a16', 'cat2')
    ap17 = AlternativeAssignment('a17', 'cat2')
    ap18 = AlternativeAssignment('a18', 'cat2')
    ap19 = AlternativeAssignment('a19', 'cat1')
    ap20 = AlternativeAssignment('a20', 'cat2')
    ap21 = AlternativeAssignment('a21', 'cat1')
    ap22 = AlternativeAssignment('a22', 'cat1')
    ap23 = AlternativeAssignment('a23', 'cat1')
    ap24 = AlternativeAssignment('a24', 'cat2')
    ap25 = AlternativeAssignment('a25', 'cat1')
    ap26 = AlternativeAssignment('a26', 'cat1')
    ap27 = AlternativeAssignment('a27', 'cat1')
    ap28 = AlternativeAssignment('a28', 'cat1')
    ap29 = AlternativeAssignment('a29', 'cat1')
    ap30 = AlternativeAssignment('a30', 'cat1')
    ap31 = AlternativeAssignment('a31', 'cat1')
    ap32 = AlternativeAssignment('a32', 'cat1')
    ap33 = AlternativeAssignment('a33', 'cat1')
    ap34 = AlternativeAssignment('a34', 'cat1')
    ap35 = AlternativeAssignment('a35', 'cat1')
    ap36 = AlternativeAssignment('a36', 'cat2')
    ap37 = AlternativeAssignment('a37', 'cat2')
    ap38 = AlternativeAssignment('a38', 'cat2')
    ap39 = AlternativeAssignment('a39', 'cat2')
    ap40 = AlternativeAssignment('a40', 'cat2')
    ap41 = AlternativeAssignment('a41', 'cat2')
    ap42 = AlternativeAssignment('a42', 'cat2')
    ap43 = AlternativeAssignment('a43', 'cat2')
    ap44 = AlternativeAssignment('a44', 'cat2')
    ap45 = AlternativeAssignment('a45', 'cat2')
    aa = AlternativesAssignments([ap1, ap2, ap3, ap4, ap5, ap6, ap7,
                                  ap8, ap9, ap10, ap11, ap12, ap13,
                                  ap14, ap15, ap16, ap17, ap18, ap19,
                                  ap20, ap21, ap22, ap23, ap24, ap25,
                                  ap26, ap27, ap28, ap29, ap30, ap31,
                                  ap32, ap33, ap34, ap35, ap36, ap37,
                                  ap38, ap39, ap40, ap41, ap42, ap43,
                                  ap44, ap45])

    model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
    aa2 = model.pessimist(pt)

    ok = compare_assignments(aa, aa2)
    self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                            "assigned")
if __name__ == "__main__": import random from pymcda.generate import generate_criteria from pymcda.generate import generate_categories from pymcda.generate import generate_categories_profiles from pymcda.generate import generate_alternatives from pymcda.generate import generate_random_performance_table from pymcda.types import AlternativePerformances, PerformanceTable from pymcda.types import CriterionValue, CriteriaValues, CriteriaSet from pymcda.electre_tri import MRSort random.seed(0) c = generate_criteria(5) cat = generate_categories(3) cps = generate_categories_profiles(cat) bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75, 'c4': 0.75, 'c5': 0.75}) bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25, 'c5': 0.25}) bpt = PerformanceTable([bp1, bp2]) cv1 = CriterionValue('c1', 0.2) cv2 = CriterionValue('c2', 0.2) cv3 = CriterionValue('c3', 0.2) cv4 = CriterionValue('c4', 0.2) cv5 = CriterionValue('c5', 0.2) cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1) cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1) cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
best = data.pt.get_best(data.c)

t1 = time.time()

if algo == 'meta_mrsort':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortWeights
    heur_profiles = MetaMRSortProfiles4
elif algo == 'meta_mrsortc':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortMobius
    heur_profiles = MetaMRSortProfilesChoquet

if algo == 'meta_mrsort' or algo == 'meta_mrsortc':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)

    pt_sorted = SortedPerformanceTable(data.pt)

    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(),
                          pt_sorted, data.aa,
                          heur_init_profiles, lp_weights, heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
    model_type = 'mrsort'
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(),
                          pt_sorted, aa_learning,
                          heur_init_profiles, lp_weights, heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

    t_total = time.time() - t1

    aa_learning2 = compute_assignments_majority(meta.models, pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = compute_auc_majority(meta.models, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = compute_assignments_majority(meta.models, pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = compute_auc_majority(meta.models, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of the whole set
    aa2 = compute_assignments_majority(meta.models, data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = compute_auc_majority(meta.models, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop,
                                           nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), aa_learning,
                  aa_test, pt_learning, pt_test, *meta.models)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
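# compute_assignments_majority and compute_auc_majority above score a
# whole population of learned models rather than a single one: each
# alternative goes to the category most models vote for. A standalone
# sketch of that voting idea, with plain callables standing in for
# pymcda models; the helper and its names are illustrative:

from collections import Counter

def majority_assignment(models, alternative):
    """Category chosen by the largest number of models."""
    votes = Counter(model(alternative) for model in models)
    return votes.most_common(1)[0][0]

models = [lambda a: 'cat1', lambda a: 'cat2', lambda a: 'cat1']
assert majority_assignment(models, {'c1': 0.7}) == 'cat1'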
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize an ELECTRE TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the mixed integer program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # Compute CA of the learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of the whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model,
                  aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
import random

from pymcda.generate import generate_criteria
from pymcda.generate import generate_random_criteria_weights
from pymcda.generate import generate_categories
from pymcda.generate import generate_categories_profiles
from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table
from pymcda.generate import generate_random_profiles
from pymcda.generate import generate_random_plinear_preference_function

random.seed(123)

criteria = generate_criteria(5)
crit_weights = generate_random_criteria_weights(criteria)
categories = generate_categories(5)
cat_profiles = generate_categories_profiles(categories)

a = generate_alternatives(100)
pt = generate_random_performance_table(a, criteria)
ap_best = pt.get_best(criteria)
ap_worst = pt.get_worst(criteria)

b = cat_profiles.get_ordered_profiles()
bpt = generate_random_profiles(b, criteria)

pf = generate_random_plinear_preference_function(criteria, ap_worst, ap_best)

print(crit_weights)
print(categories)
print(cat_profiles)
print(bpt)
print(pf)
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Optional components (lp_weights, heur_profiles, lp_veto_weights,
    # heur_veto_profiles) are left at their defaults here.
    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed=seed * 100)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)
        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of the whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop,
                                           nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model,
                  aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
from pymcda.types import CriterionValue, CriteriaValues
from pymcda.types import AlternativePerformances, PerformanceTable
from pymcda.types import AlternativeAssignment, AlternativesAssignments

# Generate a random ELECTRE TRI BM model
random.seed(127890123456789)
ncriteria = 5

model = MRSort()
model.criteria = generate_criteria(ncriteria)
model.cv = CriteriaValues([CriterionValue('c%d' % (i + 1), 0.2)
                           for i in range(ncriteria)])
b1 = AlternativePerformances('b1', {'c%d' % (i + 1): 0.5
                                    for i in range(ncriteria)})
model.bpt = PerformanceTable([b1])
cat = generate_categories(2)
model.categories_profiles = generate_categories_profiles(cat)
model.lbda = 0.6
vb1 = AlternativePerformances('b1', {'c%d' % (i + 1): random.uniform(0, 0.4)
                                     for i in range(ncriteria)})
model.veto = PerformanceTable([vb1])
model.veto_weights = model.cv.copy()
model.veto_lbda = 0.4

# Generate a set of alternatives
a = generate_alternatives(1000)
pt = generate_random_performance_table(a, model.criteria)
aa = model.pessimist(pt)

worst = pt.get_worst(model.criteria)
best = b1
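# A quick sanity check on the generated assignments: with lbda = 0.6 and
# equal weights of 0.2, an alternative needs at least three criteria at
# or above 0.5 to reach the upper category, unless the random veto
# profile cancels it. Counting the categories takes two lines (standard
# library only; assumes the script above has just been run, so `aa`
# holds the pessimistic assignments):

from collections import Counter
print(Counter(alt.category_id for alt in aa))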
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)
    global aaa
    global allm
    global fct_ca
    global LOO

    # Separate learning data and test data
    if LOO:
        pt_learning, pt_test = data.pt.split_LOO(seed)
    else:
        pt_learning, pt_test = data.pt.split(2,
                                             [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed=seed * 100)

    for i in range(0, nloop):
        model, ca_learning, all_models = meta.optimize(nmeta, fct_ca)
        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    ca_learning_good = compute_ca_good(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        ca_test_good = compute_ca_good(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        ca_test_good = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                 for b in model.categories])

    # Compute CA of the whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    ca_good = compute_ca_good(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop,
                                           nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model,
                  aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['ca_learning_good'] = ca_learning_good
    t['ca_test_good'] = ca_test_good
    t['ca_all_good'] = ca_good
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    # Record the detailed assignments and parameters of the best model
    aaa[seed] = dict()
    aaa[seed]['id'] = seed
    aaa[seed]['learning_asgmt_id'] = [i.id for i in aa_learning]
    aaa[seed]['learning_asgmt'] = [i.category_id for i in aa_learning]
    aaa[seed]['learning_asgmt2'] = [i.category_id for i in aa_learning2]
    aaa[seed]['test_asgmt_id'] = [i.id for i in aa_test]
    aaa[seed]['test_asgmt'] = [i.category_id for i in aa_test]
    aaa[seed]['test_asgmt2'] = [i.category_id for i in aa_test2]
    aaa[seed]['criteria'] = [i for i, j in model.criteria.items()]
    aaa[seed]['criteria_weights'] = [str(i.value) for i in model.cv.values()]
    aaa[seed]['profiles_values'] = [str(model.bpt['b1'].performances[i])
                                    for i, j in model.criteria.items()]
    aaa[seed]['lambda'] = model.lbda

    # Evaluate the remaining models of the population on the learning,
    # test and whole sets; the first model was already treated above.
    allm[seed] = dict()
    allm[seed]['id'] = seed
    current_model = 0
    allm[seed]['mresults'] = dict()
    for all_model in list(all_models)[1:]:
        current_model += 1
        allm[seed]['mresults'][current_model] = ["", ""]

        aa_learning2_allm = all_model.model.pessimist(pt_learning)
        ca_learning_allm = compute_ca(aa_learning, aa_learning2_allm)
        ca_learning_good_allm = compute_ca_good(aa_learning,
                                                aa_learning2_allm)
        auc_learning_allm = all_model.model.auc(aa_learning, pt_learning)

        if len(aa_test) > 0:
            aa_test2_allm = all_model.model.pessimist(pt_test)
            ca_test_allm = compute_ca(aa_test, aa_test2_allm)
            ca_test_good_allm = compute_ca_good(aa_test, aa_test2_allm)
            auc_test_allm = all_model.model.auc(aa_test, pt_test)
        else:
            ca_test_allm = 0
            ca_test_good_allm = 0
            auc_test_allm = 0
            ncat_allm = len(data.cats)

        aa2_allm = all_model.model.pessimist(data.pt)
        ca_allm = compute_ca(data.aa, aa2_allm)
        ca_good_allm = compute_ca_good(data.aa, aa2_allm)
        auc_allm = all_model.model.auc(data.aa, data.pt)

        allm[seed]['mresults'][current_model][0] = (
            'na_learning,na_test,ca_learning,ca_test,ca_all,'
            'ca_learning_good,ca_test_good,ca_all_good,'
            'auc_learning,auc_test,auc_all')
        allm[seed]['mresults'][current_model][1] = ','.join(map(str, [
            len(aa_learning), len(aa_test), ca_learning_allm,
            ca_test_allm, ca_allm, ca_learning_good_allm,
            ca_test_good_allm, ca_good_allm, auc_learning_allm,
            auc_test_allm, auc_allm]))

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t