def run_lp_avf(pipe, criteria, categories, worst, best, css, pt, aa):
    # Learn an AVF-Sort model from the assignment examples with the
    # linear program, compute its classification accuracy on the same
    # examples, then send both back to the parent process.
    lp = LpAVFSort(criteria, css, categories, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa, pt)

    model = AVFSort(criteria, cvs, cfs, catv)
    aa2 = model.get_assignments(pt)
    ca = compute_ca(aa, aa2)

    pipe.send([model, ca])
    pipe.close()
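# Illustrative sketch (not part of the original sources): how run_lp_avf
# can be driven from a parent process with the standard-library
# multiprocessing module. The helper name is hypothetical; the inputs
# are assumed to be built as in the test helpers below.
def example_run_lp_avf_in_subprocess(criteria, categories, worst, best,
                                     css, pt, aa):
    from multiprocessing import Pipe, Process

    # One-way pipe: the child sends, the parent receives
    parent_conn, child_conn = Pipe(duplex = False)
    p = Process(target = run_lp_avf,
                args = (child_conn, criteria, categories, worst, best,
                        css, pt, aa))
    p.start()
    model, ca = parent_conn.recv()
    p.join()
    return model, ca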
def one_test(self, seed, na, nc, ncat, ns):
    # Generate a random AVF-Sort model and assignment examples, then
    # check that the linear program recovers a model reproducing the
    # original assignments exactly.
    u = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, u.criteria)
    aa = u.get_assignments(pt)

    css = CriteriaValues([])
    for cf in u.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)

    cat = u.cat_values.to_categories()
    lp = LpAVFSort(u.criteria, css, cat, pt.get_worst(u.criteria),
                   pt.get_best(u.criteria))
    obj, cvs, cfs, catv = lp.solve(aa, pt)

    u2 = AVFSort(u.criteria, cvs, cfs, catv)
    aa2 = u2.get_assignments(pt)

    self.assertEqual(aa, aa2)
def generate_random_avfsort_model(ncrit, ncat, nseg_min, nseg_max,
                                  seed = None, k = 3,
                                  random_direction = False):
    if seed is not None:
        random.seed(seed)

    c = generate_criteria(ncrit, random_direction = random_direction)
    cv = generate_random_criteria_weights(c, None, k)
    cat = generate_categories(ncat)
    cfs = generate_random_criteria_functions(c, nseg_min = nseg_min,
                                             nseg_max = nseg_max)
    catv = generate_random_categories_values(cat)

    return AVFSort(c, cv, cfs, catv)
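# Illustrative sketch (hypothetical helper): generate a random AVF-Sort
# model with 5 criteria, 3 categories and 2 to 4 segments per criterion,
# then compute the assignments it induces on 100 random alternatives.
# It relies only on the pymcda generators already used in this module.
def example_random_avfsort_assignments(seed = 123):
    u = generate_random_avfsort_model(5, 3, 2, 4, seed)
    a = generate_alternatives(100)
    pt = generate_random_performance_table(a, u.criteria)
    return u.get_assignments(pt)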
def generate_model(self):
    c1 = Criterion("c1")
    c2 = Criterion("c2")
    c3 = Criterion("c3")
    c = Criteria([c1, c2, c3])

    cv1 = CriterionValue("c1", 0.5)
    cv2 = CriterionValue("c2", 0.25)
    cv3 = CriterionValue("c3", 0.25)
    cvs = CriteriaValues([cv1, cv2, cv3])

    f1 = PiecewiseLinear([Segment('s1', Point(0, 0), Point(2.5, 0.2)),
                          Segment('s2', Point(2.5, 0.2), Point(5, 1),
                                  True, True)])
    f2 = PiecewiseLinear([Segment('s1', Point(0, 0), Point(2.5, 0.8)),
                          Segment('s2', Point(2.5, 0.8), Point(5, 1),
                                  True, True)])
    f3 = PiecewiseLinear([Segment('s1', Point(0, 0), Point(2.5, 0.5)),
                          Segment('s2', Point(2.5, 0.5), Point(5, 1),
                                  True, True)])
    cf1 = CriterionFunction("c1", f1)
    cf2 = CriterionFunction("c2", f2)
    cf3 = CriterionFunction("c3", f3)
    cfs = CriteriaFunctions([cf1, cf2, cf3])

    cat1 = Category("cat1")
    cat2 = Category("cat2")
    cat3 = Category("cat3")
    cats = Categories([cat1, cat2, cat3])

    catv1 = CategoryValue("cat1", Interval(0, 0.25))
    catv2 = CategoryValue("cat2", Interval(0.25, 0.65))
    catv3 = CategoryValue("cat3", Interval(0.65, 1))
    catv = CategoriesValues([catv1, catv2, catv3])

    return AVFSort(c, cvs, cfs, catv)
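# Illustrative sketch (hypothetical test helper): assign a single
# alternative with the hand-built model above. Performances lie in
# [0, 5], the domain of the piecewise-linear functions, and the result
# is one of 'cat1', 'cat2' or 'cat3'.
def example_assign_one_alternative(self):
    model = self.generate_model()
    ap = AlternativePerformances('a1', {'c1': 1.0, 'c2': 4.0, 'c3': 2.5})
    pt = PerformanceTable([ap])
    aa = model.get_assignments(pt)
    return aa('a1')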
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()

    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    gi_worst = AlternativePerformances('worst',
                                       {crit.id: 0
                                        for crit in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {crit.id: 1
                                       for crit in model.criteria})

    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)

    # Run the linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignments and classification accuracy
    aa2 = model2.get_assignments(pt)

    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1

    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err, cat.keys(),
                                                     pcerrors / 100)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
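# Example invocation (sketch): one run with seed 123, 1000 learning
# alternatives, 7 criteria, 3 categories, 3 segments per criterion,
# 10000 generalization alternatives and 10 % of erroneous assignments.
#
#     t = test_lp_avfsort(123, 1000, 7, 3, 3, 10000, 10)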
from pymcda.generate import generate_criteria
from pymcda.generate import generate_random_criteria_values
from pymcda.generate import generate_categories
from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table
from pymcda.generate import generate_random_criteria_functions
from pymcda.generate import generate_random_categories_values
from pymcda.uta import AVFSort
from pymcda.utils import add_errors_in_assignments
from pymcda.utils import print_pt_and_assignments

# Generate an AVF-Sort model
c = generate_criteria(7, random_direction = True)
cv = generate_random_criteria_values(c, seed = 1)
cv.normalize_sum_to_unity()
cat = generate_categories(3)
cfs = generate_random_criteria_functions(c, nseg_min = 3, nseg_max = 3)
catv = generate_random_categories_values(cat)
u = AVFSort(c, cv, cfs, catv)

# Generate random alternatives and compute assignments
a = generate_alternatives(1000)
pt = generate_random_performance_table(a, c)
aa = u.get_assignments(pt)

aa_err = aa.copy()
aa_erroned = add_errors_in_assignments(aa_err, cat.keys(), 0.0)

print('==============')
print('Original model')
print('==============')
print("Number of alternatives: %d" % len(a))
print('Criteria weights:')
cv.display()
print('Criteria functions:')
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random MR-Sort model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    gi_worst = AlternativePerformances('worst',
                                       {c.id: 0 for c in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {c.id: 1 for c in model.criteria})

    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run the linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(),
                   gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignments and classification accuracy
    aa2 = model2.get_assignments(pt)

    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
if not os.path.isfile(f):
    print("Invalid file %s" % f)
    sys.exit(1)

if is_bz2_file(f) is True:
    f = bz2.BZ2File(f)

tree = ElementTree.parse(f)
root = tree.getroot()
xmcda_models += root.findall(".//AVFSort")

models = []
for xmcda_model in xmcda_models:
    m = AVFSort().from_xmcda(xmcda_model)
    string = "Model '%s'" % m.id
    print(string)
    print('=' * len(string))
    models.append(m)
    m.cfs.display()
    m.cat_values.display()
    display_utadis_model(m.cfs)

sys.exit(0)

sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                               QtGui.QSizePolicy.Expanding)
def run_test(seed, data, pclearning, nseg):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)

    # Run the linear program
    t1 = time.time()

    css = CriteriaValues([])
    for c in data.c:
        cs = CriterionValue(c.id, nseg)
        css.append(cs)

    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa_learning, pt_learning)

    t_total = time.time() - t1

    model = AVFSort(data.c, cvs, cfs, catv)
    ordered_categories = model.categories

    # Compute CA of the learning set
    aa_learning2 = model.get_assignments(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             ordered_categories)

    # Compute CA of the test set
    if len(aa_test) > 0:
        aa_test2 = model.get_assignments(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             ordered_categories)
    else:
        ca_test = 0
        auc_test = 0
        diff_test = OrderedDict([((a, b), 0) for a in ordered_categories
                                 for b in ordered_categories])

    # Compute CA of the whole set
    aa2 = model.get_assignments(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, ordered_categories)

    t = test_result("%s-%d-%d-%d" % (data.name, seed, nseg, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model,
                  aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['ns'] = nseg
    t['pclearning'] = pclearning

    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
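# Note: run_test expects `data` to expose the attributes used above --
# data.name, data.a (alternatives), data.c (criteria), data.cats
# (categories), data.pt (performance table) and data.aa (assignment
# examples) -- e.g. a dataset loaded from XMCDA elsewhere in this
# repository; `directory` is assumed to be a module-level output path.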
try:
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')
except:
    pt_test = None

aa_learning_m1, aa_learning_m2 = None, None
aa_test_m1, aa_test_m2 = None, None

if root.find("ElectreTri[@id='initial']") is not None:
    m1 = MRSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
    m1 = AVFSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.get_assignments(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.get_assignments(pt_test)
else:
    if root.find("alternativesAffectations[@id='learning_set']") is not None:
        aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
                                                              'learning_set')
    if root.find("alternativesAffectations[@id='test_set']") is not None:
        aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set')

if root.find("ElectreTri[@id='learned']") is not None:
    m2 = MRSort().from_xmcda(root, 'learned')
    if pt_learning is not None:
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    mip = MipMRSort(model, data.pt, data.aa)
    mip.solve()
elif algo == 'lp_utadis':
    model_type = 'utadis'
    css = CriteriaValues(CriterionValue(c.id, nseg) for c in data.c)
    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(data.aa, data.pt)
    model = AVFSort(data.c, cvs, cfs, catv)
elif algo == 'lp_utadis_compat':
    model_type = 'utadis'
    css = CriteriaValues(CriterionValue(c.id, nseg) for c in data.c)
    lp = LpAVFSortCompat(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(data.aa, data.pt)
    model = AVFSort(data.c, cvs, cfs, catv)
else:
    print("Invalid algorithm!")
    sys.exit(1)

t_total = time.time() - t1

model.id = 'learned'
data.pt.id = 'learning_set'