def generate_random_criteria_weights_msjp(crits, seed=None, k=3, fixed_w1=None):
    """Generate random weights for *crits* that sum to 1.

    Weights are derived from len(crits) - 1 random cut points on [0, 1]
    (consecutive differences become the weights).  When fixed_w1 is
    given, the cut points are drawn so that one weight equals fixed_w1,
    rounded to k decimals.  Returns a CriteriaValues.
    """
    if seed is not None:
        random.seed(seed)
    n = len(crits)
    if fixed_w1 is None:
        cuts = [random.random() for _ in range(n - 1)]
    else:
        # n - 2 cuts shifted above fixed_w1, plus fixed_w1 itself as a cut.
        cuts = [fixed_w1 + round(random.uniform(0, 1 - fixed_w1), k)
                for _ in range(n - 2)]
        cuts.append(round(fixed_w1, k))
    cuts.sort()
    cvals = CriteriaValues()
    for i, crit in enumerate(crits):
        if i == 0:
            value = round(cuts[0], k)
        elif i == n - 1:
            value = round(1 - cuts[i - 1], k)
        else:
            value = round(cuts[i] - cuts[i - 1], k)
        cval = CriterionValue()
        cval.id = crit.id
        cval.value = value
        cvals.append(cval)
    return cvals
def generate_random_capacities(criteria, seed=None, k=3):
    """Generate a random normalized capacity (fuzzy measure) on *criteria*.

    Draws 2**n - 2 random values plus 1.0, sorts them, and assigns them
    to the non-empty criteria subsets in random order within each subset
    size, smallest subsets first.  Returns a CriteriaValues keyed by
    criterion id (singletons) or CriteriaSet (larger subsets).
    """
    if seed is not None:
        random.seed(seed)
    n = len(criteria)
    # One value per non-empty subset; the full set always gets 1.0.
    values = [round(random.random(), k) for _ in range(2 ** n - 2)]
    values.append(1.0)
    values.sort()
    cvs = CriteriaValues()
    idx = 0
    for size in range(1, n + 1):
        combis = list(combinations(criteria.keys(), size))
        random.shuffle(combis)
        for combi in combis:
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            cvs.append(CriterionValue(cid, values[idx]))
            idx += 1
    return cvs
def solve_glpk(self):
    """Solve the learning program with GLPK and store the results.

    On success, stores the learned criteria weights (self.model.cv),
    the majority threshold (self.model.lbda) and the profile
    performances (self.model.bpt) on self.model, and returns the
    objective value.

    Raises RuntimeError when GLPK does not report optimality ('opt').
    """
    # Exact (rational) simplex plus advanced MIP options.
    self.lp.solvopt(method='exact', integer='advanced')
    self.lp.solve()
    status = self.lp.status()
    if status != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())
    #print(self.lp.reportKKT())
    obj = self.lp.vobj()
    # One weight variable per criterion.
    cvs = CriteriaValues()
    for c in self.criteria:
        cv = CriterionValue()
        cv.id = c.id
        cv.value = float(self.w[c.id].primal)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lbda.primal
    # Profile performances, rounded to 5 decimals to smooth solver noise.
    pt = PerformanceTable()
    for p in self.__profiles:
        ap = AlternativePerformances(p)
        for c in self.criteria:
            perf = self.g[p][c.id].primal
            ap.performances[c.id] = round(perf, 5)
        pt.append(ap)
    self.model.bpt = pt
    # Map profile values back to the criteria's preference directions.
    self.model.bpt.update_direction(self.model.criteria)
    return obj
def solve_cplex(self):
    """Solve the learning program with CPLEX and store the results.

    On success, stores the learned criteria weights (self.model.cv),
    the majority threshold (self.model.lbda) and the profile
    performances (self.model.bpt) on self.model, and returns the
    objective value.

    Raises RuntimeError when CPLEX does not report MIP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    # One 'w_<cid>' variable per criterion.
    cvs = CriteriaValues()
    for c in self.criteria:
        cv = CriterionValue()
        cv.id = c.id
        cv.value = self.lp.solution.get_values('w_' + c.id)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")
    # Profile performances from the 'g_<profile>_<cid>' variables,
    # rounded to 5 decimals to smooth solver noise.
    pt = PerformanceTable()
    for p in self.__profiles:
        ap = AlternativePerformances(p)
        for c in self.criteria:
            perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
            ap.performances[c.id] = round(perf, 5)
        pt.append(ap)
    self.model.bpt = pt
    # Map profile values back to the criteria's preference directions.
    self.model.bpt.update_direction(self.model.criteria)
    return obj
def solve_cplex(self):
    """Solve the Möbius-coefficient MIP with CPLEX.

    Stores the learned Möbius coefficients on self.model.cv (singleton
    ids for single criteria, CriteriaSet ids for interactions) and the
    threshold on self.model.lbda; returns the objective value.

    Raises RuntimeError when CPLEX does not report MIP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    # Map each Möbius index set to its LP variable name 'm_<i>'.
    names = ['m_%d' % i for i in range(len(self.mindices))]
    cvs = CriteriaValues()
    for mindex, vname in dict(zip(self.mindices, names)).items():
        cv = CriterionValue()
        cv.id = CriteriaSet(mindex) if len(mindex) > 1 else next(iter(mindex))
        cv.value = self.lp.solution.get_values(vname)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")
    return obj
def solve_cplex(self):
    """Solve the Möbius-coefficient LP with CPLEX.

    Stores the learned Möbius coefficients on self.model.cv (singleton
    ids for single criteria, CriteriaSet ids for interactions) and the
    threshold on self.model.lbda; returns the objective value.

    Raises RuntimeError when CPLEX does not report LP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    # Map each Möbius index set to its LP variable name 'm_<i>'.
    names = ['m_%d' % i for i in range(len(self.mindices))]
    cvs = CriteriaValues()
    for mindex, vname in dict(zip(self.mindices, names)).items():
        cv = CriterionValue()
        cv.id = CriteriaSet(mindex) if len(mindex) > 1 else next(iter(mindex))
        cv.value = self.lp.solution.get_values(vname)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")
    return obj
def solve_cplex(self, aa, pt):
    """Solve the additive-value sorting program with CPLEX.

    Rebuilds, from the solved LP, the per-criterion piecewise-linear
    value functions (cfs), unit criteria weights (cvs) and the category
    utility intervals (catv).

    Returns (objective, cvs, cfs, catv).
    Raises RuntimeError when CPLEX does not report optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    cfs = CriteriaFunctions()
    cvs = CriteriaValues()
    for cs in self.cs:
        # Weight 1: the marginal value functions carry the trade-offs.
        cv = CriterionValue(cs.id, 1)
        cvs.append(cv)
        nseg = cs.value
        x_points = range(nseg)
        p1 = Point(self.points[cs.id][0], 0)
        ui = 0
        f = PiecewiseLinear([])
        for i in x_points:
            # Each 'w_<cid>_<i+1>' variable is the marginal value gained
            # over segment i; cumulate to get the ordinate of each point.
            uivar = 'w_' + cs.id + "_%d" % (i + 1)
            ui += self.lp.solution.get_values(uivar)
            x = self.points[cs.id][i + 1]
            p2 = Point(x, ui)
            s = Segment("s%d" % (i + 1), p1, p2)
            f.append(s)
            p1 = p2
        # Close the last segment at both ends so the upper bound of the
        # scale belongs to the function's domain.
        s.p1_in = True
        s.p2_in = True
        cf = CriterionFunction(cs.id, f)
        cfs.append(cf)
    # self.cat maps category id -> rank; invert to rank -> id.
    cat = {v: k for k, v in self.cat.items()}
    catv = CategoriesValues()
    ui_a = 0
    for i in range(1, len(cat)):
        # 'u_<i>' is the utility threshold between category i and i+1.
        ui_b = self.lp.solution.get_values("u_%d" % i)
        catv.append(CategoryValue(cat[i], Interval(ui_a, ui_b)))
        ui_a = ui_b
    # Topmost category runs up to the maximal utility 1.
    catv.append(CategoryValue(cat[i + 1], Interval(ui_a, 1)))
    return obj, cvs, cfs, catv
def capacities_to_mobius(criteria, capacities):
    """Compute the Möbius transform of a capacity.

    For each non-empty subset S of *criteria*, the Möbius mass is
    m(S) = v(S) - sum of m(T) over all proper non-empty subsets T of S,
    computed bottom-up by subset size.  Returns a CriteriaValues keyed
    by criterion id (singletons) or CriteriaSet (larger subsets).
    """
    cvs = CriteriaValues()
    n = len(criteria)
    for size in range(1, n + 1):
        for combi in combinations(criteria.keys(), size):
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            m = capacities[cid].value
            # Subtract the Möbius masses of all proper non-empty subsets,
            # already computed at smaller sizes.
            for sub_size in range(1, size):
                for sub in combinations(combi, sub_size):
                    sub_id = sub[0] if sub_size == 1 else CriteriaSet(sub)
                    m -= cvs[sub_id].value
            cvs.append(CriterionValue(cid, m))
    return cvs
def mobius_to_capacities(criteria, mobius):
    """Rebuild a capacity from its Möbius representation.

    For each non-empty subset S of *criteria*, the capacity is
    v(S) = sum of m(T) over all non-empty subsets T of S (S included);
    Möbius masses absent from *mobius* count as zero.  Returns a
    CriteriaValues keyed by criterion id or CriteriaSet.
    """
    cvs = CriteriaValues()
    n = len(criteria)
    for size in range(1, n + 1):
        for combi in combinations(criteria.keys(), size):
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            total = 0
            for sub_size in range(1, size + 1):
                for sub in combinations(combi, sub_size):
                    sub_id = sub[0] if sub_size == 1 else CriteriaSet(sub)
                    if sub_id in mobius:
                        total += mobius[sub_id].value
            cvs.append(CriterionValue(cid, total))
    return cvs
def generate_random_criteria_values(crits, seed=None, k=3, type='float',
                                    vmin=0, vmax=1):
    """Generate one random value per criterion in [vmin, vmax].

    With type == 'integer' the values are random integers; otherwise
    they are uniform floats rounded to k decimals.  Returns a
    CriteriaValues.  (The parameter name 'type' shadows the builtin but
    is kept for interface compatibility.)
    """
    if seed is not None:
        random.seed(seed)
    cvals = CriteriaValues()
    for crit in crits:
        if type == 'integer':
            value = random.randint(vmin, vmax)
        else:
            value = round(random.uniform(vmin, vmax), k)
        cval = CriterionValue()
        cval.id = crit.id
        cval.value = value
        cvals.append(cval)
    return cvals
def solve_glpk(self, aa, pt):
    """Solve the additive-value sorting program with GLPK.

    Rebuilds, from the solved LP, the per-criterion piecewise-linear
    value functions (cfs), unit criteria weights (cvs) and the category
    utility intervals (catv).

    Returns (objective, cvs, cfs, catv).
    Raises RuntimeError when GLPK does not report optimality ('opt').
    """
    self.lp.solve()
    status = self.lp.status()
    if status != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())
    obj = self.lp.vobj()
    cfs = CriteriaFunctions()
    cvs = CriteriaValues()
    for cid, points in self.points.items():
        # Weight 1: the marginal value functions carry the trade-offs.
        cv = CriterionValue(cid, 1)
        cvs.append(cv)
        p1 = Point(self.points[cid][0], 0)
        ui = 0
        f = PiecewiseLinear([])
        for i in range(len(points) - 1):
            # Cumulate the marginal value of each segment to get the
            # ordinate of each breakpoint.  (A dead 'uivar' string,
            # left over from the CPLEX variant, was removed here.)
            ui += self.w[cid][i].primal
            p2 = Point(self.points[cid][i + 1], ui)
            s = Segment(p1, p2)
            f.append(s)
            p1 = p2
        # Close the last segment at its upper end.
        s.p2_in = True
        cf = CriterionFunction(cid, f)
        cfs.append(cf)
    # self.cat maps category id -> rank; invert to rank -> id.
    cat = {v: k for k, v in self.cat.items()}
    catv = CategoriesValues()
    ui_a = 0
    for i in range(0, len(cat) - 1):
        ui_b = self.u[i].primal
        catv.append(CategoryValue(cat[i + 1], Interval(ui_a, ui_b)))
        ui_a = ui_b
    # Topmost category runs up to the maximal utility 1.
    # NOTE(review): with a single category the loop above never runs and
    # 'i' is unbound here — assumed len(cat) >= 2; confirm with callers.
    catv.append(CategoryValue(cat[i + 2], Interval(ui_a, 1)))
    return obj, cvs, cfs, catv
def solve_scip(self):
    """Solve the learning program with SCIP.

    Stores the learned criteria weights on self.model.cv and the
    threshold on self.model.lbda; returns the objective value.
    Raises RuntimeError when SCIP finds no solution.
    """
    solution = self.lp.minimize(objective=self.obj)
    if solution is None:
        raise RuntimeError("No solution found")
    obj = solution.objective
    weights = CriteriaValues()
    for crit in self.model.criteria:
        cv = CriterionValue()
        cv.id = crit.id
        cv.value = solution[self.w[crit.id]]
        weights.append(cv)
    self.model.cv = weights
    self.model.lbda = solution[self.lbda]
    return obj
def from_xmcda(self, xmcda, id=None):
    """Initialize this UTA model from an XMCDA tree.

    Looks up the 'UTA' tag (optionally by *id*), then loads the model
    id, criteria, criteria values and criteria functions.  Returns self.

    Note: this method was historically misspelled ``from_cmda``; the old
    name is kept below as a backward-compatible alias.
    """
    xmcda = find_xmcda_tag(xmcda, 'UTA', id)
    self.id = xmcda.get('id')
    self.criteria = Criteria().from_xmcda(xmcda, 'criteria')
    self.cvs = CriteriaValues().from_xmcda(xmcda, 'cvs')
    self.cfs = CriteriaFunctions().from_xmcda(xmcda, 'cfs')
    return self

# Backward-compatible alias for the historical misspelling.
from_cmda = from_xmcda
def on_button_run(self):
    """Start (or stop) the learning worker thread from the Run button.

    Acts as a toggle: while a run is in progress, a second click stops
    the worker thread instead of starting a new one.
    """
    if hasattr(self, 'started') and self.started is True:
        self.thread.stop()
        return
    # Lazily generate a model if the user never clicked Generate.
    if not hasattr(self, 'model'):
        self.on_button_generate()
    if self.combobox_type.currentIndex() == COMBO_AVFSORT:
        # AVF-Sort: one CriterionValue per criterion holding the number
        # of piecewise-linear segments to learn.
        self.init_results_avf()
        ns = self.spinbox_nsegments.value()
        css = CriteriaValues([])
        for c in self.model.criteria:
            cs = CriterionValue(c.id, ns)
            css.append(cs)
        self.thread = qt_thread_avf(self.model.criteria, self.categories,
                                    self.worst, self.best, css, self.pt,
                                    self.aa, None)
    else:
        # MR-Sort metaheuristic: population size, iterations, meta steps.
        self.init_results_mr()
        nmodels = self.spinbox_nmodels.value()
        niter = self.spinbox_niter.value()
        nmeta = self.spinbox_nmeta.value()
        self.thread = qt_thread_mr(self.model.criteria, self.categories,
                                   self.worst, self.best, nmodels, niter,
                                   nmeta, self.pt, self.aa, None)
    # Old-style (PyQt4) signal connections from the worker thread.
    self.connect(self.thread, QtCore.SIGNAL("update(int)"), self.update)
    self.connect(self.thread, QtCore.SIGNAL("finished()"), self.finished)
    self.label_time2.setText("")
    self.start_time = time.time()
    # 100 ms timer drives the elapsed-time display.
    self.timer.start(100)
    self.thread.start()
    self.button_run.setText("Stop")
    self.groupbox_result.setVisible(True)
    self.started = True
def from_xmcda(self, xmcda, id=None):
    """Initialize this AVFSort model from an XMCDA tree.

    Looks up the 'AVFSort' tag (optionally by *id*), then loads the
    model id, criteria, criteria values, criteria functions and
    category values.  Returns self.
    """
    xmcda = find_xmcda_tag(xmcda, 'AVFSort', id)
    self.id = xmcda.get('id')
    self.criteria = Criteria().from_xmcda(xmcda, 'criteria')
    self.cvs = CriteriaValues().from_xmcda(xmcda, 'cvs')
    self.cfs = CriteriaFunctions().from_xmcda(xmcda, 'cfs')
    self.cat_values = CategoriesValues().from_xmcda(xmcda, 'cat_values')
    return self
def solve(self):
    """Solve the MR-Sort-with-veto learning program and store the results.

    On success, stores on self.model: the concordance weights (cv) and
    threshold (lbda), the category profiles (bpt), the veto weights
    (veto_weights), the veto threshold (veto_lbda) and the veto
    profiles (veto).  Returns the objective value.

    Raises RuntimeError when the solver does not report MIP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    # Concordance weights: 'w_<cid>' variables.
    cvs = CriteriaValues()
    for c in self.criteria:
        cv = CriterionValue()
        cv.id = c.id
        cv.value = self.lp.solution.get_values('w_' + c.id)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")
    # Category profiles: 'g_<profile>_<cid>' variables, rounded to
    # 5 decimals to smooth solver noise.
    pt = PerformanceTable()
    for p in self.__profiles:
        ap = AlternativePerformances(p)
        for c in self.criteria:
            perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
            ap.performances[c.id] = round(perf, 5)
        pt.append(ap)
    self.model.bpt = pt
    self.model.bpt.update_direction(self.model.criteria)
    # Veto weights: 'z_<cid>' variables; veto threshold is the
    # upper-case "LAMBDA" variable (distinct from "lambda" above).
    wv = CriteriaValues()
    for c in self.criteria:
        w = CriterionValue()
        w.id = c.id
        w.value = self.lp.solution.get_values('z_' + c.id)
        wv.append(w)
    self.model.veto_weights = wv
    self.model.veto_lbda = self.lp.solution.get_values("LAMBDA")
    # Veto profiles: 'v_<profile>_<cid>' variables.
    v = PerformanceTable()
    for p in self.__profiles:
        vp = AlternativePerformances(p, {})
        for c in self.criteria:
            perf = self.lp.solution.get_values('v_%s_%s' % (p, c.id))
            vp.performances[c.id] = round(perf, 5)
        v.append(vp)
    self.model.veto = v
    return obj
def solve_cplex(self):
    """Solve the weight-learning LP with CPLEX and store the results.

    Stores the learned criteria weights on self.model.cv and the
    threshold on self.model.lbda; returns the objective value.
    Raises RuntimeError when CPLEX does not report LP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    cvs = CriteriaValues()
    for c in self.model.criteria:
        cv = CriterionValue()
        cv.id = c.id
        # NOTE(review): variables are named 'w<cid>' (no underscore)
        # here, unlike the 'w_<cid>' convention used elsewhere — assumed
        # to match the names used when this LP was built; confirm.
        cv.value = self.lp.solution.get_values('w' + c.id)
        cvs.append(cv)
    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")
    return obj
def solve_cplex(self):
    """Solve the veto-weight-learning LP with CPLEX and store the results.

    Stores the learned veto weights on self.model.veto_weights and the
    veto threshold on self.model.veto_lbda; returns the objective value.
    Raises RuntimeError when CPLEX does not report LP optimality.
    """
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    cvs = CriteriaValues()
    for c in self.model.criteria:
        cv = CriterionValue()
        cv.id = c.id
        # NOTE(review): variables are named 'w<cid>' (no underscore)
        # here — assumed to match the names used when this LP was built.
        cv.value = self.lp.solution.get_values('w'+c.id)
        cvs.append(cv)
    self.model.veto_weights = cvs
    self.model.veto_lbda = self.lp.solution.get_values("lambda")
    return obj
def solve_glpk(self):
    """Solve the weight-learning program with GLPK.

    Stores the learned criteria weights on self.model.cv (weight
    variables are indexed by criterion position) and the threshold on
    self.model.lbda; returns the objective value.
    Raises RuntimeError when GLPK does not report optimality ('opt').
    """
    self.lp.solve()
    status = self.lp.status()
    if status != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())
    obj = self.lp.vobj()
    weights = CriteriaValues()
    for idx, crit in enumerate(self.model.criteria):
        cv = CriterionValue()
        cv.id = crit.id
        cv.value = float(self.w[idx].primal)
        weights.append(cv)
    self.model.cv = weights
    self.model.lbda = float(self.lbda.primal)
    return obj
def one_test(self, seed, na, nc, ncat, ns):
    """Learn back a random AVF-Sort model from its own assignments and
    check that the learned model reproduces them exactly."""
    model = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    alts = generate_alternatives(na)
    pt = generate_random_performance_table(alts, model.criteria)
    aa = model.get_assignments(pt)
    # One CriterionValue per criterion holding its segment count.
    css = CriteriaValues([])
    for cf in model.cfs:
        css.append(CriterionValue(cf.id, len(cf.function)))
    cat = model.cat_values.to_categories()
    lp = LpAVFSort(model.criteria, css, cat,
                   pt.get_worst(model.criteria),
                   pt.get_best(model.criteria))
    obj, cvs, cfs, catv = lp.solve(aa, pt)
    learned = AVFSort(model.criteria, cvs, cfs, catv)
    self.assertEqual(aa, learned.get_assignments(pt))
def generate_random_criteria_weights(crits, seed=None, k=3):
    """Generate random weights for *crits* that sum to 1.

    Draws len(crits) - 1 uniform cut points on [0, 1]; the consecutive
    differences (rounded to k decimals) become the weights.  Returns a
    CriteriaValues.
    """
    if seed is not None:
        random.seed(seed)
    n = len(crits)
    cuts = sorted(random.random() for _ in range(n - 1))
    cvals = CriteriaValues()
    for i, crit in enumerate(crits):
        if i == 0:
            value = round(cuts[0], k)
        elif i == n - 1:
            value = round(1 - cuts[-1], k)
        else:
            value = round(cuts[i] - cuts[i - 1], k)
        cval = CriterionValue()
        cval.id = crit.id
        cval.value = value
        cvals.append(cval)
    return cvals
def test002(self):
    """Check pessimistic MR-Sort assignments against recomputed
    coalition weights for a 4-criteria, 3-category model."""
    random.seed(2)
    c = generate_criteria(4)
    cv1 = CriterionValue('c1', 0.25)
    cv2 = CriterionValue('c2', 0.25)
    cv3 = CriterionValue('c3', 0.25)
    cv4 = CriterionValue('c4', 0.25)
    cv = CriteriaValues([cv1, cv2, cv3, cv4])
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)
    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75,
                                         'c3': 0.75, 'c4': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25,
                                         'c3': 0.25, 'c4': 0.25})
    bpt = PerformanceTable([bp1, bp2])
    lbda = 0.5
    # NOTE(review): the model is built with the literal 0.5 rather than
    # the 'lbda' binding used in the assertions below — same value.
    etri = MRSort(c, cv, bpt, 0.5, cps)
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, c)
    aas = etri.pessimist(pt)
    for aa in aas:
        # Recompute the coalition weight in favour of each profile.
        w1 = w2 = 0
        perfs = pt[aa.id].performances
        # (This inner 'c' shadows the criteria variable above; it is a
        # criterion id string inside the loop.)
        for c, val in perfs.items():
            if val >= bp1.performances[c]:
                w1 += cv[c].value
            if val >= bp2.performances[c]:
                w2 += cv[c].value
        # Pessimistic rule: category depends on which profiles the
        # alternative outranks (weight >= lambda).
        if aa.category_id == 'cat3':
            self.assertLess(w1, lbda)
            self.assertLess(w2, lbda)
        elif aa.category_id == 'cat2':
            self.assertLess(w1, lbda)
            self.assertGreaterEqual(w2, lbda)
        else:
            self.assertGreaterEqual(w1, lbda)
            self.assertGreaterEqual(w2, lbda)
def solve_cplex(self):
    """Build and solve the integer weight-inference MIP with CPLEX.

    Returns (objective, weights, lambda) where weights is a
    CriteriaValues of integer 'w_<cid>' values and lambda the integer
    threshold.  Raises RuntimeError when CPLEX does not report MIP
    optimality.
    """
    self.__add_variables_cplex()
    self.__add_constraints_cplex()
    self.__add_objective_cplex()
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    weights = CriteriaValues()
    for cv in self.cvs:
        learned = CriterionValue(cv.id)
        learned.value = int(self.lp.solution.get_values("w_%s" % cv.id))
        weights.append(learned)
    threshold = int(self.lp.solution.get_values("lambda"))
    return obj, weights, threshold
def from_xmcda(self, xmcda, id=None):
    """Initialize this ElectreTri model from an XMCDA tree.

    Looks up the 'ElectreTri' tag (optionally by *id*), then loads the
    model id, lambda threshold, criteria, criteria values, profile
    performances and category profiles.  Returns self.
    """
    xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
    self.id = xmcda.get('id')
    value = xmcda.find(".//methodParameters/parameter/value[@id='lambda']")
    # Element.getchildren() was removed in Python 3.9; indexing the
    # element yields its first child directly.
    self.lbda = unmarshal(value[0])
    self.criteria = Criteria().from_xmcda(xmcda, 'criteria')
    self.cv = CriteriaValues().from_xmcda(xmcda, 'cv')
    self.bpt = PerformanceTable().from_xmcda(xmcda, 'bpt')
    self.categories_profiles = \
        CategoriesProfiles().from_xmcda(xmcda, 'categories_profiles')
    return self
def test001(self):
    """Check MR-Sort assignments for a hand-built 3-criteria model with
    Möbius interaction terms (pairwise CriteriaSet values)."""
    c = generate_criteria(3)
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)
    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75,
                                         'c3': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25,
                                         'c3': 0.25})
    bpt = PerformanceTable([bp1, bp2])
    # Singleton weights plus pairwise interaction terms; note the
    # negative c1-c2 interaction.
    cv1 = CriterionValue('c1', 0.2)
    cv2 = CriterionValue('c2', 0.2)
    cv3 = CriterionValue('c3', 0.2)
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
    cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])
    lbda = 0.6
    model = MRSort(c, cvs, bpt, lbda, cps)
    # Alternatives covering every coalition pattern around the profiles.
    ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
    ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
    ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
    ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
    ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
    ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
    ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
    ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])
    aa = model.get_assignments(pt)
    self.assertEqual(aa['a1'].category_id, "cat2")
    self.assertEqual(aa['a2'].category_id, "cat1")
    self.assertEqual(aa['a3'].category_id, "cat3")
    self.assertEqual(aa['a4'].category_id, "cat2")
    self.assertEqual(aa['a5'].category_id, "cat2")
    self.assertEqual(aa['a6'].category_id, "cat3")
    self.assertEqual(aa['a7'].category_id, "cat1")
    self.assertEqual(aa['a8'].category_id, "cat1")
def from_xmcda(self, xmcda, id=None):
    """Initialize this MRSort model from an XMCDA tree.

    Delegates the base model loading to the parent class, then loads
    the optional veto parameters (veto_lbda, veto_weights, veto) when
    they are present in the tree.  Returns self.
    """
    super(MRSort, self).from_xmcda(xmcda, id)
    xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
    value = xmcda.find(
        ".//methodParameters/parameter/value[@id='veto_lbda']")
    if value is not None:
        # Element.getchildren() was removed in Python 3.9; indexing the
        # element yields its first child directly.
        self.veto_lbda = unmarshal(value[0])
    if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
        self.veto_weights = \
            CriteriaValues().from_xmcda(xmcda, 'veto_weights')
    if xmcda.find(".//performanceTable[@id='veto']") is not None:
        self.veto = PerformanceTable().from_xmcda(xmcda, 'veto')
    return self
def test002(self):
    """Learn back a known Möbius MR-Sort model from its own assignments
    and check the fit is perfect (objective 0, identical assignments)."""
    crits = generate_criteria(3)
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)
    bpt = PerformanceTable([
        AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75,
                                       'c3': 0.75}),
        AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25,
                                       'c3': 0.25}),
    ])
    cvs = CriteriaValues([
        CriterionValue('c1', 0.2),
        CriterionValue('c2', 0.2),
        CriterionValue('c3', 0.2),
        CriterionValue(CriteriaSet(['c1', 'c2']), -0.1),
        CriterionValue(CriteriaSet(['c2', 'c3']), 0.2),
        CriterionValue(CriteriaSet(['c1', 'c3']), 0.3),
    ])
    model = MRSort(crits, cvs, bpt, 0.6, cps)
    alts = generate_alternatives(10000)
    pt = generate_random_performance_table(alts, model.criteria)
    aa = model.get_assignments(pt)
    # Learn weights and threshold from scratch on the same profiles.
    model2 = MRSort(crits, None, bpt, None, cps)
    lp = LpMRSortMobius(model2, pt, aa)
    obj = lp.solve()
    aa2 = model2.get_assignments(pt)
    self.assertEqual(obj, 0)
    self.assertEqual(aa, aa2)
cv2 = CriterionValue('c2', 0.2) cv3 = CriterionValue('c3', 0.2) cv4 = CriterionValue('c4', 0.2) cv5 = CriterionValue('c5', 0.2) cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1) cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1) cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1) cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1) cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1) cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1) cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1) cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1) cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1) cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1) cvs = CriteriaValues([ cv1, cv2, cv3, cv4, cv5, cv12, cv13, cv14, cv15, cv23, cv24, cv25, cv34, cv35, cv45 ]) model.cv = cvs model.lbda = 0.6 # Generate a set of alternatives a = generate_alternatives(1000) pt = generate_random_performance_table(a, model.criteria) aa = model.pessimist(pt) worst = pt.get_worst(model.criteria) best = pt.get_best(model.criteria) print('Original model') print('==============') cids = model.criteria.keys()
def run_test(seed, data, pclearning, nseg):
    """Run one AVF-Sort learning experiment on *data*.

    Splits data into a learning set (pclearning %) and a test set,
    learns an additive-value sorting model with nseg segments per
    criterion, and returns a test_result filled with classification
    accuracies, AUCs and confusion matrices for the learning, test and
    whole sets.  Also saves the learned model and the split to an
    XMCDA .bz2 file (uses the module-level 'directory' — assumed to be
    set by the caller/script).
    """
    random.seed(seed)
    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    # Run the linear program
    t1 = time.time()
    # One CriterionValue per criterion holding its segment count.
    css = CriteriaValues([])
    for c in data.c:
        cs = CriterionValue(c.id, nseg)
        css.append(cs)
    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa_learning, pt_learning)
    t_total = time.time() - t1
    model = AVFSort(data.c, cvs, cfs, catv)
    ordered_categories = model.categories
    # CA learning set
    aa_learning2 = model.get_assignments(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             ordered_categories)
    # Compute CA of test setting
    if len(aa_test) > 0:
        aa_test2 = model.get_assignments(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             ordered_categories)
    else:
        # Empty test set: zero scores and an all-zero confusion matrix.
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in ordered_categories \
                                             for b in ordered_categories])
    # Compute CA of whole set
    aa2 = model.get_assignments(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, ordered_categories)
    t = test_result("%s-%d-%d-%d" % (data.name, seed, nseg, pclearning))
    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)
    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['ns'] = nseg
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc
    # Flatten the confusion matrices into per-cell result columns.
    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v
    t['t_total'] = t_total
    return t
from pymcda.generate import generate_categories_profiles
from pymcda.utils import print_pt_and_assignments
from pymcda.utils import compute_number_of_winning_coalitions
from pymcda.pt_sorted import SortedPerformanceTable
from pymcda.ui.graphic import display_electre_tri_models
from pymcda.electre_tri import MRSort
from pymcda.types import CriterionValue, CriteriaValues
from pymcda.types import AlternativePerformances, PerformanceTable
from pymcda.types import AlternativeAssignment, AlternativesAssignments

# Generate a random ELECTRE TRI BM model
random.seed(127890123456789)
ncriteria = 5
model = MRSort()
model.criteria = generate_criteria(ncriteria)
# Equal weights (5 x 0.2) summing to 1.
model.cv = CriteriaValues([CriterionValue('c%d' % (i + 1), 0.2)
                           for i in range(ncriteria)])
# Single profile b1 at 0.5 on every criterion => two categories.
b1 = AlternativePerformances('b1', {'c%d' % (i + 1): 0.5
                                    for i in range(ncriteria)})
model.bpt = PerformanceTable([b1])
cat = generate_categories(2)
model.categories_profiles = generate_categories_profiles(cat)
model.lbda = 0.6
# Random veto profile strictly below b1 on every criterion.
vb1 = AlternativePerformances('b1', {'c%d' % (i + 1): random.uniform(0, 0.4)
                                     for i in range(ncriteria)})
model.veto = PerformanceTable([vb1])
model.veto_weights = model.cv.copy()
model.veto_lbda = 0.4

# Generate a set of alternatives
a = generate_alternatives(1000)
pt = generate_random_performance_table(a, model.criteria)
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    """Benchmark AVF-Sort learning against a random UTADIS model.

    Generates a random additive-value model and na assignment examples,
    corrupts pcerrors % of them, learns a model back with LpAVFSort and
    measures restoration and generalization accuracy.  Returns a filled
    test_result.
    """
    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)
    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)
    # Learning is done on the normalized [0, 1] scale.
    gi_worst = AlternativePerformances('worst', {crit.id: 0
                                                 for crit in model.criteria})
    gi_best = AlternativePerformances('best', {crit.id: 1
                                               for crit in model.criteria})
    # One CriterionValue per criterion holding its segment count.
    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)
    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()
    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)
    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    # ok2: matches the (noisy) learning examples; ok: matches the true
    # assignments; altered: correct examples the learned model changed.
    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1
        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1
    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total
    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)
    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err, cat.keys(),
                                                     pcerrors / 100)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)
    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))
    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2
    return t
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    """Benchmark AVF-Sort learning against a random MR-Sort model.

    Generates a random ELECTRE TRI (MR-Sort) model and na pessimistic
    assignment examples, corrupts pcerrors % of them, learns an
    additive-value model back with LpAVFSort and measures restoration
    and generalization accuracy.  Returns a filled test_result.
    """
    # Generate a random ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)
    # Learning is done on the normalized [0, 1] scale.
    gi_worst = AlternativePerformances('worst', {c.id: 0
                                                 for c in model.criteria})
    gi_best = AlternativePerformances('best', {c.id: 1
                                               for c in model.criteria})
    # One CriterionValue per criterion holding its segment count.
    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)
    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(),
                   gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()
    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)
    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    # ok2: matches the (noisy) learning examples; ok: matches the true
    # assignments.
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1
        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total
    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)
    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))
    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2
    return t
def test001(self):
    """Pessimistic MR-Sort assignment with vetoes on 45 fixed alternatives.

    One profile at 10 on each of five equally-weighted criteria
    (lambda = 0.6) with a veto profile at 2 below it (veto
    lambda = 0.4); the computed assignments must match the expected
    hand-written ones exactly.
    """
    c = generate_criteria(5)
    cids = ['c1', 'c2', 'c3', 'c4', 'c5']

    # Equal weight of 0.2 on every criterion
    w = CriteriaValues([CriterionValue(cid, 0.2) for cid in cids])

    # Single category profile b1 at 10 everywhere
    bpt = PerformanceTable(
        [AlternativePerformances('b1', {cid: 10 for cid in cids})])

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)

    # Veto profile attached to b1, at 2 on every criterion,
    # with the same weights as the concordance part
    v = PerformanceTable(
        [AlternativePerformances('b1', {cid: 2 for cid in cids}, 'b1')])
    vw = w.copy()

    # Performances of a1..a45 on (c1, c2, c3, c4, c5)
    perfs = {
        'a1':  (9, 9, 9, 9, 11),
        'a2':  (9, 9, 9, 11, 9),
        'a3':  (9, 9, 9, 11, 11),
        'a4':  (9, 9, 11, 9, 9),
        'a5':  (9, 9, 11, 9, 11),
        'a6':  (9, 9, 11, 11, 9),
        'a7':  (9, 9, 11, 11, 11),
        'a8':  (9, 11, 9, 9, 9),
        'a9':  (9, 11, 9, 9, 11),
        'a10': (9, 11, 9, 11, 9),
        'a11': (9, 11, 9, 11, 11),
        'a12': (9, 11, 11, 9, 9),
        'a13': (9, 11, 11, 9, 11),
        'a14': (9, 11, 11, 11, 9),
        'a15': (9, 11, 11, 11, 11),
        'a16': (11, 9, 9, 9, 9),
        'a17': (11, 9, 9, 9, 11),
        'a18': (11, 9, 9, 11, 9),
        'a19': (11, 9, 9, 11, 11),
        'a20': (11, 9, 11, 9, 9),
        'a21': (11, 9, 11, 9, 11),
        'a22': (11, 9, 11, 11, 9),
        'a23': (11, 9, 11, 11, 11),
        'a24': (11, 11, 9, 9, 9),
        'a25': (11, 11, 9, 9, 11),
        'a26': (11, 11, 9, 11, 9),
        'a27': (11, 11, 9, 11, 11),
        'a28': (11, 11, 11, 9, 9),
        'a29': (11, 11, 11, 9, 11),
        'a30': (11, 11, 11, 11, 9),
        'a31': (11, 11, 11, 11, 7),
        'a32': (11, 11, 11, 7, 11),
        'a33': (11, 11, 7, 11, 11),
        'a34': (11, 7, 11, 11, 11),
        'a35': (7, 11, 11, 11, 11),
        'a36': (11, 11, 11, 7, 7),
        'a37': (11, 11, 7, 11, 7),
        'a38': (11, 7, 11, 11, 7),
        'a39': (7, 11, 11, 11, 7),
        'a40': (11, 11, 7, 7, 11),
        'a41': (11, 7, 11, 7, 11),
        'a42': (7, 11, 11, 7, 11),
        'a43': (11, 7, 7, 11, 11),
        'a44': (7, 11, 7, 11, 11),
        'a45': (7, 7, 11, 11, 11),
    }
    pt = PerformanceTable([
        AlternativePerformances(aid, dict(zip(cids, vals)))
        for aid, vals in perfs.items()
    ])

    # Expected assignments: these alternative indices go to cat1,
    # everything else to cat2
    cat1 = {7, 11, 13, 14, 15, 19, 21, 22, 23, 25, 26, 27, 28, 29,
            30, 31, 32, 33, 34, 35}
    aa = AlternativesAssignments([
        AlternativeAssignment('a%d' % i,
                              'cat1' if i in cat1 else 'cat2')
        for i in range(1, 46)
    ])

    model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
    aa2 = model.pessimist(pt)

    ok = compare_assignments(aa, aa2)
    self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                     "assigned")
# --- Script fragment: print the original model, then re-learn it ---
# NOTE(review): `a`, `cv`, `cfs`, `catv`, `aa_erroned`, `aa_err`, `pt`,
# `c` and `cat` are presumably defined earlier in this script, outside
# this chunk — confirm against the full file.
print('==============')
print("Number of alternatives: %d" % len(a))
print('Criteria weights:')
cv.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
# %% escapes the percent sign in the %-formatted string
print("Errors in alternatives assignments: %g %%"
      % (len(aa_erroned) / len(a) * 100))

# Learn the parameters from assignment examples
gi_worst = pt.get_worst(c)
gi_best = pt.get_best(c)

# Keep, per criterion, the same number of value-function segments as
# the original criterion functions
css = CriteriaValues([])
for cf in cfs:
    cs = CriterionValue(cf.id, len(cf.function))
    css.append(cs)

lp = LpAVFSort(c, css, cat, gi_worst, gi_best)
# Re-binds cvs/cfs/catv to the learned parameters
obj, cvs, cfs, catv = lp.solve(aa_err, pt)

print('=============')
print('Learned model')
print('=============')
print('Criteria weights:')
cvs.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
# --- Script fragment: same experiment shape as the preceding section,
# but learning with LpAVFSortCompat instead of LpAVFSort ---
# NOTE(review): `a`, `cv`, `cfs`, `catv`, `aa_erroned`, `aa_err`, `pt`,
# `c` and `cat` are presumably defined earlier in this script, outside
# this chunk — confirm against the full file.
print('==============')
print("Number of alternatives: %d" % len(a))
print('Criteria weights:')
cv.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
# %% escapes the percent sign in the %-formatted string
print("Errors in alternatives assignments: %g %%"
      % (len(aa_erroned) / len(a) * 100))

# Learn the parameters from assignment examples
gi_worst = pt.get_worst(c)
gi_best = pt.get_best(c)

# Keep, per criterion, the same number of value-function segments as
# the original criterion functions
css = CriteriaValues([])
for cf in cfs:
    cs = CriterionValue(cf.id, len(cf.function))
    css.append(cs)

lp = LpAVFSortCompat(c, css, cat, gi_worst, gi_best)
# Re-binds cvs/cfs/catv to the learned parameters
obj, cvs, cfs, catv = lp.solve(aa_err, pt)

print('=============')
print('Learned model')
print('=============')
print('Criteria weights:')
cvs.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')