def solve_cplex(self):
    """Extract the learned Möbius masses and lambda from the solved MIP.

    Stores a CriteriaValues (one CriterionValue per criteria subset in
    self.mindices) on self.model.cv and the majority threshold on
    self.model.lbda.  Returns the objective value.

    Raises RuntimeError when the solver status is not MIP_optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()

    cvs = CriteriaValues()
    # Map each criteria subset to its LP variable name m_0, m_1, ...
    # FIX: the original reused the name `m` both for this list and for
    # the loop key below, shadowing the list; distinct names avoid it.
    varnames = ['m_%d' % i for i in range(len(self.mindices))]
    mindices_map = dict(zip(self.mindices, varnames))
    for mindex, vname in mindices_map.items():
        cv = CriterionValue()
        # Singletons are identified by their sole criterion id, larger
        # subsets by a CriteriaSet.
        if len(mindex) > 1:
            cv.id = CriteriaSet(mindex)
        else:
            cv.id = next(iter(mindex))
        cv.value = self.lp.solution.get_values(vname)
        cvs.append(cv)

    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")

    return obj
def solve_cplex(self):
    """Extract the learned Möbius masses and lambda from the solved LP.

    Same extraction as the MIP variant but accepts the continuous
    `optimal` status.  Stores results on self.model.cv / self.model.lbda
    and returns the objective value.

    Raises RuntimeError when the solver status is not optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()

    cvs = CriteriaValues()
    # Map each criteria subset to its LP variable name m_0, m_1, ...
    # FIX: the original reused the name `m` both for this list and for
    # the loop key below, shadowing the list; distinct names avoid it.
    varnames = ['m_%d' % i for i in range(len(self.mindices))]
    mindices_map = dict(zip(self.mindices, varnames))
    for mindex, vname in mindices_map.items():
        cv = CriterionValue()
        # Singletons are identified by their sole criterion id, larger
        # subsets by a CriteriaSet.
        if len(mindex) > 1:
            cv.id = CriteriaSet(mindex)
        else:
            cv.id = next(iter(mindex))
        cv.value = self.lp.solution.get_values(vname)
        cvs.append(cv)

    self.model.cv = cvs
    self.model.lbda = self.lp.solution.get_values("lambda")

    return obj
def generate_random_criteria_weights_msjp(crits, seed=None, k=3, fixed_w1=None):
    """Generate random normalized criteria weights (MSJP variant).

    Draws len(crits) - 1 cut points in [0, 1], sorts them, and returns
    the consecutive differences as weights (rounded to k decimals).
    When fixed_w1 is given, the cuts are shifted so one weight equals
    fixed_w1.  An optional seed makes the draw reproducible.
    """
    if seed is not None:
        random.seed(seed)

    if fixed_w1 is None:
        cuts = [random.random() for _ in range(len(crits) - 1)]
    else:
        cuts = [round(random.uniform(0, 1 - fixed_w1), k)
                for _ in range(len(crits) - 2)]
        cuts = [fixed_w1 + c for c in cuts]
        cuts.append(round(fixed_w1, k))
    cuts.sort()

    cvals = CriteriaValues()
    last = len(crits) - 1
    for i, crit in enumerate(crits):
        cval = CriterionValue()
        cval.id = crit.id
        if i == 0:
            cval.value = round(cuts[i], k)
        elif i == last:
            cval.value = round(1 - cuts[i - 1], k)
        else:
            cval.value = round(cuts[i] - cuts[i - 1], k)
        cvals.append(cval)

    return cvals
def solve_glpk(self):
    """Solve the GLPK program and load the learned MRSort parameters.

    Stores the criteria weights on self.model.cv, the majority threshold
    on self.model.lbda and the profile performances (rounded to 5
    decimals) on self.model.bpt.  Returns the objective value.

    Raises RuntimeError when the solver does not report 'opt'.
    """
    self.lp.solvopt(method='exact', integer='advanced')
    self.lp.solve()

    if self.lp.status() != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())

    obj = self.lp.vobj()

    weights = CriteriaValues()
    for crit in self.criteria:
        wv = CriterionValue()
        wv.id = crit.id
        wv.value = float(self.w[crit.id].primal)
        weights.append(wv)
    self.model.cv = weights
    self.model.lbda = self.lbda.primal

    profiles = PerformanceTable()
    for profile in self.__profiles:
        ap = AlternativePerformances(profile)
        for crit in self.criteria:
            ap.performances[crit.id] = round(self.g[profile][crit.id].primal, 5)
        profiles.append(ap)
    self.model.bpt = profiles
    self.model.bpt.update_direction(self.model.criteria)

    return obj
def solve_cplex(self):
    """Read weights, lambda and profile performances back from CPLEX.

    Fills self.model.cv, self.model.lbda and self.model.bpt (profile
    performances rounded to 5 decimals) and returns the objective value.

    Raises RuntimeError when the solver status is not MIP_optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()
    get_value = self.lp.solution.get_values

    weights = CriteriaValues()
    for crit in self.criteria:
        wv = CriterionValue()
        wv.id = crit.id
        wv.value = get_value('w_' + crit.id)
        weights.append(wv)
    self.model.cv = weights
    self.model.lbda = get_value("lambda")

    profiles = PerformanceTable()
    for profile in self.__profiles:
        ap = AlternativePerformances(profile)
        for crit in self.criteria:
            ap.performances[crit.id] = round(get_value("g_%s_%s" % (profile, crit.id)), 5)
        profiles.append(ap)
    self.model.bpt = profiles
    self.model.bpt.update_direction(self.model.criteria)

    return obj
def generate_random_capacities(criteria, seed=None, k=3):
    """Generate a random capacity over all non-empty subsets of criteria.

    Draws 2^n - 2 random values (rounded to k decimals) plus 1.0 for the
    grand coalition and sorts them, so every subset receives a value no
    larger than any bigger subset's.  An optional seed makes the draw
    reproducible.  Returns a CriteriaValues keyed by criterion id for
    singletons and by CriteriaSet otherwise.
    """
    if seed is not None:
        random.seed(seed)

    n = len(criteria)
    levels = [round(random.random(), k) for _ in range(2 ** n - 2)] + [1.0]
    levels.sort()
    values = iter(levels)

    cvs = CriteriaValues()
    for size in range(1, n + 1):
        combis = list(combinations(criteria.keys(), size))
        # Shuffle so values of a given magnitude land on random subsets
        # of the same size.
        random.shuffle(combis)
        for combi in combis:
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            cvs.append(CriterionValue(cid, next(values)))

    return cvs
def solve(self):
    """Extract the full MRSort-with-veto model from the solved CPLEX MIP.

    Fills, on self.model: cv / lbda (concordance weights and threshold),
    bpt (profiles), veto_weights / veto_lbda (discordance side, read
    from the z_* variables and "LAMBDA"), and veto (veto profiles).
    All profile performances are rounded to 5 decimals.  Returns the
    objective value.

    Raises RuntimeError when the solver status is not MIP_optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()
    get_value = self.lp.solution.get_values

    weights = CriteriaValues()
    for crit in self.criteria:
        cv = CriterionValue()
        cv.id = crit.id
        cv.value = get_value('w_' + crit.id)
        weights.append(cv)
    self.model.cv = weights
    self.model.lbda = get_value("lambda")

    profiles = PerformanceTable()
    for profile in self.__profiles:
        ap = AlternativePerformances(profile)
        for crit in self.criteria:
            ap.performances[crit.id] = round(get_value("g_%s_%s" % (profile, crit.id)), 5)
        profiles.append(ap)
    self.model.bpt = profiles
    self.model.bpt.update_direction(self.model.criteria)

    veto_weights = CriteriaValues()
    for crit in self.criteria:
        w = CriterionValue()
        w.id = crit.id
        w.value = get_value('z_' + crit.id)
        veto_weights.append(w)
    self.model.veto_weights = veto_weights
    self.model.veto_lbda = get_value("LAMBDA")

    vetoes = PerformanceTable()
    for profile in self.__profiles:
        vp = AlternativePerformances(profile, {})
        for crit in self.criteria:
            vp.performances[crit.id] = round(get_value('v_%s_%s' % (profile, crit.id)), 5)
        vetoes.append(vp)
    self.model.veto = vetoes

    return obj
def solve_cplex(self, aa, pt):
    # Extract the learned AVF-sort pieces from a solved CPLEX program:
    # per-criterion piecewise-linear value functions, unit criteria
    # weights, and the utility interval of each category.
    # NOTE(review): `aa` and `pt` appear unused here — presumably kept
    # for signature compatibility with other solve_* variants; confirm.
    self.lp.solve()
    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)
    obj = self.lp.solution.get_objective_value()
    cfs = CriteriaFunctions()
    cvs = CriteriaValues()
    for cs in self.cs:
        # Each criterion gets weight 1; the trade-offs live entirely in
        # the marginal value functions below.
        cv = CriterionValue(cs.id, 1)
        cvs.append(cv)
        nseg = cs.value  # number of linear segments for this criterion
        x_points = range(nseg)
        p1 = Point(self.points[cs.id][0], 0)
        ui = 0  # cumulative utility at the current breakpoint
        f = PiecewiseLinear([])
        for i in x_points:
            # w_<cid>_<j> is the utility increment of segment j.
            uivar = 'w_' + cs.id + "_%d" % (i + 1)
            ui += self.lp.solution.get_values(uivar)
            x = self.points[cs.id][i + 1]
            p2 = Point(x, ui)
            s = Segment("s%d" % (i + 1), p1, p2)
            f.append(s)
            p1 = p2
            # Both endpoints belong to the segment (closed interval).
            s.p1_in = True
            s.p2_in = True
        cf = CriterionFunction(cs.id, f)
        cfs.append(cf)
    # Invert the category -> rank mapping, then build the utility
    # interval of each category from the u_i threshold variables.
    cat = {v: k for k, v in self.cat.items()}
    catv = CategoriesValues()
    ui_a = 0
    for i in range(1, len(cat)):
        ui_b = self.lp.solution.get_values("u_%d" % i)
        catv.append(CategoryValue(cat[i], Interval(ui_a, ui_b)))
        ui_a = ui_b
    # Topmost category reaches up to 1; relies on `i` surviving the loop.
    catv.append(CategoryValue(cat[i + 1], Interval(ui_a, 1)))
    return obj, cvs, cfs, catv
def capacities_to_mobius(criteria, capacities):
    """Convert a capacity into its Möbius representation.

    Iterates subsets by increasing size; the Möbius mass of a subset is
    its capacity minus the already-computed masses of all its strict
    subsets.  Returns a CriteriaValues keyed by criterion id for
    singletons and by CriteriaSet otherwise.
    """
    cvs = CriteriaValues()
    n = len(criteria)
    for size in range(1, n + 1):
        for combi in combinations(criteria.keys(), size):
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            mass = capacities[cid].value
            # Subtract the masses of every strict subset (sizes < size).
            for sub_size in range(1, size):
                for sub in combinations(combi, sub_size):
                    sub_id = sub[0] if sub_size == 1 else CriteriaSet(sub)
                    mass -= cvs[sub_id].value
            cvs.append(CriterionValue(cid, mass))
    return cvs
def mobius_to_capacities(criteria, mobius):
    """Rebuild a capacity from its Möbius representation.

    The capacity of a subset is the sum of the Möbius masses of all its
    non-empty subsets (missing entries in `mobius` count as zero).
    Returns a CriteriaValues keyed by criterion id for singletons and by
    CriteriaSet otherwise.
    """
    cvs = CriteriaValues()
    n = len(criteria)
    for size in range(1, n + 1):
        for combi in combinations(criteria.keys(), size):
            cid = combi[0] if size == 1 else CriteriaSet(combi)
            total = 0
            for sub_size in range(1, size + 1):
                for sub in combinations(combi, sub_size):
                    sub_id = sub[0] if sub_size == 1 else CriteriaSet(sub)
                    if sub_id in mobius:
                        total += mobius[sub_id].value
            cvs.append(CriterionValue(cid, total))
    return cvs
def generate_random_criteria_values(crits, seed=None, k=3, type='float',
                                    vmin=0, vmax=1):
    """Draw one random value in [vmin, vmax] per criterion.

    With type == 'integer' the values are uniform integers; otherwise
    uniform floats rounded to k decimals.  An optional seed makes the
    draw reproducible.  (`type` shadows the builtin but is kept for
    caller compatibility.)
    """
    if seed is not None:
        random.seed(seed)

    cvals = CriteriaValues()
    for crit in crits:
        if type == 'integer':
            value = random.randint(vmin, vmax)
        else:
            value = round(random.uniform(vmin, vmax), k)
        cval = CriterionValue()
        cval.id = crit.id
        cval.value = value
        cvals.append(cval)
    return cvals
def solve_glpk(self, aa, pt):
    """Extract the learned AVF-sort model from the solved GLPK program.

    Builds per-criterion piecewise-linear value functions from the
    self.w segment variables, unit criteria weights, and the utility
    interval of each category from the self.u thresholds.  Returns
    (objective, CriteriaValues, CriteriaFunctions, CategoriesValues).
    `aa` and `pt` are unused here; the signature mirrors the CPLEX
    variant.

    Raises RuntimeError when the solver does not report 'opt'.
    """
    self.lp.solve()

    status = self.lp.status()
    if status != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())

    obj = self.lp.vobj()

    cfs = CriteriaFunctions()
    cvs = CriteriaValues()
    for cid, points in self.points.items():
        # Each criterion gets weight 1; trade-offs are encoded in the
        # marginal value functions.
        cv = CriterionValue(cid, 1)
        cvs.append(cv)

        p1 = Point(self.points[cid][0], 0)
        ui = 0  # cumulative utility at the current breakpoint
        f = PiecewiseLinear([])
        for i in range(len(points) - 1):
            # FIX: dropped the unused `uivar` name string (a leftover
            # from the CPLEX variant); GLPK reads the variable directly.
            ui += self.w[cid][i].primal
            p2 = Point(self.points[cid][i + 1], ui)
            s = Segment(p1, p2)
            f.append(s)
            p1 = p2
            s.p2_in = True
        cf = CriterionFunction(cid, f)
        cfs.append(cf)

    # Invert rank -> category id, then build each category's interval.
    cat = {v: k for k, v in self.cat.items()}
    catv = CategoriesValues()
    ui_a = 0
    for i in range(0, len(cat) - 1):
        ui_b = self.u[i].primal
        catv.append(CategoryValue(cat[i + 1], Interval(ui_a, ui_b)))
        ui_a = ui_b
    # Topmost category reaches up to 1; relies on `i` from the loop.
    catv.append(CategoryValue(cat[i + 2], Interval(ui_a, 1)))

    return obj, cvs, cfs, catv
def solve_scip(self):
    """Minimize the program with SCIP and store the learned parameters.

    Fills self.model.cv with the per-criterion weights and
    self.model.lbda with the threshold, then returns the objective
    value.  Raises RuntimeError when SCIP finds no solution.
    """
    solution = self.lp.minimize(objective=self.obj)
    if solution is None:
        raise RuntimeError("No solution found")

    obj = solution.objective

    weights = CriteriaValues()
    for crit in self.model.criteria:
        wv = CriterionValue()
        wv.id = crit.id
        wv.value = solution[self.w[crit.id]]
        weights.append(wv)

    self.model.cv = weights
    self.model.lbda = solution[self.lbda]

    return obj
def on_button_run(self):
    # Run/Stop toggle: a click while a job is running stops the worker
    # thread and returns immediately.
    if hasattr(self, 'started') and self.started is True:
        self.thread.stop()
        return
    # Lazily generate a model if the user never pressed "generate".
    if not hasattr(self, 'model'):
        self.on_button_generate()
    if self.combobox_type.currentIndex() == COMBO_AVFSORT:
        self.init_results_avf()
        ns = self.spinbox_nsegments.value()
        # One CriterionValue per criterion carrying the segment count.
        css = CriteriaValues([])
        for c in self.model.criteria:
            cs = CriterionValue(c.id, ns)
            css.append(cs)
        self.thread = qt_thread_avf(self.model.criteria, self.categories,
                                    self.worst, self.best, css, self.pt,
                                    self.aa, None)
    else:
        self.init_results_mr()
        nmodels = self.spinbox_nmodels.value()
        niter = self.spinbox_niter.value()
        nmeta = self.spinbox_nmeta.value()
        self.thread = qt_thread_mr(self.model.criteria, self.categories,
                                   self.worst, self.best, nmodels, niter,
                                   nmeta, self.pt, self.aa, None)
    # Old-style (PyQt4) signal/slot connections to the worker thread.
    self.connect(self.thread, QtCore.SIGNAL("update(int)"), self.update)
    self.connect(self.thread, QtCore.SIGNAL("finished()"), self.finished)
    self.label_time2.setText("")
    self.start_time = time.time()
    self.timer.start(100)  # refresh the elapsed-time label every 100 ms
    self.thread.start()
    self.button_run.setText("Stop")
    self.groupbox_result.setVisible(True)
    self.started = True
def solve_cplex(self):
    """Read the learned veto weights and veto lambda back from CPLEX.

    Fills self.model.veto_weights and self.model.veto_lbda and returns
    the objective value.  Raises RuntimeError when the solver status is
    not optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()

    veto_weights = CriteriaValues()
    for crit in self.model.criteria:
        vw = CriterionValue()
        vw.id = crit.id
        # Variable names here are 'w<cid>' (no underscore separator).
        vw.value = self.lp.solution.get_values('w' + crit.id)
        veto_weights.append(vw)

    self.model.veto_weights = veto_weights
    self.model.veto_lbda = self.lp.solution.get_values("lambda")

    return obj
def solve_cplex(self):
    """Read the learned criteria weights and lambda back from CPLEX.

    Fills self.model.cv and self.model.lbda and returns the objective
    value.  Raises RuntimeError when the solver status is not optimal.
    """
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()

    weights = CriteriaValues()
    for crit in self.model.criteria:
        wv = CriterionValue()
        wv.id = crit.id
        # Variable names here are 'w<cid>' (no underscore separator).
        wv.value = self.lp.solution.get_values('w' + crit.id)
        weights.append(wv)

    self.model.cv = weights
    self.model.lbda = self.lp.solution.get_values("lambda")

    return obj
def solve_glpk(self):
    """Read the learned weights (indexed by position) and lambda from GLPK.

    Fills self.model.cv and self.model.lbda and returns the objective
    value.  Raises RuntimeError when the solver does not report 'opt'.
    """
    self.lp.solve()

    if self.lp.status() != 'opt':
        raise RuntimeError("Solver status: %s" % self.lp.status())

    obj = self.lp.vobj()

    weights = CriteriaValues()
    # self.w is indexed by criterion position, not by id.
    for idx, crit in enumerate(self.model.criteria):
        wv = CriterionValue()
        wv.id = crit.id
        wv.value = float(self.w[idx].primal)
        weights.append(wv)

    self.model.cv = weights
    self.model.lbda = float(self.lbda.primal)

    return obj
def generate_random_criteria_weights(crits, seed=None, k=3):
    """Generate random normalized criteria weights.

    Uses the stick-breaking scheme: draws len(crits) - 1 uniform cut
    points in [0, 1], sorts them, and returns the consecutive
    differences (rounded to k decimals) so the weights sum to ~1.
    An optional seed makes the draw reproducible.
    """
    if seed is not None:
        random.seed(seed)

    cuts = sorted(random.random() for _ in range(len(crits) - 1))

    cvals = CriteriaValues()
    last = len(crits) - 1
    for i, crit in enumerate(crits):
        cval = CriterionValue()
        cval.id = crit.id
        if i == 0:
            cval.value = round(cuts[i], k)
        elif i == last:
            cval.value = round(1 - cuts[i - 1], k)
        else:
            cval.value = round(cuts[i] - cuts[i - 1], k)
        cvals.append(cval)

    return cvals
def one_test(self, seed, na, nc, ncat, ns):
    """Round-trip check: learn back a random AVF-sort model exactly.

    Generates a random model and assignment examples, re-learns the
    model with LpAVFSort, and asserts the learned model reproduces the
    original assignments.
    """
    model = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    alts = generate_alternatives(na)
    pt = generate_random_performance_table(alts, model.criteria)
    aa = model.get_assignments(pt)

    # Segment count per criterion, taken from the generated functions.
    segments = CriteriaValues([])
    for cf in model.cfs:
        segments.append(CriterionValue(cf.id, len(cf.function)))

    cat = model.cat_values.to_categories()
    lp = LpAVFSort(model.criteria, segments, cat,
                   pt.get_worst(model.criteria),
                   pt.get_best(model.criteria))
    obj, cvs, cfs, catv = lp.solve(aa, pt)

    learned = AVFSort(model.criteria, cvs, cfs, catv)
    self.assertEqual(aa, learned.get_assignments(pt))
def solve_cplex(self):
    """Build and solve the CPLEX MIP, returning integer weights.

    Assembles variables, constraints and objective, solves, then
    returns (objective, CriteriaValues of integer weights, integer
    lambda).  Raises RuntimeError when the status is not MIP_optimal.
    """
    self.__add_variables_cplex()
    self.__add_constraints_cplex()
    self.__add_objective_cplex()
    self.lp.solve()

    status = self.lp.solution.get_status()
    if status != self.lp.solution.status.MIP_optimal:
        raise RuntimeError("Solver status: %s" % status)

    obj = self.lp.solution.get_objective_value()

    learned = CriteriaValues()
    for cv in self.cvs:
        weight = CriterionValue(cv.id)
        # Weights are integer variables; truncate solver round-off.
        weight.value = int(self.lp.solution.get_values("w_%s" % cv.id))
        learned.append(weight)

    lbda = int(self.lp.solution.get_values("lambda"))

    return obj, learned, lbda
def run_test(seed, data, pclearning, nseg):
    # One learning/test-split experiment for LpAVFSort: learns a model
    # from pclearning% of the data, evaluates CA / AUC / confusion
    # matrices on the learning set, the test set and the whole set,
    # saves the artifacts to XMCDA and returns a filled test_result.
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)

    # Run the linear program
    t1 = time.time()

    # Same number of segments (nseg) for every criterion.
    css = CriteriaValues([])
    for c in data.c:
        cs = CriterionValue(c.id, nseg)
        css.append(cs)

    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa_learning, pt_learning)

    t_total = time.time() - t1

    model = AVFSort(data.c, cvs, cfs, catv)
    ordered_categories = model.categories

    # CA learning set
    aa_learning2 = model.get_assignments(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             ordered_categories)

    # Compute CA of test setting
    if len(aa_test) > 0:
        aa_test2 = model.get_assignments(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             ordered_categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)  # NOTE(review): assigned but never used
        diff_test = OrderedDict([((a, b), 0) for a in ordered_categories \
                                 for b in ordered_categories])

    # Compute CA of whole set
    aa2 = model.get_assignments(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, ordered_categories)

    t = test_result("%s-%d-%d-%d" % (data.name, seed, nseg, pclearning))

    # Tag and persist the artifacts; `directory` is a module-level global.
    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model,
                  aa_learning, aa_test, pt_learning, pt_test)

    # Input parameters
    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['ns'] = nseg
    t['pclearning'] = pclearning

    # Output measurements
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    # Flatten the confusion matrices into per-cell result keys.
    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
# Script fragment: display the original model, then re-learn an
# AVF-sort model from the noisy assignment examples (aa_err) and
# display the learned one.  `cv`, `cfs`, `catv`, `aa_erroned`, `a`,
# `pt`, `c`, `cat` and `aa_err` are defined earlier in the script.
cv.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
print("Errors in alternatives assignments: %g %%" \
      % (len(aa_erroned) / len(a) * 100))

# Learn the parameters from assignment examples
gi_worst = pt.get_worst(c)
gi_best = pt.get_best(c)

# One CriterionValue per criterion function holding its segment count.
css = CriteriaValues([])
for cf in cfs:
    cs = CriterionValue(cf.id, len(cf.function))
    css.append(cs)

lp = LpAVFSort(c, css, cat, gi_worst, gi_best)
obj, cvs, cfs, catv = lp.solve(aa_err, pt)

print('=============')
print('Learned model')
print('=============')
print('Criteria weights:')
cvs.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()

u2 = AVFSort(c, cvs, cfs, catv)
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Learn an AVF-sort model from noisy assignment examples produced
    # by a random AVF-sort (UTADIS-like) model, then measure
    # restoration and generalization accuracy.  Returns a test_result.

    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()

    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Performance bounds; assumes performances lie in [0, 1].
    gi_worst = AlternativePerformances('worst',
                                       {crit.id: 0 for crit in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {crit.id: 1 for crit in model.criteria})

    # Segment count per criterion, taken from the generated functions.
    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)

    # Run linear program (t2 - t1 = construction, t3 - t2 = solving)
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)

    # ok*: matches vs noisy (ok2) and clean (ok) ground truth;
    # altered: clean examples the learned model now misclassifies.
    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1
        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1

    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Generalization accuracy against a noisy copy of the truth.
    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err, cat.keys(),
                                                     pcerrors / 100)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Learn an AVF-sort model from noisy assignment examples produced
    # by a random ELECTRE TRI (MRSort) model, then measure restoration
    # and generalization accuracy.  Returns a test_result.

    # Generate a random ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Performance bounds; assumes performances lie in [0, 1].
    gi_worst = AlternativePerformances('worst',
                                       {c.id: 0 for c in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {c.id: 1 for c in model.criteria})

    # Same number of segments (ns) for every criterion.
    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run linear program (t2 - t1 = construction, t3 - t2 = solving)
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(),
                   gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)

    # ok2: matches vs noisy examples; ok: matches vs clean truth.
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1
        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)
    ca2 = ok2 / total
    ca2_errors = ok2_errors / total
    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                                              na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
# Script fragment: display the original model, then re-learn an
# AVF-sort model from the noisy assignment examples (aa_err) using the
# compatibility solver LpAVFSortCompat, and display the learned one.
# `cv`, `cfs`, `catv`, `aa_erroned`, `a`, `pt`, `c`, `cat` and `aa_err`
# are defined earlier in the script.
cv.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
print("Errors in alternatives assignments: %g %%" \
      % (len(aa_erroned) / len(a) * 100))

# Learn the parameters from assignment examples
gi_worst = pt.get_worst(c)
gi_best = pt.get_best(c)

# One CriterionValue per criterion function holding its segment count.
css = CriteriaValues([])
for cf in cfs:
    cs = CriterionValue(cf.id, len(cf.function))
    css.append(cs)

lp = LpAVFSortCompat(c, css, cat, gi_worst, gi_best)
obj, cvs, cfs, catv = lp.solve(aa_err, pt)

print('=============')
print('Learned model')
print('=============')
print('Criteria weights:')
cvs.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()

u2 = AVFSort(c, cvs, cfs, catv)