def get_veto_profile_upper_limit(self, bid):
    """Return the upper limit of the veto profile attached to *bid*.

    For the topmost profile the limit is the boundary profile itself.
    Otherwise the limit is, criterion by criterion, the minimum (taken
    in the criterion's preference direction) of this boundary profile
    and the veto profile of the profile directly above.
    """
    pos = self.profiles.index(bid)
    # Topmost profile: bounded by its own boundary profile.
    if pos == len(self.profiles) - 1:
        return self.bpt[self.profiles[pos]]

    boundary = self.bpt[bid]
    veto_above = self.vpt[self.profiles[pos + 1]]
    limit = AlternativePerformances(bid, {})
    for cid in boundary.performances:
        # Multiplying by the direction maps "better" onto "greater",
        # so min() picks the less preferred of the two values.
        d = self.criteria[cid].direction
        limit.performances[cid] = min(boundary.performances[cid] * d,
                                      veto_above.performances[cid] * d) * d
    return limit
def get_profile_lower_limit(self, bid):
    """Return the lower limit of profile *bid*.

    Without a veto table (self.vpt is None) the limit is simply the
    boundary profile below, or None for the lowest profile.  With
    vetoes, the lowest profile is bounded by its own veto profile;
    any other profile is bounded, criterion by criterion, by the
    maximum (in the criterion's preference direction) of the boundary
    profile below and this profile's own veto profile.
    """
    pos = self.profiles.index(bid)

    if self.vpt is None:
        return None if pos == 0 else self.bpt[self.profiles[pos - 1]]

    # Lowest profile: only its veto profile bounds it from below.
    if pos == 0:
        return self.vpt[bid]

    below = self.bpt[self.profiles[pos - 1]]
    veto = self.vpt[self.profiles[pos]]
    limit = AlternativePerformances(bid, {})
    for cid in below.performances:
        # Direction-adjusted max() picks the more preferred value.
        d = self.criteria[cid].direction
        limit.performances[cid] = max(below.performances[cid] * d,
                                      veto.performances[cid] * d) * d
    return limit
import time
import random

from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table
from pymcda.generate import generate_random_criteria_weights
from pymcda.generate import generate_random_mrsort_model_with_coalition_veto
from pymcda.utils import compute_winning_and_loosing_coalitions
from pymcda.utils import compute_confusion_matrix, print_confusion_matrix
from pymcda.types import AlternativePerformances
from pymcda.ui.graphic import display_electre_tri_models

# Generate a random ELECTRE TRI BM model
# 7 criteria, 2 categories, 5 veto coalitions, with veto weights enabled.
model = generate_random_mrsort_model_with_coalition_veto(7, 2, 5, veto_weights = True)
# model = generate_random_mrsort_model(7, 2, 1)

# Reference points spanning the whole criteria scale, assumed to be [0, 1]
# for maximised criteria -- TODO confirm against the generator's scale.
worst = AlternativePerformances("worst", {c.id: 0 for c in model.criteria})
best = AlternativePerformances("best", {c.id: 1 for c in model.criteria})

# Generate a set of alternatives
a = generate_alternatives(1000)
pt = generate_random_performance_table(a, model.criteria)
aa = model.get_assignments(pt)

# Metaheuristic parameters (presumably used by a learning loop later in
# the file -- not visible in this chunk).
nmeta = 20
nloops = 10

print('Original model')
print('==============')
cids = model.criteria.keys()
model.bpt.display(criterion_ids = cids)
# NOTE(review): cv, cfs, catv, aa_erroned, aa_err, c, cat, pt and
# LpAVFSort2 are defined earlier in this file, outside this chunk.
print('==============')
print('Original model')
print('==============')
print("Number of alternatives: %d" % len(a))
print('Criteria weights:')
cv.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
# NOTE(review): under Python 2 the division would floor to 0 -- confirm
# this script targets Python 3.
print("Errors in alternatives assignments: %g %%" \
    % (len(aa_erroned) / len(a) * 100))

# Learn the parameters from assignment examples
# Anchor points at the extremes of the (assumed) [0, 1] criteria scale.
gi_worst = AlternativePerformances('worst', {crit.id: 0 for crit in c})
gi_best = AlternativePerformances('best', {crit.id: 1 for crit in c})
lp = LpAVFSort2(cat, gi_worst, gi_best)
obj, cvs, cfs, catv = lp.solve(aa_err, pt)

print('=============')
print('Learned model')
print('=============')
print('Criteria weights:')
cvs.display()
print('Criteria functions:')
cfs.display()
print('Categories values:')
catv.display()
m.cat_values.display()
display_utadis_model(m.cfs)
sys.exit(0)

# Qt size policy shared by every model view below.
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                               QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
# NOTE(review): feeds the policy's own hasHeightForWidth() flag back into
# itself; the usual Qt boilerplate queries the widget's policy -- confirm
# this is intended.
sizePolicy.setHeightForWidth(sizePolicy.hasHeightForWidth())

# One graphics view per model, all laid out in a grid.
layout = QtGui.QGridLayout()
for m in models:
    worst = AlternativePerformances("worst", {crit.id: 0 for crit in m.criteria})
    best = AlternativePerformances("best", {crit.id: 1 for crit in m.criteria})
    view = _MyGraphicsview()
    graph = QGraphicsSceneEtri(m, worst, best, view.size())
    if m.veto is not None:
        # Plot each veto limit as the profile shifted down by the veto
        # thresholds (element-wise subtraction on performances).
        for veto in m.veto:
            vb = m.bpt[veto.id] - veto
            graph.plot_alternative_performances(vb)
    view.setRenderHint(QtGui.QPainter.Antialiasing)
    view.setSizePolicy(sizePolicy)
    view.setScene(graph)
    layout.addWidget(view)
def get_best_ap(self):
    """Return an AlternativePerformances named 'best' holding, for each
    criterion id, the largest value in the sorted value table (the last
    entry of each sorted column)."""
    top = {cid: self.sorted_values[cid][-1] for cid in self.cids}
    return AlternativePerformances('best', top)
from pymcda.generate import generate_criteria
from pymcda.generate import generate_categories
from pymcda.generate import generate_categories_profiles
from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table
from pymcda.types import AlternativePerformances, PerformanceTable
from pymcda.types import CriterionValue, CriteriaValues, CriteriaSet
from pymcda.electre_tri import MRSort

# Fixed seed: the generated data is reproducible across runs.
random.seed(0)

# 5 criteria and 3 ordered categories separated by 2 boundary profiles.
c = generate_criteria(5)
cat = generate_categories(3)
cps = generate_categories_profiles(cat)

# Flat boundary profiles: b1 (upper) at 0.75 and b2 (lower) at 0.25 on
# every criterion.
bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75,
                                     'c4': 0.75, 'c5': 0.75})
bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25,
                                     'c4': 0.25, 'c5': 0.25})
bpt = PerformanceTable([bp1, bp2])

# Singleton criteria weights (0.2 each, summing to 1)...
cv1 = CriterionValue('c1', 0.2)
cv2 = CriterionValue('c2', 0.2)
cv3 = CriterionValue('c3', 0.2)
cv4 = CriterionValue('c4', 0.2)
cv5 = CriterionValue('c5', 0.2)

# ...plus pairwise interaction weights on criteria coalitions
# (presumably negative = mutual-weakening, positive = mutual-strengthening
# effect -- confirm against CriteriaSet semantics).
cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1)
cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
from pymcda.utils import compute_number_of_winning_coalitions
from pymcda.pt_sorted import SortedPerformanceTable
from pymcda.ui.graphic import display_electre_tri_models
from pymcda.electre_tri import MRSort
from pymcda.types import CriterionValue, CriteriaValues
from pymcda.types import AlternativePerformances, PerformanceTable
from pymcda.types import AlternativeAssignment, AlternativesAssignments

# Generate a random ELECTRE TRI BM model
# Fixed seed: the generated model and data are reproducible.
random.seed(127890123456789)
ncriteria = 5
model = MRSort()
model.criteria = generate_criteria(ncriteria)
# Equal weights (0.2 each, summing to 1).
model.cv = CriteriaValues([CriterionValue('c%d' % (i + 1), 0.2)
                           for i in range(ncriteria)])
# Single flat boundary profile b1 at 0.5 on every criterion.
b1 = AlternativePerformances('b1', {'c%d' % (i + 1): 0.5
                                    for i in range(ncriteria)})
model.bpt = PerformanceTable([b1])
cat = generate_categories(2)
model.categories_profiles = generate_categories_profiles(cat)
model.lbda = 0.6

# Veto profile: keyed by the same id 'b1' as the boundary profile it
# applies to, with random thresholds drawn below that profile.
vb1 = AlternativePerformances('b1', {'c%d' % (i + 1): random.uniform(0,0.4)
                                     for i in range(ncriteria)})
model.veto = PerformanceTable([vb1])
model.veto_weights = model.cv.copy()
model.veto_lbda = 0.4

# Generate a set of alternatives
a = generate_alternatives(1000)
pt = generate_random_performance_table(a, model.criteria)
aa = model.pessimist(pt)
# NOTE(review): ca_test, auc_test, aa_test_m1/m2, aa_learning_m1/m2, m2,
# pt_test and pt_learning are defined earlier in the file, outside this
# chunk.  m1 appears to be the reference model, m2 the learned one.
print("\n\nTest set")
print("========")
print("CA : %g" % ca_test)
print("AUC: %g" % auc_test)
print("Confusion table:")
matrix = compute_confusion_matrix(aa_test_m1, aa_test_m2, m2.categories)
print_confusion_matrix(matrix, m2.categories)

# Alternatives whose assignment differs between the two models.
aids = [a.id for a in aa_test_m1 \
        if aa_test_m1[a.id].category_id != aa_test_m2[a.id].category_id]
if len(aids) > 0:
    print("List of alternatives wrongly assigned:")
    print_pt_and_assignments(aids, None, [aa_test_m1, aa_test_m2], pt_test)

# Exact type check: deliberately excludes MRSort subclasses -- TODO
# confirm isinstance() would not be more appropriate here.
if type(m2) == MRSort:
    worst = AlternativePerformances('worst', {c.id: 0 for c in m2.criteria})
    best = AlternativePerformances('best', {c.id: 1 for c in m2.criteria})
    categories = m2.categories
    a_learning = aa_learning_m1.keys()
    # Partition the learning set by how m2 assigned each alternative
    # relative to m1 (the continuation of this branch lies past this
    # chunk).
    pt_learning_ok = []
    pt_learning_too_low = []
    pt_learning_too_high = []
    for a in a_learning:
        i1 = categories.index(aa_learning_m1[a].category_id)
        i2 = categories.index(aa_learning_m2[a].category_id)
        if i1 == i2:
            pt_learning_ok.append(pt_learning[a])
        elif i1 < i2:
            pt_learning_too_high.append(pt_learning[a])
# Actions a1 = Alternative('a1', 'a1') a2 = Alternative('a2', 'a2') a3 = Alternative('a3', 'a3') a4 = Alternative('a4', 'a4') a5 = Alternative('a5', 'a5') a6 = Alternative('a6', 'a6') a7 = Alternative('a7', 'a7') a = Alternatives([a1, a2, a3, a4, a5, a6, a7]) # Performance table p1 = AlternativePerformances('a1', { 'prix': 120, 'transport': 284, 'envir': 5, 'residents': 3.5, 'competition': 18 }) p2 = AlternativePerformances('a2', { 'prix': 150, 'transport': 269, 'envir': 2, 'residents': 4.5, 'competition': 24 }) p3 = AlternativePerformances('a3', { 'prix': 100, 'transport': 413, 'envir': 4, 'residents': 5.5,
def test001(self):
    """Check MRSort pessimistic assignments against a hand-built model
    with a single boundary profile, a veto profile and veto weights.

    45 alternatives (every combination of 9/11 around the profile at 10,
    plus combinations containing 7, below the veto at 2... assumed to
    trigger vetoes via the model's lambda thresholds -- confirm against
    MRSort semantics) are assigned and compared to expected categories.
    """
    c = generate_criteria(5)

    # Equal weights, 0.2 on each of the 5 criteria.
    w1 = CriterionValue('c1', 0.2)
    w2 = CriterionValue('c2', 0.2)
    w3 = CriterionValue('c3', 0.2)
    w4 = CriterionValue('c4', 0.2)
    w5 = CriterionValue('c5', 0.2)
    w = CriteriaValues([w1, w2, w3, w4, w5])

    # One flat boundary profile at 10 on every criterion.
    b1 = AlternativePerformances('b1', { 'c1': 10, 'c2': 10, 'c3': 10,
                                         'c4': 10, 'c5': 10 })
    bpt = PerformanceTable([b1])

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)

    # Veto profile, keyed by the same id 'b1' as the boundary profile.
    vb1 = AlternativePerformances('b1', { 'c1': 2, 'c2': 2, 'c3': 2,
                                          'c4': 2, 'c5': 2 }, 'b1')
    v = PerformanceTable([vb1])
    # Veto weights mirror the concordance weights.
    vw = w.copy()

    # Alternatives: values 9 (below profile), 11 (above) and 7.
    a1 = AlternativePerformances('a1', { 'c1': 9, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 11 })
    a2 = AlternativePerformances('a2', { 'c1': 9, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 9 })
    a3 = AlternativePerformances('a3', { 'c1': 9, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 11 })
    a4 = AlternativePerformances('a4', { 'c1': 9, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 9 })
    a5 = AlternativePerformances('a5', { 'c1': 9, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 11 })
    a6 = AlternativePerformances('a6', { 'c1': 9, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 9 })
    a7 = AlternativePerformances('a7', { 'c1': 9, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 11 })
    a8 = AlternativePerformances('a8', { 'c1': 9, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 9 })
    a9 = AlternativePerformances('a9', { 'c1': 9, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 11 })
    a10 = AlternativePerformances('a10', { 'c1': 9, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 9 })
    a11 = AlternativePerformances('a11', { 'c1': 9, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 11 })
    a12 = AlternativePerformances('a12', { 'c1': 9, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 9 })
    a13 = AlternativePerformances('a13', { 'c1': 9, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 11 })
    a14 = AlternativePerformances('a14', { 'c1': 9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 9 })
    a15 = AlternativePerformances('a15', { 'c1': 9, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11 })
    a16 = AlternativePerformances('a16', { 'c1': 11, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 9 })
    a17 = AlternativePerformances('a17', { 'c1': 11, 'c2': 9, 'c3': 9, 'c4': 9, 'c5': 11 })
    a18 = AlternativePerformances('a18', { 'c1': 11, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 9 })
    a19 = AlternativePerformances('a19', { 'c1': 11, 'c2': 9, 'c3': 9, 'c4': 11, 'c5': 11 })
    a20 = AlternativePerformances('a20', { 'c1': 11, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 9 })
    a21 = AlternativePerformances('a21', { 'c1': 11, 'c2': 9, 'c3': 11, 'c4': 9, 'c5': 11 })
    a22 = AlternativePerformances('a22', { 'c1': 11, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 9 })
    a23 = AlternativePerformances('a23', { 'c1': 11, 'c2': 9, 'c3': 11, 'c4': 11, 'c5': 11 })
    a24 = AlternativePerformances('a24', { 'c1': 11, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 9 })
    a25 = AlternativePerformances('a25', { 'c1': 11, 'c2': 11, 'c3': 9, 'c4': 9, 'c5': 11 })
    a26 = AlternativePerformances('a26', { 'c1': 11, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 9 })
    a27 = AlternativePerformances('a27', { 'c1': 11, 'c2': 11, 'c3': 9, 'c4': 11, 'c5': 11 })
    a28 = AlternativePerformances('a28', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 9 })
    a29 = AlternativePerformances('a29', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 9, 'c5': 11 })
    a30 = AlternativePerformances('a30', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 9 })
    a31 = AlternativePerformances('a31', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 7 })
    a32 = AlternativePerformances('a32', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 11 })
    a33 = AlternativePerformances('a33', { 'c1': 11, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 11 })
    a34 = AlternativePerformances('a34', { 'c1': 11, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 11 })
    a35 = AlternativePerformances('a35', { 'c1': 7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 11 })
    a36 = AlternativePerformances('a36', { 'c1': 11, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 7 })
    a37 = AlternativePerformances('a37', { 'c1': 11, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 7 })
    a38 = AlternativePerformances('a38', { 'c1': 11, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 7 })
    a39 = AlternativePerformances('a39', { 'c1': 7, 'c2': 11, 'c3': 11, 'c4': 11, 'c5': 7 })
    a40 = AlternativePerformances('a40', { 'c1': 11, 'c2': 11, 'c3': 7, 'c4': 7, 'c5': 11 })
    a41 = AlternativePerformances('a41', { 'c1': 11, 'c2': 7, 'c3': 11, 'c4': 7, 'c5': 11 })
    a42 = AlternativePerformances('a42', { 'c1': 7, 'c2': 11, 'c3': 11, 'c4': 7, 'c5': 11 })
    a43 = AlternativePerformances('a43', { 'c1': 11, 'c2': 7, 'c3': 7, 'c4': 11, 'c5': 11 })
    a44 = AlternativePerformances('a44', { 'c1': 7, 'c2': 11, 'c3': 7, 'c4': 11, 'c5': 11 })
    a45 = AlternativePerformances('a45', { 'c1': 7, 'c2': 7, 'c3': 11, 'c4': 11, 'c5': 11 })
    pt = PerformanceTable([ a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
                            a11, a12, a13, a14, a15, a16, a17, a18,
                            a19, a20, a21, a22, a23, a24, a25, a26,
                            a27, a28, a29, a30, a31, a32, a33, a34,
                            a35, a36, a37, a38, a39, a40, a41, a42,
                            a43, a44, a45 ])

    # Expected assignments (cat1 = best category above b1, cat2 = the
    # other -- assumed; confirm generate_categories ordering).
    ap1 = AlternativeAssignment('a1', 'cat2')
    ap2 = AlternativeAssignment('a2', 'cat2')
    ap3 = AlternativeAssignment('a3', 'cat2')
    ap4 = AlternativeAssignment('a4', 'cat2')
    ap5 = AlternativeAssignment('a5', 'cat2')
    ap6 = AlternativeAssignment('a6', 'cat2')
    ap7 = AlternativeAssignment('a7', 'cat1')
    ap8 = AlternativeAssignment('a8', 'cat2')
    ap9 = AlternativeAssignment('a9', 'cat2')
    ap10 = AlternativeAssignment('a10', 'cat2')
    ap11 = AlternativeAssignment('a11', 'cat1')
    ap12 = AlternativeAssignment('a12', 'cat2')
    ap13 = AlternativeAssignment('a13', 'cat1')
    ap14 = AlternativeAssignment('a14', 'cat1')
    ap15 = AlternativeAssignment('a15', 'cat1')
    ap16 = AlternativeAssignment('a16', 'cat2')
    ap17 = AlternativeAssignment('a17', 'cat2')
    ap18 = AlternativeAssignment('a18', 'cat2')
    ap19 = AlternativeAssignment('a19', 'cat1')
    ap20 = AlternativeAssignment('a20', 'cat2')
    ap21 = AlternativeAssignment('a21', 'cat1')
    ap22 = AlternativeAssignment('a22', 'cat1')
    ap23 = AlternativeAssignment('a23', 'cat1')
    ap24 = AlternativeAssignment('a24', 'cat2')
    ap25 = AlternativeAssignment('a25', 'cat1')
    ap26 = AlternativeAssignment('a26', 'cat1')
    ap27 = AlternativeAssignment('a27', 'cat1')
    ap28 = AlternativeAssignment('a28', 'cat1')
    ap29 = AlternativeAssignment('a29', 'cat1')
    ap30 = AlternativeAssignment('a30', 'cat1')
    ap31 = AlternativeAssignment('a31', 'cat1')
    ap32 = AlternativeAssignment('a32', 'cat1')
    ap33 = AlternativeAssignment('a33', 'cat1')
    ap34 = AlternativeAssignment('a34', 'cat1')
    ap35 = AlternativeAssignment('a35', 'cat1')
    ap36 = AlternativeAssignment('a36', 'cat2')
    ap37 = AlternativeAssignment('a37', 'cat2')
    ap38 = AlternativeAssignment('a38', 'cat2')
    ap39 = AlternativeAssignment('a39', 'cat2')
    ap40 = AlternativeAssignment('a40', 'cat2')
    ap41 = AlternativeAssignment('a41', 'cat2')
    ap42 = AlternativeAssignment('a42', 'cat2')
    ap43 = AlternativeAssignment('a43', 'cat2')
    ap44 = AlternativeAssignment('a44', 'cat2')
    ap45 = AlternativeAssignment('a45', 'cat2')
    aa = AlternativesAssignments([ ap1, ap2, ap3, ap4, ap5, ap6, ap7,
                                   ap8, ap9, ap10, ap11, ap12, ap13,
                                   ap14, ap15, ap16, ap17, ap18, ap19,
                                   ap20, ap21, ap22, ap23, ap24, ap25,
                                   ap26, ap27, ap28, ap29, ap30, ap31,
                                   ap32, ap33, ap34, ap35, ap36, ap37,
                                   ap38, ap39, ap40, ap41, ap42, ap43,
                                   ap44, ap45 ])

    # Model with concordance lambda 0.6 and veto lambda 0.4.
    model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
    aa2 = model.pessimist(pt)

    ok = compare_assignments(aa, aa2)

    self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                     "assigned")
def generate_best_ap(crits, value=1):
    """Build an AlternativePerformances named "best" holding *value* on
    every maximised criterion (direction == 1) and 0 on the others."""
    perfs = {}
    for crit in crits:
        perfs[crit.id] = value if crit.direction == 1 else 0
    return AlternativePerformances("best", perfs)
def generate_worst_ap(crits, value=0):
    """Build an AlternativePerformances named "worst" holding *value* on
    every maximised criterion (direction == 1) and 1 on the others."""
    perfs = {}
    for crit in crits:
        perfs[crit.id] = value if crit.direction == 1 else 1
    return AlternativePerformances("worst", perfs)