def generate_random_performance_table(alts,
                                      crits,
                                      seed=None,
                                      k=3,
                                      worst=None,
                                      best=None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    for a in alts:
        perfs = {}
        for c in crits:
            if worst is None or best is None:
                rdom = round(random.random(), k)
            else:
                rdom = round(
                    random.uniform(worst.performances[c.id],
                                   best.performances[c.id]), k)

            perfs[c.id] = rdom

        ap = AlternativePerformances(a.id, perfs)
        pt.append(ap)

    return pt
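A quick usage sketch for the generator above; `generate_alternatives` and
`generate_criteria` are the pymcda helpers used in the later examples, and the
argument values are made up:

a = generate_alternatives(100)
c = generate_criteria(5)
# evaluations are drawn uniformly in [0, 1] and rounded to k=3 decimals
pt = generate_random_performance_table(a, c, seed=123)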
Example #2
def generate_binary_performance_table_and_assignments(criteria,
                                                      categories, fmins):
    cids = list(criteria.keys())
    cats = categories.get_ordered_categories()
    aa = AlternativesAssignments()
    pt = PerformanceTable()
    i = 1
    for coalition in powerset(cids):
        if not coalition or len(coalition) == len(cids):
            continue

        aid = "a%d" % i
        ap = AlternativePerformances(
            aid, OrderedDict((c, 1 if c in coalition else 0) for c in cids))
        pt.append(ap)

        cat = cats[0]
        for fmin in fmins:
            if fmin.issubset(coalition):
                cat = cats[1]
                break

        aa.append(AlternativeAssignment(aid, cat))

        i += 1

    return pt, aa
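The snippet relies on a `powerset` helper that is not shown here. A minimal
itertools-based sketch consistent with how it is used above (it must yield
every subset of the criterion ids, empty and full sets included):

from itertools import chain, combinations

def powerset(iterable):
    # subsets of every size, from () up to the full tuple
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))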
Example #3
    def solve_cplex(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
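solve_cplex() reads the solution back through a naming convention: a
`w_<criterion>` weight variable per criterion, a majority threshold named
`lambda`, and a `g_<profile>_<criterion>` variable per profile/criterion pair.
A hypothetical sketch of how such variables could be declared with the CPLEX
Python API (the declaration side is not part of this example):

# assumption: `lp` is a cplex.Cplex instance, and `criteria` and `profiles`
# are as in the surrounding class
names = ['w_%s' % c.id for c in criteria] + ['lambda']
names += ['g_%s_%s' % (p, c.id) for p in profiles for c in criteria]
lp.variables.add(names=names, lb=[0.0] * len(names), ub=[1.0] * len(names))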
Example #4
    def solve_cplex(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #5
    def solve_glpk(self):
        self.lp.solvopt(method='exact', integer='advanced')
        self.lp.solve()

        status = self.lp.status()
        if status != 'opt':
            raise RuntimeError("Solver status: %s" % self.lp.status())

        obj = self.lp.vobj()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = float(self.w[c.id].primal)
            cvs.append(cv)

        self.model.cv = cvs
        self.model.lbda = self.lbda.primal

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.g[p][c.id].primal
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #6
    def solve_glpk(self):
        self.lp.solvopt(method='exact', integer='advanced')
        self.lp.solve()

        status = self.lp.status()
        if status != 'opt':
            raise RuntimeError("Solver status: %s" % self.lp.status())

        obj = self.lp.vobj()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = float(self.w[c.id].primal)
            cvs.append(cv)

        self.model.cv = cvs
        self.model.lbda = self.lbda.primal

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.g[p][c.id].primal
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #7

def duplicate_performance_table_msjp(pt,
                                     alts,
                                     crits,
                                     seed=None,
                                     k=3,
                                     worst=None,
                                     best=None,
                                     dupl_crits=None):
    if seed is not None:
        random.seed(seed)

    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits] if dupl_crits is not None else []
    for ap in pt:
        # copy the performances so the source table is left untouched
        ap_perfs = dict(ap.performances)
        for c in crits:
            if (c.id + "d") in tmp_ids:
                ap_perfs[c.id + "d"] = ap_perfs[c.id]

        ap_dupl = AlternativePerformances(ap.altid, ap_perfs)
        pt_dupl.append(ap_dupl)

    return pt_dupl
Example #8
def generate_binary_performance_table_and_assignments(criteria, categories,
                                                      fmins):
    cids = list(criteria.keys())
    cats = categories.get_ordered_categories()
    aa = AlternativesAssignments()
    pt = PerformanceTable()
    i = 1
    for coalition in powerset(cids):
        if not coalition or len(coalition) == len(cids):
            continue

        aid = "a%d" % i
        ap = AlternativePerformances(
            aid, OrderedDict((c, 1 if c in coalition else 0) for c in cids))
        pt.append(ap)

        cat = cats[0]
        for fmin in fmins:
            if fmin.issubset(coalition):
                cat = cats[1]
                break

        aa.append(AlternativeAssignment(aid, cat))

        i += 1

    return pt, aa
Example #9
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto
Example #10
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)
Example #11
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto
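As the getter above (and the setter in Example #10) suggests, the veto is
exposed as an offset below each boundary profile but stored internally as an
absolute profile, vpt = bpt - veto. A plain-dict illustration of that
bookkeeping with made-up numbers:

bpt_b1 = {'c1': 0.75, 'c2': 0.75}    # boundary profile b1
veto_b1 = {'c1': 0.30, 'c2': 0.30}   # veto offsets below the profile
vpt_b1 = {c: bpt_b1[c] - veto_b1[c] for c in bpt_b1}
print({c: round(v, 2) for c, v in vpt_b1.items()})  # {'c1': 0.45, 'c2': 0.45}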
Example #12

def generate_random_profiles_msjp(alts,
                                  crits,
                                  seed=None,
                                  k=3,
                                  worst=None,
                                  best=None,
                                  fct_percentile=[],
                                  nb_unk_criteria=0):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp
            if (c.id[-1] != "d") and (int(c.id[1:]) <= nb_unk_criteria):
                rdom.append(
                    round(
                        random.uniform(
                            max(minp, fct_percentile[int(c.id[1:]) - 1][0]),
                            min(maxp, fct_percentile[int(c.id[1:]) - 1][1])),
                        k))
            else:
                rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
Example #13

def generate_random_performance_table_msjp_mip(alts,
                                               crits,
                                               seed=None,
                                               k=2,
                                               worst=None,
                                               best=None,
                                               dupl_crits=None,
                                               cardinality=10):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits] if dupl_crits is not None else []
    # evaluations are drawn from a fixed grid of equally spaced values in [0, 1]
    grid = list(np.arange(0, 1. + 1. / (10 ** k), 1. / (cardinality - 1)))
    for a in alts:
        perfs = {}
        perfs_dupl = {}
        for c in crits:
            rdom = round(random.choice(grid), k)

            perfs[c.id] = rdom
            perfs_dupl[c.id] = rdom
            if (c.id + "d") in tmp_ids:
                perfs_dupl[(c.id + "d")] = rdom

        ap = AlternativePerformances(a.id, perfs)
        ap_dupl = AlternativePerformances(a.id, perfs_dupl)
        pt.append(ap)
        pt_dupl.append(ap_dupl)

    return pt, pt_dupl
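The argument to random.choice is a fixed grid, so every evaluation takes one
of a small set of equally spaced values in [0, 1]. What that grid looks like
for the defaults (k=2, cardinality=10):

import numpy as np

cardinality, k = 10, 2
grid = np.arange(0, 1. + 1. / (10 ** k), 1. / (cardinality - 1))
print([round(f, k) for f in grid])
# [0.0, 0.11, 0.22, 0.33, 0.44, 0.56, 0.67, 0.78, 0.89, 1.0]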
Example #14

def generate_random_performance_table_msjp(alts,
                                           crits,
                                           seed=None,
                                           k=3,
                                           worst=None,
                                           best=None,
                                           dupl_crits=None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits] if dupl_crits is not None else []
    for a in alts:
        perfs = {}
        perfs_dupl = {}
        for c in crits:
            if worst is None or best is None:
                rdom = round(random.random(), k)
            else:
                rdom = round(random.uniform(worst.performances[c.id],
                                            best.performances[c.id]), k)

            perfs[c.id] = rdom
            perfs_dupl[c.id] = rdom
            if (c.id + "d") in tmp_ids:
                perfs_dupl[c.id + "d"] = rdom

        ap = AlternativePerformances(a.id, perfs)
        ap_dupl = AlternativePerformances(a.id, perfs_dupl)
        pt.append(ap)
        pt_dupl.append(ap_dupl)

    return pt, pt_dupl
Example #15

    def test001(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        ap1 = AlternativePerformances('a1', {'c1': 0.3, 'c2': 0.3, 'c3': 0.3})
        ap2 = AlternativePerformances('a2', {'c1': 0.8, 'c2': 0.8, 'c3': 0.8})
        ap3 = AlternativePerformances('a3', {'c1': 0.3, 'c2': 0.3, 'c3': 0.1})
        ap4 = AlternativePerformances('a4', {'c1': 0.3, 'c2': 0.1, 'c3': 0.3})
        ap5 = AlternativePerformances('a5', {'c1': 0.1, 'c2': 0.3, 'c3': 0.3})
        ap6 = AlternativePerformances('a6', {'c1': 0.8, 'c2': 0.8, 'c3': 0.1})
        ap7 = AlternativePerformances('a7', {'c1': 0.8, 'c2': 0.1, 'c3': 0.8})
        ap8 = AlternativePerformances('a8', {'c1': 0.1, 'c2': 0.8, 'c3': 0.8})
        pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8])

        aa = model.get_assignments(pt)

        self.assertEqual(aa['a1'].category_id, "cat2")
        self.assertEqual(aa['a2'].category_id, "cat1")
        self.assertEqual(aa['a3'].category_id, "cat3")
        self.assertEqual(aa['a4'].category_id, "cat2")
        self.assertEqual(aa['a5'].category_id, "cat2")
        self.assertEqual(aa['a6'].category_id, "cat3")
        self.assertEqual(aa['a7'].category_id, "cat1")
        self.assertEqual(aa['a8'].category_id, "cat1")
Example #16
def generate_random_veto_profiles(model, worst = None, k = 3):
    if worst is None:
        worst = generate_worst_ap(model.criteria)

    vpt = PerformanceTable()
    for bid in model.profiles:
        ap = AlternativePerformances(bid, {})
        for c in model.criteria:
            a = model.bpt[bid].performances[c.id]
            b = worst.performances[c.id]
            ap.performances[c.id] = round(random.uniform(a, b), k)
        vpt.append(ap)
        worst = ap

    return vpt
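Note the `worst = ap` at the end of the loop: each veto profile is drawn
between its boundary profile and the previously generated veto profile, which
keeps the veto profiles nested. A usage sketch, assuming a model built with
`generate_random_mrsort_model` as in the later examples (argument values made
up):

model = generate_random_mrsort_model(7, 2, 5)
vpt = generate_random_veto_profiles(model)  # one entry per model.profiles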
Example #17

def generate_random_veto_profiles(model, worst = None, k = 3):
    if worst is None:
        worst = generate_worst_ap(model.criteria)

    vpt = PerformanceTable()
    for bid in model.profiles:
        ap = AlternativePerformances(bid, {})
        for c in model.criteria:
            a = model.bpt[bid].performances[c.id]
            b = worst.performances[c.id]
            ap.performances[c.id] = round(random.uniform(a, b), k)
        vpt.append(ap)
        worst = ap

    return vpt
Example #18

    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        bpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            bp = self.init_profile(profile_id, cats[i], cats[i + 1], pabove)
            bpt.append(bp)
            pabove = bp

        self.model.bpt = bpt
Example #19
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        vpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            b1 = self.model.bpt['b1']
            vp = self.init_profile(profile_id, cats[i], cats[i+1])
            vpt.append(b1 - vp)

        self.model.veto = vpt
Example #20
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        bpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            bp = self.init_profile(profile_id, cats[i], cats[i+1], pabove)
            bpt.append(bp)
            pabove = bp

        self.model.bpt = bpt
Example #21

    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        vpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            b1 = self.model.bpt['b1']
            vp = self.init_profile(profile_id, cats[i], cats[i + 1])
            vpt.append(b1 - vp)

        self.model.veto = vpt
Example #22

    def __init__(self, criteria = None, cv = None, bpt = None,
                 lbda = None, categories_profiles = None, veto = None,
                 veto_weights = None, veto_lbda = None, id = None):
        super(MRSort, self).__init__(criteria, cv, bpt, lbda,
                                     categories_profiles)
        # for possibly single-peaked (or single-valleyed) criteria
        self.bpt_sp = PerformanceTable()
        if self.bpt is not None:
            for i, j in categories_profiles.items():
                ap = deepcopy(self.bpt[i])
                self.bpt_sp.append(ap)
        self.b_peak = None
        self.veto = veto
        self.veto_weights = veto_weights
        self.veto_lbda = veto_lbda
Example #23
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)
Example #24

def generate_random_profiles_msjp(alts,
                                  crits,
                                  seed=None,
                                  k=3,
                                  worst=None,
                                  best=None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
Example #25
    def solve(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        wv = CriteriaValues()
        for c in self.criteria:
            w = CriterionValue()
            w.id = c.id
            w.value = self.lp.solution.get_values('z_' + c.id)
            wv.append(w)

        self.model.veto_weights = wv
        self.model.veto_lbda = self.lp.solution.get_values("LAMBDA")

        v = PerformanceTable()
        for p in self.__profiles:
            vp = AlternativePerformances(p, {})
            for c in self.criteria:
                perf = self.lp.solution.get_values('v_%s_%s' % (p, c.id))
                vp.performances[c.id] = round(perf, 5)
            v.append(vp)

        self.model.veto = v

        return obj
Example #26
def generate_random_performance_table(alts, crits, seed = None, k = 3,
                                      worst = None, best = None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    for a in alts:
        perfs = {}
        for c in crits:
            if worst is None or best is None:
                rdom = round(random.random(), k)
            else:
                rdom = round(random.uniform(worst.performances[c.id],
                                            best.performances[c.id]), k)

            perfs[c.id] = rdom

        ap = AlternativePerformances(a.id, perfs)
        pt.append(ap)

    return pt
Example #27

    def test002(self):
        random.seed(2)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75,
            'c4': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25,
            'c4': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, 0.5, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

        for aa in aas:
            w1 = w2 = 0
            perfs = pt[aa.id].performances
            for c, val in perfs.items():
                if val >= bp1.performances[c]:
                    w1 += cv[c].value
                if val >= bp2.performances[c]:
                    w2 += cv[c].value

            if aa.category_id == 'cat3':
                self.assertLess(w1, lbda)
                self.assertLess(w2, lbda)
            elif aa.category_id == 'cat2':
                self.assertLess(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
            else:
                self.assertGreaterEqual(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
Example #28
def generate_random_profiles(alts, crits, seed = None, k = 3,
                             worst = None, best = None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    for c in crits:
        rdom = []
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse = True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
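Because the n draws for each criterion are sorted (descending when
direction == -1), the generated profiles never cross on any criterion. A small
self-check, assuming the pymcda generators used elsewhere in these examples:

crits = generate_criteria(3)
pt = generate_random_profiles(['b1', 'b2', 'b3'], crits, seed=7)
for c in crits:
    vals = [pt[p].performances[c.id] for p in ('b1', 'b2', 'b3')]
    assert vals == sorted(vals, reverse=(c.direction == -1))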
Example #29
    def from_xmcda(self, xmcda, id=None):
        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)

        self.id = xmcda.get('id')
        value = xmcda.find(".//methodParameters/parameter/value[@id='lambda']")
        self.lbda = unmarshal(value.getchildren()[0])

        setattr(self, 'criteria', Criteria().from_xmcda(xmcda, 'criteria'))
        setattr(self, 'cv', CriteriaValues().from_xmcda(xmcda, 'cv'))
        setattr(self, 'bpt', PerformanceTable().from_xmcda(xmcda, 'bpt'))
        setattr(self, 'categories_profiles',
                CategoriesProfiles().from_xmcda(xmcda, 'categories_profiles'))

        return self
Example #30
    def from_xmcda(self, xmcda, id=None):
        super(MRSort, self).from_xmcda(xmcda, id)

        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
        value = xmcda.find(
            ".//methodParameters/parameter/value[@id='veto_lbda']")
        if value is not None:
            self.veto_lbda = unmarshal(value.getchildren()[0])

        if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
            setattr(self, 'veto_weights',
                    CriteriaValues().from_xmcda(xmcda, 'veto_weights'))
        if xmcda.find(".//performanceTable[@id='veto']") is not None:
            setattr(self, 'veto', PerformanceTable().from_xmcda(xmcda, 'veto'))

        return self
Example #31
    def solve(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        wv = CriteriaValues()
        for c in self.criteria:
            w = CriterionValue()
            w.id = c.id
            w.value = self.lp.solution.get_values('z_' + c.id)
            wv.append(w)

        self.model.veto_weights = wv
        self.model.veto_lbda = self.lp.solution.get_values("LAMBDA")

        v = PerformanceTable()
        for p in self.__profiles:
            vp = AlternativePerformances(p, {})
            for c in self.criteria:
                perf = self.lp.solution.get_values('v_%s_%s' % (p, c.id))
                vp.performances[c.id] = round(perf, 5)
            v.append(vp)

        self.model.veto = v

        return obj
Example #32

    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example #33
    from pymcda.types import AlternativesAssignments
    from pymcda.utils import print_pt_and_assignments
    from pymcda.ui.graphic import display_electre_tri_models

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)
    c = generate_criteria(4)

    # Generate assignment incompatible with an MR-Sort model
    ap1 = AlternativePerformances('a1', {'c1': 1, 'c2': 1, 'c3': 0, 'c4': 0})
    ap2 = AlternativePerformances('a2', {'c1': 0, 'c2': 0, 'c3': 1, 'c4': 1})
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)
Example #34
cmatrix_test = {}

DATADIR = os.getenv('DATADIR', '%s/pymcda-data' % os.path.expanduser('~'))
directory = '%s/test-veto2' % DATADIR

for f in sys.argv[1:]:
    fname = os.path.splitext(os.path.basename(f))[0]

    if is_bz2_file(f):
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root, 'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

Example #35
class MRSort(ElectreTri):
    def __init__(self,
                 criteria=None,
                 cv=None,
                 bpt=None,
                 lbda=None,
                 categories_profiles=None,
                 veto=None,
                 veto_weights=None,
                 veto_lbda=None,
                 id=None):
        super(MRSort, self).__init__(criteria, cv, bpt, lbda,
                                     categories_profiles)
        self.veto = veto
        self.veto_weights = veto_weights
        self.veto_lbda = veto_lbda

    @property
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto

    @veto.setter
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)

    def criteria_coalition(self, ap1, ap2):
        criteria_set = set()

        for c in self.criteria:
            diff = ap2.performances[c.id] - ap1.performances[c.id]
            diff *= c.direction
            if diff <= 0:
                criteria_set.add(c.id)

        return criteria_set

    def concordance(self, ap, profile):
        criteria_set = self.criteria_coalition(ap, profile)
        return sum(c.value for c in self.cv if c.id_issubset(criteria_set))

    def coalition_weight(self, criteria_coalition):
        return sum(c.value for c in self.cv
                   if c.id_issubset(criteria_coalition))

    def veto_concordance(self, ap, profile):
        if self.bpt is None:
            return 0

        criteria_set = self.criteria_coalition(profile, ap)
        if self.veto_weights is None:
            if len(criteria_set) > 0:
                return 1
            else:
                return 0

        return sum(c.value for c in self.veto_weights
                   if c.id_issubset(criteria_set))

    def get_assignment(self, ap):
        categories = list(reversed(self.categories))
        cat = categories[0]
        for i, profile in enumerate(reversed(self.profiles)):
            bp = self.bpt[profile]
            cw = self.concordance(ap, bp)
            if cw >= self.lbda:
                if self.vpt is None:
                    break
                else:
                    vp = self.vpt[profile]
                    vw = self.veto_concordance(ap, vp)
                    if self.veto_lbda and vw < self.veto_lbda:
                        break

                    if vw == 0:
                        break

            cat = categories[i + 1]

        return AlternativeAssignment(ap.id, cat)

    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa

    def pessimist(self, pt):
        return self.get_assignments(pt)

    def credibility(self, x, y, profile):
        c = self.concordance(x, y)

        if self.vpt is None:
            return c

        vp = self.vpt[profile]
        vc = self.veto_concordance(x, vp)
        if self.veto_lbda and (eq(vc, self.veto_lbda) or vc > self.veto_lbda):
            return 0
        elif self.veto_lbda is None and vc > 0:
            return 0

        return c

    def count_veto_pessimist(self, ap):
        n = 0
        profiles = self.profiles[:]
        profiles.reverse()
        for i, profile in enumerate(profiles):
            c = self.concordance(ap, self.bpt[profile])
            if c < self.lbda:
                continue

            vc = self.veto_concordance(ap, self.bpt[profile])
            if self.veto_lbda and (eq(vc, self.veto_lbda)
                                   or vc > self.veto_lbda):
                n += 1
            elif self.veto_lbda is None and vc > 0:
                n += 1

        return n

    def get_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return None

        return self.bpt[self.profiles[index + 1]]

    def get_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if self.vpt is None:
            if index == 0:
                return None
            else:
                return self.bpt[self.profiles[index - 1]]

        if index == 0:
            return self.vpt[bid]

        bp = self.bpt[self.profiles[index - 1]]
        vp = self.vpt[self.profiles[index]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = max(bperf, vperf) * direction

        return ap

    def get_veto_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return self.bpt[self.profiles[index]]

        bp = self.bpt[bid]
        vp = self.vpt[self.profiles[index + 1]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = min(bperf, vperf) * direction

        return ap

    def get_veto_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if index == 0:
            return None

        return self.vpt[self.profiles[index - 1]]

    def to_xmcda(self):
        root = super(MRSort, self).to_xmcda()

        for obj in ['veto', 'veto_weights']:
            mcda = getattr(self, obj)
            if mcda is None:
                continue

            mcda.id = obj
            xmcda = mcda.to_xmcda()
            root.append(xmcda)

        if self.veto_lbda:
            mparams = ElementTree.SubElement(root, 'methodParameters')
            param = ElementTree.SubElement(mparams, 'parameter')
            value = ElementTree.SubElement(param, 'value')
            value.set('id', 'veto_lbda')
            lbda = marshal(self.veto_lbda)
            value.append(lbda)

        return root

    def from_xmcda(self, xmcda, id=None):
        super(MRSort, self).from_xmcda(xmcda, id)

        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
        value = xmcda.find(
            ".//methodParameters/parameter/value[@id='veto_lbda']")
        if value is not None:
            self.veto_lbda = unmarshal(value.getchildren()[0])

        if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
            setattr(self, 'veto_weights',
                    CriteriaValues().from_xmcda(xmcda, 'veto_weights'))
        if xmcda.find(".//performanceTable[@id='veto']") is not None:
            setattr(self, 'veto', PerformanceTable().from_xmcda(xmcda, 'veto'))

        return self
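A minimal end-to-end use of the class above, mirroring the unit tests earlier
in this collection (two categories, hence a single boundary profile b1; the
numbers are made up):

c = generate_criteria(3)
cat = generate_categories(2)
cps = generate_categories_profiles(cat)
bpt = PerformanceTable([AlternativePerformances('b1',
                                                {'c1': 0.5, 'c2': 0.5,
                                                 'c3': 0.5})])
cvs = CriteriaValues([CriterionValue('c1', 0.4),
                      CriterionValue('c2', 0.3),
                      CriterionValue('c3', 0.3)])
model = MRSort(c, cvs, bpt, 0.6, cps)

ap = AlternativePerformances('a1', {'c1': 0.7, 'c2': 0.7, 'c3': 0.2})
# c1 and c2 meet b1, so the concordance 0.4 + 0.3 = 0.7 >= lambda = 0.6
# and a1 lands in the top category
print(model.get_assignment(ap).category_id)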
Example #36

    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_criteria_weights
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativePerformances
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(7, 2, 5)
    worst = AlternativePerformances("worst", {c.id: 0 for c in model.criteria})
    best = AlternativePerformances("best", {c.id: 1 for c in model.criteria})

    # Add veto
    vpt = generate_random_profiles(model.profiles, model.criteria, None, 3,
                                   worst, model.bpt['b1'])
    model.veto = PerformanceTable([model.bpt['b1'] - vpt['b1']])
    model.veto_weights = generate_random_criteria_weights(model.criteria)
    model.veto_lbda = random.random()

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    nmeta = 20
    nloops = 10

    print('Original model')
    print('==============')
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids)
Example #37
class MRSort(ElectreTri):

    def __init__(self, criteria = None, cv = None, bpt = None,
                 lbda = None, categories_profiles = None, veto = None,
                 veto_weights = None, veto_lbda = None, id = None):
        super(MRSort, self).__init__(criteria, cv, bpt, lbda,
                                     categories_profiles)
        self.veto = veto
        self.veto_weights = veto_weights
        self.veto_lbda = veto_lbda

    @property
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto

    @veto.setter
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)

    def criteria_coalition(self, ap1, ap2):
        criteria_set = set()

        for c in self.criteria:
            diff = ap2.performances[c.id] - ap1.performances[c.id]
            diff *= c.direction
            if diff <= 0:
                criteria_set.add(c.id)

        return criteria_set

    def concordance(self, ap, profile):
        criteria_set = self.criteria_coalition(ap, profile)
        return sum(c.value for c in self.cv
                   if c.id_issubset(criteria_set))

    def coalition_weight(self, criteria_coalition):
        return sum(c.value for c in self.cv
                   if c.id_issubset(criteria_coalition))

    def veto_concordance(self, ap, profile):
        if self.bpt is None:
            return 0

        criteria_set = self.criteria_coalition(profile, ap)
        if self.veto_weights is None:
            if len(criteria_set) > 0:
                return 1
            else:
                return 0

        return sum(c.value for c in self.veto_weights
                   if c.id_issubset(criteria_set))

    def get_assignment(self, ap):
        categories = list(reversed(self.categories))
        cat = categories[0]
        for i, profile in enumerate(reversed(self.profiles)):
            bp = self.bpt[profile]
            cw = self.concordance(ap, bp)
            if cw >= self.lbda:
                if self.vpt is None:
                    break
                else:
                    vp = self.vpt[profile]
                    vw = self.veto_concordance(ap, vp)
                    if self.veto_lbda and vw < self.veto_lbda:
                        break

                    if vw == 0:
                        break

            cat = categories[i + 1]

        return AlternativeAssignment(ap.id, cat)

    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa

    def pessimist(self, pt):
        return self.get_assignments(pt)

    def credibility(self, x, y, profile):
        c = self.concordance(x, y)

        if self.vpt is None:
            return c

        vp = self.vpt[profile]
        vc = self.veto_concordance(x, vp)
        if self.veto_lbda and (eq(vc, self.veto_lbda)
                               or vc > self.veto_lbda):
            return 0
        elif self.veto_lbda is None and vc > 0:
            return 0

        return c

    def count_veto_pessimist(self, ap):
        n = 0
        profiles = self.profiles[:]
        profiles.reverse()
        for i, profile in enumerate(profiles):
            c = self.concordance(ap, self.bpt[profile])
            if c < self.lbda:
                continue

            vc = self.veto_concordance(ap, self.bpt[profile])
            if self.veto_lbda and (eq(vc, self.veto_lbda)
                                   or vc > self.veto_lbda):
                n += 1
            elif self.veto_lbda is None and vc > 0:
                n += 1

        return n

    def get_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return None

        return self.bpt[self.profiles[index + 1]]

    def get_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if self.vpt is None:
            if index == 0:
                return None
            else:
                return self.bpt[self.profiles[index - 1]]

        if index == 0:
            return self.vpt[bid]

        bp = self.bpt[self.profiles[index - 1]]
        vp = self.vpt[self.profiles[index]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = max(bperf, vperf) * direction

        return ap

    def get_veto_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return self.bpt[self.profiles[index]]

        bp = self.bpt[bid]
        vp = self.vpt[self.profiles[index + 1]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = min(bperf, vperf) * direction

        return ap

    def get_veto_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if index == 0:
            return None

        return self.vpt[self.profiles[index - 1]]

    def to_xmcda(self):
        root = super(MRSort, self).to_xmcda()

        for obj in ['veto', 'veto_weights']:
            mcda = getattr(self, obj)
            if mcda is None:
                continue

            mcda.id = obj
            xmcda = mcda.to_xmcda()
            root.append(xmcda)

        if self.veto_lbda:
            mparams = ElementTree.SubElement(root, 'methodParameters')
            param = ElementTree.SubElement(mparams, 'parameter')
            value = ElementTree.SubElement(param, 'value')
            value.set('id', 'veto_lbda')
            lbda = marshal(self.veto_lbda)
            value.append(lbda)

        return root

    def from_xmcda(self, xmcda, id = None):
        super(MRSort, self).from_xmcda(xmcda, id)

        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
        value = xmcda.find(".//methodParameters/parameter/value[@id='veto_lbda']")
        if value is not None:
            self.veto_lbda = unmarshal(value.getchildren()[0])

        if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
            setattr(self, 'veto_weights',
                    CriteriaValues().from_xmcda(xmcda, 'veto_weights'))
        if xmcda.find(".//performanceTable[@id='veto']") is not None:
            setattr(self, 'veto',
                    PerformanceTable().from_xmcda(xmcda, 'veto'))

        return self
Example #38

    from pymcda.generate import generate_random_performance_table
    from pymcda.types import AlternativePerformances, PerformanceTable
    from pymcda.types import CriterionValue, CriteriaValues, CriteriaSet
    from pymcda.electre_tri import MRSort

    random.seed(0)

    c = generate_criteria(5)
    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)

    bp1 = AlternativePerformances('b1', {'c1': 0.75, 'c2': 0.75, 'c3': 0.75,
                                  'c4': 0.75, 'c5': 0.75})
    bp2 = AlternativePerformances('b2', {'c1': 0.25, 'c2': 0.25, 'c3': 0.25,
                                  'c4': 0.25, 'c5': 0.25})
    bpt = PerformanceTable([bp1, bp2])

    cv1 = CriterionValue('c1', 0.2)
    cv2 = CriterionValue('c2', 0.2)
    cv3 = CriterionValue('c3', 0.2)
    cv4 = CriterionValue('c4', 0.2)
    cv5 = CriterionValue('c5', 0.2)
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1)
    cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
    cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
Example #39
    model = generate_random_mrsort_model(10, 5, 890)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn * len(a)))
    aa_learn = AlternativesAssignments([aa[alt.id] for alt in a_learn])
    pt_learn = PerformanceTable([pt[alt.id] for alt in a_learn])

    aa_err = aa_learn.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories, errors)

    print('Original model')
    print('==============')
    print("Number of alternatives: %d" % len(a))
    print("Number of learning alternatives: %d" % len(aa_learn))
    print("Errors in alternatives assignments: %g%%" % (errors * 100))
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda\t%.7s" % model.lbda)
    print("delta: %g" % delta)
Example #40

for f in sys.argv[1:]:
    if not os.path.isfile(f):
        xmcda_models_toshow.append(f)
        continue

    # derive the output base name before the path is wrapped by BZ2File
    bname = os.path.basename(os.path.splitext(f)[0])
    if is_bz2_file(f):
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()

    xmcda_models = root.findall(".//ElectreTri")

    m = MRSort().from_xmcda(xmcda_models[0])

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')

    uniquevalues = pt_learning.get_unique_values()

    fweights = open('%s-w.dat' % bname, 'w+')
    fprofiles = open('%s-p.dat' % bname, 'w+')

    print("Processing %s..." % bname)

    criteria = m.criteria.keys()
    for c in criteria:
        print("{%s} " % criteria_names[c], end='', file=fprofiles)
    print('', file=fprofiles)
Example #41

def generate_random_profiles(alts,
                             crits,
                             seed=None,
                             k=3,
                             worst=None,
                             best=None,
                             prof_threshold=0.05,
                             fixed_profc1=None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    for c in crits:
        rdom = []

        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            if fixed_profc1 is not None and c.id == "c1":
                rdom.append(
                    round(
                        random.uniform(0.5 - fixed_profc1, 0.5 + fixed_profc1),
                        k))
            else:
                if c.direction == 2 or c.direction == -2:
                    # the criterion type is known to be single-peaked or
                    # single-valleyed, so the profile is an ordered pair
                    b_sp = tuple(
                        sorted([
                            round(
                                random.uniform(max(minp, prof_threshold),
                                               min(1 - prof_threshold, maxp)),
                                k),
                            round(
                                random.uniform(max(minp, prof_threshold),
                                               min(1 - prof_threshold, maxp)),
                                k)
                        ]))
                    rdom.append(b_sp)
                else:
                    rdom.append(
                        round(
                            random.uniform(max(minp, prof_threshold),
                                           min(1 - prof_threshold, maxp)), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
Example #42

cmatrix_learning = {}
cmatrix_test = {}

nveto = 0
for f in sys.argv[1:]:
    f = os.path.abspath(f)
    if is_bz2_file(f):
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root, 'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)

    table_ca_learning.append(ca_learning)
    table_ca_test.append(ca_test)
Example #43

def generate_random_profiles_msjp_sp(alts,
                                     crits,
                                     seed=None,
                                     k=3,
                                     worst=None,
                                     best=None,
                                     fct_percentile=[],
                                     nb_unk_criteria=0):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)  # here alts holds profile ids rather than alternatives
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp
            if c.direction == 2 or c.direction == -2:
                b_sp = tuple(
                    sorted([
                        round(random.uniform(minp, maxp), k),
                        round(random.uniform(minp, maxp), k)
                    ]))
                rdom.append(b_sp)
            else:
                # if c.id == "c1":
                #     rdom.append(0.3)
                # elif c.id == "c2" or c.id == "c3":
                #     rdom.append(0.8)
                # else:
                rdom.append(round(random.uniform(minp, maxp), k))

        # For the moment the direction test below is a no-op when there are
        # only two categories (a single profile); may need review
        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)
    return pt
Example #44

    from pymcda.types import AlternativesAssignments
    from pymcda.utils import print_pt_and_assignments
    from pymcda.ui.graphic import display_electre_tri_models

    cat = generate_categories(2)
    cps = generate_categories_profiles(cat)
    c = generate_criteria(4)

    # Generate assignment incompatible with an MR-Sort model
    ap1 = AlternativePerformances('a1', {'c1': 1, 'c2': 1, 'c3': 0, 'c4': 0})
    ap2 = AlternativePerformances('a2', {'c1': 0, 'c2': 0, 'c3': 1, 'c4': 1})
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)