Example #1
File: mip_mrsort.py  Project: oso/pymcda
    def solve_glpk(self):
        self.lp.solvopt(method='exact', integer='advanced')
        self.lp.solve()

        status = self.lp.status()
        if status != 'opt':
            raise RuntimeError("Solver status: %s" % self.lp.status())

        #print(self.lp.reportKKT())
        obj = self.lp.vobj()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = float(self.w[c.id].primal)
            cvs.append(cv)

        self.model.cv = cvs
        self.model.lbda = self.lbda.primal

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.g[p][c.id].primal
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #2
def generate_random_performance_table_msjp(alts, crits, seed = None, k = 3,
                                      worst = None, best = None, dupl_crits = None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits]
    for a in alts:
        perfs = {}
        perfs_dupl = {}
        for c in crits:
            if worst is None or best is None:
                #random.seed()
                rdom = round(random.random(), k)
            else:
                rdom = round(random.uniform(worst.performances[c.id],
                                            best.performances[c.id]), k)

            perfs[c.id] = rdom
            perfs_dupl[c.id] = rdom
            if (c.id+"d") in tmp_ids:
                perfs_dupl[(c.id+"d")] = rdom


        ap = AlternativePerformances(a.id, perfs)
        ap_dupl = AlternativePerformances(a.id, perfs_dupl)
        pt.append(ap)
        pt_dupl.append(ap_dupl)

    return pt,pt_dupl
Example #3
def generate_random_performance_table(alts,
                                      crits,
                                      seed=None,
                                      k=3,
                                      worst=None,
                                      best=None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    for a in alts:
        perfs = {}
        for c in crits:
            if worst is None or best is None:
                rdom = round(random.random(), k)
            else:
                rdom = round(
                    random.uniform(worst.performances[c.id],
                                   best.performances[c.id]), k)

            perfs[c.id] = rdom

        ap = AlternativePerformances(a.id, perfs)
        pt.append(ap)

    return pt
Example #4
def generate_random_performance_table_msjp_mip(alts,
                                               crits,
                                               seed=None,
                                               k=2,
                                               worst=None,
                                               best=None,
                                               dupl_crits=None,
                                               cardinality=10):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits]
    for a in alts:
        perfs = {}
        perfs_dupl = {}
        for c in crits:
            rdom = round(
                random.choice([
                    f for f in np.arange(0, 1. + 1. / (10**k), 1. /
                                         (cardinality - 1))
                ]), k)

            perfs[c.id] = rdom
            perfs_dupl[c.id] = rdom
            if (c.id + "d") in tmp_ids:
                perfs_dupl[(c.id + "d")] = rdom

        ap = AlternativePerformances(a.id, perfs)
        ap_dupl = AlternativePerformances(a.id, perfs_dupl)
        pt.append(ap)
        pt_dupl.append(ap_dupl)

    return pt, pt_dupl
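
For reference, a small sketch of the discrete value grid that random.choice draws from in the function above, using the default k=2 and cardinality=10:

import numpy as np

# Grid of admissible evaluations used above (k=2, cardinality=10): ten equally
# spaced values between 0 and 1, rounded to k decimals.
k, cardinality = 2, 10
grid = [round(float(f), k)
        for f in np.arange(0, 1. + 1. / (10**k), 1. / (cardinality - 1))]
print(grid)  # [0.0, 0.11, 0.22, 0.33, 0.44, 0.56, 0.67, 0.78, 0.89, 1.0]
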
Example #5
    def solve_glpk(self):
        self.lp.solvopt(method='exact', integer='advanced')
        self.lp.solve()

        status = self.lp.status()
        if status != 'opt':
            raise RuntimeError("Solver status: %s" % self.lp.status())

        #print(self.lp.reportKKT())
        obj = self.lp.vobj()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = float(self.w[c.id].primal)
            cvs.append(cv)

        self.model.cv = cvs
        self.model.lbda = self.lbda.primal

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.g[p][c.id].primal
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #6
File: mip_mrsort.py  Project: oso/pymcda
    def solve_cplex(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #7
def generate_binary_performance_table_and_assignments(criteria,
                                                      categories, fmins):
    cids = list(criteria.keys())
    cats = categories.get_ordered_categories()
    aa = AlternativesAssignments()
    pt = PerformanceTable()
    i = 1
    for coalition in powerset(cids):
        if set(coalition) == set({}) or set(coalition) == set(cids):
            continue

        aid = "a%d" % i
        ap = AlternativePerformances(aid,
                                     OrderedDict({c: 1 if c in coalition else 0
                                                  for c in cids}))
        pt.append(ap)

        cat = cats[0]
        for fmin in fmins:
            if fmin.issubset(set(coalition)) is True:
                cat = cats[1]
                break

        aa.append(AlternativeAssignment(aid, cat))

        i += 1

    return pt, aa
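
A hypothetical usage sketch of the function above. generate_criteria and generate_categories are assumed to be the usual pymcda.generate helpers (they are not shown in this example), and fmins is an illustrative set of minimal sufficient coalitions:

# Hypothetical sketch, not taken from the project's own test code.
from pymcda.generate import generate_criteria, generate_categories  # assumed helpers

crits = generate_criteria(3)   # c1, c2, c3
cats = generate_categories(2)  # two ordered categories, worst first
fmins = [frozenset(['c1', 'c2']), frozenset(['c3'])]

# Every non-trivial coalition becomes one binary alternative; it is assigned
# to the upper category iff it contains one of the coalitions in fmins.
pt, aa = generate_binary_performance_table_and_assignments(crits, cats, fmins)
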
Example #8
def duplicate_performance_table_msjp(pt,
                                     alts,
                                     crits,
                                     seed=None,
                                     k=3,
                                     worst=None,
                                     best=None,
                                     dupl_crits=None):
    if seed is not None:
        random.seed(seed)

    pt_dupl = PerformanceTable()
    tmp_ids = [i.id for i in dupl_crits]
    for ap in pt:
        ap_perfs = ap.performances
        for c in crits:
            #import pdb; pdb.set_trace()
            #            pt[c.id] = None
            #            perfs_dupl[c.id] = None
            if (c.id + "d") in tmp_ids:
                ap_perfs[(c.id + "d")] = ap_perfs[c.id]

        ap_dupl = AlternativePerformances(ap.altid, ap_perfs)
        #pt.append(ap)
        pt_dupl.append(ap_dupl)

    return pt_dupl
Example #9
def generate_binary_performance_table_and_assignments(criteria, categories,
                                                      fmins):
    cids = list(criteria.keys())
    cats = categories.get_ordered_categories()
    aa = AlternativesAssignments()
    pt = PerformanceTable()
    i = 1
    for coalition in powerset(cids):
        if set(coalition) == set({}) or set(coalition) == set(cids):
            continue

        aid = "a%d" % i
        ap = AlternativePerformances(
            aid, OrderedDict({c: 1 if c in coalition else 0
                              for c in cids}))
        pt.append(ap)

        cat = cats[0]
        for fmin in fmins:
            if fmin.issubset(set(coalition)) is True:
                cat = cats[1]
                break

        aa.append(AlternativeAssignment(aid, cat))

        i += 1

    return pt, aa
Example #10
    def solve_cplex(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        return obj
Example #11
File: electre_tri.py  Project: oso/pymcda
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto
Example #12
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto
Example #13
def generate_random_profiles_msjp(alts,
                                  crits,
                                  seed=None,
                                  k=3,
                                  worst=None,
                                  best=None,
                                  fct_percentile=[],
                                  nb_unk_criteria=0):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp
            if (c.id[-1] != "d") and (int(c.id[1:]) <= nb_unk_criteria):
                rdom.append(
                    round(
                        random.uniform(
                            max(minp, fct_percentile[int(c.id[1:]) - 1][0]),
                            min(maxp, fct_percentile[int(c.id[1:]) - 1][1])),
                        k))
            else:
                rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    #import pdb; pdb.set_trace()
    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
Example #14
    def solve(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        wv = CriteriaValues()
        for c in self.criteria:
            w = CriterionValue()
            w.id = c.id
            w.value = self.lp.solution.get_values('z_' + c.id)
            wv.append(w)

        self.model.veto_weights = wv
        self.model.veto_lbda = self.lp.solution.get_values("LAMBDA")

        v = PerformanceTable()
        for p in self.__profiles:
            vp = AlternativePerformances(p, {})
            for c in self.criteria:
                perf = self.lp.solution.get_values('v_%s_%s' % (p, c.id))
                vp.performances[c.id] = round(perf, 5)
            v.append(vp)

        self.model.veto = v

        return obj
Example #15
    def solve(self):
        self.lp.solve()

        status = self.lp.solution.get_status()
        if status != self.lp.solution.status.MIP_optimal:
            raise RuntimeError("Solver status: %s" % status)

        obj = self.lp.solution.get_objective_value()

        cvs = CriteriaValues()
        for c in self.criteria:
            cv = CriterionValue()
            cv.id = c.id
            cv.value = self.lp.solution.get_values('w_' + c.id)
            cvs.append(cv)

        self.model.cv = cvs

        self.model.lbda = self.lp.solution.get_values("lambda")

        pt = PerformanceTable()
        for p in self.__profiles:
            ap = AlternativePerformances(p)
            for c in self.criteria:
                perf = self.lp.solution.get_values("g_%s_%s" % (p, c.id))
                ap.performances[c.id] = round(perf, 5)
            pt.append(ap)

        self.model.bpt = pt
        self.model.bpt.update_direction(self.model.criteria)

        wv = CriteriaValues()
        for c in self.criteria:
            w = CriterionValue()
            w.id = c.id
            w.value = self.lp.solution.get_values('z_' + c.id)
            wv.append(w)

        self.model.veto_weights = wv
        self.model.veto_lbda = self.lp.solution.get_values("LAMBDA")

        v = PerformanceTable()
        for p in self.__profiles:
            vp = AlternativePerformances(p, {})
            for c in self.criteria:
                perf = self.lp.solution.get_values('v_%s_%s' % (p, c.id))
                vp.performances[c.id] = round(perf, 5)
            v.append(vp)

        self.model.veto = v

        return obj
Example #16
File: generate.py  Project: oso/pymcda
def generate_random_veto_profiles(model, worst = None, k = 3):
    if worst is None:
        worst = generate_worst_ap(model.criteria)

    vpt = PerformanceTable()
    for bid in model.profiles:
        ap = AlternativePerformances(bid, {})
        for c in model.criteria:
            a = model.bpt[bid].performances[c.id]
            b = worst.performances[c.id]
            ap.performances[c.id] = round(random.uniform(a, b), k)
        vpt.append(ap)
        worst = ap

    return vpt
Example #17
def generate_random_veto_profiles(model, worst = None, k = 3):
    if worst is None:
        worst = generate_worst_ap(model.criteria)

    vpt = PerformanceTable()
    for bid in model.profiles:
        ap = AlternativePerformances(bid, {})
        for c in model.criteria:
            a = model.bpt[bid].performances[c.id]
            b = worst.performances[c.id]
            ap.performances[c.id] = round(random.uniform(a, b), k)
        vpt.append(ap)
        worst = ap

    return vpt
Example #18
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        vpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            b1 = self.model.bpt['b1']
            vp = self.init_profile(profile_id, cats[i], cats[i + 1])
            vpt.append(b1 - vp)

        self.model.veto = vpt
Example #19
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        bpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            bp = self.init_profile(profile_id, cats[i], cats[i+1], pabove)
            bpt.append(bp)
            pabove = bp

        self.model.bpt = bpt
Example #20
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        vpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            b1 = self.model.bpt['b1']
            vp = self.init_profile(profile_id, cats[i], cats[i+1])
            vpt.append(b1 - vp)

        self.model.veto = vpt
Example #21
    def solve(self):
        cats = self.model.categories[:]
        cats.reverse()

        profiles = self.model.profiles[:]
        profiles.reverse()

        bpt = PerformanceTable()
        pabove = self.pt_sorted.pt.get_best(self.model.criteria)
        for i in range(len(cats) - 1):
            profile_id = profiles[i]
            bp = self.init_profile(profile_id, cats[i], cats[i + 1], pabove)
            bpt.append(bp)
            pabove = bp

        self.model.bpt = bpt
Example #22
def generate_random_profiles_msjp(alts,
                                  crits,
                                  seed=None,
                                  k=3,
                                  worst=None,
                                  best=None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
Example #23
File: generate.py  Project: oso/pymcda
def generate_random_performance_table(alts, crits, seed = None, k = 3,
                                      worst = None, best = None):
    if seed is not None:
        random.seed(seed)

    pt = PerformanceTable()
    for a in alts:
        perfs = {}
        for c in crits:
            if worst is None or best is None:
                rdom = round(random.random(), k)
            else:
                rdom = round(random.uniform(worst.performances[c.id],
                                            best.performances[c.id]), k)

            perfs[c.id] = rdom

        ap = AlternativePerformances(a.id, perfs)
        pt.append(ap)

    return pt
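
A minimal usage sketch for the generator above, assuming the companion pymcda.generate helpers generate_alternatives and generate_criteria (which are not part of this example):

# Hypothetical sketch; generate_alternatives / generate_criteria are assumed
# pymcda.generate helpers and are not shown above.
from pymcda.generate import generate_alternatives, generate_criteria

alts = generate_alternatives(100)  # alternatives a1 .. a100
crits = generate_criteria(5)       # criteria c1 .. c5
pt = generate_random_performance_table(alts, crits, seed=123)

# With no worst/best bounds, every evaluation is drawn uniformly in [0, 1]
# and rounded to k=3 decimals.
print(pt['a1'].performances)
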
Example #24
File: generate.py  Project: oso/pymcda
def generate_random_profiles(alts, crits, seed = None, k = 3,
                             worst = None, best = None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    pt = PerformanceTable()
    for c in crits:
        rdom = []
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            rdom.append(round(random.uniform(minp, maxp), k))

        if c.direction == -1:
            rdom.sort(reverse = True)
        else:
            rdom.sort()

        crit_random[c.id] = rdom

    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)

    return pt
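
A minimal usage sketch for generate_random_profiles; note that, unlike the alternative generators above, it is given plain profile ids rather than objects with an id attribute (generate_criteria is again an assumed pymcda.generate helper):

# Hypothetical sketch; generate_criteria is an assumed pymcda.generate helper.
from pymcda.generate import generate_criteria

crits = generate_criteria(4)
bpt = generate_random_profiles(['b1', 'b2'], crits, seed=7)

# Values are sorted per criterion, so each successive profile weakly dominates
# the previous one (the sort order is reversed on minimization criteria).
print(bpt['b1'].performances)
print(bpt['b2'].performances)
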
Example #25
File: electre_tri.py  Project: oso/pymcda
class MRSort(ElectreTri):

    def __init__(self, criteria = None, cv = None, bpt = None,
                 lbda = None, categories_profiles = None, veto = None,
                 veto_weights = None, veto_lbda = None, id = None):
        super(MRSort, self).__init__(criteria, cv, bpt, lbda,
                                     categories_profiles)
        self.veto = veto
        self.veto_weights = veto_weights
        self.veto_lbda = veto_lbda

    @property
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto

    @veto.setter
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)

    def criteria_coalition(self, ap1, ap2):
        criteria_set = set()

        for c in self.criteria:
            diff = ap2.performances[c.id] - ap1.performances[c.id]
            diff *= c.direction
            if diff <= 0:
                criteria_set.add(c.id)

        return criteria_set

    def concordance(self, ap, profile):
        criteria_set = self.criteria_coalition(ap, profile)
        return sum([c.value for c in self.cv
                    if c.id_issubset(criteria_set) is True])

    def coalition_weight(self, criteria_coalition):
        return sum([c.value for c in self.cv
                   if c.id_issubset(criteria_coalition) is True])

    def veto_concordance(self, ap, profile):
        if self.bpt is None:
            return 0

        criteria_set = self.criteria_coalition(profile, ap)
        if self.veto_weights is None:
            if len(criteria_set) > 0:
                return 1
            else:
                return 0

        return sum([c.value for c in self.veto_weights
                    if c.id_issubset(criteria_set) is True])

    def get_assignment(self, ap):
        categories = list(reversed(self.categories))
        cat = categories[0]
        for i, profile in enumerate(reversed(self.profiles)):
            bp = self.bpt[profile]
            cw = self.concordance(ap, bp)
            if cw >= self.lbda:
                if self.vpt is None:
                    break
                else:
                    vp = self.vpt[profile]
                    vw = self.veto_concordance(ap, vp)
                    if self.veto_lbda and vw < self.veto_lbda:
                        break

                    if vw == 0:
                        break

            cat = categories[i + 1]

        return AlternativeAssignment(ap.id, cat)

    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa

    def pessimist(self, pt):
        return self.get_assignments(pt)

    def credibility(self, x, y, profile):
        c = self.concordance(x, y)

        if self.vpt is None:
            return c

        vp = self.vpt[profile]
        vc = self.veto_concordance(x, vp)
        if self.veto_lbda and (eq(vc, self.veto_lbda)
                               or vc > self.veto_lbda):
            return 0
        elif self.veto_lbda is None and vc > 0:
            return 0

        return c

    def count_veto_pessimist(self, ap):
        n = 0
        profiles = self.profiles[:]
        profiles.reverse()
        for i, profile in enumerate(profiles):
            c = self.concordance(ap, self.bpt[profile])
            if c < self.lbda:
                continue

            vc = self.veto_concordance(ap, self.bpt[profile])
            if self.veto_lbda and (eq(vc, self.veto_lbda)
                                   or vc > self.veto_lbda):
                n += 1
            elif self.veto_lbda is None and vc > 0:
                n += 1

        return n

    def get_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return None

        return self.bpt[self.profiles[index + 1]]

    def get_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if self.vpt is None:
            if index == 0:
                return None
            else:
                return self.bpt[self.profiles[index - 1]]

        if index == 0:
            return self.vpt[bid]

        bp = self.bpt[self.profiles[index - 1]]
        vp = self.vpt[self.profiles[index]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = max(bperf, vperf) * direction

        return ap

    def get_veto_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return self.bpt[self.profiles[index]]

        bp = self.bpt[bid]
        vp = self.vpt[self.profiles[index + 1]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = min(bperf, vperf) * direction

        return ap

    def get_veto_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if index == 0:
            return None

        return self.vpt[self.profiles[index - 1]]

    def to_xmcda(self):
        root = super(MRSort, self).to_xmcda()

        for obj in ['veto', 'veto_weights']:
            mcda = getattr(self, obj)
            if mcda is None:
                continue

            mcda.id = obj
            xmcda = mcda.to_xmcda()
            root.append(xmcda)

        if self.veto_lbda:
            mparams = ElementTree.SubElement(root, 'methodParameters')
            param = ElementTree.SubElement(mparams, 'parameter')
            value = ElementTree.SubElement(param, 'value')
            value.set('id', 'veto_lbda')
            lbda = marshal(self.veto_lbda)
            value.append(lbda)

        return root

    def from_xmcda(self, xmcda, id = None):
        super(MRSort, self).from_xmcda(xmcda, id)

        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
        value = xmcda.find(".//methodParameters/parameter/value[@id='veto_lbda']")
        if value is not None:
            self.veto_lbda = unmarshal(value.getchildren()[0])

        if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
            setattr(self, 'veto_weights',
                    CriteriaValues().from_xmcda(xmcda, 'veto_weights'))
        if xmcda.find(".//performanceTable[@id='veto']") is not None:
            setattr(self, 'veto',
                    PerformanceTable().from_xmcda(xmcda, 'veto'))

        return self
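
A hypothetical end-to-end sketch of the pessimistic assignment rule implemented by MRSort.get_assignment above; generate_random_mrsort_model and generate_alternatives are assumed pymcda.generate helpers, and the category_id attribute of an assignment is likewise assumed:

# Hypothetical sketch; the helper names and signatures below are assumptions.
from pymcda.generate import (generate_random_mrsort_model,
                             generate_alternatives,
                             generate_random_performance_table)

model = generate_random_mrsort_model(5, 3, seed=1)  # 5 criteria, 3 categories
alts = generate_alternatives(50)
pt = generate_random_performance_table(alts, model.criteria, seed=2)

# An alternative stays in the highest category whose lower profile it
# outranks: concordance >= lambda and no veto raised against it.
aa = model.get_assignments(pt)
for a in aa:
    print(a.id, a.category_id)
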
Example #26
class MRSort(ElectreTri):
    def __init__(self,
                 criteria=None,
                 cv=None,
                 bpt=None,
                 lbda=None,
                 categories_profiles=None,
                 veto=None,
                 veto_weights=None,
                 veto_lbda=None,
                 id=None):
        super(MRSort, self).__init__(criteria, cv, bpt, lbda,
                                     categories_profiles)
        self.veto = veto
        self.veto_weights = veto_weights
        self.veto_lbda = veto_lbda

    @property
    def veto(self):
        if self.vpt is None:
            return None

        veto = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - self.vpt[bp.id]
            veto.append(vbp)
        return veto

    @veto.setter
    def veto(self, veto):
        if veto is None:
            self.vpt = None
            return

        self.vpt = PerformanceTable()
        for bp in self.bpt:
            vbp = bp - veto[bp.id]
            self.vpt.append(vbp)

    def criteria_coalition(self, ap1, ap2):
        criteria_set = set()

        for c in self.criteria:
            diff = ap2.performances[c.id] - ap1.performances[c.id]
            diff *= c.direction
            if diff <= 0:
                criteria_set.add(c.id)

        return criteria_set

    def concordance(self, ap, profile):
        criteria_set = self.criteria_coalition(ap, profile)
        return sum(
            [c.value for c in self.cv if c.id_issubset(criteria_set) is True])

    def coalition_weight(self, criteria_coalition):
        return sum([
            c.value for c in self.cv
            if c.id_issubset(criteria_coalition) is True
        ])

    def veto_concordance(self, ap, profile):
        if self.bpt is None:
            return 0

        criteria_set = self.criteria_coalition(profile, ap)
        if self.veto_weights is None:
            if len(criteria_set) > 0:
                return 1
            else:
                return 0

        return sum([
            c.value for c in self.veto_weights
            if c.id_issubset(criteria_set) is True
        ])

    def get_assignment(self, ap):
        categories = list(reversed(self.categories))
        cat = categories[0]
        for i, profile in enumerate(reversed(self.profiles)):
            bp = self.bpt[profile]
            cw = self.concordance(ap, bp)
            if cw >= self.lbda:
                if self.vpt is None:
                    break
                else:
                    vp = self.vpt[profile]
                    vw = self.veto_concordance(ap, vp)
                    if self.veto_lbda and vw < self.veto_lbda:
                        break

                    if vw == 0:
                        break

            cat = categories[i + 1]

        return AlternativeAssignment(ap.id, cat)

    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa

    def pessimist(self, pt):
        return self.get_assignments(pt)

    def credibility(self, x, y, profile):
        c = self.concordance(x, y)

        if self.vpt is None:
            return c

        vp = self.vpt[profile]
        vc = self.veto_concordance(x, vp)
        if self.veto_lbda and (eq(vc, self.veto_lbda) or vc > self.veto_lbda):
            return 0
        elif self.veto_lbda is None and vc > 0:
            return 0

        return c

    def count_veto_pessimist(self, ap):
        n = 0
        profiles = self.profiles[:]
        profiles.reverse()
        for i, profile in enumerate(profiles):
            c = self.concordance(ap, self.bpt[profile])
            if c < self.lbda:
                continue

            vc = self.veto_concordance(ap, self.bpt[profile])
            if self.veto_lbda and (eq(vc, self.veto_lbda)
                                   or vc > self.veto_lbda):
                n += 1
            elif self.veto_lbda is None and vc > 0:
                n += 1

        return n

    def get_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return None

        return self.bpt[self.profiles[index + 1]]

    def get_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if self.vpt is None:
            if index == 0:
                return None
            else:
                return self.bpt[self.profiles[index - 1]]

        if index == 0:
            return self.vpt[bid]

        bp = self.bpt[self.profiles[index - 1]]
        vp = self.vpt[self.profiles[index]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = max(bperf, vperf) * direction

        return ap

    def get_veto_profile_upper_limit(self, bid):
        index = self.profiles.index(bid)
        if index == (len(self.profiles) - 1):
            return self.bpt[self.profiles[index]]

        bp = self.bpt[bid]
        vp = self.vpt[self.profiles[index + 1]]

        ap = AlternativePerformances(bid, {})
        for crit in bp.performances.keys():
            direction = self.criteria[crit].direction
            bperf = bp.performances[crit] * direction
            vperf = vp.performances[crit] * direction
            ap.performances[crit] = min(bperf, vperf) * direction

        return ap

    def get_veto_profile_lower_limit(self, bid):
        index = self.profiles.index(bid)
        if index == 0:
            return None

        return self.vpt[self.profiles[index - 1]]

    def to_xmcda(self):
        root = super(MRSort, self).to_xmcda()

        for obj in ['veto', 'veto_weights']:
            mcda = getattr(self, obj)
            if mcda is None:
                continue

            mcda.id = obj
            xmcda = mcda.to_xmcda()
            root.append(xmcda)

        if self.veto_lbda:
            mparams = ElementTree.SubElement(root, 'methodParameters')
            param = ElementTree.SubElement(mparams, 'parameter')
            value = ElementTree.SubElement(param, 'value')
            value.set('id', 'veto_lbda')
            lbda = marshal(self.veto_lbda)
            value.append(lbda)

        return root

    def from_xmcda(self, xmcda, id=None):
        super(MRSort, self).from_xmcda(xmcda, id)

        xmcda = find_xmcda_tag(xmcda, 'ElectreTri', id)
        value = xmcda.find(
            ".//methodParameters/parameter/value[@id='veto_lbda']")
        if value is not None:
            self.veto_lbda = unmarshal(value.getchildren()[0])

        if xmcda.find(".//criteriaValues[@id='veto_weights']") is not None:
            setattr(self, 'veto_weights',
                    CriteriaValues().from_xmcda(xmcda, 'veto_weights'))
        if xmcda.find(".//performanceTable[@id='veto']") is not None:
            setattr(self, 'veto', PerformanceTable().from_xmcda(xmcda, 'veto'))

        return self
Example #27
def generate_random_profiles(alts,
                             crits,
                             seed=None,
                             k=3,
                             worst=None,
                             best=None,
                             prof_threshold=0.05,
                             fixed_profc1=None):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)
    #print(alts)
    pt = PerformanceTable()
    for c in crits:
        rdom = []

        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp

            if fixed_profc1 is not None and c.id == "c1":
                #rdom.append(0.5)
                rdom.append(
                    round(
                        random.uniform(0.5 - fixed_profc1, 0.5 + fixed_profc1),
                        k))
                #print("rdom ",rdom)
            else:
                if c.direction == 2 or c.direction == -2:
                    #print(c,crits)
                    #if criteria are known
                    b_sp = tuple(
                        sorted([
                            round(
                                random.uniform(max(minp, prof_threshold),
                                               min(1 - prof_threshold, maxp)),
                                k),
                            round(
                                random.uniform(max(minp, prof_threshold),
                                               min(1 - prof_threshold, maxp)),
                                k)
                        ]))
                    #b_sp = (round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k),1)
                    rdom.append(b_sp)
                    #the following assumes that criteria types are not known, but all fixed under SP construction
                    # if c.id == [c.id for c in crits][-1]:
                    #     b_sp =tuple(sorted([round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k),round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k)]))
                    #     rdom.append(b_sp)
                    # else:
                    #     b_sp = (round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k),1)
                    #     rdom.append(b_sp)
                    #print(rdom)
                else:
                    rdom.append(
                        round(
                            random.uniform(max(minp, prof_threshold),
                                           min(1 - prof_threshold, maxp)), k))
                    #rdom.append((round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k),1))
            #rdom.append(round(random.uniform(max(minp,prof_threshold), min(1-prof_threshold,maxp)), k))

        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()
        #rdom.sort(reverse = True)

        crit_random[c.id] = rdom

    # if n==2:
    #     import pdb; pdb.set_trace()
    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)
        # if n==2:
        #     import pdb; pdb.set_trace()

    return pt
Example #28
def generate_random_profiles_msjp_sp(alts,
                                     crits,
                                     seed=None,
                                     k=3,
                                     worst=None,
                                     best=None,
                                     fct_percentile=[],
                                     nb_unk_criteria=0):
    if seed is not None:
        random.seed(seed)

    if worst is None:
        worst = generate_worst_ap(crits)
    if best is None:
        best = generate_best_ap(crits)

    crit_random = {}
    n = len(alts)  # here it represents profiles
    pt = PerformanceTable()
    random.seed(seed)
    for c in crits:
        rdom = []
        random.seed(seed)
        for i in range(n):
            minp = worst.performances[c.id]
            maxp = best.performances[c.id]
            if minp > maxp:
                minp, maxp = maxp, minp
            # if (c.id[-1] != "d") and (int(c.id[1:]) <= nb_unk_criteria):
            #     rdom.append(round(random.uniform(max(minp,fct_percentile[int(c.id[1:])-1][0]), min(maxp,fct_percentile[int(c.id[1:])-1][1])), k))
            # else:
            #     rdom.append(round(random.uniform(minp,maxp),k))
            if c.direction == 2 or c.direction == -2:
                b_sp = tuple(
                    sorted([
                        round(random.uniform(minp, maxp), k),
                        round(random.uniform(minp, maxp), k)
                    ]))
                #we assume to know temporarily the value of the bottom
                #b_sp = (0.4,round(random.uniform(0.4,maxp), k))
                #b_sp = (round(random.uniform(0,0.6), k),0.6)
                rdom.append(b_sp)
            else:
                # if c.id == "c1":
                #     rdom.append(0.3)
                # elif c.id == "c2" or c.id == "c3":
                #     rdom.append(0.8)
                # else:
                rdom.append(round(random.uniform(minp, maxp), k))

        #For the moment this test below is useless as long as we have only 2 categories  (neeed to be may be review)
        if c.direction == -1:
            rdom.sort(reverse=True)
        else:
            rdom.sort()
        #import pdb; pdb.set_trace()

        crit_random[c.id] = rdom

    #import pdb; pdb.set_trace()
    for i, a in enumerate(alts):
        perfs = {}
        for c in crits:
            perfs[c.id] = crit_random[c.id][i]
        ap = AlternativePerformances(a, perfs)
        pt.append(ap)
    #import pdb; pdb.set_trace()
    return pt