Example #1
def generate_binary_performance_table_and_assignments(criteria,
                                                      categories, fmins):
    cids = list(criteria.keys())
    cats = categories.get_ordered_categories()
    aa = AlternativesAssignments()
    pt = PerformanceTable()
    i = 1
    for coalition in powerset(cids):
        if not coalition or set(coalition) == set(cids):
            continue

        aid = "a%d" % i
        ap = AlternativePerformances(aid,
                                     OrderedDict({c: 1 if c in coalition else 0
                                                  for c in cids}))
        pt.append(ap)

        cat = cats[0]
        for fmin in fmins:
            if fmin.issubset(coalition):
                cat = cats[1]
                break

        aa.append(AlternativeAssignment(aid, cat))

        i += 1

    return pt, aa
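A minimal usage sketch (not from the original source), assuming generate_criteria and generate_categories behave as in Example #15, that the default criterion ids are 'c1'..'c4', and that powerset comes from the same toolbox; the fmins coalitions below are purely hypothetical:

# Hedged sketch: label the 2^4 - 2 non-trivial binary alternatives using
# two assumed minimal sufficient coalitions.
criteria = generate_criteria(4)
categories = generate_categories(2)
fmins = [frozenset(['c1', 'c2']), frozenset(['c3', 'c4'])]  # hypothetical
pt, aa = generate_binary_performance_table_and_assignments(criteria,
                                                           categories, fmins)
print(len(aa))  # expected: 14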
Example #2
def compute_assignments_majority(models_ca, pt):
    models = sorted(models_ca, key=lambda k: models_ca[k], reverse=True)

    # Use the better half of the models (same subset as in the scoring
    # loop below); the explicit int avoids the float slice error in Python 3.
    nkeep = int(math.ceil(len(models) / 2))
    models_aa = {}
    for model in models[:nkeep]:
        models_aa[model] = model.get_assignments(pt)

    aa = AlternativesAssignments()
    for ap in pt:
        aid = ap.id
        cat_scores = {}
        for model in models[:nkeep]:
            cat = models_aa[model][aid].category_id
            if cat in cat_scores:
                cat_scores[cat] += models_ca[model]
            else:
                cat_scores[cat] = models_ca[model]

        b = AlternativeAssignment(aid,
                                  max(cat_scores,
                                      key = lambda cat: cat_scores[cat]))
        aa.append(b)

    return aa
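A hedged sketch of calling this weighted majority vote; model_a, model_b and model_c are hypothetical objects exposing get_assignments(), such as the MRSort models of the later examples, and pt is a PerformanceTable:

# Hedged sketch: models_ca maps hypothetical learned models to their
# classification accuracy on some validation set.
models_ca = {model_a: 0.95, model_b: 0.90, model_c: 0.85}
aa_majority = compute_assignments_majority(models_ca, pt)
print(len(aa_majority))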
Example #3
def discard_alternatives_in_category(aa, category):
    out = AlternativesAssignments()

    for a in aa:
        if a.category_id != category:
            out.append(a)

    return out
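For instance, with the assignments built in Example #23 below, this filter could drop everything placed in 'cat1' (a hedged sketch; aap is defined in that example):

# Hedged sketch reusing aap from Example #23.
aap_rest = discard_alternatives_in_category(aap, 'cat1')
print([a.id for a in aap_rest])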
Example #4
def discard_undersorted_alternatives(categories, aa, aa2):
    out = AlternativesAssignments()
    cat_order = {cat: i + 1 for i, cat in enumerate(categories)}
    for a in aa:
        aid = a.id
        acat = cat_order[a.category_id]
        a2 = aa2[aid]
        a2cat = cat_order[a2.category_id]
        if a2cat >= acat:
            out.append(a)

    return out
Example #5
def add_errors_in_assignments(aa, category_ids, errors_pc):
    n = int(len(aa)*errors_pc)
    aa_erroned = random.sample(list(aa), n)

    l = AlternativesAssignments([])
    for a in aa_erroned:
        cat = a.category_id
        new_cat = a.category_id
        while new_cat == cat:
            new_cat = random.sample(category_ids, 1)[0]
        a.category_id = new_cat
        l.append(a)

    return l
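Example #14 below calls this helper on a copy of the learning assignments; a condensed, hedged sketch of that pattern (aa and model as generated in Example #14):

# Hedged sketch: flip roughly 10% of the assignments in a copy so the
# original aa stays intact; the helper returns only the modified ones.
aa_noisy = aa.copy()
flipped = add_errors_in_assignments(aa_noisy, model.categories, 0.1)
print("%d assignments flipped" % len(flipped))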
Example #6
def add_errors_in_assignments_proba(aa, category_ids, proba):
    l = AlternativesAssignments([])

    for a in aa:
        r = random.random()
        if r <= proba:
            cat = a.category_id
            new_cat = a.category_id
            while new_cat == cat:
                new_cat = random.sample(category_ids, 1)[0]
            a.category_id = new_cat

            l.append(a)

    return l
Example #7
    def optimist(self, pt):
        self.__check_input_params()
        profiles = self.profiles
        assignments = AlternativesAssignments([])
        for action_perfs in pt:
            cat_rank = 0
            for i, profile in enumerate(profiles):
                outr = self.__outrank(action_perfs, self.criteria,
                                      self.bpt[profile], self.lbda)
                if outr != "-":
                    cat_rank += 1

            cat_id = self.categories[cat_rank]
            id = action_perfs.id
            alt_affect = AlternativeAssignment(id, cat_id)
            assignments.append(alt_affect)

        return assignments
Example #8
    def pessimist(self, pt):
        self.__check_input_params()
        profiles = self.profiles[:]
        profiles.reverse()
        assignments = AlternativesAssignments([])
        for action_perfs in pt:
            cat_rank = len(profiles)
            for i, profile in enumerate(profiles):
                s_ab = self.credibility(action_perfs, self.bpt[profile],
                                        profile)
                if not eq(s_ab, self.lbda) and s_ab < self.lbda:
                    cat_rank -= 1

            cat_id = self.categories[cat_rank]
            id = action_perfs.id
            alt_affect = AlternativeAssignment(id, cat_id)
            assignments.append(alt_affect)

        return assignments
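The optimistic and pessimistic rules of Examples #7 and #8 can disagree; a hedged sketch of comparing them, assuming a model and performance table pt as in Example #14 and compute_ca() as used in Example #22:

# Hedged sketch: fraction of alternatives on which the two MR-Sort
# assignment rules place the alternative in the same category.
aa_pess = model.pessimist(pt)
aa_opt = model.optimist(pt)
print("optimist/pessimist agreement: %.4f" % compute_ca(aa_pess, aa_opt))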
Example #10
    def build_assignments_table(self):
        self.good = 0
        self.aa = AlternativesAssignments()
        for aa in self.aa_ori.values():
            aid = aa.id
            cat = self.get_alternative_assignment(aid)
            self.aa.append(AlternativeAssignment(aid, cat))

            cat_ori = aa.category_id
            if cat == cat_ori:
                self.good += 1
Example #11
def compute_assignments_majority(models_ca, pt):
    models = sorted(models_ca, key=lambda k: models_ca[k], reverse=True)

    nkeep = int(math.ceil(len(models) / 2))  # same subset as in the scoring loop
    models_aa = {}
    for model in models[:nkeep]:
        models_aa[model] = model.get_assignments(pt)

    aa = AlternativesAssignments()
    for ap in pt:
        aid = ap.id
        cat_scores = {}
        for model in models[:nkeep]:
            cat = models_aa[model][aid].category_id
            if cat in cat_scores:
                cat_scores[cat] += models_ca[model]
            else:
                cat_scores[cat] = models_ca[model]

        b = AlternativeAssignment(
            aid, max(cat_scores, key=lambda cat: cat_scores[cat]))
        aa.append(b)

    return aa
Example #12
directory = '%s/test-veto2' % DATADIR

for f in sys.argv[1:]:
    fname = os.path.splitext(os.path.basename(f))[0]

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root,
                                                  'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

#    # Remove alternatives that cannot be corrected with a veto rule
#    aa_learning_m2p = discard_undersorted_alternatives(m.categories,
#                                                      aa_learning,
#                                                      aa_learning_m2)
#    aa_learning_m2p = discard_alternatives_in_category(aa_learning_m2p,
#                                                      m.categories[0])

    # Run the metaheuristic
    meta = MetaMRSortVCPop3(10, m, SortedPerformanceTable(pt_learning),
Example #13
class MetaMRSortProfilesChoquet():
    def __init__(self, model, pt_sorted, aa_ori):
        self.na = len(aa_ori)
        self.nc = len(model.criteria)
        self.model = model
        self.nprofiles = len(model.profiles)
        self.pt_sorted = pt_sorted
        self.aa_ori = aa_ori
        self.cat = self.categories_rank()
        self.cat_ranked = self.model.categories
        self.aa_by_cat = self.sort_alternative_by_category(aa_ori)
        self.b0 = pt_sorted.pt.get_worst(model.criteria)
        self.bp = pt_sorted.pt.get_best(model.criteria)
        self.rebuild_tables()

    def categories_rank(self):
        return {cat: i + 1 for i, cat in enumerate(self.model.categories)}

    def sort_alternative_by_category(self, aa):
        aa_by_cat = {}
        for a in aa:
            aid = a.id
            cat = self.cat[a.category_id]
            if cat in aa_by_cat:
                aa_by_cat[cat].append(aid)
            else:
                aa_by_cat[cat] = [aid]
        return aa_by_cat

    def compute_above_histogram(self, cid, perf_profile, perf_above, cat_b,
                                cat_a, ct):
        lbda = self.model.lbda
        direction = self.model.criteria[cid].direction
        delta = 0.00001 * direction

        h_above = {}
        num = total = 0
        alts, perfs = self.pt_sorted.get_middle(cid, perf_profile, perf_above,
                                                True, True)

        for i, a in enumerate(alts):
            if (perfs[i] + delta) * direction > perf_above * direction:
                continue

            aa_ori = self.aa_ori._d[a].category_id
            aa = self.aa._d[a].category_id
            diff = self.mobius[frozenset(ct[a] | set([cid]))]
            if aa_ori == cat_a:
                if aa == cat_a and diff < lbda:
                    # --
                    total += 5
                elif aa == cat_b:
                    # -
                    total += 1
            elif aa_ori == cat_b and aa == cat_a:
                if diff >= lbda:
                    # +
                    num += 0.5
                    total += 1
                    h_above[perfs[i] + delta] = num / total
                else:
                    # ++
                    num += 2
                    total += 1
                    h_above[perfs[i] + delta] = num / total
#            elif self.aa_ori(a) < self.aa(a) and \
            elif aa_ori != aa and \
                 self.cat[aa] < self.cat[cat_a] and \
                 self.cat[aa_ori] < self.cat[cat_a]:
                num += 0.1
                total += 1
                h_above[perfs[i] + delta] = num / total

        return h_above

    def compute_below_histogram(self, cid, perf_profile, perf_below, cat_b,
                                cat_a, ct):
        lbda = self.model.lbda

        h_below = {}
        num = total = 0
        alts, perfs = self.pt_sorted.get_middle(cid, perf_profile, perf_below,
                                                True, True)

        for i, a in enumerate(alts):
            aa_ori = self.aa_ori._d[a].category_id
            aa = self.aa._d[a].category_id
            diff = self.mobius[frozenset(ct[a] | set([cid]))]
            if aa_ori == cat_a and aa == cat_b:
                if diff >= lbda:
                    # ++
                    num += 2
                    total += 1
                    h_below[perfs[i]] = num / total
                else:
                    # +
                    num += 0.5
                    total += 1
                    h_below[perfs[i]] = num / total
            elif aa_ori == cat_b:
                if aa == cat_b and diff >= lbda:
                    # --
                    total += 5
                elif aa == cat_a:
                    # -
                    total += 1


#            elif self.aa_ori(a) > self.aa(a) and \
            elif aa_ori != aa and \
                 self.cat[aa] > self.cat[cat_b] and \
                 self.cat[aa_ori] > self.cat[cat_b]:
                num += 0.1
                total += 1
                h_below[perfs[i]] = num / total

        return h_below

    def histogram_choose(self, h, current):
        key = random.choice(list(h.keys()))  # list() needed under Python 3
        val = h[key]
        diff = abs(current - key)
        for k, v in h.items():
            if v >= val:
                tmp = abs(current - k)
                if tmp > diff:
                    key = k
                    val = v
                    diff = tmp
        return key

    def get_alternative_assignment(self, aid):
        profile = self.model.profiles[0]
        for profile in reversed(self.model.profiles):
            if self.mobius[self.ct[profile][aid]] >= self.model.lbda:
                return self.model.categories_profiles[profile].value.upper

        return self.model.categories_profiles[profile].value.lower

    def build_assignments_table(self):
        self.good = 0
        self.aa = AlternativesAssignments()
        for aa in self.aa_ori.values():
            aid = aa.id
            cat = self.get_alternative_assignment(aid)
            self.aa.append(AlternativeAssignment(aid, cat))

            cat_ori = aa.category_id
            if cat == cat_ori:
                self.good += 1

    def build_concordance_table(self):
        self.ct = {bp.id: dict() for bp in self.model.bpt}
        for aid, bp in product(self.aa_ori.keys(), self.model.bpt):
            ap = self.pt_sorted[aid]
            conc = self.model.concordance(ap, bp)
            self.ct[bp.id][aid] = conc

    def build_coalition_table(self):
        self.ct = {bp.id: dict() for bp in self.model.bpt}
        for aid, bp in product(self.aa_ori.keys(), self.model.bpt):
            ap = self.pt_sorted[aid]
            criteria_coalition = self.model.criteria_coalition(ap, bp)
            self.ct[bp.id][aid] = frozenset(criteria_coalition)

    def build_mobius_table(self):
        cids = self.model.criteria.keys()
        self.mobius = {frozenset([]): 0}
        for i in range(len(cids)):
            for c in combinations(cids, i + 1):
                s = frozenset(c)
                wsum = self.model.coalition_weight(s)
                self.mobius[s] = wsum

    def rebuild_tables(self):
        self.build_mobius_table()
        self.build_coalition_table()
        self.build_assignments_table()

    def update_tables(self, profile, cid, old, new):
        direction = self.model.criteria[cid].direction
        if old > new:
            if direction == 1:
                down, up = True, False
            else:
                down, up = False, True
            add = True
        else:
            if direction == 1:
                down, up = False, True
            else:
                down, up = True, False
            add = False

        alts, perfs = self.pt_sorted.get_middle(cid, old, new, up, down)

        for a in alts:
            self.ct[profile][a] = set(self.ct[profile][a])
            if add is True:
                self.ct[profile][a].add(cid)
            else:
                self.ct[profile][a].discard(cid)
            self.ct[profile][a] = frozenset(self.ct[profile][a])

            old_cat = self.aa[a].category_id
            new_cat = self.get_alternative_assignment(a)
            ori_cat = self.aa_ori[a].category_id

            if old_cat == new_cat:
                continue
            elif old_cat == ori_cat:
                self.good -= 1
            elif new_cat == ori_cat:
                self.good += 1

            self.aa[a].category_id = new_cat

    def optimize_profile(self, profile, below, above, cat_b, cat_a):
        cids = list(self.model.criteria.keys())  # list() so shuffle works in Python 3
        random.shuffle(cids)

        for cid in cids:
            ct = self.ct[profile.id]

            perf_profile = profile.performances[cid]
            perf_above = above.performances[cid]
            perf_below = below.performances[cid]

            h_below = self.compute_below_histogram(cid, perf_profile,
                                                   perf_below, cat_b, cat_a,
                                                   ct)
            h_above = self.compute_above_histogram(cid, perf_profile,
                                                   perf_above, cat_b, cat_a,
                                                   ct)
            h = h_below
            h.update(h_above)

            if not h:
                continue

            i = self.histogram_choose(h, perf_profile)

            r = random.uniform(0, 1)

            if r <= h[i]:
                self.update_tables(profile.id, cid, perf_profile, i)
                profile.performances[cid] = i

    def get_below_and_above_profiles(self, i):
        profiles = self.model.profiles
        bpt = self.model.bpt

        if i == 0:
            below = self.b0
        else:
            below = bpt[profiles[i - 1]]

        if i == self.nprofiles - 1:
            above = self.bp
        else:
            above = bpt[profiles[i + 1]]

        return below, above

    def optimize(self):
        profiles = self.model.profiles
        for i, profile in enumerate(profiles):
            pperfs = self.model.bpt[profile]
            below, above = self.get_below_and_above_profiles(i)
            cat_b, cat_a = self.cat_ranked[i], self.cat_ranked[i + 1]
            self.optimize_profile(pperfs, below, above, cat_b, cat_a)

        return self.good / self.na
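A hedged sketch of how such a profile-optimization heuristic might be driven, assuming a SortedPerformanceTable wrapper as in Example #12 and model, pt and aa as generated in Example #14; the iteration budget is arbitrary:

# Hedged usage sketch (not from the original source): run the heuristic
# until it reproduces every original assignment or the budget runs out.
meta = MetaMRSortProfilesChoquet(model, SortedPerformanceTable(pt), aa)
for _ in range(20):
    fitness = meta.optimize()   # share of correctly re-assigned alternatives
    if fitness == 1:
        break
print("fitness: %.4f" % fitness)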
Example #14
    # Original Electre Tri model
    model = generate_random_mrsort_model(10, 5, 890)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn * len(a)))
    aa_learn = AlternativesAssignments([aa[alt.id] for alt in a_learn])
    pt_learn = PerformanceTable([pt[alt.id] for alt in a_learn])

    aa_err = aa_learn.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories, errors)

    print('Original model')
    print('==============')
    print("Number of alternatives: %d" % len(a))
    print("Number of learning alternatives: %d" % len(aa_learn))
    print("Errors in alternatives assignments: %g%%" % (errors * 100))
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda\t%.7s" % model.lbda)
    print("delta: %g" % delta)
Example #15
    def test001(self):
        c = generate_criteria(5)
        w1 = CriterionValue('c1', 0.2)
        w2 = CriterionValue('c2', 0.2)
        w3 = CriterionValue('c3', 0.2)
        w4 = CriterionValue('c4', 0.2)
        w5 = CriterionValue('c5', 0.2)
        w = CriteriaValues([w1, w2, w3, w4, w5])

        b1 = AlternativePerformances('b1', {
            'c1': 10,
            'c2': 10,
            'c3': 10,
            'c4': 10,
            'c5': 10
        })
        bpt = PerformanceTable([b1])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        vb1 = AlternativePerformances('b1', {
            'c1': 2,
            'c2': 2,
            'c3': 2,
            'c4': 2,
            'c5': 2
        }, 'b1')
        v = PerformanceTable([vb1])
        vw = w.copy()

        a1 = AlternativePerformances('a1', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a2 = AlternativePerformances('a2', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a3 = AlternativePerformances('a3', {
            'c1': 9,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a4 = AlternativePerformances('a4', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a5 = AlternativePerformances('a5', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a6 = AlternativePerformances('a6', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a7 = AlternativePerformances('a7', {
            'c1': 9,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a8 = AlternativePerformances('a8', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a9 = AlternativePerformances('a9', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a10 = AlternativePerformances('a10', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a11 = AlternativePerformances('a11', {
            'c1': 9,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a12 = AlternativePerformances('a12', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a13 = AlternativePerformances('a13', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a14 = AlternativePerformances('a14', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a15 = AlternativePerformances('a15', {
            'c1': 9,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a16 = AlternativePerformances('a16', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a17 = AlternativePerformances('a17', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a18 = AlternativePerformances('a18', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a19 = AlternativePerformances('a19', {
            'c1': 11,
            'c2': 9,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a20 = AlternativePerformances('a20', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a21 = AlternativePerformances('a21', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a22 = AlternativePerformances('a22', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a23 = AlternativePerformances('a23', {
            'c1': 11,
            'c2': 9,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a24 = AlternativePerformances('a24', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 9
        })
        a25 = AlternativePerformances('a25', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 9,
            'c5': 11
        })
        a26 = AlternativePerformances('a26', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 9
        })
        a27 = AlternativePerformances('a27', {
            'c1': 11,
            'c2': 11,
            'c3': 9,
            'c4': 11,
            'c5': 11
        })
        a28 = AlternativePerformances('a28', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 9
        })
        a29 = AlternativePerformances('a29', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 9,
            'c5': 11
        })
        a30 = AlternativePerformances('a30', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 9
        })
        a31 = AlternativePerformances('a31', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a32 = AlternativePerformances('a32', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a33 = AlternativePerformances('a33', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a34 = AlternativePerformances('a34', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a35 = AlternativePerformances('a35', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        a36 = AlternativePerformances('a36', {
            'c1': 11,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 7
        })
        a37 = AlternativePerformances('a37', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 7
        })
        a38 = AlternativePerformances('a38', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a39 = AlternativePerformances('a39', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 11,
            'c5': 7
        })
        a40 = AlternativePerformances('a40', {
            'c1': 11,
            'c2': 11,
            'c3': 7,
            'c4': 7,
            'c5': 11
        })
        a41 = AlternativePerformances('a41', {
            'c1': 11,
            'c2': 7,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a42 = AlternativePerformances('a42', {
            'c1': 7,
            'c2': 11,
            'c3': 11,
            'c4': 7,
            'c5': 11
        })
        a43 = AlternativePerformances('a43', {
            'c1': 11,
            'c2': 7,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a44 = AlternativePerformances('a44', {
            'c1': 7,
            'c2': 11,
            'c3': 7,
            'c4': 11,
            'c5': 11
        })
        a45 = AlternativePerformances('a45', {
            'c1': 7,
            'c2': 7,
            'c3': 11,
            'c4': 11,
            'c5': 11
        })
        pt = PerformanceTable([
            a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
            a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28,
            a29, a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, a40, a41,
            a42, a43, a44, a45
        ])

        ap1 = AlternativeAssignment('a1', 'cat2')
        ap2 = AlternativeAssignment('a2', 'cat2')
        ap3 = AlternativeAssignment('a3', 'cat2')
        ap4 = AlternativeAssignment('a4', 'cat2')
        ap5 = AlternativeAssignment('a5', 'cat2')
        ap6 = AlternativeAssignment('a6', 'cat2')
        ap7 = AlternativeAssignment('a7', 'cat1')
        ap8 = AlternativeAssignment('a8', 'cat2')
        ap9 = AlternativeAssignment('a9', 'cat2')
        ap10 = AlternativeAssignment('a10', 'cat2')
        ap11 = AlternativeAssignment('a11', 'cat1')
        ap12 = AlternativeAssignment('a12', 'cat2')
        ap13 = AlternativeAssignment('a13', 'cat1')
        ap14 = AlternativeAssignment('a14', 'cat1')
        ap15 = AlternativeAssignment('a15', 'cat1')
        ap16 = AlternativeAssignment('a16', 'cat2')
        ap17 = AlternativeAssignment('a17', 'cat2')
        ap18 = AlternativeAssignment('a18', 'cat2')
        ap19 = AlternativeAssignment('a19', 'cat1')
        ap20 = AlternativeAssignment('a20', 'cat2')
        ap21 = AlternativeAssignment('a21', 'cat1')
        ap22 = AlternativeAssignment('a22', 'cat1')
        ap23 = AlternativeAssignment('a23', 'cat1')
        ap24 = AlternativeAssignment('a24', 'cat2')
        ap25 = AlternativeAssignment('a25', 'cat1')
        ap26 = AlternativeAssignment('a26', 'cat1')
        ap27 = AlternativeAssignment('a27', 'cat1')
        ap28 = AlternativeAssignment('a28', 'cat1')
        ap29 = AlternativeAssignment('a29', 'cat1')
        ap30 = AlternativeAssignment('a30', 'cat1')
        ap31 = AlternativeAssignment('a31', 'cat1')
        ap32 = AlternativeAssignment('a32', 'cat1')
        ap33 = AlternativeAssignment('a33', 'cat1')
        ap34 = AlternativeAssignment('a34', 'cat1')
        ap35 = AlternativeAssignment('a35', 'cat1')
        ap36 = AlternativeAssignment('a36', 'cat2')
        ap37 = AlternativeAssignment('a37', 'cat2')
        ap38 = AlternativeAssignment('a38', 'cat2')
        ap39 = AlternativeAssignment('a39', 'cat2')
        ap40 = AlternativeAssignment('a40', 'cat2')
        ap41 = AlternativeAssignment('a41', 'cat2')
        ap42 = AlternativeAssignment('a42', 'cat2')
        ap43 = AlternativeAssignment('a43', 'cat2')
        ap44 = AlternativeAssignment('a44', 'cat2')
        ap45 = AlternativeAssignment('a45', 'cat2')
        aa = AlternativesAssignments([
            ap1, ap2, ap3, ap4, ap5, ap6, ap7, ap8, ap9, ap10, ap11, ap12,
            ap13, ap14, ap15, ap16, ap17, ap18, ap19, ap20, ap21, ap22, ap23,
            ap24, ap25, ap26, ap27, ap28, ap29, ap30, ap31, ap32, ap33, ap34,
            ap35, ap36, ap37, ap38, ap39, ap40, ap41, ap42, ap43, ap44, ap45
        ])

        model = MRSort(c, w, bpt, 0.6, cps, v, vw, 0.4)
        aa2 = model.pessimist(pt)
        ok = compare_assignments(aa, aa2)
        self.assertEqual(ok, 1, "One or more alternatives were wrongly "
                         "assigned")
Example #16
class MetaMRSortProfiles3():

    def __init__(self, model, pt_sorted, aa_ori):
        self.na = len(aa_ori)
        self.model = model
        self.nprofiles = len(model.profiles)
        self.pt_sorted = pt_sorted
        self.aa_ori = aa_ori
        self.cat = self.categories_rank()
        self.cat_ranked = self.model.categories
        self.aa_by_cat = self.sort_alternative_by_category(aa_ori)
        self.b0 = pt_sorted.pt.get_worst(model.criteria)
        self.bp = pt_sorted.pt.get_best(model.criteria)
        self.compute_interval_ratios(3)
        self.build_concordance_table()
        self.build_assignments_table()

    def categories_rank(self):
        return { cat: i + 1 for i, cat in enumerate(self.model.categories) }

    def compute_interval_ratios(self, n):
        self.nintervals = n
        intervals = []
        for i in range(n-1):
            intervals += [ math.exp(i+1) ]
        s = sum(intervals)
        self.interval_ratios = [ i/s for i in intervals ] + [ 0.9 ]

    def update_intervals(self, fitness):
        if fitness > 0.99:
            self.compute_interval_ratios(8)
        elif fitness > 0.95:
            self.compute_interval_ratios(6)
        elif fitness > 0.9:
            self.compute_interval_ratios(5)
        else:
            self.compute_interval_ratios(4)

    def rank_categories(self, cat):
        c_rank = { c.id: c.rank for c in cat }
        return sorted(c_rank.keys())

    def sort_alternative_by_category(self, aa):
        aa_by_cat = {}
        for a in aa:
            aid = a.id
            cat = self.cat[a.category_id]
            if cat in aa_by_cat:
                aa_by_cat[cat].append(aid)
            else:
                aa_by_cat[cat] = [ aid ]
        return aa_by_cat

    def get_alternative_assignment(self, aid):
        profile = self.model.profiles[0]
        for profile in reversed(self.model.profiles):
            if self.ct[profile][aid] >= self.model.lbda:
                return self.model.categories_profiles[profile].value.upper

        return self.model.categories_profiles[profile].value.lower

    def build_assignments_table(self):
        self.good = 0
        self.aa = AlternativesAssignments()
        for aa in self.aa_ori.values():
            aid = aa.id
            cat = self.get_alternative_assignment(aid)
            self.aa.append(AlternativeAssignment(aid, cat))

            cat_ori = aa.category_id
            if cat == cat_ori:
                self.good += 1

    def build_concordance_table(self):
        self.ct = { bp.id: dict() for bp in self.model.bpt }
        for aid, bp in product(self.aa_ori.keys(), self.model.bpt):
            ap = self.pt_sorted[aid]
            conc = self.model.concordance(ap, bp)
            self.ct[bp.id][aid] = conc

    def rebuild_tables(self):
        self.build_concordance_table()
        self.build_assignments_table()

    def compute_above_histogram(self, cid, profile, above, cat_b, cat_a):
        h_above = {}
        size = above - profile
        intervals = [ profile + self.interval_ratios[i]*size \
                      for i in range(self.nintervals) ]
        intervals = [ profile ] + intervals
        ok = nok = 0
        for i in range(self.nintervals):
            alts = self.pt_sorted.get_middle(cid, intervals[i],
                                             intervals[i+1])[0]
            for a in alts:
                if self.aa(a) == cat_b and self.aa_ori(a) == cat_a:
                    ok += 1
                elif self.aa(a) == cat_a:
                    if self.aa_ori(a) == cat_a:
                        ok += 1
                    elif self.aa_ori(a) == cat_b:
                        nok += 1

            if (ok + nok) > 0:
                h_above[intervals[i+1]] = nok / (ok + nok)
            else:
                h_above[intervals[i+1]] = 0

        return h_above

    def compute_below_histogram(self, cid, profile, below, cat_b, cat_a):
        h_below = {}
        size = profile - below
        intervals = [ profile - self.interval_ratios[i]*size \
                      for i in range(self.nintervals) ]
        intervals = [ profile ] + intervals
        ok = nok = 0
        for i in range(self.nintervals):
            alts = self.pt_sorted.get_middle(cid, intervals[i],
                                             intervals[i+1])[0]
            for a in alts:
                if self.aa(a) == cat_a and self.aa_ori(a) == cat_b:
                    ok += 1
                elif self.aa(a) == cat_b:
                    if self.aa_ori(a) == cat_b:
                        ok += 1
                    elif self.aa_ori(a) == cat_a:
                        nok += 1

            if (ok + nok) > 0:
                h_below[intervals[i+1]] = nok / (ok + nok)
            else:
                h_below[intervals[i+1]] = 0

        return h_below

    def optimize_profile(self, profile, below, above, cat_b, cat_a):
        p_perfs = profile.performances
        a_perfs = above.performances
        b_perfs = below.performances

        moved = False
        max_val = 0

        for c in self.model.criteria:
            cid = c.id
            h_below = self.compute_below_histogram(cid, p_perfs[cid],
                                                   b_perfs[cid], cat_b,
                                                   cat_a)
            h_above = self.compute_above_histogram(cid, p_perfs[cid],
                                                   a_perfs[cid], cat_b,
                                                   cat_a)

            i_b = max(h_below, key=h_below.get)
            i_a = max(h_above, key=h_above.get)
            r = random.random()

            if h_below[i_b] > h_above[i_a]:
                if r < h_below[i_b]:
                    p_perfs[cid] = i_b
                    moved = True
                elif moved is False and h_below[i_b] > max_val:
                    max_val = h_below[i_b]
                    max_cid = cid
                    max_move = i_b
            elif h_below[i_b] < h_above[i_a]:
                if r < h_above[i_a]:
                    p_perfs[cid] = i_a
                    moved = True
                elif moved is False and h_above[i_a] > max_val:
                    max_val = h_above[i_a]
                    max_cid = cid
                    max_move = i_a
            elif r > 0.5:
                r2 = random.random()
                if r2 < h_below[i_b]:
                    p_perfs[cid] = i_b
                    moved = True
                elif moved is False and h_below[i_b] > max_val:
                    max_val = h_below[i_b]
                    max_cid = cid
                    max_move = i_b
            elif r < 0.5:
                r2 = random.random()
                if r2 < h_above[i_a]:
                    p_perfs[cid] = i_a
                    moved = True
                elif moved is False and h_above[i_a] > max_val:
                    max_val = h_above[i_a]
                    max_cid = cid
                    max_move = i_a

        if moved is False and max_val > 0:
            p_perfs[max_cid] = max_move

    def get_below_and_above_profiles(self, i):
        profiles = self.model.profiles
        bpt = self.model.bpt

        if i == 0:
            below = self.b0
        else:
            below = bpt[profiles[i-1]]

        if i == self.nprofiles-1:
            above = self.bp
        else:
            above = bpt[profiles[i+1]]

        return below, above

    def optimize(self):
        profiles = self.model.profiles
        for i, profile in enumerate(profiles):
            pperfs = self.model.bpt[profile]
            below, above = self.get_below_and_above_profiles(i)
            cat_b, cat_a = self.cat_ranked[i], self.cat_ranked[i+1]
            self.update_intervals(self.good / self.na)
            self.optimize_profile(pperfs, below, above, cat_b, cat_a)

        self.rebuild_tables()

        return self.good / self.na
Example #17
    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa
Example #18
    # Generate assignment incompatible with an MR-Sort model
    ap1 = AlternativePerformances('a1', {'c1': 1, 'c2': 1, 'c3': 0, 'c4': 0})
    ap2 = AlternativePerformances('a2', {'c1': 0, 'c2': 0, 'c3': 1, 'c4': 1})
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)

    # Run the MIP
    mip = MipCMRSort(model, pt, aa)
    mip.solve()

    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
Example #19
    def get_assignments(self, pt):
        aas = AlternativesAssignments([])
        for ap in pt:
            aas.append(self.get_assignment(ap))

        return aas
Example #20
class MetaMRSortProfiles4():

    def __init__(self, model, pt_sorted, aa_ori):
        self.na = len(aa_ori)
        self.nc = len(model.criteria)
        self.model = model
        self.nprofiles = len(model.profiles)
        self.pt_sorted = pt_sorted
        self.aa_ori = aa_ori
        self.cat = self.categories_rank()
        self.cat_ranked = self.model.categories
        self.aa_by_cat = self.sort_alternative_by_category(aa_ori)
        self.b0 = pt_sorted.pt.get_worst(model.criteria)
        self.bp = pt_sorted.pt.get_best(model.criteria)
        self.build_concordance_table()
        self.build_assignments_table()

    def categories_rank(self):
        return { cat: i + 1 for i, cat in enumerate(self.model.categories) }

    def sort_alternative_by_category(self, aa):
        aa_by_cat = {}
        for a in aa:
            aid = a.id
            cat = self.cat[a.category_id]
            if cat in aa_by_cat:
                aa_by_cat[cat].append(aid)
            else:
                aa_by_cat[cat] = [ aid ]
        return aa_by_cat

    def compute_above_histogram(self, cid, perf_profile, perf_above,
                                cat_b, cat_a, ct):
        w = self.model.cv[cid].value
        lbda = self.model.lbda
        direction = self.model.criteria[cid].direction
        delta = 0.00001 * direction

        h_above = {}
        num = total = 0
        alts, perfs = self.pt_sorted.get_middle(cid,
                                                perf_profile, perf_above,
                                                True, True)

        for i, a in enumerate(alts):
            if (perfs[i] + delta) * direction > perf_above * direction:
                continue

            conc = ct[a]
            aa_ori = self.aa_ori._d[a].category_id
            aa = self.aa._d[a].category_id
            diff = conc - w
            if aa_ori == cat_a:
                if aa == cat_a and diff < lbda:
                    # --
                    total += 5
                elif aa == cat_b:
                    # -
                    total += 1
            elif aa_ori == cat_b and aa == cat_a:
                if diff >= lbda:
                    # +
                    num += 0.5
                    total += 1
                    h_above[perfs[i] + delta] = num / total
                else:
                    # ++
                    num += 2
                    total += 1
                    h_above[perfs[i] + delta] = num / total
#            elif self.aa_ori(a) < self.aa(a) and \
            elif aa_ori != aa and \
                 self.cat[aa] < self.cat[cat_a] and \
                 self.cat[aa_ori] < self.cat[cat_a]:
                num += 0.1
                total += 1
                h_above[perfs[i] + delta] = num / total

        return h_above

    def compute_below_histogram(self, cid, perf_profile, perf_below,
                                cat_b, cat_a, ct):
        w = self.model.cv[cid].value
        lbda = self.model.lbda

        h_below = {}
        num = total = 0
        alts, perfs = self.pt_sorted.get_middle(cid,
                                                perf_profile, perf_below,
                                                True, True)

        for i, a in enumerate(alts):
            conc = ct[a]
            aa_ori = self.aa_ori._d[a].category_id
            aa = self.aa._d[a].category_id
            diff = conc + w
            if aa_ori == cat_a and aa == cat_b:
                if diff >= lbda:
                    # ++
                    num += 2
                    total += 1
                    h_below[perfs[i]] = num / total
                else:
                    # +
                    num += 0.5
                    total += 1
                    h_below[perfs[i]] = num / total
            elif aa_ori == cat_b:
                if aa == cat_b and diff >= lbda:
                    # --
                    total += 5
                elif aa == cat_a:
                    # -
                    total += 1
#            elif self.aa_ori(a) > self.aa(a) and \
            elif aa_ori != aa and \
                 self.cat[aa] > self.cat[cat_b] and \
                 self.cat[aa_ori] > self.cat[cat_b]:
                num += 0.1
                total += 1
                h_below[perfs[i]] = num / total

        return h_below

    def histogram_choose(self, h, current):
        key = random.choice(list(h.keys()))  # list() needed under Python 3
        val = h[key]
        diff = abs(current - key)
        for k, v in h.items():
            if v >= val:
                tmp = abs(current - k)
                if tmp > diff:
                    key = k
                    val = v
                    diff = tmp
        return key

    def get_alternative_assignment(self, aid):
        profile = self.model.profiles[0]
        for profile in reversed(self.model.profiles):
            if self.ct[profile][aid] >= self.model.lbda \
               or eq(self.ct[profile][aid], self.model.lbda):
                return self.model.categories_profiles[profile].value.upper

        return self.model.categories_profiles[profile].value.lower

    def build_assignments_table(self):
        self.good = 0
        self.aa = AlternativesAssignments()
        for aa in self.aa_ori.values():
            aid = aa.id
            cat = self.get_alternative_assignment(aid)
            self.aa.append(AlternativeAssignment(aid, cat))

            cat_ori = aa.category_id
            if cat == cat_ori:
                self.good += 1

    def build_concordance_table(self):
        self.ct = { bp.id: dict() for bp in self.model.bpt }
        for aid, bp in product(self.aa_ori.keys(), self.model.bpt):
            ap = self.pt_sorted[aid]
            conc = self.model.concordance(ap, bp)
            self.ct[bp.id][aid] = conc

    def rebuild_tables(self):
        self.build_concordance_table()
        self.build_assignments_table()

    def update_tables(self, profile, cid, old, new):
        direction = self.model.criteria[cid].direction
        if old > new:
            if direction == 1:
                down, up = True, False
            else:
                down, up = False, True
            w = self.model.cv[cid].value * direction
        else:
            if direction == 1:
                down, up = False, True
            else:
                down, up = True, False
            w = -self.model.cv[cid].value * direction

        alts, perfs = self.pt_sorted.get_middle(cid, old, new, up, down)

        for a in alts:
            self.ct[profile][a] += w
            old_cat = self.aa[a].category_id
            new_cat = self.get_alternative_assignment(a)
            ori_cat = self.aa_ori[a].category_id

            if old_cat == new_cat:
                continue
            elif old_cat == ori_cat:
                self.good -= 1
            elif new_cat == ori_cat:
                self.good += 1

            self.aa[a].category_id = new_cat

    def optimize_profile(self, profile, below, above, cat_b, cat_a):
        cids = list(self.model.criteria.keys())  # list() so shuffle works in Python 3
        random.shuffle(cids)

        for cid in cids:
            ct = self.ct[profile.id]

            perf_profile = profile.performances[cid]
            perf_above = above.performances[cid]
            perf_below = below.performances[cid]

            h_below = self.compute_below_histogram(cid, perf_profile,
                                                   perf_below, cat_b,
                                                   cat_a, ct)
            h_above = self.compute_above_histogram(cid, perf_profile,
                                                   perf_above, cat_b,
                                                   cat_a, ct)
            h = h_below
            h.update(h_above)

            if not h:
                continue

            i = self.histogram_choose(h, perf_profile)

            r = random.uniform(0, 1)

            if r <= h[i]:
                self.update_tables(profile.id, cid, perf_profile, i)
                profile.performances[cid] = i

    def get_below_and_above_profiles(self, i):
        profiles = self.model.profiles
        bpt = self.model.bpt

        if i == 0:
            below = self.b0
        else:
            below = bpt[profiles[i-1]]

        if i == self.nprofiles-1:
            above = self.bp
        else:
            above = bpt[profiles[i+1]]

        return below, above

    def optimize(self):
        profiles = self.model.profiles
        for i, profile in enumerate(profiles):
            pperfs = self.model.bpt[profile]
            below, above = self.get_below_and_above_profiles(i)
            cat_b, cat_a = self.cat_ranked[i], self.cat_ranked[i+1]
            self.optimize_profile(pperfs, below, above, cat_b, cat_a)

        return self.good / self.na
Example #21
    fcoalitions = open('%s-wcoalitions.dat' % bname, 'w+')

    mwinning = compute_minimal_winning_coalitions(lp.fmins)
    for win in mwinning:
        win = list(win)
        win.sort(key=criteria_order.index)
        buf = ""
        for c in win:
            buf += "%s, " % criteria_names[c]
        print('[%s]' % buf[:-2], file=fcoalitions)

    fcoalitions.close()

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_learned = m.pessimist(pt_learning)

    fca = open('%s-ca_learning.dat' % bname, 'w+')
    ca = compute_ca(aa_learning, aa_learned)
    print("%.4f" % ca, end='', file=fca)
    fca.close()

    fauc = open('%s-auc_learning.dat' % bname, 'w+')
    auc = m.auc(aa_learning, pt_learning)
    print("%.4f" % auc, end='', file=fauc)
    fauc.close()

    ca_learning.append(ca)
    auc_learning.append(auc)
Example #22
nveto = 0
for f in sys.argv[1:]:
    f = os.path.abspath(f)
    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    import pdb
    pdb.set_trace()
    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root, 'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)

    table_ca_learning.append(ca_learning)
    table_ca_test.append(ca_test)

    # Compute area under the curve
    auc_learning = m.auc(aa_learning, pt_learning)
    auc_test = m.auc(aa_test, pt_test)
Example #23
# Categories
cat1 = Category('cat1', rank=3)
cat2 = Category('cat2', rank=2)
cat3 = Category('cat3', rank=1)
cats = Categories([cat1, cat2, cat3])

# Categories profiles
cp1 = CategoryProfile('b1', Limits('cat1', 'cat2'))
cp2 = CategoryProfile('b2', Limits('cat2', 'cat3'))
cps = CategoriesProfiles([cp1, cp2])

# Alternatives assignments
aap1 = AlternativeAssignment('a1', 'cat2')
aap2 = AlternativeAssignment('a2', 'cat1')
aap3 = AlternativeAssignment('a3', 'cat2')
aap4 = AlternativeAssignment('a4', 'cat3')
aap5 = AlternativeAssignment('a5', 'cat1')
aap6 = AlternativeAssignment('a6', 'cat2')
aap7 = AlternativeAssignment('a7', 'cat2')
aap = AlternativesAssignments([aap1, aap2, aap3, aap4, aap5, aap6, aap7])

aao1 = AlternativeAssignment('a1', 'cat2')
aao2 = AlternativeAssignment('a2', 'cat3')
aao3 = AlternativeAssignment('a3', 'cat2')
aao4 = AlternativeAssignment('a4', 'cat3')
aao5 = AlternativeAssignment('a5', 'cat2')
aao6 = AlternativeAssignment('a6', 'cat2')
aao7 = AlternativeAssignment('a7', 'cat2')
aao = AlternativesAssignments([aao1, aao2, aao3, aao4, aao5, aao6, aao7])
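These two assignment sets could feed the filter from Example #4; a hedged sketch, assuming get_ordered_categories() returns the category ids worst first, as in Example #1:

# Hedged sketch: keep only alternatives that aao places at least as high
# as aap does.
kept = discard_undersorted_alternatives(cats.get_ordered_categories(),
                                        aap, aao)
print([a.id for a in kept])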
Example #24
    # MR-Sort model
    model = generate_random_mrsort_model_with_coalition_veto(5, 3, 4)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn*len(a)))
    aa_learn = AlternativesAssignments([ aa[alt.id] for alt in a_learn ])
    pt_learn = PerformanceTable([ pt[alt.id] for alt in a_learn ])

    aa_err = aa_learn.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories, errors)

    print('Original model')
    print('==============')
    print("Number of alternatives: %d" % len(a))
    print("Number of learning alternatives: %d" % len(aa_learn))
    print("Errors in alternatives assignments: %g%%" % (errors*100))
    cids = model.criteria.keys()
    model.bpt.display(criterion_ids = cids)
    model.cv.display(criterion_ids = cids)
    print("lambda\t%.7s" % model.lbda)
    print("delta: %g" % delta)
Example #25
cmatrix_learning = {}
cmatrix_test = {}

nveto = 0
for f in sys.argv[1:]:
    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()
    m = MRSort().from_xmcda(root, 'learned')

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    pt_test = PerformanceTable().from_xmcda(root, 'test_set')

    aa_learning = AlternativesAssignments().from_xmcda(root,
                                                       'learning_set')
    aa_test = AlternativesAssignments().from_xmcda(root,
                                                  'test_set')

    aa_learning_m2 = m.pessimist(pt_learning)
    aa_test_m2 = m.pessimist(pt_test)

    # Compute classification accuracy
    ca_learning = compute_ca(aa_learning, aa_learning_m2)
    ca_test = compute_ca(aa_test, aa_test_m2)

    table_ca_learning.append(ca_learning)
    table_ca_test.append(ca_test)

    # Compute area under the curve
    auc_learning = m.auc(aa_learning, pt_learning)
Example #26
    def get_assignments(self, pt):
        aa = AlternativesAssignments()
        for ap in pt:
            a = self.get_assignment(ap)
            aa.append(a)
        return aa
Example #27
class MetaMRSortProfiles3():
    def __init__(self, model, pt_sorted, aa_ori):
        self.na = len(aa_ori)
        self.model = model
        self.nprofiles = len(model.profiles)
        self.pt_sorted = pt_sorted
        self.aa_ori = aa_ori
        self.cat = self.categories_rank()
        self.cat_ranked = self.model.categories
        self.aa_by_cat = self.sort_alternative_by_category(aa_ori)
        self.b0 = pt_sorted.pt.get_worst(model.criteria)
        self.bp = pt_sorted.pt.get_best(model.criteria)
        self.compute_interval_ratios(3)
        self.build_concordance_table()
        self.build_assignments_table()

    def categories_rank(self):
        return {cat: i + 1 for i, cat in enumerate(self.model.categories)}

    def compute_interval_ratios(self, n):
        self.nintervals = n
        intervals = []
        for i in range(n - 1):
            intervals += [math.exp(i + 1)]
        s = sum(intervals)
        self.interval_ratios = [i / s for i in intervals] + [0.9]

    def update_intervals(self, fitness):
        if fitness > 0.99:
            self.compute_interval_ratios(8)
        elif fitness > 0.95:
            self.compute_interval_ratios(6)
        elif fitness > 0.9:
            self.compute_interval_ratios(5)
        else:
            self.compute_interval_ratios(4)

    def rank_categories(self, cat):
        c_rank = {c.id: c.rank for c in cat}
        return sorted(c_rank.keys())

    def sort_alternative_by_category(self, aa):
        aa_by_cat = {}
        for a in aa:
            aid = a.id
            cat = self.cat[a.category_id]
            if cat in aa_by_cat:
                aa_by_cat[cat].append(aid)
            else:
                aa_by_cat[cat] = [aid]
        return aa_by_cat

    def get_alternative_assignment(self, aid):
        profile = self.model.profiles[0]
        for profile in reversed(self.model.profiles):
            if self.ct[profile][aid] >= self.model.lbda:
                return self.model.categories_profiles[profile].value.upper

        return self.model.categories_profiles[profile].value.lower

    def build_assignments_table(self):
        self.good = 0
        self.aa = AlternativesAssignments()
        for aa in self.aa_ori.values():
            aid = aa.id
            cat = self.get_alternative_assignment(aid)
            self.aa.append(AlternativeAssignment(aid, cat))

            cat_ori = aa.category_id
            if cat == cat_ori:
                self.good += 1

    def build_concordance_table(self):
        self.ct = {bp.id: dict() for bp in self.model.bpt}
        for aid, bp in product(self.aa_ori.keys(), self.model.bpt):
            ap = self.pt_sorted[aid]
            conc = self.model.concordance(ap, bp)
            self.ct[bp.id][aid] = conc

    def rebuild_tables(self):
        self.build_concordance_table()
        self.build_assignments_table()

    def compute_above_histogram(self, cid, profile, above, cat_b, cat_a):
        h_above = {}
        size = above - profile
        intervals = [profile + self.interval_ratios[i] * size
                     for i in range(self.nintervals)]
        intervals = [profile] + intervals
        ok = nok = 0
        for i in range(self.nintervals):
            alts = self.pt_sorted.get_middle(cid, intervals[i],
                                             intervals[i + 1])[0]
            for a in alts:
                if self.aa(a) == cat_b and self.aa_ori(a) == cat_a:
                    ok += 1
                elif self.aa(a) == cat_a:
                    if self.aa_ori(a) == cat_a:
                        ok += 1
                    elif self.aa_ori(a) == cat_b:
                        nok += 1

            if (ok + nok) > 0:
                h_above[intervals[i + 1]] = nok / (ok + nok)
            else:
                h_above[intervals[i + 1]] = 0

        return h_above

    def compute_below_histogram(self, cid, profile, below, cat_b, cat_a):
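        # Mirror of compute_above_histogram for candidate positions below the
        # profile: nok counts alternatives a downward move could correct
        # (assigned cat_b but labelled cat_a).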
        h_below = {}
        size = profile - below
        intervals = [ profile - self.interval_ratios[i]*size \
                      for i in range(self.nintervals) ]
        intervals = [profile] + intervals
        ok = nok = 0
        for i in range(self.nintervals):
            alts = self.pt_sorted.get_middle(cid, intervals[i],
                                             intervals[i + 1])[0]
            for a in alts:
                if self.aa(a) == cat_a and self.aa_ori(a) == cat_b:
                    ok += 1
                elif self.aa(a) == cat_b:
                    if self.aa_ori(a) == cat_b:
                        ok += 1
                    elif self.aa_ori(a) == cat_a:
                        nok += 1

            if (ok + nok) > 0:
                h_below[intervals[i + 1]] = nok / (ok + nok)
            else:
                h_below[intervals[i + 1]] = 0

        return h_below

    def optimize_profile(self, profile, below, above, cat_b, cat_a):
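        # For each criterion, take the best-scoring candidate move below and
        # above the profile and accept the better one with a probability
        # equal to its score; if no move is accepted on any criterion, apply
        # the single best-scoring candidate found.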
        p_perfs = profile.performances
        a_perfs = above.performances
        b_perfs = below.performances

        moved = False
        max_val = 0

        for c in self.model.criteria:
            cid = c.id
            h_below = self.compute_below_histogram(cid, p_perfs[cid],
                                                   b_perfs[cid], cat_b, cat_a)
            h_above = self.compute_above_histogram(cid, p_perfs[cid],
                                                   a_perfs[cid], cat_b, cat_a)

            i_b = max(h_below, key=h_below.get)
            i_a = max(h_above, key=h_above.get)
            r = random.random()

            if h_below[i_b] > h_above[i_a]:
                if r < h_below[i_b]:
                    p_perfs[cid] = i_b
                    moved = True
                elif moved is False and h_below[i_b] > max_val:
                    max_val = h_below[i_b]
                    max_cid = cid
                    max_move = i_b
            elif h_below[i_b] < h_above[i_a]:
                if r < h_above[i_a]:
                    p_perfs[cid] = i_a
                    moved = True
                elif moved is False and h_above[i_a] > max_val:
                    max_val = h_above[i_a]
                    max_cid = cid
                    max_move = i_a
            elif r > 0.5:
                r2 = random.random()
                if r2 < h_below[i_b]:
                    p_perfs[cid] = i_b
                    moved = True
                elif moved is False and h_below[i_b] > max_val:
                    max_val = h_below[i_b]
                    max_cid = cid
                    max_move = i_b
            elif r < 0.5:
                r2 = random.random()
                if r2 < h_above[i_a]:
                    p_perfs[cid] = i_a
                    moved = True
                elif moved is False and h_above[i_a] > max_val:
                    max_val = h_above[i_a]
                    max_cid = cid
                    max_move = i_a

        if moved is False and max_val > 0:
            p_perfs[max_cid] = max_move

    def get_below_and_above_profiles(self, i):
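        # Performance vectors bounding profile i: the neighbouring profiles,
        # or the worst/best observed performances for the outermost profiles.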
        profiles = self.model.profiles
        bpt = self.model.bpt

        if i == 0:
            below = self.b0
        else:
            below = bpt[profiles[i - 1]]

        if i == self.nprofiles - 1:
            above = self.bp
        else:
            above = bpt[profiles[i + 1]]

        return below, above

    def optimize(self):
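        # One heuristic pass: adjust every profile between its neighbours,
        # rebuild the concordance and assignment tables and return the
        # classification accuracy self.good / self.na.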
        profiles = self.model.profiles
        for i, profile in enumerate(profiles):
            pperfs = self.model.bpt[profile]
            below, above = self.get_below_and_above_profiles(i)
            cat_b, cat_a = self.cat_ranked[i], self.cat_ranked[i + 1]
            self.update_intervals(self.good / self.na)
            self.optimize_profile(pperfs, below, above, cat_b, cat_a)

        self.rebuild_tables()

        return self.good / self.na
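

# --- Hedged usage sketch (not part of the original snippet) -----------------
# One way such a profile metaheuristic is driven: wrap the performance table
# in the sorted structure the class expects and iterate optimize() until the
# returned classification accuracy is good enough.  SortedPerformanceTable is
# assumed to live in pymcda.pt_sorted; adjust the import to your version of
# the library.  Note that optimize() mutates model.bpt in place, so a fuller
# implementation would also keep a copy of the best profiles seen so far.
from pymcda.pt_sorted import SortedPerformanceTable

def improve_profiles(model, pt, aa, nloops = 30):
    meta = MetaMRSortProfiles3(model, SortedPerformanceTable(pt), aa)
    best_ca = 0
    for _ in range(nloops):
        ca = meta.optimize()   # fraction of alternatives reassigned correctly
        best_ca = max(best_ca, ca)
        if ca == 1:
            break
    return best_ca
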
    if not os.path.isfile(f):
        xmcda_models_toshow.append(f)
        continue

    if is_bz2_file(f) is True:
        f = bz2.BZ2File(f)

    tree = ElementTree.parse(f)
    root = tree.getroot()

    xmcda_models = root.findall(".//ElectreTri")

    m = MRSort().from_xmcda(xmcda_models[0])

    pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
    aa_learning = AlternativesAssignments().from_xmcda(root, 'learning_set')

    uniquevalues = pt_learning.get_unique_values()

    bname = os.path.basename(os.path.splitext(f.name)[0])
    fweights = open('%s-w.dat' % bname, 'w+')
    fprofiles = open('%s-p.dat' % bname, 'w+')

    print("Processing %s..." % bname)

    criteria = m.criteria.keys()
    for c in criteria:
        print("{%s} " % criteria_names[c], end='', file=fprofiles)
    print('', file=fprofiles)

    profiles = reversed(m.categories_profiles.get_ordered_profiles())
if root.find("ElectreTri[@id='initial']") is not None:
    m1 = MRSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
    m1 = AVFSort().from_xmcda(root, 'initial')
    if pt_learning is not None:
        aa_learning_m1 = m1.get_assignments(pt_learning)
    if pt_test is not None:
        aa_test_m1 = m1.get_assignments(pt_test)
else:
    if root.find("alternativesAffectations[@id='learning_set']") is not None:
        aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
                                                              'learning_set')

    if root.find("alternativesAffectations[@id='test_set']") is not None:
        aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set')

if root.find("ElectreTri[@id='learned']") is not None:
    m2 = MRSort().from_xmcda(root, 'learned')
    if pt_learning is not None:
        aa_learning_m2 = m2.pessimist(pt_learning)
    if pt_test is not None:
        aa_test_m2 = m2.pessimist(pt_test)
elif root.find("AVFSort[@id='learned']") is not None:
    m2 = AVFSort().from_xmcda(root, 'learned')
    if pt_learning is not None:
        aa_learning_m2 = m2.get_assignments(pt_learning)
        aids = []
Example #30
0
    def get_assignments(self, pt):
        aas = AlternativesAssignments([])
        for ap in pt:
            aas.append(self.get_assignment(ap))

        return aas
Example #31
0
    # Generate assignment incompatible with an MR-Sort model
    ap1 = AlternativePerformances('a1', {'c1': 1, 'c2': 1, 'c3': 0, 'c4': 0})
    ap2 = AlternativePerformances('a2', {'c1': 0, 'c2': 0, 'c3': 1, 'c4': 1})
    ap3 = AlternativePerformances('a3', {'c1': 1, 'c2': 0, 'c3': 1, 'c4': 0})
    ap4 = AlternativePerformances('a4', {'c1': 1, 'c2': 0, 'c3': 0, 'c4': 1})
    ap5 = AlternativePerformances('a5', {'c1': 0, 'c2': 1, 'c3': 1, 'c4': 0})
    ap6 = AlternativePerformances('a6', {'c1': 0, 'c2': 1, 'c3': 0, 'c4': 1})
    pt = PerformanceTable([ap1, ap2, ap3, ap4, ap5, ap6])

    aa1 = AlternativeAssignment('a1', 'cat1')
    aa2 = AlternativeAssignment('a2', 'cat1')
    aa3 = AlternativeAssignment('a3', 'cat2')
    aa4 = AlternativeAssignment('a4', 'cat2')
    aa5 = AlternativeAssignment('a5', 'cat2')
    aa6 = AlternativeAssignment('a6', 'cat2')
    aa = AlternativesAssignments([aa1, aa2, aa3, aa4, aa5, aa6])
    print_pt_and_assignments(aa.keys(), c.keys(), [aa], pt)

    model = MRSort(c, None, None, None, cps)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)

    # Run the MIP
    mip = MipCMRSort(model, pt, aa)
    mip.solve()

    # Display learned model parameters
    print('Learned model')
    print('=============')
    model.bpt.display()
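
    # Hedged follow-up (not in the original example): reassign the learning
    # alternatives with the learned model and count how many of the original,
    # deliberately MR-Sort-incompatible assignments it reproduces; pessimist()
    # is the MR-Sort assignment rule used in the other examples.
    aa_learned = model.pessimist(pt)
    ncorrect = sum(1 for a in aa
                   if aa_learned[a.id].category_id == a.category_id)
    print("Assignments restored by the learned model: %d/%d"
          % (ncorrect, len(aa)))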