Example #1
    def one_test2(self, seed, ncrit, ncat, na):
        model = generate_random_mrsort_model(ncrit, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        model2.cvs = None

        lp_weights = LpMRSortWeights(model2, pt, aa)

        bids = model.categories_profiles.get_ordered_profiles()
        bpt = generate_random_profiles(bids, model.criteria)

        model.bpt = model2.bpt = bpt
        aa = model.pessimist(pt)

        lp_weights.aa_ori = aa
        lp_weights.update_linear_program()

        lp_weights.solve()

        aa2 = model2.pessimist(pt)

        self.assertEqual(aa, aa2)
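
The test above follows the setup shared by most examples on this page: generate a random MR-Sort model, draw alternatives and a random performance table, assign them pessimistically, then hand the assignments to a learning component (here LpMRSortWeights) and check that the learned model reproduces them. A minimal sketch of that common setup, assuming the pymcda imports shown in the __main__ blocks further down:

from pymcda.generate import generate_random_mrsort_model
from pymcda.generate import generate_alternatives
from pymcda.generate import generate_random_performance_table

model = generate_random_mrsort_model(5, 3, 123)  # 5 criteria, 3 categories, seed 123
a = generate_alternatives(100)
pt = generate_random_performance_table(a, model.criteria)
aa = model.pessimist(pt)  # pessimistic ELECTRE TRI BM assignments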
Example #4
    def test003(self):
        model = generate_random_mrsort_model(5, 2, seed=3)
        a = generate_alternatives(10)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        self.mcda_object_to_xmcda_file(model.criteria.to_xmcda(),
                                       "criteria.xml")
        categories = model.categories_profiles.to_categories()
        self.mcda_object_to_xmcda_file(categories.to_xmcda(),
                                       "categories.xml")
        self.mcda_object_to_xmcda_file(a.to_xmcda(), "alternatives.xml")
        self.mcda_object_to_xmcda_file(pt.to_xmcda(), "perfs_table.xml")
        self.mcda_object_to_xmcda_file(aa.to_xmcda(), "assign.xml")
        self.mcda_object_to_xmcda_file(model.bpt.to_xmcda(),
                                       "profiles_perfs.xml")

        mrsort_mip(self.indir, self.outdir)

        self.check_output_is_complete()

        model2 = self.get_output_model(model.criteria)
        aa2 = model2.get_assignments(pt)

        self.assertEqual(aa, aa2)
        self.assertEqual(model.bpt, model2.bpt)
    def generate_mr_sort_model(self):
        ncrit = self.spinbox_criteria.value()
        ncat = self.spinbox_categories.value()

        self.model = generate_random_mrsort_model(ncrit,
                                                  ncat,
                                                  worst=self.worst,
                                                  best=self.best)
        self.categories = self.model.categories_profiles.to_categories()
        self.plot_mr_sort_model(self.model, self.layout_original)
Example #6
def test_heur_mrsort_init_profiles(seed, na, nc, ncat, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()
    model3 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    # Compute assignments
    aa = model.pessimist(pt)

    # Initialize the second model with randomly generated profiles
    b = model.categories_profiles.get_ordered_profiles()
    model2.bpt = generate_random_profiles(b, model2.criteria)

    # Run the heuristic
    cats = model.categories_profiles.to_categories()
    pt_sorted = SortedPerformanceTable(pt)
    heur = HeurMRSortInitProfiles(model3, pt_sorted, aa)
    heur.solve()

    # Learn the weights and cut threshold
    cps = model.categories_profiles

    lp_weights = LpMRSortWeights(model2, pt, aa)
    lp_weights.solve()

    lp_weights = LpMRSortWeights(model3, pt, aa)
    lp_weights.solve()

    # Compute the classification accuracy
    aa2 = model2.pessimist(pt)
    aa3 = model3.pessimist(pt)

    ca2 = compute_ca(aa, aa2)
    ca3 = compute_ca(aa, aa3)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%g" % (seed, na, nc, ncat, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['pcerrors'] = pcerrors

    # Output params
    t['ca_rdom'] = ca2
    t['ca_heur'] = ca3

    return t
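
Here ca_rdom is the accuracy obtained with randomly generated profiles (model2) and ca_heur the accuracy obtained with profiles initialized by HeurMRSortInitProfiles (model3), both after the same weight-learning step. compute_ca(aa, aa2) is the fraction of alternatives assigned to the same category in both assignment sets; the explicit loops in later examples compute the same ratio by hand:

ok = 0
for alt in a:
    if aa(alt.id) == aa2(alt.id):
        ok += 1
ca = ok / float(len(a))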
Example #9
    def test003_auc_no_errors(self):
        random.seed(3)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 3)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)

        auc = model.auc(aa, pt)
        self.assertEqual(auc, 1)
Example #11
    def test002_auck_all_errors(self):
        random.seed(2)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 2)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)
        aa_err = add_errors_in_assignments(aa, model.categories, 1)

        auck = model.auck(aa_err, pt, 1)
        self.assertEqual(auck, 0)
Example #13
    def test003(self):
        random.seed(0)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 2)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)

        aa = model.get_assignments(pt)

        self.assertEqual(len(aa.get_alternatives_in_categories(['cat1'])), 65)
        self.assertEqual(len(aa.get_alternatives_in_categories(['cat2'])), 935)
Example #15
    def one_test(self, seed, ncrit, ncat, na):
        model = generate_random_mrsort_model(ncrit, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        model2.cvs = None

        lp_weights = LpMRSortWeights(model2, pt, aa)
        lp_weights.solve()

        aa2 = model2.pessimist(pt)

        self.assertEqual(aa, aa2)
Example #17
    def one_test(self, seed, na, nc, ncat, ca_expected):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        pt_sorted = SortedPerformanceTable(pt)
        heur = HeurMRSortInitProfiles(model, pt_sorted, aa)
        heur.solve()

        aa2 = model.pessimist(pt)

        ca = compute_ca(aa, aa2)

        self.assertEqual(ca, ca_expected)
Example #19
    def generate_model(self):
        ncrit = self.spinbox_criteria.value()
        ncat = self.spinbox_categories.value()

        # FIXME
        crit = generate_criteria(ncrit)
        self.worst = AlternativePerformances("worst", {c.id: 0 for c in crit})
        self.best = AlternativePerformances("best", {c.id: 10 for c in crit})

        self.model = generate_random_mrsort_model(ncrit, ncat,
                                                  worst=self.worst,
                                                  best=self.best)

        self.graph_model = QGraphicsSceneEtri(self.model,
                                              self.worst, self.best,
                                              self.graphicv_original.size())
        self.graphicv_original.setScene(self.graph_model)
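
The worst and best tables above only fix the evaluation scale of each criterion before the model is generated (0 to 10 here, 0 to 1 in the __main__ blocks below). A standalone sketch of the same call outside the GUI, with the sizes chosen arbitrarily:

crit = generate_criteria(7)
worst = AlternativePerformances("worst", {c.id: 0 for c in crit})
best = AlternativePerformances("best", {c.id: 10 for c in crit})
model = generate_random_mrsort_model(7, 3, worst=worst, best=best)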
Example #20
def test_heur_mrsort_coalitions(seed, na, nc, ncat, pcexamples, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    # Compute assignments
    aa = model.pessimist(pt)

    # Run the heuristic
    heur = HeurMRSortCoalitions(model.criteria, model.categories, pt, aa)
    coal2 = heur.find_coalitions(int(na * pcexamples))

    # Compute the original winning coalitions
    winning, loosing = compute_winning_and_loosing_coalitions(
        model.cv, model.lbda)

    # Compare original and computed coalitions
    coal_ni = list((set(winning) ^ set(coal2)) & set(winning))
    coal_add = list((set(winning) ^ set(coal2)) & set(coal2))

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%g-%g" %
                    (seed, na, nc, ncat, pcexamples, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['pcexamples'] = pcexamples
    t['pcerrors'] = pcerrors

    # Output params
    t['ncoal'] = len(winning)
    t['ncoal_ni'] = len(coal_ni)
    t['ncoal_add'] = len(coal_add)

    return t
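
The two set expressions above split the symmetric difference between the true winning coalitions and the coalitions recovered by the heuristic: coal_ni are winning coalitions that were not identified, coal_add are coalitions that were reported although they are not winning. With plain Python sets:

winning = {('c1',), ('c1', 'c2')}
found = {('c1', 'c2'), ('c2', 'c3')}
missed = (winning ^ found) & winning  # {('c1',)}: winning but not identified
added = (winning ^ found) & found     # {('c2', 'c3')}: identified but not winning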
Example #21
    def one_test(self, seed, na, nc, ncat, max_loop, n):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        bids = model2.categories_profiles.get_ordered_profiles()
        model2.bpt = generate_random_profiles(bids, model.criteria)

        pt_sorted = SortedPerformanceTable(pt)

        meta = MetaMRSortProfiles4(model2, pt_sorted, aa)

        for i in range(1, max_loop + 1):
            ca = meta.optimize()
            if ca == 1:
                break

        aa2 = model2.pessimist(pt)

        self.assertEqual(i, n)
        self.assertEqual(aa, aa2)
Example #24
    def one_test(self, seed, na, nc, ncat, pcerrors):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)
        aa_err = aa.copy()
        add_errors_in_assignments(aa_err, model.categories, pcerrors / 100)

        model2 = model.copy()
        model2.bpt = None
        model2.cv = None
        model2.lbda = None

        mip = MipMRSort(model2, pt, aa_err)
        obj = mip.solve()

        aa2 = model2.pessimist(pt)

        ca = compute_ca(aa, aa2)
        ca2 = compute_ca(aa_err, aa2)

        self.assertEqual(ca2, obj / len(a))
        self.assertLessEqual(pcerrors / 100, ca2)
        self.model.bpt = bpt


if __name__ == "__main__":
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.pt_sorted import SortedPerformanceTable
    from pymcda.utils import compute_ca
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.utils import display_coalitions
    from pymcda.learning.lp_mrsort_weights import LpMRSortWeights
    from pymcda.ui.graphic import display_electre_tri_models

    model = generate_random_mrsort_model(10, 3, 17)
    winning, loosing = compute_winning_and_loosing_coalitions(
        model.cv, model.lbda)
    print("Number of coalitions: %d" % len(winning))

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    sorted_pt = SortedPerformanceTable(pt)

    aa = model.pessimist(pt)

    for cat in model.categories_profiles.get_ordered_categories():
        pc = len(aa.get_alternatives_in_category(cat)) / len(aa) * 100
        print("Percentage of alternatives in %s: %g %%" % (cat, pc))

    # Learn the weights with randomly generated profiles
        self.model.bpt = best_bpt
        self.ca = best_ca
        aa2 = self.model.pessimist(self.pt_sorted.pt)
        return compute_ca(self.aa_ori, aa2)

if __name__ == "__main__":
    import time
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativePerformances
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(10, 3, 1)
    worst = AlternativePerformances("worst",
                                     {c.id: 0 for c in model.criteria})
    best = AlternativePerformances("best",
                                    {c.id: 1 for c in model.criteria})

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    nmeta = 20
    nloops = 30

    print('Original model')
    print('==============')
        self.model.veto = vpt


if __name__ == "__main__":
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.pt_sorted import SortedPerformanceTable
    from pymcda.utils import compute_ca
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.utils import display_coalitions
    from pymcda.learning.lp_mrsort_weights import LpMRSortWeights
    from pymcda.ui.graphic import display_electre_tri_models

    model = generate_random_mrsort_model(10, 2, 1)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    sorted_pt = SortedPerformanceTable(pt)

    aa = model.pessimist(pt)

    # Learn the veto weights with profiles generated by the heuristic
    model3 = model.copy()
    heur = HeurMRSortInitVetoProfiles(model3, sorted_pt, aa)
    heur.solve()

    print(model3.veto)
Example #29
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
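
The bookkeeping above separates three ratios (all divided by the total number of learning alternatives): ca_best is the agreement of the learned model with the original model, ca_errors counts the perturbed examples that are nevertheless assigned as the original model would assign them, and ca2_errors counts the perturbed examples whose wrong label the learned model reproduces. A small worked example with hypothetical numbers, say 100 alternatives of which 10 were perturbed:

total = 100
ca_best = 90 / float(total)    # learned model agrees with the original one on 90 alternatives
ca_errors = 7 / float(total)   # 7 of the 10 perturbed examples still get their original category
ca2_errors = 2 / float(total)  # 2 of the 10 perturbed examples get their perturbed category
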
            sum([k * self.yp[i] for i, k in enumerate(self.c_yi.values())]))


if __name__ == "__main__":
    import time
    import random
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.utils import add_errors_in_assignments
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativesAssignments, PerformanceTable

    # Original Electre Tri model
    model = generate_random_mrsort_model(10, 5, 890)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn * len(a)))
    aa_learn = AlternativesAssignments([aa[alt.id] for alt in a_learn])
    pt_learn = PerformanceTable([pt[alt.id] for alt in a_learn])
Example #34
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'mrsort':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'ncs':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)
    elif random_model_type == 'mrsortcv':
        model = generate_random_mrsort_model_with_coalition_veto2(nc, ncat,
                                                                  seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    categories = model.categories_profiles.to_categories()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    meta = algo(nmodels, model.criteria, categories, pt_sorted, aa)
    metas_sorted = meta.sort_models()
    ca2_iter = [metas_sorted[0].ca] + [1] * (max_loops)

    t1 = time.time()

    for i in range(0, max_loops):
        model2, ca2_best = meta.optimize(max_oloops)
        ca2_iter[i + 1] = ca2_best
        if ca2_best == 1:
            break

    nloops = i + 1

    t_total = time.time() - t1

    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d-%d-%d" % (seed, na, nc, ncat,
                    na_gen, pcerrors, max_loops, nmodels, max_oloops))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Example #35
    def solve(self):
        return self.solve_function()

if __name__ == "__main__":
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.utils import print_pt_and_assignments
    from pymcda.ui.graphic import display_electre_tri_models

    seed = 12
    ncrit = 5
    ncat = 3

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(ncrit, ncat, seed)

    # Display model parameters
    print('Original model')
    print('==============')
    cids = sorted(model.criteria.keys())
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model.lbda)

    # Generate a set of alternatives
    a = generate_alternatives(100)
    pt = generate_random_performance_table(a, model.criteria)

    worst = pt.get_worst(model.criteria)
Example #37
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    gi_worst = AlternativePerformances('worst',
                                       {c.id: 0
                                        for c in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {c.id: 1
                                       for c in model.criteria})

    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(), gi_worst,
                   gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, ns, na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
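
The generalization step at the end of these benchmarks is always the same: draw a fresh set of alternatives, assign them with the original model and with the learned one, and compare. Reduced to its core, using the calls from the example above:

a_gen = generate_alternatives(na_gen)
pt_gen = generate_random_performance_table(a_gen, model.criteria)
ca_gen = compute_ca(model.pessimist(pt_gen), model2.get_assignments(pt_gen))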
Example #38
def test_meta_electre_tri_profiles(seed, na, nc, ncat, na_gen, pcerrors,
                                   max_loops):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Initialize the model with random profiles
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Run the algorithm
    meta = algo(model2, pt_sorted, aa_err)

    ca2_iter = [1] * (max_loops + 1)
    aa2 = model2.pessimist(pt)
    ca2 = compute_ca(aa_err, aa2)
    ca2_best = ca2
    best_bpt = model2.bpt.copy()
    ca2_iter[0] = ca2
    nloops = 0

    for k in range(max_loops):
        if ca2_best == 1:
            break

        meta.optimize()
        nloops += 1

        aa2 = meta.aa
        ca2 = compute_ca(aa_err, aa2)

        ca2_iter[k + 1] = ca2

        if ca2 > ca2_best:
            ca2_best = ca2
            best_bpt = model2.bpt.copy()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    model2.bpt = best_bpt
    aa2 = model2.pessimist(pt)

    ok = ok_errors = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

    total = len(a)
    ca_best = ok / total
    ca_best_errors = ok_errors / total
    ca2_best_errors = ok2_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d" % (seed, na, nc, ncat, na_gen,
                    pcerrors, max_loops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops

    # Output params
    t['ca_best'] = ca_best
    t['ca_best_errors'] = ca_best_errors
    t['ca2_best'] = ca2_best
    t['ca2_best_errors'] = ca2_best_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
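
The loop above keeps the best profile table seen so far and restores it at the end, so an iteration that degrades the accuracy cannot lose an earlier, better solution. The pattern, reduced to its core with the names used in the example:

ca2_best = compute_ca(aa_err, model2.pessimist(pt))
best_bpt = model2.bpt.copy()
for k in range(max_loops):
    if ca2_best == 1:
        break
    meta.optimize()
    ca2 = compute_ca(aa_err, meta.aa)
    if ca2 > ca2_best:
        ca2_best, best_bpt = ca2, model2.bpt.copy()
model2.bpt = best_bpt
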
        self.ca = best_ca
        return best_ca


if __name__ == "__main__":
    import time
    import random
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_criteria_weights
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativePerformances
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(7, 2, 5)
    worst = AlternativePerformances("worst", {c.id: 0 for c in model.criteria})
    best = AlternativePerformances("best", {c.id: 1 for c in model.criteria})

    # Add veto
    vpt = generate_random_profiles(model.profiles, model.criteria, None, 3,
                                   worst, model.bpt['b1'])
    model.veto = PerformanceTable([model.bpt['b1'] - vpt['b1']])
    model.veto_weights = generate_random_criteria_weights(model.criteria)
    model.veto_lbda = random.random()

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
Example #41
    def test001(self):
        model = generate_random_mrsort_model(5, 3)
        mxmcda = model.to_xmcda()
        model2 = MRSort().from_xmcda(mxmcda)
        self.assertEqual(model, model2)
Example #42
        coalitions = sorted(coalitions.items(), key=lambda kv: kv[1],
                            reverse=True)
        return coalitions

    def find_coalitions(self, n, k=0):
        coal_proba = self.compute_coalitions_probabilities(n)
        coalitions = []
        for c in coal_proba:
            if c[1] > k:
                coalitions.append(c[0])

        return coalitions

if __name__ == "__main__":
    m = generate_random_mrsort_model(5, 2, 123)

    winning, loosing = compute_winning_and_loosing_coalitions(m.cv, m.lbda)
    print("Number of winning coalitions: %d" % len(winning))
    print("List of coalitions:")
    display_coalitions(winning)

    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, m.criteria)

    aa = m.pessimist(pt)

    heur = HeurMRSortCoalitions(m.criteria, m.categories, pt, aa)
    aids = [aid[0] for aid in heur.sorted_extrem]
    display_electre_tri_models([m], [pt.get_worst(m.criteria)],
                               [pt.get_best(m.criteria)],
Example #43
        return obj

if __name__ == "__main__":
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.utils import print_pt_and_assignments
    from pymcda.ui.graphic import display_electre_tri_models

    seed = 12
    ncrit = 5
    ncat = 3

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(ncrit, ncat, seed)

    # Display model parameters
    print('Original model')
    print('==============')
    cids = sorted(model.criteria.keys())
    model.bpt.display(criterion_ids=cids)
    model.cv.display(criterion_ids=cids)
    print("lambda: %.7s" % model.lbda)

    # Generate a set of alternatives
    a = generate_alternatives(100)
    pt = generate_random_performance_table(a, model.criteria)

    worst = pt.get_worst(model.criteria)
if __name__ == "__main__":
    import time
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.types import CriteriaValues, CriterionValue
    from pymcda.types import CriteriaSet
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import compute_ca
    from pymcda.utils import compute_number_of_winning_coalitions
    from pymcda.pt_sorted import SortedPerformanceTable
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(5, 3, 123)
    cv1 = CriterionValue('c1', 0.2)
    cv2 = CriterionValue('c2', 0.2)
    cv3 = CriterionValue('c3', 0.2)
    cv4 = CriterionValue('c4', 0.2)
    cv5 = CriterionValue('c5', 0.2)
    cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
    cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.1)
    cv14 = CriterionValue(CriteriaSet(['c1', 'c4']), -0.1)
    cv15 = CriterionValue(CriteriaSet(['c1', 'c5']), 0.1)
    cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.1)
    cv24 = CriterionValue(CriteriaSet(['c2', 'c4']), -0.1)
    cv25 = CriterionValue(CriteriaSet(['c2', 'c5']), 0.1)
    cv34 = CriterionValue(CriteriaSet(['c3', 'c4']), 0.1)
    cv35 = CriterionValue(CriteriaSet(['c3', 'c5']), -0.1)
    cv45 = CriterionValue(CriteriaSet(['c4', 'c5']), -0.1)
Example #45
    def solve(self):
        return self.solve_function()

if __name__ == "__main__":
    import time
    import random
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.utils import add_errors_in_assignments
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativesAssignments, PerformanceTable

    # Original Electre Tri model
    model = generate_random_mrsort_model(10, 5, 890)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn * len(a)))
    aa_learn = AlternativesAssignments([aa[alt.id] for alt in a_learn])
    pt_learn = PerformanceTable([pt[alt.id] for alt in a_learn])
Example #46
                break

        self.model.bpt = best_bpt
        return best_ca

if __name__ == "__main__":
    import time
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativePerformances
    from pymcda.electre_tri import ElectreTri
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(10, 3, 123)
    worst = AlternativePerformances("worst",
                                     {c.id: 0 for c in model.criteria})
    best = AlternativePerformances("best",
                                    {c.id: 1 for c in model.criteria})

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    nmodels = 1
    nmeta = 20
    nloops = 50

    print('Original model')
Example #47
        return self.good / self.na

if __name__ == "__main__":
    from pymcda.generate import generate_random_mrsort_model
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import add_errors_in_assignments
    from pymcda.utils import compute_ca
    from pymcda.pt_sorted import SortedPerformanceTable
    from pymcda.types import PerformanceTable
    from pymcda.ui.graphic import display_electre_tri_models

    # Generate a random ELECTRE TRI BM model
    model = generate_random_mrsort_model(10, 3, 123)

    # Generate a set of alternatives
    a = generate_alternatives(1000)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    worst = pt.get_worst(model.criteria)
    best = pt.get_best(model.criteria)

    errors = 0.0
    nlearn = 1.0

    model2 = model.copy()
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)
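
Several examples share the same initialization idiom before profile learning: copy the original model, then overwrite its profile table with randomly generated profiles, e.g.:

model2 = model.copy()
bids = model2.categories_profiles.get_ordered_profiles()
model2.bpt = generate_random_profiles(bids, model2.criteria)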
Example #48
def test_lp_learning_weights(seed, na, nc, ncat, na_gen, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Run linear program
    t1 = time.time()
    if random_model_type == 'default':
        lp_weights = LpMRSortWeights(model2, pt, aa_err, 0.0001)
    else:
        lp_weights = LpMRSortMobius(model2, pt, aa_err, 0.0001)
    t2 = time.time()
    obj = lp_weights.solve()
    t3 = time.time()

    # Compute new assignment and classification accuracy
    aa2 = model2.pessimist(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, na_gen,
                    pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t