Example no. 1
    def test001(self):
        random.seed(1)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(2)
        cps = generate_categories_profiles(cat)

        bp = AlternativePerformances('b1', {'c1': 0.5, 'c2': 0.5,
                                            'c3': 0.5, 'c4': 0.5})
        bpt = PerformanceTable([bp])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

        for aa in aas:
            w = 0
            perfs = pt[aa.id].performances
            for c, val in perfs.items():
                if val >= bp.performances[c]:
                    w += cv[c].value

            if aa.category_id == 'cat2':
                self.assertLess(w, lbda)
            else:
                self.assertGreaterEqual(w, lbda)
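The assertions above encode the MR-Sort majority rule: an alternative outranks the profile b1, and therefore lands in the upper category, exactly when the summed weights of the criteria on which it is at least as good as b1 reach the cut threshold lambda. A minimal pure-Python sketch of that check, using plain dicts instead of pymcda objects (the values below mirror the test but are otherwise hypothetical):

weights = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}
profile = {'c1': 0.5, 'c2': 0.5, 'c3': 0.5, 'c4': 0.5}
lbda = 0.5

def outranks(alt_perfs, profile, weights, lbda):
    # Sum the weights of the criteria on which the alternative is at
    # least as good as the profile, then compare to the threshold.
    w = sum(weights[c] for c, v in alt_perfs.items() if v >= profile[c])
    return w >= lbda

assert outranks({'c1': 0.6, 'c2': 0.7, 'c3': 0.1, 'c4': 0.2},
                profile, weights, lbda)       # 0.5 >= 0.5
assert not outranks({'c1': 0.6, 'c2': 0.1, 'c3': 0.1, 'c4': 0.2},
                    profile, weights, lbda)   # 0.25 < 0.5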
Example no. 2
    def one_test2(self, seed, ncrit, ncat, na):
        model = generate_random_mrsort_model(ncrit, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        model2.cvs = None

        lp_weights = LpMRSortWeights(model2, pt, aa)

        bids = model.categories_profiles.get_ordered_profiles()
        bpt = generate_random_profiles(bids, model.criteria)

        model.bpt = model2.bpt = bpt
        aa = model.pessimist(pt)

        lp_weights.aa_ori = aa
        lp_weights.update_linear_program()

        lp_weights.solve()

        aa2 = model2.pessimist(pt)

        self.assertEqual(aa, aa2)
    def test003(self):
        model = generate_random_mrsort_model(5, 2, seed=3)
        a = generate_alternatives(10)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        self.mcda_object_to_xmcda_file(model.criteria.to_xmcda(),
                                       "criteria.xml")
        categories = model.categories_profiles.to_categories()
        self.mcda_object_to_xmcda_file(categories.to_xmcda(), "categories.xml")
        self.mcda_object_to_xmcda_file(a.to_xmcda(), "alternatives.xml")
        self.mcda_object_to_xmcda_file(pt.to_xmcda(), "perfs_table.xml")
        self.mcda_object_to_xmcda_file(aa.to_xmcda(), "assign.xml")
        self.mcda_object_to_xmcda_file(model.bpt.to_xmcda(),
                                       "profiles_perfs.xml")

        mrsort_mip(self.indir, self.outdir)

        self.check_output_is_complete()

        model2 = self.get_output_model(model.criteria)
        aa2 = model2.get_assignments(pt)

        self.assertEqual(aa, aa2)
        self.assertEqual(model.bpt, model2.bpt)
Example no. 5
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1',
                                      {'c1': 0.75, 'c2': 0.75, 'c3': 0.75})
        bp2 = AlternativePerformances('b2',
                                      {'c1': 0.25, 'c2': 0.25, 'c3': 0.25})
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
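The CriterionValue entries indexed by a CriteriaSet above define a 2-additive capacity through its Mobius representation: the weight of a coalition is the sum of its singleton values plus the value of every pair it contains, and that weight is compared to lambda. A plain-Python sketch of this aggregation, with the same numbers as the test (the helper itself is hypothetical):

singletons = {'c1': 0.2, 'c2': 0.2, 'c3': 0.2}
pairs = {('c1', 'c2'): -0.1, ('c2', 'c3'): 0.2, ('c1', 'c3'): 0.3}

def coalition_weight(coalition):
    # Singleton Mobius values plus every pairwise interaction
    # fully contained in the coalition.
    w = sum(singletons[c] for c in coalition)
    w += sum(v for p, v in pairs.items() if set(p) <= set(coalition))
    return w

print(coalition_weight(['c1', 'c2']))        # 0.3, below lbda = 0.6
print(coalition_weight(['c1', 'c3']))        # 0.7, above lbda
print(coalition_weight(['c1', 'c2', 'c3']))  # 1.0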
Example no. 6
    def test003(self):
        model = generate_random_mrsort_model(5, 2, seed = 3)
        a = generate_alternatives(10)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        self.mcda_object_to_xmcda_file(model.criteria.to_xmcda(),
                                       "criteria.xml")
        categories = model.categories_profiles.to_categories()
        self.mcda_object_to_xmcda_file(categories.to_xmcda(),
                                       "categories.xml")
        self.mcda_object_to_xmcda_file(a.to_xmcda(), "alternatives.xml")
        self.mcda_object_to_xmcda_file(pt.to_xmcda(), "perfs_table.xml")
        self.mcda_object_to_xmcda_file(aa.to_xmcda(), "assign.xml")
        self.mcda_object_to_xmcda_file(model.bpt.to_xmcda(),
                                       "profiles_perfs.xml")

        mrsort_mip(self.indir, self.outdir)

        self.check_output_is_complete()

        model2 = self.get_output_model(model.criteria)
        aa2 = model2.get_assignments(pt)

        self.assertEqual(aa, aa2)
        self.assertEqual(model.bpt, model2.bpt)
    def generate_assignments(self):
        ncrit = len(self.model.criteria)
        ncat = len(self.model.categories)
        nalt = self.spinbox_nalt.value()

        self.a = generate_alternatives(nalt)
        self.pt = generate_random_performance_table(self.a,
                                                    self.model.criteria)
        self.aa = self.model.get_assignments(self.pt)
Example no. 8
def test_heur_mrsort_init_profiles(seed, na, nc, ncat, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()
    model3 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    # Compute assignments
    aa = model.pessimist(pt)

    # Initialize the second model with random generated profiles
    b = model.categories_profiles.get_ordered_profiles()
    model2.bpt = generate_random_profiles(b, model2.criteria)

    # Run the heuristic
    cats = model.categories_profiles.to_categories()
    pt_sorted = SortedPerformanceTable(pt)
    heur = HeurMRSortInitProfiles(model3, pt_sorted, aa)
    heur.solve()

    # Learn the weights and cut threshold
    cps = model.categories_profiles

    lp_weights = LpMRSortWeights(model2, pt, aa)
    lp_weights.solve()

    lp_weights = LpMRSortWeights(model3, pt, aa)
    lp_weights.solve()

    # Compute the classification accuracy
    aa2 = model2.pessimist(pt)
    aa3 = model3.pessimist(pt)

    ca2 = compute_ca(aa, aa2)
    ca3 = compute_ca(aa, aa3)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%g" % (seed, na, nc, ncat, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['pcerrors'] = pcerrors

    # Output params
    t['ca_rdom'] = ca2
    t['ca_heur'] = ca3

    return t
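This experiment contrasts two ways of initializing the profiles before the weights are learned: model2 starts from random profiles, model3 from profiles produced by HeurMRSortInitProfiles, and both then get their weights and cut threshold from LpMRSortWeights. The comparison metric, compute_ca, is in effect the share of alternatives that receive the same category in both assignment sets; a plain-dict sketch:

def classification_accuracy(aa_ref, aa_other):
    # Fraction of alternatives assigned to the same category in both sets.
    same = sum(1 for alt, cat in aa_ref.items() if aa_other.get(alt) == cat)
    return same / len(aa_ref)

print(classification_accuracy({'a1': 'cat1', 'a2': 'cat2', 'a3': 'cat2'},
                               {'a1': 'cat1', 'a2': 'cat1', 'a3': 'cat2'}))  # 2 of 3 match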
Example no. 10
    def generate_alt(self):
        ncrit = len(self.model.criteria)
        ncat = len(self.model.categories)
        nalt = self.spinbox_nalt.value()

        self.a = generate_alternatives(nalt)
        self.pt = generate_random_performance_table(self.a,
                                                    self.model.criteria,
                                                    worst = self.worst,
                                                    best = self.best)
        self.aa = self.model.pessimist(self.pt)
Example no. 11
    def generate_alt(self):
        ncrit = len(self.model.criteria)
        ncat = len(self.model.categories)
        nalt = self.spinbox_nalt.value()

        self.a = generate_alternatives(nalt)
        self.pt = generate_random_performance_table(self.a,
                                                    self.model.criteria,
                                                    worst=self.worst,
                                                    best=self.best)
        self.aa = self.model.pessimist(self.pt)
    def test003_auc_no_errors(self):
        random.seed(3)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 3)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)

        auc = model.auc(aa, pt)
        self.assertEqual(auc, 1)
Example no. 13
    def test002_auck_all_errors(self):
        random.seed(2)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 2)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)
        aa_err = add_errors_in_assignments(aa, model.categories, 1)

        auck = model.auck(aa_err, pt, 1)
        self.assertEqual(auck, 0)
    def test002(self):
        random.seed(2)
        c = generate_criteria(4)

        cv1 = CriterionValue('c1', 0.25)
        cv2 = CriterionValue('c2', 0.25)
        cv3 = CriterionValue('c3', 0.25)
        cv4 = CriterionValue('c4', 0.25)
        cv = CriteriaValues([cv1, cv2, cv3, cv4])

        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75,
            'c4': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25,
            'c4': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])
        lbda = 0.5

        etri = MRSort(c, cv, bpt, lbda, cps)

        a = generate_alternatives(1000)
        pt = generate_random_performance_table(a, c)
        aas = etri.pessimist(pt)

        for aa in aas:
            w1 = w2 = 0
            perfs = pt[aa.id].performances
            for c, val in perfs.items():
                if val >= bp1.performances[c]:
                    w1 += cv[c].value
                if val >= bp2.performances[c]:
                    w2 += cv[c].value

            if aa.category_id == 'cat3':
                self.assertLess(w1, lbda)
                self.assertLess(w2, lbda)
            elif aa.category_id == 'cat2':
                self.assertLess(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
            else:
                self.assertGreaterEqual(w1, lbda)
                self.assertGreaterEqual(w2, lbda)
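With two profiles the assertions trace the pessimistic assignment rule: the alternative is compared to the profiles from the best one (b1) downwards and placed in the category delimited from below by the first profile it outranks, or in the worst category if it outranks none. A small sketch of that descent, again with plain dicts and a hypothetical helper:

def pessimistic_assign(alt_perfs, profiles, categories, weights, lbda):
    # profiles: list of (profile id, performances), ordered best first;
    # categories: ordered best first, one longer than profiles.
    for (bid, bp), cat in zip(profiles, categories):
        w = sum(weights[c] for c, v in alt_perfs.items() if v >= bp[c])
        if w >= lbda:
            return cat
    return categories[-1]

weights = {'c1': 0.25, 'c2': 0.25, 'c3': 0.25, 'c4': 0.25}
profiles = [('b1', {c: 0.75 for c in weights}),
            ('b2', {c: 0.25 for c in weights})]
print(pessimistic_assign({'c1': 0.8, 'c2': 0.3, 'c3': 0.3, 'c4': 0.3},
                         profiles, ['cat1', 'cat2', 'cat3'],
                         weights, 0.5))  # cat2: fails b1, outranks b2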
    def test003(self):
        random.seed(0)
        crits = generate_criteria(5)
        model = generate_random_mrsort_model(len(crits), 2)

        alts = generate_alternatives(1000)
        pt = generate_random_performance_table(alts, crits)
        aa = model.get_assignments(pt)

        aa = model.get_assignments(pt)

        self.assertEqual(len(aa.get_alternatives_in_categories(['cat1'])), 65)
        self.assertEqual(len(aa.get_alternatives_in_categories(['cat2'])), 935)
Example no. 18
    def one_test(self, seed, na, nc, ncat, ca_expected):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        pt_sorted = SortedPerformanceTable(pt)
        heur = HeurMRSortInitProfiles(model, pt_sorted, aa)
        heur.solve()

        aa2 = model.pessimist(pt)

        ca = compute_ca(aa, aa2)

        self.assertEqual(ca, ca_expected)
Example no. 20
    def one_test(self, seed, ncrit, ncat, na):
        model = generate_random_mrsort_model(ncrit, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        model2.cvs = None

        lp_weights = LpMRSortWeights(model2, pt, aa)
        lp_weights.solve()

        aa2 = model2.pessimist(pt)

        self.assertEqual(aa, aa2)
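The helper above checks that LpMRSortWeights can recover weights and a cut threshold that reproduce the original assignments when the profiles are kept fixed. A standalone sketch of the same round trip, assuming the pymcda generators and compute_ca imported elsewhere in these examples are available:

model = generate_random_mrsort_model(5, 3, 123)   # 5 criteria, 3 categories
a = generate_alternatives(200)
pt = generate_random_performance_table(a, model.criteria)
aa = model.pessimist(pt)

model2 = model.copy()
model2.cvs = None                                 # forget weights and lambda

LpMRSortWeights(model2, pt, aa).solve()           # learn them back

print(compute_ca(aa, model2.pessimist(pt)))       # expected: 1.0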
Example no. 23
def count_veto(seed, na, nc, ncat, veto_param):
    # Generate a random ELECTRE TRI BM model
    if veto_type == COALITIONS_VETO:
        model = generate_random_mrsort_model_with_coalition_veto(nc, ncat,
                                            seed,
                                            veto_weights = True,
                                            veto_func = veto_function,
                                            veto_param = veto_param)
    elif veto_type == BINARY_VETO:
        model = generate_random_mrsort_model_with_binary_veto(nc, ncat,
                                            seed,
                                            veto_func = veto_function,
                                            veto_param = veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    return sum([model.count_veto_pessimist(ap) for ap in pt])
def count_veto(seed, na, nc, ncat, veto_param):
    # Generate a random ELECTRE TRI BM model
    if veto_type == COALITIONS_VETO:
        model = generate_random_mrsort_model_with_coalition_veto(
            nc,
            ncat,
            seed,
            veto_weights=True,
            veto_func=veto_function,
            veto_param=veto_param)
    elif veto_type == BINARY_VETO:
        model = generate_random_mrsort_model_with_binary_veto(
            nc, ncat, seed, veto_func=veto_function, veto_param=veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    return sum([model.count_veto_pessimist(ap) for ap in pt])
    def test002(self):
        c = generate_criteria(3)
        cat = generate_categories(3)
        cps = generate_categories_profiles(cat)

        bp1 = AlternativePerformances('b1', {
            'c1': 0.75,
            'c2': 0.75,
            'c3': 0.75
        })
        bp2 = AlternativePerformances('b2', {
            'c1': 0.25,
            'c2': 0.25,
            'c3': 0.25
        })
        bpt = PerformanceTable([bp1, bp2])

        cv1 = CriterionValue('c1', 0.2)
        cv2 = CriterionValue('c2', 0.2)
        cv3 = CriterionValue('c3', 0.2)
        cv12 = CriterionValue(CriteriaSet(['c1', 'c2']), -0.1)
        cv23 = CriterionValue(CriteriaSet(['c2', 'c3']), 0.2)
        cv13 = CriterionValue(CriteriaSet(['c1', 'c3']), 0.3)
        cvs = CriteriaValues([cv1, cv2, cv3, cv12, cv23, cv13])

        lbda = 0.6

        model = MRSort(c, cvs, bpt, lbda, cps)

        a = generate_alternatives(10000)
        pt = generate_random_performance_table(a, model.criteria)
        aa = model.get_assignments(pt)

        model2 = MRSort(c, None, bpt, None, cps)
        lp = LpMRSortMobius(model2, pt, aa)
        obj = lp.solve()

        aa2 = model2.get_assignments(pt)

        self.assertEqual(obj, 0)
        self.assertEqual(aa, aa2)
Example no. 26
def test_heur_mrsort_coalitions(seed, na, nc, ncat, pcexamples, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    # Compute assignments
    aa = model.pessimist(pt)

    # Run the heuristic
    heur = HeurMRSortCoalitions(model.criteria, model.categories, pt, aa)
    coal2 = heur.find_coalitions(int(na * pcexamples))

    # Compute the original winning coalitions
    winning, loosing = compute_winning_and_loosing_coalitions(
        model.cv, model.lbda)

    # Compare original and computed coalitions
    coal_ni = list((set(winning) ^ set(coal2)) & set(winning))
    coal_add = list((set(winning) ^ set(coal2)) & set(coal2))

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%g-%g" %
                    (seed, na, nc, ncat, pcexamples, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['pcexamples'] = pcexamples
    t['pcerrors'] = pcerrors

    # Output params
    t['ncoal'] = len(winning)
    t['ncoal_ni'] = len(coal_ni)
    t['ncoal_add'] = len(coal_add)

    return t
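The two set expressions above reduce to plain set differences: coal_ni collects the winning coalitions the heuristic failed to find, coal_add the coalitions it proposed that are not actually winning. A toy illustration:

winning = {('c1', 'c2'), ('c1', 'c3')}
found = {('c1', 'c2'), ('c2', 'c3')}

missed = (winning ^ found) & winning   # equivalent to winning - found
added = (winning ^ found) & found      # equivalent to found - winning
print(missed)  # {('c1', 'c3')}
print(added)   # {('c2', 'c3')}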
Example no. 27
    def one_test(self, seed, na, nc, ncat, ns):
        u = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, u.criteria)

        aa = u.get_assignments(pt)

        css = CriteriaValues([])
        for cf in u.cfs:
            cs = CriterionValue(cf.id, len(cf.function))
            css.append(cs)

        cat = u.cat_values.to_categories()
        lp = LpAVFSort(u.criteria, css, cat, pt.get_worst(u.criteria),
                       pt.get_best(u.criteria))
        obj, cvs, cfs, catv = lp.solve(aa, pt)

        u2 = AVFSort(u.criteria, cvs, cfs, catv)
        aa2 = u2.get_assignments(pt)

        self.assertEqual(aa, aa2)
    def one_test(self, seed, na, nc, ncat, pcerrors):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)
        aa_err = aa.copy()
        add_errors_in_assignments(aa_err, model.categories, pcerrors / 100)

        model2 = model.copy()
        model2.bpt = None
        model2.cv = None
        model2.lbda = None

        mip = MipMRSort(model2, pt, aa_err)
        obj = mip.solve()

        aa2 = model2.pessimist(pt)

        ca = compute_ca(aa, aa2)
        ca2 = compute_ca(aa_err, aa2)

        self.assertEqual(ca2, obj / len(a))
        self.assertLessEqual(pcerrors / 100, ca2)
    def one_test(self, seed, na, nc, ncat, max_loop, n):
        model = generate_random_mrsort_model(nc, ncat, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, model.criteria)

        aa = model.pessimist(pt)

        model2 = model.copy()
        bids = model2.categories_profiles.get_ordered_profiles()
        model2.bpt = generate_random_profiles(bids, model.criteria)

        pt_sorted = SortedPerformanceTable(pt)

        meta = MetaMRSortProfiles4(model2, pt_sorted, aa)

        for i in range(1, max_loop + 1):
            ca = meta.optimize()
            if ca == 1:
                break

        aa2 = model2.pessimist(pt)

        self.assertEqual(i, n)
        self.assertEqual(aa, aa2)
Example no. 33
        self.ap_items[ap] = (item, print_values, color, link)


if __name__ == "__main__":
    import random
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_random_criteria_values
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_random_profiles
    from pymcda.generate import generate_categories_profiles
    from pymcda.electre_tri import ElectreTri
    from pymcda.types import AlternativePerformances

    a = generate_alternatives(2)
    c = generate_criteria(5)
    cv = generate_random_criteria_values(c, 1234)
    cv.normalize_sum_to_unity()

    worst = AlternativePerformances("worst", {crit.id: 0 for crit in c})
    best = AlternativePerformances("best", {crit.id: 1 for crit in c})
    pt = generate_random_performance_table(a, c)

    cat = generate_categories(3)
    cps = generate_categories_profiles(cat)
    b = cps.get_ordered_profiles()
    bpt = generate_random_profiles(b, c)
    bpt['b2'].performances['c3'] = 0.2

    lbda = random.uniform(0.5, 1)
Example no. 34
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels,
                       model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted,
                       aa_learning,
                       seed=seed * 100)
    #lp_weights = lp_weights,
    #heur_profiles = heur_profiles,
    #lp_veto_weights = lp_veto_weights,
    #heur_veto_profiles = heur_veto_profiles,

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" %
                    (data.name, seed, nloop, nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, aa_learning,
                  aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Example no. 35
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_categories_profiles
    from pymcda.utils import add_errors_in_assignments
    from pymcda.utils import print_pt_and_assignments
    from pymcda.utils import compute_winning_and_loosing_coalitions
    from pymcda.types import AlternativesAssignments, PerformanceTable
    from pymcda.types import AlternativePerformances
    from pymcda.types import CriteriaValues, CriterionValue
    from pymcda.electre_tri import MRSort

    # MR-Sort model
    model = generate_random_mrsort_model_with_coalition_veto(5, 3, 4)

    # Generate random alternatives
    a = generate_alternatives(15000)
    pt = generate_random_performance_table(a, model.criteria)

    errors = 0.0
    delta = 0.0001
    nlearn = 1.00

    # Assign the alternative with the model
    aa = model.pessimist(pt)

    a_learn = random.sample(a, int(nlearn*len(a)))
    aa_learn = AlternativesAssignments([ aa[alt.id] for alt in a_learn ])
    pt_learn = PerformanceTable([ pt[alt.id] for alt in a_learn ])

    aa_err = aa_learn.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories, errors)
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(
            nc, ncat, seed, veto_func=veto_func, veto_param=veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(
            nc,
            ncat,
            seed,
            veto_weights=indep_veto_weights,
            veto_func=veto_func,
            veto_param=veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = next(iter(w))  # first criterion id (works on Python 2 and 3)
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues(
            [CriterionValue(c.id, w[c.id]) for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" %
                    (seed, na, nc, ncat, na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, a,
                  a_gen, pt, pt_gen, aa, aa2, aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
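The accounting loop in this function separates two notions of accuracy: ca_best compares the learned model against the original model's assignments, while ok_errors counts how many of the deliberately corrupted examples the learned model still places in their true category. A toy illustration with one corrupted alternative:

aa_true = {'a1': 'cat1', 'a2': 'cat2', 'a3': 'cat1'}      # original model
aa_err = dict(aa_true, a3='cat2')                         # a3 corrupted
aa_learned = {'a1': 'cat1', 'a2': 'cat2', 'a3': 'cat1'}   # learned model

ok = sum(aa_true[x] == aa_learned[x] for x in aa_true)
ok_errors = sum(aa_true[x] == aa_learned[x] for x in aa_true
                if aa_true[x] != aa_err[x])
print(ok / len(aa_true))  # 1.0: every true category is recovered
print(ok_errors)          # 1: the corrupted example is still assigned correctly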
Example no. 37
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
Example no. 38
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(),
                          pt_sorted, aa_learning,
                          heur_init_profiles,
                          lp_weights,
                          heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

    t_total = time.time() - t1

    aa_learning2 = compute_assignments_majority(meta.models, pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = compute_auc_majority(meta.models, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = compute_assignments_majority(meta.models, pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = compute_auc_majority(meta.models, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = compute_assignments_majority(meta.models, data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = compute_auc_majority(meta.models, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  aa_learning, aa_test, pt_learning, pt_test, *meta.models)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
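Unlike Example no. 34, this variant evaluates the whole population of models kept by the metaheuristic: compute_assignments_majority presumably assigns each alternative to the category chosen by the largest number of models. A plain-dict sketch of such a vote:

from collections import Counter

def majority_assignment(assignments_per_model, alt):
    # Category picked by the largest number of models for one alternative.
    votes = Counter(aa[alt] for aa in assignments_per_model)
    return votes.most_common(1)[0][0]

models_aa = [{'a1': 'cat1'}, {'a1': 'cat2'}, {'a1': 'cat1'}]
print(majority_assignment(models_aa, 'a1'))  # cat1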
Example no. 39
def test_meta_electre_tri_global(seed, na, nc, ncat, ns, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random UTADIS model
    model = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    cats = model.cat_values.get_ordered_categories()

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, cats, pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Perform at max oloops on the set of metas
    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.cat_values.to_categories(), pt_sorted, aa_err)
    ca2_iter = [meta.metas[0].ca] + [1] * (max_loops)

    nloops = 0
    for i in range(0, max_loops):
        model2, ca2 = meta.optimize(max_oloops)

        ca2_iter[i + 1] = ca2
        nloops += 1

        if ca2 == 1:
            break

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all info in the test_result class
    t = test_result(
        "%s-%d-%d-%d-%d-%g-%d-%d-%d" %
        (seed, na, nc, ncat, na_gen, pcerrors, max_loops, nmodels, max_oloops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Example no. 40
def test_mip_mrsort_vc(seed, na, nc, ncat, na_gen, veto_param, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if vetot == 'binary':
        model = generate_random_mrsort_model_with_binary_veto(nc, ncat,
                            seed,
                            veto_func = veto_func,
                            veto_param = veto_param)
    elif vetot == 'coalition':
        model = generate_random_mrsort_model_with_coalition_veto(nc, ncat,
                            seed,
                            veto_weights = indep_veto_weights,
                            veto_func = veto_func,
                            veto_param = veto_param)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)
    nv_m1_learning = sum([model.count_veto_pessimist(ap) for ap in pt])

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)
    if algo == MipMRSortVC and vetot == 'binary':
        w = {c.id: 1 / len(model.criteria) for c in model.criteria}
        w1 = next(iter(w))  # first criterion id (works on Python 2 and 3)
        w[w1] += 1 - sum(w.values())
        model2.veto_weights = CriteriaValues([CriterionValue(c.id,
                                                             w[c.id])
                                              for c in model.criteria])
        model2.veto_lbda = min(w.values())

    if algo == MipMRSortVC:
        mip = MipMRSortVC(model2, pt, aa, indep_veto_weights)
    else:
        mip = MipMRSort(model2, pt, aa)

    mip.solve()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)
    nv_m2_learning = sum([model2.count_veto_pessimist(ap) for ap in pt])
    cmatrix_learning = compute_confusion_matrix(aa, aa2, model.categories)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    nv_m1_gen = sum([model.count_veto_pessimist(ap) for ap in pt_gen])
    nv_m2_gen = sum([model2.count_veto_pessimist(ap) for ap in pt_gen])
    if len(aa_gen) > 0:
        cmatrix_gen = compute_confusion_matrix(aa_gen, aa_gen2,
                                               model.categories)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%s-%d" % (seed, na, nc, ncat,
                    na_gen, veto_param, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    a.id, pt.id = 'learning_set', 'learning_set'
    aa.id, aa2.id = 'learning_set_m1', 'learning_set_m2'
    a_gen.id, pt_gen.id = 'test_set', 'test_set'
    aa_gen.id, aa_gen2.id = 'test_set_m1', 'test_set_m2'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, a, a_gen, pt, pt_gen, aa, aa2,
                  aa_gen, aa_gen2)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['veto_param'] = veto_param
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['nv_m1_learning'] = nv_m1_learning
    t['nv_m2_learning'] = nv_m2_learning
    t['nv_m1_gen'] = nv_m1_gen
    t['nv_m2_gen'] = nv_m2_gen
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    for k, v in cmatrix_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in cmatrix_gen.items():
        t['test_%s_%s' % (k[0], k[1])] = v

    return t
Example no. 41
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()

    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    gi_worst = AlternativePerformances('worst',
                                       {crit.id: 0
                                        for crit in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {crit.id: 1
                                       for crit in model.criteria})

    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     cat.keys(),
                                                     pcerrors / 100)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                    na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
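LpAVFSort fits a UTADIS-style additive model: each criterion gets a piecewise-linear marginal value function whose number of segments is given in css, and categories are delimited by thresholds on the summed value. A generic sketch of evaluating one such marginal function (the breakpoints below are hypothetical, not taken from a learned model):

def piecewise_linear(x, points):
    # points: sorted (abscissa, value) breakpoints covering the range of x.
    for (x1, y1), (x2, y2) in zip(points, points[1:]):
        if x1 <= x <= x2:
            return y1 + (y2 - y1) * (x - x1) / (x2 - x1)
    raise ValueError("x outside the covered range")

print(piecewise_linear(0.25, [(0.0, 0.0), (0.5, 0.4), (1.0, 1.0)]))  # 0.2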
Example no. 42
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    gi_worst = AlternativePerformances('worst', {c.id: 0
                                                  for c in model.criteria})
    gi_best = AlternativePerformances('best', {c.id: 1
                                                for c in model.criteria})

    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(),
                   gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all info in the test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, ns,
                    na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
Example no. 43
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize ELECTRE-TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the linear program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # CA learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
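For reference, a small illustration (not part of the original script; the
'cat1'/'cat2' ids are simply the ones produced by the generators used in these
examples) of how the (category, category) keys of a confusion matrix are
flattened into test_result columns:

from collections import OrderedDict

# Hypothetical 2-category confusion matrix
diff = OrderedDict([(('cat1', 'cat1'), 40), (('cat1', 'cat2'), 3),
                    (('cat2', 'cat1'), 5), (('cat2', 'cat2'), 52)])
cols = {'learn_%s_%s' % (k[0], k[1]): v for k, v in diff.items()}
# cols == {'learn_cat1_cat1': 40, 'learn_cat1_cat2': 3,
#          'learn_cat2_cat1': 5, 'learn_cat2_cat2': 52}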
Exemplo n.º 44
0
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Run the MIP
    t1 = time.time()

    model2 = MRSort(model.criteria, None, None, None,
                    model.categories_profiles, None, None, None)

    mip = MipMRSort(model2, pt, aa_err)
    obj = mip.solve()
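    # The MIP objective counts the examples assigned exactly as in the
    # learning data, so obj / na below is the classification accuracy on the
    # (noisy) learning set.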
    ca2_best = obj / na

    aa2 = model2.get_assignments(pt)

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" % (seed, na, nc, ncat,
                    na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t_total

    return t
Exemplo n.º 45
0
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)
    global aaa
    global allm
    global fct_ca
    global LOO

    # Separate learning data and test data
    if LOO:
        pt_learning, pt_test = data.pt.split_LOO(seed)
    else:
        pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    #import pdb; pdb.set_trace()

    # Initialize a random model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)
    # if LOO:
    #     print(data.c, cvs, bpt, lbda, cat_profiles)
    #     print(model.categories_profiles.to_categories())
    #     print(model.categories)        
    #     import pdb; pdb.set_trace()

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm
    meta = meta_mrsort(nmodels, model.criteria,
                       model.categories_profiles.to_categories(),
                       pt_sorted, aa_learning,
                       seed = seed * 100)
    # if LOO:
    #     print(nmodels, model.criteria,
    #                    model.categories_profiles.to_categories(),
    #                    pt_sorted, aa_learning)
        #import pdb; pdb.set_trace()
#lp_weights = lp_weights,
#heur_profiles = heur_profiles,
#lp_veto_weights = lp_veto_weights,
#heur_veto_profiles = heur_veto_profiles,
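    # Outer loop of the metaheuristic: up to nloop calls to optimize(), each
    # running nmeta inner iterations; stop early once the learning set is
    # perfectly restored (CA = 1).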

    for i in range(0, nloop):
        model, ca_learning, all_models = meta.optimize(nmeta, fct_ca)
        #import pdb; pdb.set_trace()

        if ca_learning == 1:
            break

    t_total = time.time() - t1

    aa_learning2 = model.pessimist(pt_learning)
    
    ca_learning = compute_ca(aa_learning, aa_learning2)
    ca_learning_good = compute_ca_good(aa_learning, aa_learning2)
    #import pdb; pdb.set_trace()
    auc_learning = model.auc(aa_learning, pt_learning)

    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                           model.categories)

    # Compute CA on the test set
    
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        ca_test_good = compute_ca_good(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                           model.categories)
        #import pdb; pdb.set_trace()

    else:
        ca_test = 0
        ca_test_good = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    ca_good = compute_ca_good(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" % (data.name, seed, nloop, nmodels,
                                           nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca    
    t['ca_learning_good'] = ca_learning_good
    t['ca_test_good'] = ca_test_good
    t['ca_all_good'] = ca_good
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    # import pdb; pdb.set_trace()
    aaa[seed] = dict()
    aaa[seed]['id'] = seed
    aaa[seed]['learning_asgmt_id'] = [i.id for i in aa_learning]
    aaa[seed]['learning_asgmt'] = [i.category_id for i in aa_learning]
    aaa[seed]['learning_asgmt2'] = [i.category_id for i in aa_learning2]
    aaa[seed]['test_asgmt_id'] = [i.id for i in aa_test]
    aaa[seed]['test_asgmt'] = [i.category_id for i in aa_test]
    # aa_test2 only exists when the test set is non-empty
    aaa[seed]['test_asgmt2'] = [i.category_id for i in aa_test2] \
                               if len(aa_test) > 0 else []
    aaa[seed]['criteria'] = [i for i, j in model.criteria.items()]
    aaa[seed]['criteria_weights'] = [str(i.value) for i in model.cv.values()]
    aaa[seed]['profiles_values'] = [str(model.bpt['b1'].performances[i])
                                    for i, j in model.criteria.items()]
    aaa[seed]['lambda'] = model.lbda
    #[model.bpt['b1'].performances[i] for i,j in model.criteria.items()]


    allm[seed]=dict()
    allm[seed]['id'] = seed
    current_model = 0
    allm[seed]['mresults'] = dict()
    for all_model in list(all_models)[1:]:
        current_model += 1  # skip the first model, which was already treated
        allm[seed]['mresults'][current_model] = ["", ""]
        aa_learning2_allm = all_model.model.pessimist(pt_learning)
        ca_learning_allm = compute_ca(aa_learning, aa_learning2_allm)
        ca_learning_good_allm = compute_ca_good(aa_learning, aa_learning2_allm)
        auc_learning_allm = all_model.model.auc(aa_learning, pt_learning)
        # diff_learning_allm = compute_confusion_matrix(aa_learning, aa_learning2_allm,
        #                                        all_model.model.categories)
        # Compute CA on the test set
        if len(aa_test) > 0:
            aa_test2_allm = all_model.model.pessimist(pt_test)
            ca_test_allm = compute_ca(aa_test, aa_test2_allm)
            ca_test_good_allm = compute_ca_good(aa_test, aa_test2_allm)
            auc_test_allm = all_model.model.auc(aa_test, pt_test)
            # diff_test_allm = compute_confusion_matrix(aa_test, aa_test2_allm,
            #                                    all_model.categories)
        else:
            ca_test_allm = 0
            ca_test_good_allm = 0
            auc_test_allm = 0
            ncat_allm = len(data.cats)
            # diff_test_allm = OrderedDict([((a, b), 0) for a in all_model.categories \
            #                                      for b in all_model.model.categories])
        # Compute CA of whole set
        aa2_allm = all_model.model.pessimist(data.pt)
        ca_allm = compute_ca(data.aa, aa2_allm)
        ca_good_allm = compute_ca_good(data.aa, aa2_allm)
        auc_allm = all_model.model.auc(data.aa, data.pt)
        #diff_all_allm = compute_confusion_matrix(data.aa, aa2_allm, all_model.model.categories) 
        allm[seed]['mresults'][current_model][0] = \
            'na_learning,na_test,ca_learning,ca_test,ca_all,' \
            'ca_learning_good,ca_test_good,ca_all_good,' \
            'auc_learning,auc_test,auc_all'
        allm[seed]['mresults'][current_model][1] = ','.join(
            str(x) for x in [len(aa_learning), len(aa_test), ca_learning_allm,
                             ca_test_allm, ca_allm, ca_learning_good_allm,
                             ca_test_good_allm, ca_good_allm,
                             auc_learning_allm, auc_test_allm, auc_allm])
        #allm[seed]['mresults'][current_model][1] =
        #all_model.model.bpt['b1'].performances
        #all_model.model.cv.values()
        #import pdb; pdb.set_trace()

        # allm[seed][current_model]['na_learning'] = len(aa_learning)
        # allm[seed][current_model]['na_test'] = len(na_test)
        # allm[seed][current_model]['ca_learning'] = ca_learning_allm
        # allm[seed][current_model]['ca_test'] = ca_test_allm
        # allm[seed][current_model]['ca_all'] = ca_allm     
        # allm[seed][current_model]['ca_learning_good'] = ca_learning_good_allm
        # allm[seed][current_model]['ca_test_good'] = ca_test_good_allm
        # allm[seed][current_model]['ca_all_good'] = ca_good_allm
        # allm[seed][current_model]['auc_learning'] = auc_learning_allm
        # allm[seed][current_model]['auc_test'] = auc_test_allm
        # allm[seed][current_model]['auc_all'] = auc_allm


    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Exemplo n.º 46
0
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_random_criteria_weights
    from pymcda.generate import generate_categories
    from pymcda.generate import generate_categories_profiles
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_random_performance_table
    from pymcda.generate import generate_random_profiles
    from pymcda.generate import generate_random_plinear_preference_function

    random.seed(123)

    criteria = generate_criteria(5)
    crit_weights = generate_random_criteria_weights(criteria)
    categories = generate_categories(5)
    cat_profiles = generate_categories_profiles(categories)

    a = generate_alternatives(100)
    pt = generate_random_performance_table(a, criteria)
    ap_best = pt.get_best(criteria)
    ap_worst = pt.get_worst(criteria)

    b = cat_profiles.get_ordered_profiles()
    bpt = generate_random_profiles(b, criteria)
    pf = generate_random_plinear_preference_function(criteria, ap_worst,
                                                     ap_best)
    print(crit_weights)
    print(categories)
    print(cat_profiles)
    print(bpt)
    print(pf)

    model = flowsort(criteria, crit_weights, cat_profiles, bpt, pf)
Exemplo n.º 47
0
def test_meta_electre_tri_global(seed, na, nc, ncat, ns, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random UTADIS model
    model = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    cats = model.cat_values.get_ordered_categories()

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, cats,
                                           pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Perform at most max_oloops outer loops on the set of metaheuristics
    meta = MetaMRSortPop3(nmodels, model.criteria,
                              model.cat_values.to_categories(),
                              pt_sorted, aa_err)
    ca2_iter = [meta.metas[0].ca] + [1] * (max_loops)
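    # ca2_iter[0] holds the CA of the best initial model; each outer loop
    # below then records the CA reached so far (remaining slots are pre-filled
    # with 1).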

    nloops = 0
    for i in range(0, max_loops):
        model2, ca2 = meta.optimize(max_oloops)

        ca2_iter[i + 1] = ca2
        nloops += 1

        if ca2 == 1:
            break

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d-%d-%d" % (seed, na, nc, ncat,
                    na_gen, pcerrors, max_loops, nmodels, max_oloops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Exemplo n.º 48
0
    def get_worst_ap(self):
        a = AlternativePerformances('worst', {})
        for cid in self.cids:
            a.performances[cid] = self.sorted_values[cid][0]
        return a

    def get_best_ap(self):
        a = AlternativePerformances('best', {})
        for cid in self.cids:
            a.performances[cid] = self.sorted_values[cid][-1]
        return a


if __name__ == "__main__":
    from pymcda.generate import generate_alternatives
    from pymcda.generate import generate_criteria
    from pymcda.generate import generate_random_performance_table
    import time

    a = generate_alternatives(500)
    c = generate_criteria(5)
    pt = generate_random_performance_table(a, c, 1234)

    sorted_pt = SortedPerformanceTable(pt)

    t1 = time.time()
#    print(len(sorted_pt.get_below('c1', 0.1)[0]))
#    print(len(sorted_pt.get_above('c1', 0.1)[0]))
#    print(len(sorted_pt.get_middle('c1', 0, 1)[0]))
#    print(sorted_pt.get_below_len('c1', 0.1))
#    print(sorted_pt.get_above_len('c1', 0.1))
#    print(sorted_pt.get_middle_len('c1', 0, 1))
#    print(sorted_pt.get_middle_len('c1', 1, 0))
#    print(len(sorted_pt.get_all('c1')))
#    print(sorted_pt.get_worst_ap())
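    # A small hedged addition, not in the original file: close the timing
    # block above using only the two accessor methods shown in this class.
    print(sorted_pt.get_worst_ap())
    print(sorted_pt.get_best_ap())
    print("elapsed: %f s" % (time.time() - t1))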
Exemplo n.º 49
0
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    gi_worst = AlternativePerformances('worst',
                                       {c.id: 0
                                        for c in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {c.id: 1
                                       for c in model.criteria})

    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(), gi_worst,
                   gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, ns, na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
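A minimal driver sketch, not part of the original file, showing how
test_lp_avfsort might be swept over a few seeds; every parameter value below
is an illustrative assumption:

# Hypothetical sweep over seeds; collect the returned test_result objects.
results = []
for seed in range(1, 6):
    results.append(test_lp_avfsort(seed, na=1000, nc=10, ncat=3, ns=3,
                                    na_gen=10000, pcerrors=10))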
Exemplo n.º 50
0
def run_test(seed, data, pclearning):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize ELECTRE-TRI BM model
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the linear program
    t1 = time.time()

    mip = mip_mrsort(model, pt_learning, aa_learning)
    obj = mip.solve()

    t_total = time.time() - t1

    # CA learning set
    aa_learning2 = model.pessimist(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = model.pessimist(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)
    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA of whole set
    aa2 = model.pessimist(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d" % (data.name, seed, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, aa_learning,
                  aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Exemplo n.º 51
0
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'mrsort':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'ncs':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)
    elif random_model_type == 'mrsortcv':
        model = generate_random_mrsort_model_with_coalition_veto2(nc, ncat,
                                                                  seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    categories = model.categories_profiles.to_categories()
    aa_erroned = add_errors_in_assignments_proba(aa_err,
                                                 model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    meta = algo(nmodels, model.criteria, categories, pt_sorted, aa)
    metas_sorted = meta.sort_models()
    ca2_iter = [metas_sorted[0].ca] + [1] * (max_loops)
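    # metas_sorted[0] is taken as the best initial model; ca2_iter records the
    # best CA after each outer loop (remaining slots are pre-filled with 1).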

    t1 = time.time()

    for i in range(0, max_loops):
        model2, ca2_best = meta.optimize(max_oloops)
        ca2_iter[i + 1] = ca2_best
        if ca2_best == 1:
            break

    nloops = i + 1

    t_total = time.time() - t1

    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d-%d-%d" % (seed, na, nc, ncat,
                    na_gen, pcerrors, max_loops, nmodels, max_oloops))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Exemplo n.º 52
0
def test_meta_electre_tri_profiles(seed, na, nc, ncat, na_gen, pcerrors,
                                   max_loops):
    # Generate an ELECTRE TRI model and assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)
    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Initialize the model with random profiles
    model2.bpt = generate_random_profiles(model.profiles, model.criteria)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Run the algorithm
    meta = algo(model2, pt_sorted, aa_err)

    ca2_iter = [1] * (max_loops + 1)
    aa2 = model2.pessimist(pt)
    ca2 = compute_ca(aa_err, aa2)
    ca2_best = ca2
    best_bpt = model2.bpt.copy()
    ca2_iter[0] = ca2
    nloops = 0
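    # Improve the profiles for up to max_loops iterations, keeping the profile
    # table (best_bpt) that achieves the best CA on the noisy learning set.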

    for k in range(max_loops):
        if ca2_best == 1:
            break

        meta.optimize()
        nloops += 1

        aa2 = meta.aa
        ca2 = compute_ca(aa_err, aa2)

        ca2_iter[k + 1] = ca2

        if ca2 > ca2_best:
            ca2_best = ca2
            best_bpt = model2.bpt.copy()

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    model2.bpt = best_bpt
    aa2 = model2.pessimist(pt)

    ok = ok_errors = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

    total = len(a)
    ca_best = ok / total
    ca_best_errors = ok_errors / total
    ca2_best_errors = ok2_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g-%d" % (seed, na, nc, ncat, na_gen,
                    pcerrors, max_loops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops

    # Output params
    t['ca_best'] = ca_best
    t['ca_best_errors'] = ca_best_errors
    t['ca2_best'] = ca2_best
    t['ca2_best_errors'] = ca2_best_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Exemplo n.º 53
0
def test_lp_learning_weights(seed, na, nc, ncat, na_gen, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Run linear program
    t1 = time.time()
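    # The last constructor argument (0.0001) is presumably a small separation
    # threshold used by the weight-learning linear program; it is not
    # documented in this snippet.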
    if random_model_type == 'default':
        lp_weights = LpMRSortWeights(model2, pt, aa_err, 0.0001)
    else:
        lp_weights = LpMRSortMobius(model2, pt, aa_err, 0.0001)
    t2 = time.time()
    obj = lp_weights.solve()
    t3 = time.time()

    # Compute new assignment and classification accuracy
    aa2 = model2.pessimist(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" % (seed, na, nc, ncat, na_gen,
                    pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, model2, pt, pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
Exemplo n.º 54
0
def test_meta_electre_tri_global(seed, na, nc, ncat, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random ELECTRE TRI BM model
    if random_model_type == 'mrsort':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'ncs':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)
    elif random_model_type == 'mrsortcv':
        model = generate_random_mrsort_model_with_coalition_veto2(
            nc, ncat, seed)

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    categories = model.categories_profiles.to_categories()
    aa_erroned = add_errors_in_assignments_proba(aa_err, model.categories,
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    meta = algo(nmodels, model.criteria, categories, pt_sorted, aa)
    metas_sorted = meta.sort_models()
    ca2_iter = [metas_sorted[0].ca] + [1] * (max_loops)

    t1 = time.time()

    for i in range(0, max_loops):
        model2, ca2_best = meta.optimize(max_oloops)
        ca2_iter[i + 1] = ca2_best
        if ca2_best == 1:
            break

    nloops = i + 1

    t_total = time.time() - t1

    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.pessimist(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err,
                                                     model.categories,
                                                     pcerrors / 100)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result(
        "%s-%d-%d-%d-%d-%g-%d-%d-%d" %
        (seed, na, nc, ncat, na_gen, pcerrors, max_loops, nmodels, max_oloops))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['na_err'] = na_err
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2_best
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
Exemplo n.º 55
0
    from pymcda.utils import add_errors_in_assignments
    from pymcda.utils import print_pt_and_assignments

    # Generate an avfsort model
    c = generate_criteria(2)
    cv = generate_random_criteria_values(c, seed = 6)
    cv.normalize_sum_to_unity()
    cat = generate_categories(3)

    cfs = generate_random_criteria_functions(c, nseg_min = 3, nseg_max = 3)
    catv = generate_random_categories_values(cat)

    u = AVFSort(c, cv, cfs, catv)

    # Generate random alternative and compute assignments
    a = generate_alternatives(10)
    pt = generate_random_performance_table(a, c)
    aa = u.get_assignments(pt)
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, cat.keys(), 0.0)

    print('==============')
    print('Original model')
    print('==============')
    print("Number of alternatives: %d" % len(a))
    print('Criteria weights:')
    cv.display()
    print('Criteria functions:')
    cfs.display()
    print('Categories values:')
    catv.display()
Exemplo n.º 56
0
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()

    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    gi_worst = AlternativePerformances('worst',
                                       {crit.id: 0
                                        for crit in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {crit.id: 1
                                       for crit in model.criteria})

    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)
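    # The LP is thus asked for as many piecewise-linear segments per criterion
    # as the original value functions have (len(cf.function)).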

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err, cat.keys(),
                                                     pcerrors / 100)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, ns, na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
Exemplo n.º 57
0
def test_lp_learning_weights(seed, na, nc, ncat, na_gen, pcerrors):
    # Generate an ELECTRE TRI model and assignment examples
    if random_model_type == 'default':
        model = generate_random_mrsort_model(nc, ncat, seed)
    elif random_model_type == 'choquet':
        model = generate_random_mrsort_choquet_model(nc, ncat, 2, seed)

    model2 = model.copy()

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    # Run linear program
    t1 = time.time()
    if random_model_type == 'default':
        lp_weights = LpMRSortWeights(model2, pt, aa_err, 0.0001)
    else:
        lp_weights = LpMRSortMobius(model2, pt, aa_err, 0.0001)
    t2 = time.time()
    obj = lp_weights.solve()
    t3 = time.time()

    # Compute new assignment and classification accuracy
    aa2 = model2.pessimist(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, na_gen, pcerrors))

    model.id = 'initial'
    model2.id = 'learned'
    pt.id, pt_gen.id = 'learning_set', 'test_set'
    aa.id = 'aa'
    aa_err.id = 'aa_err'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), model, model2, pt,
                  pt_gen, aa, aa_err)

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t