# The examples below are excerpts from pymcda-based test scripts and assume
# the usual imports (time, sys, multiprocessing, the pymcda generators,
# LpAVFSort, AVFSort, CriteriaValues, CriterionValue, compute_ca, ...).
def run_lp_avf(pipe, criteria, categories, worst, best, css, pt, aa):
    # Learn an additive value function model from the assignment examples
    lp = LpAVFSort(criteria, css, categories, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa, pt)

    # Rebuild the model, reassign the alternatives and compute the
    # classification accuracy (CA) against the input assignments
    model = AVFSort(criteria, cvs, cfs, catv)
    aa2 = model.get_assignments(pt)
    ca = compute_ca(aa, aa2)

    # Ship the result back to the parent process
    pipe.send([model, ca])
    pipe.close()
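
A minimal usage sketch for run_lp_avf (hypothetical driver; the inputs
criteria, categories, worst, best, css, pt and aa are assumed to be prepared
by the caller exactly as in the examples below):

from multiprocessing import Pipe, Process

# Run the learning step in a worker process and receive the result
parent_conn, child_conn = Pipe()
p = Process(target=run_lp_avf,
            args=(child_conn, criteria, categories, worst, best,
                  css, pt, aa))
p.start()
model, ca = parent_conn.recv()  # learned AVFSort model and its CA
p.join()
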
Example No. 2
    def one_test(self, seed, na, nc, ncat, ns):
        # Generate a random ground-truth model and assignment examples
        u = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
        a = generate_alternatives(na)
        pt = generate_random_performance_table(a, u.criteria)

        aa = u.get_assignments(pt)

        # One CriterionValue per criterion: the number of segments of its
        # piecewise-linear value function
        css = CriteriaValues([])
        for cf in u.cfs:
            cs = CriterionValue(cf.id, len(cf.function))
            css.append(cs)

        # Learn a model back from the noise-free examples...
        cat = u.cat_values.to_categories()
        lp = LpAVFSort(u.criteria, css, cat, pt.get_worst(u.criteria),
                       pt.get_best(u.criteria))
        obj, cvs, cfs, catv = lp.solve(aa, pt)

        # ...and check that it reproduces the original assignments exactly
        u2 = AVFSort(u.criteria, cvs, cfs, catv)
        aa2 = u2.get_assignments(pt)

        self.assertEqual(aa, aa2)
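
A sketch of the unittest wrapper this method presumably lives in (the class
name and parameter values are assumptions, not from the source):

import unittest

class TestLpAVFSort(unittest.TestCase):
    # one_test as defined above goes here
    def test_random_instances(self):
        for seed in range(3):
            self.one_test(seed, na=30, nc=3, ncat=2, ns=3)
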
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random UTADIS model and assignment examples
    model = generate_random_avfsort_model(nc, ncat, ns, ns)
    model.set_equal_weights()
    cat = model.cat_values.to_categories()

    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments_proba(aa_err, cat.keys(),
                                                 pcerrors / 100)
    na_err = len(aa_erroned)

    gi_worst = AlternativePerformances('worst',
                                       {crit.id: 0
                                        for crit in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {crit.id: 1
                                       for crit in model.criteria})

    css = CriteriaValues([])
    for cf in model.cfs:
        cs = CriterionValue(cf.id, len(cf.function))
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css, cat, gi_worst, gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = altered = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1
        elif alt.id not in aa_erroned:
            altered += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # CA of the learned model against a perturbed copy of the generalization
    # assignments (aa_gen2 was already computed above)
    aa_gen_err = aa_gen.copy()
    aa_gen_erroned = add_errors_in_assignments_proba(aa_gen_err, cat.keys(),
                                                     pcerrors / 100)
    ca_gen_err = compute_ca(aa_gen_err, aa_gen2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, ns, na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['na_err'] = na_err
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['altered'] = altered
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['ca_gen_err'] = ca_gen_err
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
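
A hedged driver for this benchmark, aggregating a few seeds (parameter values
are illustrative, and reading results back assumes test_result is dict-like,
as the item assignments above suggest):

# Hypothetical driver loop
results = [test_lp_avfsort(seed, na=100, nc=5, ncat=3, ns=3,
                           na_gen=1000, pcerrors=10)
           for seed in range(10)]
avg_ca_gen = sum(r['ca_gen'] for r in results) / len(results)
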
Example No. 5
def test_lp_avfsort(seed, na, nc, ncat, ns, na_gen, pcerrors):
    # Generate a random MR-Sort (ELECTRE TRI variant) model and
    # assignment examples
    model = generate_random_mrsort_model(nc, ncat, seed)

    # Generate a first set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)

    aa = model.pessimist(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, model.categories,
                                           pcerrors / 100)

    gi_worst = AlternativePerformances('worst',
                                       {c.id: 0
                                        for c in model.criteria})
    gi_best = AlternativePerformances('best',
                                      {c.id: 1
                                       for c in model.criteria})

    css = CriteriaValues([])
    for c in model.criteria:
        cs = CriterionValue(c.id, ns)
        css.append(cs)

    # Run linear program
    t1 = time.time()
    lp = LpAVFSort(model.criteria, css,
                   model.categories_profiles.to_categories(), gi_worst,
                   gi_best)
    t2 = time.time()
    obj, cv_l, cfs_l, catv_l = lp.solve(aa_err, pt)
    t3 = time.time()

    model2 = AVFSort(model.criteria, cv_l, cfs_l, catv_l)

    # Compute new assignment and classification accuracy
    aa2 = model2.get_assignments(pt)
    ok = ok_errors = ok2 = ok2_errors = 0
    for alt in a:
        if aa_err(alt.id) == aa2(alt.id):
            ok2 += 1
            if alt.id in aa_erroned:
                ok2_errors += 1

        if aa(alt.id) == aa2(alt.id):
            ok += 1
            if alt.id in aa_erroned:
                ok_errors += 1

    total = len(a)

    ca2 = ok2 / total
    ca2_errors = ok2_errors / total

    ca = ok / total
    ca_errors = ok_errors / total

    # Perform the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa = model.pessimist(pt_gen)
    aa2 = model2.get_assignments(pt_gen)
    ca_gen = compute_ca(aa, aa2)

    # Save all infos in test_result class
    t = test_result("%s-%d-%d-%d-%d-%d-%g" %
                    (seed, na, nc, ncat, ns, na_gen, pcerrors))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors

    # Output params
    t['obj'] = obj
    t['ca'] = ca
    t['ca_errors'] = ca_errors
    t['ca2'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['t_total'] = t3 - t1
    t['t_const'] = t2 - t1
    t['t_solve'] = t3 - t2

    return t
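
Unlike the UTADIS variant above, this example requests the same segment count
ns for every criterion. A minimal sketch of a per-criterion variant (the
ns_per_crit mapping is hypothetical):

# Hypothetical variant: a different number of segments per criterion
ns_per_crit = {c.id: ns for c in model.criteria}  # tune entries individually
css = CriteriaValues([CriterionValue(cid, n)
                      for cid, n in ns_per_crit.items()])
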
Example No. 7
def run_test(seed, data, pclearning, nseg):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)

    # Run the linear program
    t1 = time.time()

    css = CriteriaValues([])
    for c in data.c:
        cs = CriterionValue(c.id, nseg)
        css.append(cs)

    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(aa_learning, pt_learning)

    t_total = time.time() - t1

    model = AVFSort(data.c, cvs, cfs, catv)

    ordered_categories = model.categories

    # CA learning set
    aa_learning2 = model.get_assignments(pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = model.auc(aa_learning, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             ordered_categories)

    # Compute CA of test setting
    if len(aa_test) > 0:
        aa_test2 = model.get_assignments(pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = model.auc(aa_test, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             ordered_categories)
    else:
        ca_test = 0
        auc_test = 0
        diff_test = OrderedDict([((a, b), 0) for a in ordered_categories
                                             for b in ordered_categories])

    # Compute CA of whole set
    aa2 = model.get_assignments(data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = model.auc(data.aa, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2,
                                        ordered_categories)

    t = test_result("%s-%d-%d-%d" % (data.name, seed, nseg, pclearning))
    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name),
                  model, aa_learning, aa_test, pt_learning, pt_test)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['ns'] = nseg
    t['pclearning'] = pclearning
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['obj'] = obj
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
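
A hedged invocation sketch (the data object and the global directory used by
save_to_xmcda are assumed to be set up by the surrounding script; reading the
results assumes test_result is dict-like):

# Hypothetical call: learn on half the assignment examples, 3 segments
# per criterion
result = run_test(seed=0, data=data, pclearning=50, nseg=3)
print(result['ca_learning'], result['ca_test'], result['auc_all'])
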
Example No. 8
    # Excerpt: this block begins inside the first branch of an
    # "if algo == ...:" dispatch; only the later branches appear in full.
    for i in range(nloop):
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    mip = MipMRSort(model, data.pt, data.aa)
    mip.solve()
elif algo == 'lp_utadis':
    model_type = 'utadis'
    css = CriteriaValues(CriterionValue(c.id, nseg) for c in data.c)
    lp = LpAVFSort(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(data.aa, data.pt)
    model = AVFSort(data.c, cvs, cfs, catv)
elif algo == 'lp_utadis_compat':
    model_type = 'utadis'
    css = CriteriaValues(CriterionValue(c.id, nseg) for c in data.c)
    print("LpAVFSortCompat")
    lp = LpAVFSortCompat(data.c, css, data.cats, worst, best)
    obj, cvs, cfs, catv = lp.solve(data.aa, data.pt)
    model = AVFSort(data.c, cvs, cfs, catv)
else:
    print("Invalid algorithm!")
    sys.exit(1)

t_total = time.time() - t1
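
A hedged follow-up for the 'lp_utadis' branches, mirroring run_test above
(all names are taken from the snippet):

# Hypothetical follow-up: evaluate the learned AVFSort model on the whole
# performance table
aa2 = model.get_assignments(data.pt)
ca = compute_ca(data.aa, aa2)
print("algo: %s, obj: %g, CA: %g, t: %gs" % (algo, obj, ca, t_total))
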