def run(self):
        pt_sorted = SortedPerformanceTable(self.pt)

        meta = MetaMRSortPop3(self.nmodels, self.criteria, self.categories,
                              pt_sorted, self.aa)

        self.mutex.lock()
        self.results.append(meta.metas[0].model.copy())
        self.fitness.append(meta.metas[0].ca)
        self.mutex.unlock()
        self.emit(QtCore.SIGNAL('update(int)'), 0)

        for i in range(1, self.niter + 1):
            if self.is_stopped():
                break

            model, ca = meta.optimize(self.nmeta)

            self.mutex.lock()
            self.results.append(model.copy())
            self.fitness.append(ca)
            self.mutex.unlock()

            self.emit(QtCore.SIGNAL('update(int)'), i)

            if ca == 1:
                break
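
# Wiring sketch (illustrative; WorkerThread and progress_bar are hypothetical
# names, not part of the original code). The run() method above is assumed to
# belong to a QtCore.QThread subclass; a PyQt4 GUI would connect to the
# old-style 'update(int)' signal it emits after each iteration.
from PyQt4 import QtCore

thread = WorkerThread(nmodels, criteria, categories, pt, aa, niter, nmeta)
QtCore.QObject.connect(thread, QtCore.SIGNAL('update(int)'),
                       progress_bar.setValue)
thread.start()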
def run_meta_mr(pipe, criteria, categories, worst, best, nmodels, niter, nmeta,
                pt, aa):
    pt_sorted = SortedPerformanceTable(pt)

    meta = MetaMRSortPop3(nmodels, criteria, categories, pt_sorted, aa)

    ca = meta.metas[0].meta.good / len(aa)
    pipe.send([meta.metas[0].model, ca])

    for i in range(1, niter + 1):
        model, ca = meta.optimize(nmeta)
        pipe.send([model, ca])
        if ca == 1:
            break

    pipe.close()
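
# Driver sketch for run_meta_mr (illustrative; the Process/Pipe wiring below
# is an assumption, not part of the original code). run_meta_mr streams
# [model, ca] pairs through the pipe and closes it when done, so the parent
# can recv() until EOFError. criteria, categories, worst, best, nmodels,
# niter, nmeta, pt and aa are assumed to be prepared by the caller.
from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
proc = Process(target=run_meta_mr,
               args=(child_conn, criteria, categories, worst, best,
                     nmodels, niter, nmeta, pt, aa))
proc.start()
child_conn.close()                       # keep only the parent end open here
while True:
    try:
        model, ca = parent_conn.recv()   # latest model and its fitness (CA)
    except EOFError:                     # child closed its end: no more results
        break
proc.join()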
Example #3
def test_meta_electre_tri_global(seed, na, nc, ncat, ns, na_gen, pcerrors,
                                 max_oloops, nmodels, max_loops):

    # Generate a random UTADIS model
    model = generate_random_avfsort_model(nc, ncat, ns, ns, seed)
    cats = model.cat_values.get_ordered_categories()

    # Generate a set of alternatives
    a = generate_alternatives(na)
    pt = generate_random_performance_table(a, model.criteria)
    aa = model.get_assignments(pt)

    # Add errors in assignment examples
    aa_err = aa.copy()
    aa_erroned = add_errors_in_assignments(aa_err, cats, pcerrors / 100)

    # Sort the performance table
    pt_sorted = SortedPerformanceTable(pt)

    t1 = time.time()

    # Perform at most max_oloops optimization loops per iteration on the set of metas
    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.cat_values.to_categories(), pt_sorted, aa_err)
    ca2_iter = [meta.metas[0].ca] + [1] * (max_loops)

    nloops = 0
    for i in range(0, max_loops):
        model2, ca2 = meta.optimize(max_oloops)

        ca2_iter[i + 1] = ca2
        nloops += 1

        if ca2 == 1:
            break

    t_total = time.time() - t1

    # Determine the number of erroneous alternatives badly assigned
    aa2 = model2.pessimist(pt)

    ok_errors = ok2_errors = ok = 0
    for alt in a:
        if aa(alt.id) == aa2(alt.id):
            if alt.id in aa_erroned:
                ok_errors += 1
            ok += 1

        if aa_err(alt.id) == aa2(alt.id) and alt.id in aa_erroned:
            ok2_errors += 1

    total = len(a)
    ca2_errors = ok2_errors / total
    ca_best = ok / total
    ca_errors = ok_errors / total

    # Generate alternatives for the generalization
    a_gen = generate_alternatives(na_gen)
    pt_gen = generate_random_performance_table(a_gen, model.criteria)
    aa_gen = model.get_assignments(pt_gen)
    aa_gen2 = model2.pessimist(pt_gen)
    ca_gen = compute_ca(aa_gen, aa_gen2)

    # Save all information in the test_result class
    t = test_result(
        "%s-%d-%d-%d-%d-%g-%d-%d-%d" %
        (seed, na, nc, ncat, na_gen, pcerrors, max_loops, nmodels, max_oloops))

    # Input params
    t['seed'] = seed
    t['na'] = na
    t['nc'] = nc
    t['ncat'] = ncat
    t['ns'] = ns
    t['na_gen'] = na_gen
    t['pcerrors'] = pcerrors
    t['max_loops'] = max_loops
    t['nmodels'] = nmodels
    t['max_oloops'] = max_oloops

    # Output params
    t['ca_best'] = ca_best
    t['ca_errors'] = ca_errors
    t['ca2_best'] = ca2
    t['ca2_errors'] = ca2_errors
    t['ca_gen'] = ca_gen
    t['nloops'] = nloops
    t['t_total'] = t_total

    t['ca2_iter'] = ca2_iter

    return t
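
# Example invocation (illustrative parameter values only; the original test
# harness normally sweeps these parameters over many seeds):
t = test_meta_electre_tri_global(seed=0, na=100, nc=5, ncat=3, ns=3,
                                 na_gen=1000, pcerrors=10, max_oloops=20,
                                 nmodels=10, max_loops=30)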
def run_test(seed, data, pclearning, nloop, nmodels, nmeta):
    random.seed(seed)

    # Separate learning data and test data
    pt_learning, pt_test = data.pt.split(2, [pclearning, 100 - pclearning])
    aa_learning = data.aa.get_subset(pt_learning.keys())
    aa_test = data.aa.get_subset(pt_test.keys())

    # Initialize an empty MRSort model structure (weights, profiles and lambda
    # are left to be learned)
    cat_profiles = generate_categories_profiles(data.cats)
    worst = data.pt.get_worst(data.c)
    best = data.pt.get_best(data.c)
    b = generate_alternatives(len(data.cats) - 1, 'b')
    bpt = None
    cvs = None
    lbda = None

    model = MRSort(data.c, cvs, bpt, lbda, cat_profiles)

    # Run the metaheuristic
    t1 = time.time()

    pt_sorted = SortedPerformanceTable(pt_learning)

    # Algorithm (heur_init_profiles, lp_weights and heur_profiles are strategy
    # classes defined at module level, e.g. HeurMRSortInitProfiles,
    # LpMRSortWeights and MetaMRSortProfiles4 as in the last example)
    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(), pt_sorted,
                          aa_learning, heur_init_profiles, lp_weights,
                          heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)

    t_total = time.time() - t1

    aa_learning2 = compute_assignments_majority(meta.models, pt_learning)
    ca_learning = compute_ca(aa_learning, aa_learning2)
    auc_learning = compute_auc_majority(meta.models, pt_learning)
    diff_learning = compute_confusion_matrix(aa_learning, aa_learning2,
                                             model.categories)

    # Compute CA on the test set
    if len(aa_test) > 0:
        aa_test2 = compute_assignments_majority(meta.models, pt_test)
        ca_test = compute_ca(aa_test, aa_test2)
        auc_test = compute_auc_majority(meta.models, pt_test)
        diff_test = compute_confusion_matrix(aa_test, aa_test2,
                                             model.categories)

    else:
        ca_test = 0
        auc_test = 0
        ncat = len(data.cats)
        diff_test = OrderedDict([((a, b), 0) for a in model.categories \
                                             for b in model.categories])

    # Compute CA on the whole data set
    aa2 = compute_assignments_majority(meta.models, data.pt)
    ca = compute_ca(data.aa, aa2)
    auc = compute_auc_majority(meta.models, data.pt)
    diff_all = compute_confusion_matrix(data.aa, aa2, model.categories)

    t = test_result("%s-%d-%d-%d-%d-%d" %
                    (data.name, seed, nloop, nmodels, nmeta, pclearning))

    model.id = 'learned'
    aa_learning.id, aa_test.id = 'learning_set', 'test_set'
    pt_learning.id, pt_test.id = 'learning_set', 'test_set'
    save_to_xmcda("%s/%s.bz2" % (directory, t.test_name), aa_learning, aa_test,
                  pt_learning, pt_test, *meta.models)

    t['seed'] = seed
    t['na'] = len(data.a)
    t['nc'] = len(data.c)
    t['ncat'] = len(data.cats)
    t['pclearning'] = pclearning
    t['nloop'] = nloop
    t['nmodels'] = nmodels
    t['nmeta'] = nmeta
    t['na_learning'] = len(aa_learning)
    t['na_test'] = len(aa_test)
    t['ca_learning'] = ca_learning
    t['ca_test'] = ca_test
    t['ca_all'] = ca
    t['auc_learning'] = auc_learning
    t['auc_test'] = auc_test
    t['auc_all'] = auc

    for k, v in diff_learning.items():
        t['learn_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_test.items():
        t['test_%s_%s' % (k[0], k[1])] = v
    for k, v in diff_all.items():
        t['all_%s_%s' % (k[0], k[1])] = v

    t['t_total'] = t_total

    return t
Example #5
def mrsort_meta_inference(indir, outdir):
    if indir is None or not os.path.isdir(indir):
        log_error("Invalid input directory (%s)" % indir)
        return 1

    if outdir is None or not os.path.isdir(outdir):
        log_error("Invalid output directory (%s)" % outdir)
        return 1

    model, assignments, pt, params = parse_input_files(indir)

    if model is None or assignments is None or pt is None or params is None:
        log_error("Error while parsing input files")
        write_message_error(outdir + '/messages.xml')
        return 1

    if 'solver' in params:
        solver = params['solver'].value
    else:
        solver = DEFAULT_SOLVER

    if solver not in SOLVERS_LIST:
        log_error("Invalid solver selected (%s)" % solver)
        write_message_error(outdir + '/messages.xml')
        return 1

    os.environ["SOLVER"] = solver

    if 'nmodels' in params:
        nmodels = params['nmodels'].value
    else:
        log_error("Invalid number of models (nmodels)")
        write_message_error(outdir + '/messages.xml')
        return 1

    if 'niter_meta' in params:
        niter_meta = params['niter_meta'].value
    else:
        log_error("Invalid number of iterations (niter_meta)")
        write_message_error(outdir + '/messages.xml')
        return 1

    if 'niter_heur' in params:
        niter_heur = params['niter_heur'].value
    else:
        log_error("Invalid number of iterations (niter_heur)")
        write_message_error(outdir + '/messages.xml')
        return 1

    try:
        pt_sorted = SortedPerformanceTable(pt)
        meta = MetaMRSortPop3(nmodels, model.criteria,
                              model.categories_profiles.to_categories(),
                              pt_sorted, assignments)
        for i in range(niter_meta):
            model, ca = meta.optimize(niter_heur)

        assignments2 = model.get_assignments(pt)
        compat = get_compat_alternatives(assignments, assignments2)
        compat = to_alternatives(compat)
        msg_solver = "Solver: %s" % solver
        msg_ca = "CA: %g" % (len(compat) / len(assignments))

        profiles = to_alternatives(model.categories_profiles.keys())
        xmcda_lbda = lambda_to_xmcda(model.lbda)

        write_xmcda_file(outdir + '/lambda.xml', xmcda_lbda)
        write_xmcda_file(outdir + '/cat_profiles.xml',
                         model.categories_profiles.to_xmcda())
        write_xmcda_file(outdir + '/crit_weights.xml', model.cv.to_xmcda())
        write_xmcda_file(outdir + '/profiles_perfs.xml', model.bpt.to_xmcda())
        write_xmcda_file(outdir + '/compatible_alts.xml', compat.to_xmcda())

        write_message_ok(outdir + '/messages.xml', [msg_solver, msg_ca])
    except:
        log_error("Cannot solve problem")
        log_error(traceback.format_exc())
        write_message_error(outdir + '/messages.xml')

    return 0
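
# Invocation sketch (illustrative; '/tmp/in' and '/tmp/out' are hypothetical
# directories). mrsort_meta_inference() returns 1 on any input or parsing
# error and 0 otherwise, writing its XMCDA outputs and messages.xml to outdir.
import sys

if __name__ == '__main__':
    sys.exit(mrsort_meta_inference('/tmp/in', '/tmp/out'))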
Example #6
if algo == 'meta_mrsort':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortWeights
    heur_profiles = MetaMRSortProfiles4
elif algo == 'meta_mrsortc':
    heur_init_profiles = HeurMRSortInitProfiles
    lp_weights = LpMRSortMobius
    heur_profiles = MetaMRSortProfilesChoquet

if algo == 'meta_mrsort' or algo == 'meta_mrsortc':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    pt_sorted = SortedPerformanceTable(data.pt)

    meta = MetaMRSortPop3(nmodels, model.criteria,
                          model.categories_profiles.to_categories(), pt_sorted,
                          data.aa, heur_init_profiles, lp_weights,
                          heur_profiles)

    for i in range(0, nloop):
        model, ca_learning = meta.optimize(nmeta)
        print(ca_learning)
        if ca_learning == 1:
            break
elif algo == 'mip_mrsort':
    model_type = 'mrsort'
    cat_profiles = generate_categories_profiles(data.cats)
    model = MRSort(data.c, None, None, None, cat_profiles)
    mip = MipMRSort(model, data.pt, data.aa)
    mip.solve()
elif algo == 'lp_utadis':
    model_type = 'utadis'