# Example no. 1 (score: 0)
def evaluate_single_models(models, results):
    """Evaluate each model as a standalone single-classifier system.

    For every identifier in *models*, builds a one-classifier system,
    evaluates it on the test phase only, and stores the evaluation in
    *results* keyed by the generated system id.
    """
    for model_id in models:
        system = sb.SystemBuilder()
        classifier = make.make_classifier(model_id, model_id)
        system.add_classifier(classifier)
        system.set_start(model_id)
        evaluation = eval.evaluate(system, system.get_start(), phases=["test"])
        results[generate_system_id(system)] = evaluation
 def __create_ensemble(self):
     """Build a minimal ensemble holding a single ResNet18 classifier.

     Returns the SystemBuilder with the classifier registered and set
     as the system's start component.
     """
     classifier_id = 'ResNet18'
     classifier_file = os.path.join(
         os.environ['FCM'], 'Definitions', 'Classifiers',
         'sota_models_cifar10-32-dev_validation',
         'V001_ResNet18_ref_0.pkl')
     ensemble = sb.SystemBuilder()
     ensemble.add_classifier(mutils.make_classifier(classifier_id, classifier_file))
     ensemble.set_start(classifier_id)
     return ensemble
# Example no. 3 (score: 0)
def create_ensemble():
    """Create a one-member ensemble: a ResNet18 classifier behind an
    AVERAGE merger, with the merger as the system's entry point.
    """
    c_id = 'ResNet18'
    c_file = os.path.join(
        os.environ['FCM'], 'Definitions', 'Classifiers',
        'sota_models_cifar10-32-dev_validation',
        'V001_ResNet18_ref_0.pkl')

    ensemble = sb.SystemBuilder()
    ensemble.add_classifier(mutils.make_classifier(c_id, c_file))
    ensemble.add_merger(
        mutils.make_merger('Merger', [c_id], merge_type=fcm.Merger.AVERAGE))
    ensemble.set_start('Merger')
    return ensemble
def build_chain(classifiers, id_classifiers, thresholds, id_triggers, data_id):
    """Build a chain system: each classifier (except the last) is followed by
    a probability trigger that can forward inputs to the next classifier.

    Args:
        classifiers: classifier file paths, ordered first-to-last in the chain.
        id_classifiers: one id per classifier file.
        thresholds: one threshold per trigger.
        id_triggers: trigger ids; exactly one fewer than the classifiers.
        data_id: name of the data directory (under $FCM/Data) where the
            per-trigger datasets are written.

    Returns:
        The assembled SystemBuilder, with the first classifier as start.
    """
    assert len(classifiers) == len(
        id_triggers
    ) + 1, "ERROR: Number of triggers in the chain is not consistent"
    assert len(id_triggers) == len(
        thresholds), "ERROR: Each trigger should be assigned a threshold"
    assert len(classifiers) == len(
        id_classifiers
    ), "ERROR: Each classifier file should be assigned a classifier id"

    data_path = os.path.join(os.environ['FCM'], 'Data', data_id)

    if not os.path.exists(data_path):
        os.makedirs(data_path)

    sys = sb.SystemBuilder(verbose=False)
    for i in range(len(classifiers) - 1):

        # Create data for the trigger
        # NOTE(review): the suffixes appear swapped — train_path ends in
        # "_test.pkl" and test_path in "_train.pkl". The function is
        # internally consistent either way, but confirm against
        # make.make_source/update_dataset whether this is intentional.
        train_path = os.path.join(data_path, id_triggers[i] + "_test.pkl")
        test_path = os.path.join(data_path, id_triggers[i] + "_train.pkl")
        val_path = os.path.join(data_path, id_triggers[i] + "_val.pkl")
        source = make.make_source(train_path, test_path, 2, val_path=val_path)
        data = make.make_data("data_" + id_triggers[i], 1, 1, source=source)
        update_dataset(classifiers[i], train_path, test_path, val_path,
                       thresholds[i])
        sys.add_data(data)

        # Build trigger attached to classifier: when it fires, control is
        # handed to the NEXT classifier in the chain (id_classifiers[i + 1]).
        trigger = make.make_trigger(id_triggers[i],
                                    make.make_empty_classifier(
                                        id='',
                                        data_id="data_" + id_triggers[i]),
                                    [id_classifiers[i + 1]],
                                    model="probability")
        sys.add_trigger(trigger)

        # Build classifier, linked to its trigger via component_id.
        c_file = classifiers[i]
        classifier = make.make_classifier(id_classifiers[i],
                                          c_file,
                                          component_id=id_triggers[i])
        sys.add_classifier(classifier)

        if i == 0:
            sys.set_start(id_classifiers[i])

    # The last classifier has no trigger — it terminates the chain.
    classifier = make.make_classifier(id_classifiers[-1], classifiers[-1])
    sys.add_classifier(classifier)
    return sys
# Example no. 5 (score: 0)
def build_evaluate_chain(files: list, ths: list):
    """Build a merged chain of classifiers and evaluate the whole system.

    Args:
        files: classifier file paths, ordered first-to-last in the chain.
        ths: trigger thresholds; exactly one per consecutive pair of
            classifiers (i.e. ``len(files) == len(ths) + 1``).

    Returns:
        The evaluation result produced by ``eval.evaluate``.
    """
    # NOTE: the original annotations were the list literals ``[str]`` and
    # ``[float]``, which are runtime objects, not valid type hints.
    assert len(files) > 0 and len(files) == len(ths) + 1, \
        "ERROR: expected one threshold per consecutive pair of classifiers"

    sys = sb.SystemBuilder(verbose=False)
    classifier = make.make_classifier(os.path.basename(files[0]), files[0])
    sys.add_classifier(classifier)
    sys.set_start(classifier.id)

    # Automatically build the chain with written mutation operations
    for i, file in enumerate(files[:-1]):
        extend_merged_chain(sys, os.path.basename(file),
                            os.path.basename(files[i + 1]), ths[i],
                            files[i + 1])

    result = eval.evaluate(sys)
    return result
# Example no. 6 (score: 0)
def generate_initial_population():
    """Return the initial population: one single-classifier system per
    ``.pkl`` classifier file found under the dataset's definitions dir.
    """
    classifier_dir = os.path.join(os.environ['FCM'], 'Definitions',
                                  'Classifiers', args.dataset)
    population = []
    for fname in os.listdir(classifier_dir):
        if ".pkl" not in fname:
            continue
        c_file = os.path.join(classifier_dir, fname)
        system = sb.SystemBuilder(verbose=False)
        c_id = get_classifier_name(c_file)
        system.add_classifier(make.make_classifier(c_id, c_file))
        system.set_start(c_id)
        population.append(system)
    return population
# Example no. 7 (score: 0)
def create_evaluate_system(work, results):
    """Worker loop: build and evaluate merged-classifier systems.

    Repeatedly takes ``(protocol, subset)`` jobs from the *work* queue,
    builds a system that merges every classifier in *subset* under the
    given merge protocol, evaluates it from the merger, and stores the
    result in *results*.

    Args:
        work: job queue yielding ``(protocol, subset)`` tuples, where
            ``subset`` is an iterable of classifier file paths (each path
            doubles as the classifier id).
        results: mutable mapping collecting evaluation results, keyed by
            a string encoding the classifier ids and the protocol.

    Note:
        Runs until the process is killed (``while True`` with a blocking
        ``work.get()``); calls ``work.task_done()`` after each job.
    """
    # The original created an unused threading Lock here; *results* is
    # expected to be a concurrency-safe mapping (e.g. a Manager dict).
    while True:
        protocol, subset = work.get()
        sys = sb.SystemBuilder(verbose=False)
        classifiers_ids = []
        for m in subset:
            # The file path doubles as the classifier id.
            model = make.make_classifier(m, m)
            sys.add_classifier(model)
            classifiers_ids.append(model.id)

        merger = make.make_merger("MERGER", classifiers_ids, merge_type=protocol)
        sys.add_merger(merger)
        r = eval.evaluate(sys, merger.id)
        results['system_' + '_'.join(classifiers_ids) + '_protocol' + str(protocol)] = r

        work.task_done()
# Example no. 8 (score: 0)
    # NOTE(review): this fragment is truncated — the enclosing function's
    # signature and the body of the ``th0`` loop at the end are not visible.
    for dataset in datasets:

        #########################################################################
        # Collect every pickled classifier for this dataset and make sure
        # the per-dataset results directory exists.
        Classifier_Path = os.environ['FCM']+"/Definitions/Classifiers/" + dataset + "/"
        model_paths = [Classifier_Path + f for f in os.listdir(Classifier_Path) if ".pkl" in f]
        out_dir = os.path.join("./results/", dataset)
        data_path = os.environ['FCM']+"/Datasets/"
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        #########################################################################

        import Examples.study.paretto_front as paretto
        # Evaluate every classifier on its own (test + validation phases).
        R_models = {}
        for mpath in model_paths:
            sys = sb.SystemBuilder(verbose=False)
            classifier_id = os.path.basename(mpath)
            c = make.make_classifier(classifier_id, mpath)
            sys.add_classifier(c)
            sys.set_start(classifier_id)
            R_models[classifier_id] = eval.evaluate(sys, phases=["test", "val"])

        #front = paretto.get_front_time_accuracy(R_models, phase="test")
        #front_sorted = paretto.sort_results_by_accuracy(front, phase="test")
        # Order the model paths by validation accuracy before combining them.
        models_paths = [Classifier_Path + k for k, v in paretto.sort_results_by_accuracy(R_models, phase="val")]
        records = R_models

        # Combinations
        # Sweep every (classifier, threshold) pair; the loop body is cut
        # off in this fragment.
        for ic0 in range(len(models_paths)):
            c0 = models_paths[ic0]
            for th0 in np.arange(0, 1+step_th, step_th):
from Source.system_evaluator_utils import pretty_print
import os

# Paths to the two pretrained CIFAR-10 classifiers that will be merged.
classifiers = [
    os.path.join(
        os.environ['FCM'],
        'Definitions/Classifiers/sota_models_cifar10-32-dev_validation',
        'V001_ResNet18_ref_0.pkl'),
    os.path.join(
        os.environ['FCM'],
        'Definitions/Classifiers/sota_models_cifar10-32-dev_validation',
        'V001_ResNet152_ref_0.pkl')
]

# Build the system: an AVERAGE merger combines both classifiers; each
# classifier's file path doubles as its id, and the merger is the start.
merged_classifiers = sb.SystemBuilder()
merger = make.make_merger('Merger', classifiers, fcm.Merger.AVERAGE)
merged_classifiers.add_merger(merger)
for classifier in classifiers:
    c = make.make_classifier(classifier, classifier)
    merged_classifiers.add_classifier(c)

merged_classifiers.set_start('Merger')
# Evaluate starting from the merger and print a human-readable report.
R = evaluate(merged_classifiers, merged_classifiers.get_start())
pretty_print(R)

# Manual check
import Source.io_util as io
import numpy as np

# Load the first classifier's pickled dict to verify the merged result
# by hand (the remainder of this check is not visible in this fragment).
c_dict_0 = io.read_pickle(classifiers[0])