Example #1
def evaluate(work, records, tid):
    lock = Lock()  # NOTE: this Lock is local to each worker thread; guarding 'records' across threads would need a single shared Lock
    while True:

        m, th, m_ = work.get()
        print(m, th, m_)

        small_dict = io.read_pickle(m)
        test_images = len(small_dict['test']['gt'])
        train_images = len(small_dict['train']['gt'])
        data_path = "../../Data/"
        trigger_train_dataset = os.path.join(data_path,
                                             "train_trigger_" + str(tid))
        test_train_dataset = os.path.join(data_path,
                                          "test_trigger_" + str(tid))

        sys = sb.SystemBuilder(verbose=False)

        # Complex classifier
        bigClassifier = make.make_classifier("big", m_)
        sys.add_classifier(bigClassifier)

        # Data
        source = make.make_source(trigger_train_dataset, test_train_dataset, 2)
        data = make.make_data("trigger_data",
                              train_images,
                              test_images,
                              source=source)
        sys.add_data(data)
        update_dataset(m, th, trigger_train_dataset, test_train_dataset)

        # Trigger
        trigger = make.make_trigger(
            "trigger",
            make.make_empty_classifier(data_id="trigger_data"), ["big"],
            model="probability")
        sys.add_trigger(trigger)

        # Simple classifier
        smallClassifier = make.make_classifier("small", m, "trigger")
        sys.add_classifier(smallClassifier)

        results = eval.evaluate(sys, "small", check_classifiers=False)
        records["system_" + m + ';' + m_ + ';' + str(th)] = results.test

        lock.acquire()
        if m_ not in records:  # Evaluate individual models in order to plot
            records[m_] = eval.evaluate(sys, 'big').test
        lock.release()

        work.task_done()
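Example #1 is written as a queue worker, so the snippet never shows how it is launched. Below is a minimal driver sketch, assuming standard-library threading and hypothetical placeholder work items of the form (small_model_file, threshold, big_model_file), which is how the worker unpacks them:

# Hypothetical driver sketch for the worker above; file names and the threshold are placeholders.
from queue import Queue
from threading import Thread

work = Queue()
records = {}
for tid in range(4):  # a small pool of worker threads
    Thread(target=evaluate, args=(work, records, tid), daemon=True).start()
work.put(("small_model.pkl", 0.7, "big_model.pkl"))
work.join()  # returns once every queued job has called task_done()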
Example #2
def evaluate_single_models(models, results):
    for m in models:
        sys = sb.SystemBuilder()
        c = make.make_classifier(m, m)
        sys.add_classifier(c)
        sys.set_start(m)
        results[generate_system_id(sys)] = eval.evaluate(sys, sys.get_start(), phases=["test"])
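A short, hypothetical call into evaluate_single_models; in this function the model id doubles as the classifier file path, so the list entries below are placeholders:

# Hypothetical usage sketch; model ids/paths are placeholders.
models = ['V001_ResNet18_ref_0.pkl', 'V001_DenseNet161_ref_0.pkl']
results = {}
evaluate_single_models(models, results)
print(list(results.keys()))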
Example #3
    def test_accuracy(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # System Evaluator computes accuracy
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        acc = R.test['system'].accuracy

        # Compute accuracy manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        gt = c_dict_0['test']['gt']
        logits_0 = c_dict_0['test']['logits']
        logits_1 = c_dict_1['test']['logits']

        average = (logits_0 + logits_1) / 2
        acc_manual = np.sum(np.argmax(average, 1) == gt) / len(gt)

        self.assertEqual(acc, acc_manual)
Example #4
def evaluate_population(P, phases=['test', 'val']):

    R = [ev.Results] * len(P)

    if main.args.cores:

        assert main.args.device != 'cpu', "ERROR: Code does not evaluate the chain ensemble on PyTorch with CPU"

        from multiprocessing import Process, Manager
        processes = []
        R_manager = Manager().list(R)
        for i in range(main.args.cores):
            processes.append(
                Process(target=evaluate_process,
                        args=(P, i, R_manager, main.args.cores, phases)))
            processes[i].start()
        for i in range(main.args.cores):
            processes[i].join()
        return list(R_manager)

    else:
        for i, p in enumerate(P):
            R[i] = ev.evaluate(p, p.get_start(), phases=phases)

            if main.args.device == 'cpu':
                from Source.iotnets.main_run_chain import chain_inference_time

                classifiers_chain = [utils.get_classifier_index(P[i], 0)] * 3
                ths = [0, 0]

                if len(P[i].get_message().classifier) > 1:
                    c_id = utils.get_classifier_index(P[i], 1)
                    t_id = P[i].get(classifiers_chain[0]).component_id
                    classifiers_chain[1] = c_id
                    ths[0] = float(t_id.split("_")[2])

                if len(P[i].get_message().classifier) > 2:
                    c_id = utils.get_classifier_index(P[i], 2)
                    t_id = P[i].get(classifiers_chain[1]).component_id
                    classifiers_chain[2] = c_id
                    ths[1] = float(t_id.split("_")[2])

                update = R[i]
                if 'val' in phases:
                    update.val['system'].time = chain_inference_time(
                        main.args.dataset,
                        classifiers_chain,
                        ths,
                        bs=128,
                        phase='val')
                if 'test' in phases:
                    update.test['system'].time = chain_inference_time(
                        main.args.dataset,
                        classifiers_chain,
                        ths,
                        bs=128,
                        phase='test')
                R[i] = update

    return R
Example #5
    def build_classifier_dict(self, name, start_id, phases=["test"]):
        import Source.system_evaluator as eval
        classifier_dict = {'name': name, 'test': {}, 'train': {}, 'val': {}}
        eval_results = eval.evaluate(self,
                                     start_id,
                                     classifier_dict=classifier_dict,
                                     phases=phases)
        return classifier_dict, eval_results
Example #6
def build_evaluate_chain(files: [str], ths: [float]):
    assert len(files) > 0 and len(files) == len(ths)+1

    sys = sb.SystemBuilder(verbose=False)
    classifier = make.make_classifier(os.path.basename(files[0]), files[0])
    sys.add_classifier(classifier)
    sys.set_start(classifier.id)

    # Automatically build the chain with written mutation operations
    for i, file in enumerate(files[:-1]):
        extend_merged_chain(sys, os.path.basename(file),
                            os.path.basename(files[i+1]), ths[i], files[i+1])

    result = eval.evaluate(sys)
    return result
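A hedged usage sketch for build_evaluate_chain, assuming two classifier pickle paths (placeholders) and a single threshold between them, which satisfies the len(files) == len(ths) + 1 assertion:

# Hypothetical usage sketch; paths and the threshold are placeholders.
files = ['Definitions/Classifiers/V001_ResNet18_ref_0.pkl',
         'Definitions/Classifiers/V001_DenseNet161_ref_0.pkl']
ths = [0.7]
result = build_evaluate_chain(files, ths)
print(result.test['system'].accuracy)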
Example #7
    def test_accuracy(self):
        chain = create_chain()
        extend_chain(chain)
        replace(chain)
        R = evaluate(chain, chain.get_start())

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(
            os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                         'sota_models_cifar10-32-dev_validation',
                         'V001_DenseNet169_ref_0.pkl'))
        acc_net0 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])

        self.assertEqual(acc_net0, R.test['system'].accuracy)
Example #8
def extend_merged_chain_automatic_threshold(i, c_id_tail, c_id_new, a, c_file_new=None):

    thresholds = [0.1, 0.3, 0.5, 0.7, 0.9]
    # Apply operation
    extend_merged_chain(i, c_id_tail, c_id_new, 0.0, c_file_new=c_file_new)

    # Find good threshold value
    R_list = []
    for th in thresholds:
        set_threshold(i, c_id_tail, th)
        R_list.append(evaluate(i, i.get_start(), phases=["val"]))
    fit_list = fit(R_list, a)
    j_best_th = np.argmax(fit_list)

    # Set best threshold value found
    set_threshold(i, c_id_tail, thresholds[j_best_th])
Example #9
def replace_classifier_merger_automatic_threshold(i, c_id, c_id_new, a, c_file=None):

    thresholds = [0.1, 0.3, 0.5, 0.7, 0.9]

    # Apply operation
    replace_classifier_merger(i, c_id, c_id_new, c_file)

    # Find good threshold value
    R_list = []
    for th in thresholds:
        set_threshold(i, c_id_new, th)
        R_list.append(evaluate(i, i.get_start(), phases=["val"]))
    fit_list = fit(R_list, a)
    j_best_th = np.argmax(fit_list)

    # Set best threshold value found
    set_threshold(i, c_id_new, thresholds[j_best_th])
Example #10
def create_evaluate_system(work, results):
    # Creating system
    lock = Lock()
    while True:
        protocol, subset = work.get()
        sys = sb.SystemBuilder(verbose=False)
        classifiers_ids = []
        for m in subset:
            file = m
            model = make.make_classifier(m, file)
            sys.add_classifier(model)
            classifiers_ids.append(model.id)

        merger = make.make_merger("MERGER", classifiers_ids, merge_type=protocol)
        sys.add_merger(merger)
        r = eval.evaluate(sys, merger.id)
        # if results: results = io.read_pickle('./results/R_'+str(protocol)+'_'+str(n_models))
        results['system_' + '_'.join(classifiers_ids) + '_protocol' + str(protocol)] = r

        work.task_done()
Example #11
    def test_threshold_1(self):
        chain = self.__create_ensemble()
        c_id_extend = 'DenseNet-161'
        c_file_extend = os.path.join(os.environ['FCM'], 'Definitions',
                                     'Classifiers',
                                     'sota_models_cifar10-32-dev_validation',
                                     'V001_DenseNet161_ref_0.pkl')
        extend_merged_chain(chain, 'ResNet18', c_id_extend, 1.1, c_file_extend)
        R = evaluate(chain, chain.get_start())
        acc_chain = R.test['system'].accuracy
        time_chain = R.test['system'].time
        ops_chain = R.test['system'].ops
        params_chain = R.test['system'].params

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(
            os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                         'sota_models_cifar10-32-dev_validation',
                         'V001_ResNet18_ref_0.pkl'))
        acc_net0 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])
        time_net0 = dict_classifier['metrics']['time']
        params_net0 = dict_classifier['metrics']['params']
        ops_net0 = dict_classifier['metrics']['ops']

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(c_file_extend)
        acc_net1 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])
        time_net1 = dict_classifier['metrics']['time']
        params_net1 = dict_classifier['metrics']['params']
        ops_net1 = dict_classifier['metrics']['ops']

        correct = acc_chain == acc_net1 and \
                    math.isclose(time_net0/128 + time_net1/128, time_chain/5e3) and \
                    ops_chain/5e3 == ops_net0 + ops_net1 and \
                    params_chain == params_net0 + params_net1 + 1

        self.assertEqual(correct, True)
Example #12
    def test_time(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # Evaluation time of ensemble
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        time = R.test['system'].time

        # Compute CIFAR-10 evaluation time manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        time_0 = c_dict_0['metrics']['time']/128 * 5e3
        time_1 = c_dict_1['metrics']['time']/128 * 5e3

        self.assertAlmostEqual(time_1+time_0, time)
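The manual check above scales the stored latency to the evaluated split; spelled out, assuming 'metrics'['time'] is the latency of one batch of 128 images and 5e3 images are evaluated (both figures appear in the test itself):

# time_total ≈ (time_per_batch / 128) * 5e3, i.e. per-image latency times number of evaluated images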
Example #13
    def test_params(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # Parameter count of ensemble
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        params = R.test['system'].params

        # Compute parameter count manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        params_0 = c_dict_0['metrics']['params']
        params_1 = c_dict_1['metrics']['params']

        self.assertEqual(params_0+params_1, params)
Example #14
                print("MERGE:", protocol)
                model_subsets = itertools.combinations(models, n_models)
                for subset in model_subsets:
                    print(subset)
                    sys = sb.SystemBuilder(verbose=False)
                    classifiers_ids = []
                    for m in subset:
                        file = m
                        model = make.make_classifier(m, file)
                        sys.add_classifier(model)
                        classifiers_ids.append(model.id)

                    merger = make.make_merger("MERGER", classifiers_ids, merge_type=protocol)
                    sys.add_merger(merger)
                    sys.set_start(merger.id)
                    r = eval.evaluate(sys, sys.get_start(), phases=["test"])
                    results[generate_system_id(sys)] = r

                # Save the evaluation results
                import Examples.metadata_manager_results as manager_results

                meta_data_file = os.path.join(os.environ['FCM'],
                                              'Examples',
                                              'compute',
                                              'merger_combinations',
                                              'results',
                                              'metadata.json')

                id = str(random.randint(0, int(1e16)))  # randint requires integer bounds
                results_loc = os.path.join('Examples/compute/merger_combinations/results', dataset, id)
                meta_data_result = manager_results.metadata_template(id, dataset, results_loc, "")
Example #15
def evaluate_process(P, pi, R, cores, phases):
    i = pi
    while i < len(P):
        R[i] = ev.evaluate(P[i], P[i].get_start(), phases=phases)
        i += cores
Example #16
    # Classifier 2
    c2 = make.make_classifier("c2", c2_file)
    sys.add_classifier(c2)

    # ---- TEST ---- #

    # Test 1 -> All instances executed by the bigger network
    update_dataset(c0_file, [0, 1.1], trigger0_train_dataset,
                   trigger0_test_dataset)
    trigger0 = make.make_trigger("trigger0",
                                 make.make_empty_classifier(
                                     id="", data_id="trigger0_data"),
                                 ["c1", "c2"],
                                 model="probability_multiple_classifiers")
    sys.replace("trigger0", trigger0)
    R1 = eval.evaluate(sys, "c0")
    assert R1.test['system'].accuracy == R1.test['c2'].accuracy and \
            R1.test['system'].time == R1.test['c0'].time + R1.test['c2'].time, "Error in test 1"
    print("TEST 1: PASS")

    # Test 2 -> All instances executed by the medium network
    update_dataset(c0_file, [1.1, 0], trigger0_train_dataset,
                   trigger0_test_dataset)
    trigger0 = make.make_trigger("trigger0",
                                 make.make_empty_classifier(
                                     id="", data_id="trigger0_data"),
                                 ["c1", "c2"],
                                 model="probability_multiple_classifiers")
    sys.replace("trigger0", trigger0)
    R2 = eval.evaluate(sys, "c0")
    assert R2.test['system'].accuracy == R2.test['c1'].accuracy and \
Example #17
        model_paths = [Classifier_Path + f for f in os.listdir(Classifier_Path) if ".pkl" in f]
        out_dir = os.path.join("./results/", dataset)
        data_path = os.environ['FCM']+"/Datasets/"
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        #########################################################################

        import Examples.study.paretto_front as paretto
        R_models = {}
        for mpath in model_paths:
            sys = sb.SystemBuilder(verbose=False)
            classifier_id = os.path.basename(mpath)
            c = make.make_classifier(classifier_id, mpath)
            sys.add_classifier(c)
            sys.set_start(classifier_id)
            R_models[classifier_id] = eval.evaluate(sys, phases=["test", "val"])

        #front = paretto.get_front_time_accuracy(R_models, phase="test")
        #front_sorted = paretto.sort_results_by_accuracy(front, phase="test")
        models_paths = [Classifier_Path + k for k, v in paretto.sort_results_by_accuracy(R_models, phase="val")]
        records = R_models

        # Combinations
        for ic0 in range(len(models_paths)):
            c0 = models_paths[ic0]
            for th0 in np.arange(0, 1+step_th, step_th):
                for ic1 in range(ic0+1, len(models_paths)):
                    c1 = models_paths[ic1]
                    #for th2 in np.arange(0.2, 1, step_th):
                    #   for ic2 in range(ic1+1, len(models)):
                    #       c2 = models[ic2]
Example #18
import merge_two_chains
import os

merger = merge_two_chains.merged

from Source.genetic_algorithm.operations_mutation import add_classifier_to_merger
add_classifier_to_merger(merger, 'Merger', '2_PNASNetA', os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                                                                      'sota_models_cifar10-32-dev_validation',
                                                                      'V001_PNASNetA_ref_0'))

from Examples.compute.chain_genetic_algorithm.utils import generate_system_id
merger.set_sysid(generate_system_id(merger))
print(merger.get_sysid())

from Source.system_evaluator import evaluate
from Source.system_evaluator_utils import pretty_print
R = evaluate(merger, merger.get_start())
pretty_print(R)
Example #19
    sys = sb.SystemBuilder(verbose=False)

    bigClassifier = make.make_classifier("big", big_cfile)
    sys.add_classifier(bigClassifier)

    source = make.make_source(train_path, test_path, fcm.Data.Source.NUMPY)
    data = make.make_data("trigger_data", int(5e4), int(1e4), source=source)
    sys.add_data(data)
    update_dataset(small_cfile, th, train_path, test_path)

    trigger = make.make_trigger(
        "trigger",
        make.make_empty_classifier(data_id="trigger_data"), ["big"],
        model="probability")
    sys.add_trigger(trigger)

    smallClassifier = make.make_classifier("small", small_cfile, "trigger")
    sys.add_classifier(smallClassifier)

    r = eval.evaluate(sys, "small")
    eval.pretty_print(r)
    print(r.test['system'].instance_model)

    time = 0
    time_small = io.read_pickle(small_cfile)['metrics']['time'] / 128
    time_big = io.read_pickle(big_cfile)['metrics']['time'] / 128
    for id, model in r.test['system'].instance_model.items():
        time += time_small if len(model) == 1 else time_small + time_big

    print(time, r.test['system'].time)
Example #20
from extend_chain_operation import chain
from Source.genetic_algorithm.operations_mutation import set_threshold
from Source.system_evaluator import evaluate
from Source.system_evaluator_utils import pretty_print
from Examples.compute.chain_genetic_algorithm.utils import generate_system_id

print(chain.get_message())
print(chain.get_sysid())
set_threshold(chain, "VGG13", 0.5)
chain.set_sysid(generate_system_id(chain))
print(chain.get_sysid())
R = evaluate(chain, chain.get_start())
pretty_print(R)
set_threshold(chain, "ResNeXt29_32x4d", 0.3)
chain.set_sysid(generate_system_id(chain))
print(chain.get_sysid())
R = evaluate(chain, chain.get_start())
pretty_print(R)
set_threshold(chain, "VGG11", 0.2)
chain.set_sysid(generate_system_id(chain))
print(chain.get_sysid())
R = evaluate(chain, chain.get_start())
pretty_print(R)

Example #21
    val_path = path + '/Data/val_trigger_threshold.pkl'
    small_cfile = "../Definitions/Classifiers/front45_models_validation/V001_DenseNet_s1_39"
    big_cfile = "../Definitions/Classifiers/front45_models_validation/V001_DenseNet_s1_66"

    sys = sb.SystemBuilder(verbose=False)

    bigClassifier = make.make_classifier("big", big_cfile)
    sys.add_classifier(bigClassifier)

    source = make.make_source(train_path, test_path, fcm.Data.Source.NUMPY,
                              val_path)
    data = make.make_data("trigger_data", int(5e4), int(1e4), source=source)
    sys.add_data(data)
    update_dataset(small_cfile, 0.6, train_path, test_path, val_path)

    trigger = make.make_trigger(
        "trigger",
        make.make_empty_classifier(data_id="trigger_data"), ["big"],
        model="probability")
    sys.add_trigger(trigger)

    smallClassifier = make.make_classifier("small", small_cfile, "trigger")
    sys.add_classifier(smallClassifier)

    r = eval.evaluate(sys,
                      "small",
                      phases=['test', 'val'],
                      check_classifiers=True)
    import Source.system_evaluator_utils as eval_utils
    eval_utils.pretty_print(r)
Example #22
        # Creating system
        sys = sb.SystemBuilder(verbose=False)
        smallClassifier = make.make_empty_classifier("Classifier")
        sys.add_classifier(smallClassifier)

        for m_ in models:

            # Model 2
            name2 = Classifier_Path + m_
            model2 = make.make_empty_classifier()
            model2.id = "Classifier"
            model2.classifier_file = name2
            sys.replace(model2.id, model2)

            evaluate_time_start = time.time()
            results = eval.evaluate(sys, model2.id)
            eval_time = time.time() - evaluate_time_start

            print("Evaluation time:", eval_time)

            records[m_] = results

        import Source.io_util as io
        if not os.path.exists('./models_evaluation/%s' % d):
            os.makedirs('./models_evaluation/%s' % d)
        io.save_pickle('./models_evaluation/%s/models.pkl' % d, records)

        plot_accuracy_time(records)
        plt.legend()
        plt.show()
Example #23
    # Load 32 DNNs
    S_initial = []
    S_eval_dict = {}
    limits = make_limits_dict()

    classifier_path = os.path.join(os.environ['FCM'], 'Definitions',
                                   'Classifiers', args.dataset)
    classifier_files = [f for f in os.listdir(classifier_path) if ".pkl" in f]
    for c_id in classifier_files:
        sys = sb.SystemBuilder(verbose=False)
        c_file = os.path.join(classifier_path, c_id)
        sys.add_classifier(mutil.make_classifier(c_id, c_file))
        sys.set_start(c_id)
        S_initial.append(sys)
        S_eval_dict[c_id] = evaluate(sys, sys.get_start(), phases=["val"])
    update_limit_dict(limits, S_eval_dict, phase="val")

    # Initialize Q-Learning table
    Qtable = {}

    # Start Q-loop
    bar = ProgressBar(args.steps)
    R_episodes = []
    Acc_episodes = []

    for episode in range(args.episodes):
        print("EPISODE %d" % episode)
        ensemble = S_initial[(episode % len(S_initial))]
        ensemble_eval = evaluate(ensemble,
                                 ensemble.get_start(),
Example #24
    dev = 'cpu'
    print(f"Evaluating ensemble on {dev}")


    # Measure how fast builds
    ts = perf_counter()
    ensemble = pytorch_ensemble_1()
    print(f"PyTorch merge ensemble creation time: {perf_counter()-ts: 0.4f}s")
    ts = perf_counter()
    ensemble_pb = protobuf_ensemble_1()
    print(f"Protobuf merge ensemble creation time: {perf_counter()-ts: 0.4f}s")
    print("--"*20)

    # Measure evaluation time on CPU
    ts = perf_counter()
    r = evaluate(ensemble, phases=[Split.TRAIN], device=dev)
    print(f"PyTorch merge ensemble evaluation time: {perf_counter() - ts: 0.4f}s")
    ts = perf_counter()
    r_pb = evaluate(ensemble_pb, phases=[Split.TRAIN])
    print(f"Protobuf merge ensemble evaluation time: {perf_counter()-ts: 0.4f}s")
    print("--" * 20)

    print(r.test["system"].accuracy, r_pb.test["system"].accuracy)

    # Measure size of the ensembles
    import sys
    print(f"PyTorch merge ensemble object size: {sys.getsizeof(ensemble)/1e6:0.2f} MBytes")
    print(f"Protobuf merge ensemble object size: {sys.getsizeof(ensemble_pb)/1e6:0.2f} MBytes")
    print("==" * 20)

    """
Example #25
sys = sb.SystemBuilder(verbose=True)

name2 = m_
bigClassifier = make.make_empty_classifier("big")
bigClassifier.classifier_file = name2
sys.add_classifier(bigClassifier)

trigger = make.make_trigger("probability_threshold_trigger",
                            make.make_empty_classifier(), ["big"])
sys.add_trigger(trigger)

name1 = m
smallClassifier = make.make_empty_classifier("small",
                                             "probability_threshold_trigger")
model1_dict = io.read_pickle(name1, suffix="", verbose=False)
smallClassifier.classifier_file = name1
sys.add_classifier(smallClassifier)

classifier_trigger = build_train_trigger(model1_dict, th)
trigger = make.make_trigger("probability_threshold_trigger",
                            classifier_trigger, ["big"])
trigger.id = "probability_threshold_trigger"
sys.replace(trigger.id, trigger)

metrics = eval.evaluate(sys, "small", check_classifiers=False)
eval.pretty_print(metrics)

# cl = sys.build_classifier_dict("small")
# io.save_pickle(Classifier_Path+"system_"+net1+"_"+net2+"th=0.7", cl)
Example #26
chain1 = extend_chain_operation.chain

classifiers = [
    os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                 'sota_models_cifar10-32-dev_validation',
                 'V001_ResNet34_ref_0'),
    os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                 'sota_models_cifar10-32-dev_validation', 'V001_VGG19_ref_0')
]
classifiers_id = ['ResNet34', 'VGG19']

thresholds = [0.9]

trigger_ids = ['trigger_classifier_0.8_ResNet34']

chain2 = build_chain(classifiers, classifiers_id, thresholds, trigger_ids,
                     'chain2_extend_operation')

from Source.genetic_algorithm.operations_breed import merge_two_chains

merged = merge_two_chains(chain1, chain2)
merged.set_sysid(extend_chain_operation.generate_system_id(merged))
print(merged.get_sysid())

from Source.system_evaluator import evaluate
from Source.system_evaluator_utils import pretty_print

R = evaluate(merged, merged.get_start())
pretty_print(R)
Example #27
    os.path.join(
        os.environ['FCM'],
        'Definitions/Classifiers/sota_models_cifar10-32-dev_validation',
        'V001_ResNet152_ref_0.pkl')
]

# Build the system
merged_classifiers = sb.SystemBuilder()
merger = make.make_merger('Merger', classifiers, fcm.Merger.AVERAGE)
merged_classifiers.add_merger(merger)
for classifier in classifiers:
    c = make.make_classifier(classifier, classifier)
    merged_classifiers.add_classifier(c)

merged_classifiers.set_start('Merger')
R = evaluate(merged_classifiers, merged_classifiers.get_start())
pretty_print(R)

# Manual check
import Source.io_util as io
import numpy as np

c_dict_0 = io.read_pickle(classifiers[0])
c_dict_1 = io.read_pickle(classifiers[1])

gt = c_dict_0['test']['gt']
logits_0 = c_dict_0['test']['logits']
logits_1 = c_dict_1['test']['logits']

average = (logits_0 + logits_1) / 2
test_acc = np.sum(np.argmax(average, 1) == gt) / len(gt)
Example #28
                     'sota_models_stl10-32-dev_validation',
                     'V001_VGG13_ref_0'),
        os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                     'sota_models_stl10-32-dev_validation',
                     'V001_ResNeXt29_32x4d_ref_0'),
        os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                     'sota_models_stl10-32-dev_validation', 'V001_VGG11_ref_0')
    ]
    classifiers_id = [
        'V001_VGG13_ref_0', 'V001_ResNeXt29_32x4d_ref_0', 'V001_VGG11_ref_0'
    ]
    thresholds = [0.1, 0.4]
    trigger_ids = [
        'trigger_classifier_0.1_V001_VGG13_ref_0',
        'trigger_classifier_0.4_V001_ResNeXt29_32x4d_ref_0'
    ]

    chain = build_chain(classifiers, classifiers_id, thresholds, trigger_ids,
                        'test_ga_chain')
    r = se.evaluate(chain, chain.get_start())
    assert acc_result == r.test[
        'system'].accuracy, "ERROR: Accuracy of GA solution should be the same as the manual"
    assert time_result == r.test[
        'system'].time, "ERROR: Time of GA solution should be the same as the manual"

    print("Genetic Algorithm")
    print(acc_result)
    print(time_result)
    print("Built")
    print(r.test['system'].accuracy)
    print(r.test['system'].time)
Example #29
    # ENSEMBLE SKELETON
    chain0 = build_chain(
        [c0_file, c1_file, c2_file],
        ['V001_DenseNet_s3_71', 'V001_DenseNet_s1_3', 'V001_DenseNet_s2_32'],
        [1.1, 1.1], [
            'trigger_classifier_1.1_V001_DenseNet_s3_71',
            'trigger_classifier_1.1_V001_DenseNet_s1_3'
        ], 'test_crossover_chain0')

    chain1 = build_chain([c0_file, c2_file],
                         ['V001_DenseNet_s3_71', 'V001_DenseNet_s2_32'], [0.8],
                         ['trigger_classifier_0.8_V001_DenseNet_s3_71'],
                         'test_crossover_chain1')

    R_chain0 = eval.evaluate(chain0, chain0.get_start())
    R_chain1 = eval.evaluate(chain1, chain1.get_start())

    c = ob.singlepoint_crossover(chain0, chain1, 'V001_DenseNet_s1_3',
                                 'V001_DenseNet_s3_71')

    from Examples.compute.chain_genetic_algorithm.utils import generate_system_id

    print(generate_system_id(c[0]))
    print("---------------------------")
    print(generate_system_id(c[1]))

    R = eval.evaluate(c[0], c[0].get_start())
    R_ = eval.evaluate(c[1], c[1].get_start())

    # Test that the offspring result is consistent
Example #30
import Source.system_builder as sb
import Source.protobuf.make_util as make
import Source.system_evaluator as eval

if __name__ == "__main__":

    Classifier_Path = "../../Definitions/Classifiers/"
    classifier_file = "DenseNet121_cifar10.pkl"

    # Creating system
    sys = sb.SystemBuilder(verbose=False)
    smallClassifier = make.make_classifier("Classifier", Classifier_Path+classifier_file)
    sys.add_classifier(smallClassifier)
    results = eval.evaluate(sys, "Classifier", check_classifiers=True)
    eval.pretty_print(results)