def train_trigger(sys, c):
    """
    Trains a trigger and creates its classifier whose name will be classifier's id and will be saved in
    Definitions/Classifiers/tmp
    :param sys: System of components
    :param c: Trigger protobuf definition
    :return: -
    """

    assert c.HasField(
        "classifier"
    ), "ERROR in TRIGGER.CLASSIFIER: A classifier should be specified for the trigger"
    train_trigger = not c.classifier.HasField(
        "classifier_file") or c.classifier.classifier_file == ""
    if train_trigger:
        assert c.HasField("model"), \
            "ERROR in TRIGGER: A model must be specified when training the trigger"
        assert c.classifier.HasField("data_id"), \
            "ERROR in TRIGGER.CLASSIFIER: Training data should be specified when training the trigger"

        module = importlib.import_module('Definitions.triggers.' + c.model)
        c_dict = module.train_fit(sys, c.id)
        if 'TMP' in os.environ:
            tmp_location = os.path.join(os.environ['FCM'],
                                        os.environ['TMP'] + '/')
        else:
            tmp_location = os.path.join(os.environ['FCM'],
                                        'Definitions/Classifiers/tmp/')
        classifier_file = c.id
        io.save_pickle(tmp_location + classifier_file, c_dict)
        c.classifier.classifier_file = tmp_location + classifier_file
        sys.replace(c.id, c)
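# Hedged usage note (a minimal sketch, not part of the original source): the trained
# trigger classifier is written to $FCM/Definitions/Classifiers/tmp/<trigger id> unless
# a TMP environment variable overrides the destination. The paths below are hypothetical.
import os

os.environ['FCM'] = '/home/user/FCM'                      # hypothetical project root
os.environ['TMP'] = 'Definitions/Classifiers/my_run_tmp'  # optional override, joined to FCM
tmp_location = os.path.join(os.environ['FCM'], os.environ['TMP'] + '/')
print(tmp_location)  # /home/user/FCM/Definitions/Classifiers/my_run_tmp/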
def save_results(meta_file, meta_data_results, params, R):
    """
    Saves the results pickle and checks that the meta-data information about the results is
    correct and up to date.

    :param meta_file: Storage of all the ensemble results for a particular experiment/ensemble type.
    :param meta_data_results: Meta-data information of a new result
    :param params: Params of the experiment
    :param R: Dictionary of ensemble results to be pickled
    :return: None
    """

    assert os.path.isabs(meta_file), "ERROR Metadata Results: File meta_file should be an absolute path to the file"
    assert meta_data_results['results'] != "" and meta_data_results['results'] is not None, \
        "ERROR Metadata Results: meta_data_results['results'] must be a non-empty path"

    if not os.path.exists(meta_file):
        with open(meta_file, 'w') as file:
            json.dump([], file)

    save_path = os.path.join(os.environ['FCM'], meta_data_results['results'])
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # 1- Save R (pickle of the ensemble results)
    io.save_pickle(os.path.join(save_path, "results_ensembles.pkl"), R)

    # 2- Open the meta_data file and update it
    meta_data_results['params'] = params
    with open(meta_file, 'r') as file:
        meta_data = json.load(file)

    with open(meta_file, 'w') as file:
        meta_data.append(meta_data_results)
        json.dump(meta_data, file, indent=4)
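# Hedged usage sketch mirroring how save_results is called later in this listing.
# It assumes this helper lives in Examples/metadata_manager_results.py (as those calls
# suggest); the id, paths, params and results dictionary are illustrative only.
import os
import Examples.metadata_manager_results as manager_results

meta_file = os.path.join(os.environ['FCM'], 'Examples', 'compute',
                         'genetic_algorithm_multinode', 'results', 'metadata.json')
results_loc = 'Examples/compute/genetic_algorithm_multinode/results/cifar10/42'  # hypothetical
meta_data_result = manager_results.metadata_template('42', 'cifar10', results_loc, 'demo run')
params = {'dataset': 'cifar10', 'iterations': 10}   # hypothetical experiment parameters
R_dict = {'ensemble_0': {'accuracy': 0.91}}         # hypothetical ensemble results
manager_results.save_results(meta_file, meta_data_result, params, R_dict)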
def update_dataset(c_file, train_path, test_path, val_path, th):
    # Create dataset
    model = io.read_pickle(c_file)
    # Test
    dataset_test = model['test']
    if th >= 0: dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)
    """
def update_dataset(model_file, th, train_path, test_path):
    model = io.read_pickle(model_file)
    # Test
    dataset_test = model['test']
    dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)
    # Train
    dataset_train = model['train']
    dataset_train['th'] = th
    io.save_pickle(train_path, dataset_train)
Example #5
def check_valid_classifier_metrics(c):
    classifier_dict = io.read_pickle(c.classifier_file, verbose=False)
    if 'time' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: Time measurement not found, set to 0 instead")
        classifier_dict['metrics']['time'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'times' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: Times list measurement not found, set to time instead")
        classifier_dict['metrics']['times'] = np.array([classifier_dict['metrics']['time']])
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'params' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: # Params measurement not found, set to 0 instead")
        classifier_dict['metrics']['params'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'ops' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: # Ops measurement not found, set to 0 instead")
        classifier_dict['metrics']['ops'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)

    classifier_dict = io.read_pickle(c.classifier_file, verbose=False)

    assert classifier_dict['metrics']['time'] >= 0, "ERROR in Classifier: Time should be non-negative"
    assert np.all(np.array(classifier_dict['metrics']['times']) >= 0) and len(classifier_dict['metrics']['times']) > 0, \
        "ERROR in Classifier: Times should be a non-empty list of non-negative values"
    assert classifier_dict['metrics']['params'] >= 0, "ERROR in Classifier: # Params should be non-negative"
    assert classifier_dict['metrics']['ops'] >= 0, "ERROR in Classifier: # Ops should be non-negative"
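# For reference, a hedged sketch of the metrics layout this check expects inside the
# classifier pickle; the keys match the checks above, the values are illustrative only.
import numpy as np
import Source.io_util as io

classifier_dict = {
    'metrics': {
        'time': 0.012,                      # time measurement (assumed seconds)
        'times': np.array([0.011, 0.013]),  # per-measurement times
        'params': 1.2e6,                    # number of parameters
        'ops': 3.4e8,                       # number of operations
    }
}
io.save_pickle('/tmp/example_classifier.pkl', classifier_dict)  # hypothetical location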
Example #6
def build_train_trigger(model1_dict, th):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]  # Softmax probabilities
    sort = np.sort(P, axis=1)  # Sort class probabilities
    diff = sort[:, -1] - sort[:, -2]  # Margin between the two most probable classes
    logits_trigger = np.empty((diff.shape[0], 2))
    logits_trigger[:, 0] = diff < th  # Margin below the threshold
    logits_trigger[:, 1] = diff >= th  # Margin at or above the threshold

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]
    sort = np.sort(P, axis=1)  # Sort class probabilities
    diff = sort[:, -1] - sort[:, -2]  # Difference
    logits_trigger = np.empty((diff.shape[0], 2))
    logits_trigger[:, 0] = diff < th
    logits_trigger[:, 1] = diff >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold.pkl',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold.pkl")
    return classifier
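# A small self-contained sketch of the softmax-margin rule used above, on toy logits;
# the threshold value and the example logits are arbitrary.
import numpy as np

th = 0.3
L = np.array([[2.0, 1.9, 0.1],    # uncertain sample: top-2 probabilities are close
              [4.0, 0.5, 0.2]])   # confident sample: one class dominates
P = np.exp(L) / np.sum(np.exp(L), axis=1)[:, None]  # softmax probabilities
sort = np.sort(P, axis=1)
diff = sort[:, -1] - sort[:, -2]  # margin between the two most probable classes
logits_trigger = np.stack([diff < th, diff >= th], axis=1).astype(float)
print(diff)            # approx. [0.05, 0.92]
print(logits_trigger)  # [[1., 0.], [0., 1.]] -> only the first sample falls below the threshold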
Example #7
def build_train_trigger2(model1_dict, th):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]  # Softmax probabilities
    max_P = np.max(P, axis=1)  # Confidence of the most probable class
    logits_trigger = np.empty((max_P.shape[0], 2))
    logits_trigger[:, 0] = max_P < th  # Confidence below the threshold
    logits_trigger[:, 1] = max_P >= th  # Confidence at or above the threshold

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    dividend = np.sum(np.exp(L), axis=1)
    P = np.exp(L) / dividend[:, None]
    max_P = np.max(P, axis=1)
    logits_trigger = np.empty((max_P.shape[0], 2))
    logits_trigger[:, 0] = max_P < th
    logits_trigger[:, 1] = max_P >= th

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold")
    return classifier
Example #8
def build_train_trigger3(model1_dict, p):
    classifier_trigger_dict = {}

    performance = make.make_performance_metrics(**{})

    # Train dict
    L = model1_dict['train']['logits']
    logits_trigger = np.empty((L.shape[0], 2))
    logits_trigger[:, 0] = np.random.binomial(1, p, L.shape[0])  # Fire at random with probability p
    logits_trigger[:, 1] = 1 - logits_trigger[:, 0]

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['train']['gt']

    train = make.make_classifier_raw_data(logits_trigger,
                                          (pred_model1 == gt_model1),
                                          np.copy(model1_dict['train']['id']))

    # Test dict
    L = model1_dict['test']['logits']
    logits_trigger = np.empty((L.shape[0], 2))
    logits_trigger[:, 0] = np.random.binomial(1, p, L.shape[0])
    logits_trigger[:, 1] = 1 - logits_trigger[:, 0]

    pred_model1 = np.argmax(L, axis=1)
    gt_model1 = model1_dict['test']['gt']

    test = make.make_classifier_raw_data(logits_trigger,
                                         (pred_model1 == gt_model1),
                                         np.copy(model1_dict['test']['id']))

    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle(
        '../../Definitions/Classifiers/tmp/trigger_random_threshold',
        classifier_trigger_dict)
    classifier = make.make_classifier(
        "trigger_classifier",
        "../../Definitions/Classifiers/tmp/trigger_random_threshold")
    return classifier
Example #9
def save_results(R_dict, individuals_fitness_per_generation):
    meta_data_file = os.path.join(os.environ['FCM'], 'Examples', 'compute',
                                  'genetic_algorithm_multinode', 'results',
                                  'metadata.json')

    id = str(random.randint(0, 10**16))
    results_loc = os.path.join(
        'Examples/compute/genetic_algorithm_multinode/results',
        main.args.dataset, id)
    comments = main.args.comment
    meta_data_result = manager_results.metadata_template(
        id, main.args.dataset, results_loc, comments)

    params = main.args.__dict__
    # Save dictionary of ensemble results
    manager_results.save_results(meta_data_file, meta_data_result, params,
                                 R_dict)
    # Additionally save ensemble results per generation (list)
    io.save_pickle(
        os.path.join(os.environ['FCM'], results_loc,
                     'individuals_fitness_per_generation.pkl'),
        individuals_fitness_per_generation)
Example #10
def update_dataset(c_file, train_path, test_path, val_path, th):
    # Create dataset
    model = io.read_pickle(c_file)

    # Test
    dataset_test = model['test']
    if th >= 0: dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)

    # Train
    dataset_train = model['train']
    if th >= 0: dataset_train['th'] = th
    io.save_pickle(train_path, dataset_train)

    # Validation
    dataset_val = model['val']
    if th >= 0: dataset_val['th'] = th
    io.save_pickle(val_path, dataset_val)
Example #11
        # Info about current generation
        print("Iteration %d" % iteration)
        print("TIME: Seconds per generation: %f " % (time.time() - start))

        iteration += 1

    # Save the results
    import Examples.metadata_manager_results as manager_results
    meta_data_file = os.path.join(os.environ['FCM'], 'Examples', 'compute',
                                  'bagging_boosting_of_chains_GA', 'results',
                                  'metadata.json')

    id = str(random.randint(0, 10**16))
    results_loc = os.path.join(
        'Examples/compute/bagging_boosting_of_chains_GA/results', args.dataset,
        id)
    comments = args.comment
    meta_data_result = manager_results.metadata_template(
        id, args.dataset, results_loc, comments)

    # Save the ensemble evaluation results
    R_dict_old.update(R_dict)
    params = args.__dict__
    manager_results.save_results(meta_data_file, meta_data_result, params,
                                 R_dict_old)
    io.save_pickle(
        os.path.join(os.environ['FCM'], results_loc,
                     'individuals_fitness_per_generation.pkl'),
        individuals_fitness_per_generation)
        # Creating system
        sys = sb.SystemBuilder(verbose=False)
        smallClassifier = make.make_empty_classifier("Classifier")
        sys.add_classifier(smallClassifier)

        for m_ in models:

            # Model 2
            name2 = Classifier_Path + m_
            model2 = make.make_empty_classifier()
            model2.id = "Classifier"
            model2.classifier_file = name2
            sys.replace(model2.id, model2)

            evaluate_time_start = time.time()
            results = eval.evaluate(sys, model2.id)
            eval_time = time.time() - evaluate_time_start

            print("Evaluation time:", eval_time)

            records[m_] = results

        import Source.io_util as io
        if not os.path.exists('./models_evaluation/%s' % d):
            os.makedirs('./models_evaluation/%s' % d)
        io.save_pickle('./models_evaluation/%s/models.pkl' % d, records)

        plot_accuracy_time(records)
        plt.legend()
        plt.show()
import Source.protobuf.system_builder_serializable as sb
import Source.protobuf.make_util as make
import Source.io_util as io
import os

if __name__ == "__main__":

    dset = "sota_models_cifar10-32-dev_validation"
    Classifier_Path = os.path.join(os.environ['FCM'], 'Definitions',
                                   'Classifiers', dset)

    P = []
    models = [f for f in os.listdir(Classifier_Path)]

    # Creating system
    records = {}
    for m_ in models:
        # Model 2
        sys = sb.SystemBuilder(verbose=False)
        classifier = make.make_classifier(m_,
                                          os.path.join(Classifier_Path, m_))
        sys.add_classifier(classifier)
        P.append(sys)

    io.save_pickle('initial_population', P)
Example #14
                trigger = make.make_trigger("probability_threshold_trigger",
                                            classifier_trigger, ["big"])
                sys.replace(trigger.id, trigger)

                results = eval.evaluate(sys, "small",
                                        check_classifiers=False).test
                random_times_chain.append(results)

            r['random_chain'].append(random_times_chain)

            # Random trigger tree structure
            simple = make.make_classifier("small", c_simple_file)
            sys.replace("small", simple)
            random_times_tree = []
            for i in range(5):
                classifier_trigger = build_train_trigger3(simple_dict, th)
                trigger = make.make_trigger("probability_threshold_trigger",
                                            classifier_trigger,
                                            ["big", "small"])
                sys.replace(trigger.id, trigger)
                results = eval.evaluate(sys,
                                        "probability_threshold_trigger",
                                        check_classifiers=False).test
                random_times_tree.append(results)

            r['random_tree'].append(random_times_tree)

        R["system_" + c_simple_file + "_" + c_complex_file] = r

    io.save_pickle("./results/R.pkl", R)
Example #15
        ]
        out_dir = os.path.join("./results/", dataset)
        data_path = "../../Data/"
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        #########################################################################
        import Examples.study.paretto_front as paretto

        R_models = {}
        for model in models:
            sys = sb.SystemBuilder(verbose=False)
            c = make.make_classifier("classifier", model)
            sys.add_classifier(c)
            R_models[model] = eval.evaluate(sys, c.id).test

        io.save_pickle(os.path.join(out_dir, "models"), R_models)

        front_sorted = paretto.sort_results_by_accuracy(R_models)
        models = [val[0] for val in front_sorted]
        print(models)

        sys = sb.SystemBuilder(verbose=False)
        trigger_train_dataset = os.path.join(data_path, "train_trigger_0")
        test_train_dataset = os.path.join(data_path, "test_trigger_0")

        # Model 1
        for im, m in enumerate(models):

            smallClassifier = make.make_classifier("small", m, "trigger")
            sys.replace("small", smallClassifier)