Example No. 1
    def test_accuracy(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # System Evaluator computes accuracy
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        acc = R.test['system'].accuracy

        # Compute accuracy manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        gt = c_dict_0['test']['gt']
        logits_0 = c_dict_0['test']['logits']
        logits_1 = c_dict_1['test']['logits']

        average = (logits_0 + logits_1) / 2
        acc_manual = np.sum(np.argmax(average, 1) == gt) / len(gt)

        self.assertEqual(acc, acc_manual)
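As a side note, the manual check above just averages the two models' logits and takes the argmax per sample; a self-contained toy version of that arithmetic (made-up logits and labels) looks like this:

import numpy as np

gt = np.array([0, 1, 2])
logits_0 = np.array([[2.0, 0.1, 0.1], [0.2, 1.5, 0.3], [0.1, 0.2, 0.3]])
logits_1 = np.array([[1.0, 0.3, 0.2], [0.1, 2.0, 0.4], [0.9, 0.1, 0.2]])

average = (logits_0 + logits_1) / 2
accuracy = np.sum(np.argmax(average, 1) == gt) / len(gt)
print(accuracy)  # 2 of 3 samples classified correctly -> 0.666...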
Example No. 2
    def test_time_threshold_1(self):

        self.t1.update_threshold(1.1)
        self.t2.update_threshold(1.1)

        metadata1 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_DenseNet121_ref_0.pkl"
        )
        metadata2 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_DenseNet121_ref_0.pkl"
        )
        metadata3 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_MobileNetV2_ref_0.pkl"
        )

        ids = metadata1["test"]["id"]
        inference_t = metadata1["metrics"]["time"]/128.0 + \
                     metadata2["metrics"]["time"]/128.0 + \
                     metadata3["metrics"]["time"]/128.0
        true_time = len(ids) * inference_t
        input = torch.arange(len(ids)).long()
        self.ensemble(input)
        self.assertEqual(self.ensemble.get_processing_time(), true_time)
Example No. 3
def check_valid_classifier_metrics(c):
    classifier_dict = io.read_pickle(c.classifier_file, verbose=False)
    if 'time' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: Time measurement not found, set to 0 instead")
        classifier_dict['metrics']['time'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'times' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: Times list measurement not found, set to time instead")
        classifier_dict['metrics']['times'] = np.array([classifier_dict['metrics']['time']])
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'params' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: # Params measurement not found, set to 0 instead")
        classifier_dict['metrics']['params'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)
    if 'ops' not in classifier_dict['metrics']:
        warnings.warn("WARNING in Classifier: # Ops measurement not found, set to 0 instead")
        classifier_dict['metrics']['ops'] = 0
        io.save_pickle(c.classifier_file, classifier_dict)

    classifier_dict = io.read_pickle(c.classifier_file, verbose=False)

    assert classifier_dict['metrics']['time'] >= 0, "ERROR in Classifier: Time should be positive"
    assert np.all(np.array(classifier_dict['metrics']['times']) >= 0) and len(classifier_dict['metrics']['times']) > 0, \
        "ERROR in Classifier: Times should be positive and non-empty"
    assert classifier_dict['metrics']['params'] >= 0, "ERROR in Classifier: # Params should be positive"
    assert classifier_dict['metrics']['ops'] >= 0, "ERROR in Classifier: # Ops should be positive"
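For reference, a minimal sketch of the metrics layout these checks assume; the numbers below are made up, and the io helpers that read and write the classifier pickle are omitted:

import numpy as np

# Hypothetical 'metrics' block with the keys check_valid_classifier_metrics expects.
metrics = {
    'time': 0.35,                     # latency of one batch, in seconds (assumed unit)
    'times': np.array([0.35, 0.36]),  # individual time measurements
    'params': 7978856,                # parameter count
    'ops': 2834161664,                # operation count
}

assert metrics['time'] >= 0
assert np.all(np.array(metrics['times']) >= 0) and len(metrics['times']) > 0
assert metrics['params'] >= 0 and metrics['ops'] >= 0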
Example No. 4
def plot_paretto_fronts(n):
    toPlot = io.read_pickle("./results/models")
    myplt.plot_accuracy_time(toPlot)

    for i in range(n + 1):
        print(i)
        toPlot = dict(
            (k, v)
            for k, v in io.read_pickle("./results/front" + str(i)).items()
            if k not in toPlot)
        myplt.plot_accuracy_time(toPlot, system_color=myplt.color[i])
    """
Example No. 5
def sort_models_params(models):
    size = [c_dict['metrics']['params'] for c_dict in [io.read_pickle(model) for model in models]]
    position = np.argsort(size)
    models_sorted = ["" for x in range(len(models))]
    for i in range(len(models)):
        models_sorted[position[i]] = models[i]
    return models_sorted
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:  # phase == "val":
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    L = dataset['logits']
    P = softmax(L)
    ths = dataset['ths']
    gt = dataset['gt']
    ids = dataset['id']

    sort = np.sort(P, axis=1)
    diff = sort[:, -1] - sort[:, -2]

    sample_size = len(ids)
    Y = np.zeros((sample_size, len(ths) + 1))  # Predictions for the trigger

    Or = np.zeros(sample_size)
    for ith, th in enumerate(ths):
        Y[:, ith] = diff < th
        Or = np.logical_or(Or, Y[:, ith])

    # Executed by the classifier before the trigger if none of the thresholds matches
    Y[:, -1] = np.logical_not(Or)

    raw_data = make.make_classifier_raw_data(Y, gt == np.argmax(L, axis=1),
                                             ids)
    return raw_data
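The trigger above fires on the gap between the two highest softmax probabilities; a standalone sketch of that margin computation with toy logits and an arbitrary threshold:

import numpy as np

def softmax(logits):
    e = np.exp(logits - logits.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

logits = np.array([[2.0, 1.9, 0.1],   # low-margin sample: top-2 probabilities are close
                   [5.0, 0.5, 0.2]])  # high-margin sample: the model is confident
P = softmax(logits)
sorted_P = np.sort(P, axis=1)
margin = sorted_P[:, -1] - sorted_P[:, -2]
print(margin < 0.2)  # [ True False ]: only the first sample would be forwarded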
Example No. 7
def check_valid_classifier(c):
    if c.HasField('classifier_file'):
        classifier_dict = io.read_pickle(c.classifier_file, verbose=False)
        check_valid_classifier_structure(classifier_dict)
        check_valid_classifier_raw_data(classifier_dict)
        check_valid_classifier_metrics(c)
    else:
        raise Exception("ERROR in Classifier: classifier_file field should be specified.")
def plot_paretto_fronts(n, res_dir):
    toPlot = io.read_pickle(os.path.join(res_dir, "models"))
    plt.figure(0)
    plt.xscale("log")
    myplt.plot_accuracy_time(toPlot)

    toPlot = io.read_pickle(os.path.join(res_dir, "front0"))

    for i in range(1, n + 1):
        toPlot = dict(
            ("system_" + k, v)
            for k, v in io.read_pickle(os.path.join(res_dir, "front" +
                                                    str(i))).items()
            if k not in toPlot)
        myplt.plot_accuracy_time(toPlot, system_color=myplt.color[i])
    plt.legend("Iteration 1", "Iteration 2")
    plt.show()
def update_dataset(c_file, train_path, test_path, val_path, th):
    # Create dataset
    model = io.read_pickle(c_file)
    # Test
    dataset_test = model['test']
    if th >= 0: dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)
    """
    def __init__(self, path_to_pickle: str, split=Split.VAL):
        # Call component's class constructor
        self.path = path_to_pickle
        self.split = split
        metadata = read_pickle(self.path)
        parameters = metadata['metrics']['params']
        super().__init__(p=parameters)
        # This way predictions can be manipulated on GPU
        self.register_buffer("predictions", None)
def evaluate(sys, results, c, check_classifiers, classifier_dict, input_ids,
             phase):
    contribution = {}

    c_dict = io.read_pickle(c.classifier.classifier_file)
    L, gt, ids = eval_utils.get_Lgtid(c_dict, phase, input_ids)
    n_inputs = len(input_ids) if input_ids is not None else len(
        c_dict[phase]['id'])
    predictions_trigger = np.argmax(L, axis=1)

    results[c.id] = eval.create_metrics_classifier(c_dict, predictions_trigger,
                                                   gt, n_inputs)
    eval.update_metrics_system(results,
                               c_dict,
                               n_inputs,
                               input_ids=input_ids,
                               phase=phase)

    contribution['gt'] = {}
    contribution['predictions'] = {}
    contribution['logits'] = {}
    contribution['time_instance'] = {}
    contribution['model'] = {}

    for i, c_id in enumerate(c.component_ids):
        c_next = sys.get(c_id)
        mask = (predictions_trigger == i)
        ids_next = ids[mask]

        contribution_component = eval.__evaluate(sys,
                                                 results,
                                                 c_next,
                                                 check_classifiers,
                                                 classifier_dict,
                                                 input_ids=ids_next,
                                                 phase=phase)
        contribution['gt'].update(contribution_component['gt'])
        contribution['predictions'].update(
            contribution_component['predictions'])
        contribution['logits'].update(contribution_component['logits'])

        if contribution['model']:
            contribution['model'].update(
                dict([(k, contribution['model'][k] + v)
                      for k, v in contribution_component['model'].items()]))
        else:
            contribution['model'].update(contribution_component['model'])

        if classifier_dict is not None:
            contribution['logits'].update(contribution_component['logits'])
            for k, v in contribution_component['time_instance'].items():
                if k in contribution['time_instance']:
                    contribution['time_instance'][k] += v
                else:
                    contribution['time_instance'][k] = v

    return contribution
Example No. 12
    def test_parameter_count(self):
        metadata1 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_DenseNet121_ref_0.pkl"
        )
        metadata2 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_DenseNet121_ref_0.pkl"
        )
        metadata3 = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_MobileNetV2_ref_0.pkl"
        )

        true_count = metadata1["metrics"]["params"] + \
                     metadata2["metrics"]["params"] + \
                     metadata3["metrics"]["params"]

        self.assertEqual(self.ensemble.get_num_parameters(), true_count)
def update_dataset(model_file, th, train_path, test_path):
    model = io.read_pickle(model_file)
    # Test
    dataset_test = model['test']
    dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)
    # Train
    dataset_train = model['train']
    dataset_train['th'] = th
    io.save_pickle(train_path, dataset_train)
    def test_threshold_1(self):
        chain = self.__create_ensemble()
        c_id_extend = 'DenseNet-161'
        c_file_extend = os.path.join(os.environ['FCM'], 'Definitions',
                                     'Classifiers',
                                     'sota_models_cifar10-32-dev_validation',
                                     'V001_DenseNet161_ref_0.pkl')
        extend_merged_chain(chain, 'ResNet18', c_id_extend, 1.1, c_file_extend)
        R = evaluate(chain, chain.get_start())
        acc_chain = R.test['system'].accuracy
        time_chain = R.test['system'].time
        ops_chain = R.test['system'].ops
        params_chain = R.test['system'].params

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(
            os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                         'sota_models_cifar10-32-dev_validation',
                         'V001_ResNet18_ref_0.pkl'))
        acc_net0 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])
        time_net0 = dict_classifier['metrics']['time']
        params_net0 = dict_classifier['metrics']['params']
        ops_net0 = dict_classifier['metrics']['ops']

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(c_file_extend)
        acc_net1 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])
        time_net1 = dict_classifier['metrics']['time']
        params_net1 = dict_classifier['metrics']['params']
        ops_net1 = dict_classifier['metrics']['ops']

        correct = acc_chain == acc_net1 and \
                  math.isclose(time_net0/128 + time_net1/128, time_chain/5e3) and \
                  ops_chain/5e3 == ops_net0 + ops_net1 and \
                  params_chain == params_net0 + params_net1 + 1

        self.assertEqual(correct, True)
Example No. 15
    def test_time(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # Evaluation time of ensemble
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        time = R.test['system'].time

        # Compute CIFAR-10 evaluation time manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        time_0 = c_dict_0['metrics']['time']/128 * 5e3
        time_1 = c_dict_1['metrics']['time']/128 * 5e3

        self.assertAlmostEqual(time_1+time_0, time)
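The 128 and 5e3 constants encode the assumption that metrics['time'] is the latency of a 128-image batch and that the test split used here holds 5 000 images; the per-split time is then just a rescaling:

# Same arithmetic as the test above, with a made-up batch latency.
time_per_batch_128 = 0.42      # seconds for a 128-image batch (hypothetical)
n_test_images = 5e3            # test-split size assumed by these tests
split_time = time_per_batch_128 / 128 * n_test_images
print(split_time)              # ~16.4 seconds for the whole split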
Example No. 16
    def test_params(self):
        ensemble = create_ensemble()

        c_file = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                              'V001_DenseNet161_ref_0.pkl')
        c_file2 = os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                               'V001_ResNet18_ref_0.pkl')

        # System Evaluator computes the parameter count
        add_classifier_to_merger(ensemble, 'Merger', 'DenseNet161', c_file)
        R = evaluate(ensemble, ensemble.get_start())
        params = R.test['system'].params

        # Compute parameter count manually
        c_dict_0 = io.read_pickle(c_file2)
        c_dict_1 = io.read_pickle(c_file)

        params_0 = c_dict_0['metrics']['params']
        params_1 = c_dict_1['metrics']['params']

        self.assertEqual(params_0+params_1, params)
Example No. 17
def evaluate_pytorch(ensemble: System, phases: List[Split], dataset=None, device='cpu') -> Results:

    import torch

    eval = Results()
    eval.train = dict()
    eval.test = dict()
    eval.val = dict()

    eval.test['system'] = Metrics()
    eval.train['system'] = Metrics()
    eval.val['system'] = Metrics()

    ensemble.to(device)

    metadata = io.read_pickle(ensemble.get_classifiers()[0].get_model())

    if Split.TRAIN in phases:
        ensemble.set_evaluation_split(Split.TRAIN)
        input = torch.arange(len(metadata["train"]["id"])).to(device)
        gt_labels = torch.tensor(metadata["train"]["gt"]).to(device)
        # Run
        predictions = ensemble(input)  # Evaluate ensemble
        # Performance
        eval.train['system'].accuracy = (torch.sum(torch.eq(predictions.argmax(dim=1), gt_labels)).float() / gt_labels.numel()).item()
        eval.train['system'].time = ensemble.get_processing_time()
        eval.train['system'].params = ensemble.get_num_parameters()

    if Split.TEST in phases:
        ensemble.set_evaluation_split(Split.TEST)
        input = torch.arange(len(metadata["test"]["id"])).to(device)
        gt_labels = torch.tensor(metadata["test"]["gt"]).to(device)
        # Run
        predictions = ensemble(input)  # Evaluate ensemble
        # Performance
        eval.test['system'].accuracy = (torch.sum(torch.eq(predictions.argmax(dim=1), gt_labels)).float() / gt_labels.numel()).item()
        eval.test['system'].time = ensemble.get_processing_time()
        eval.test['system'].params = ensemble.get_num_parameters()

    if Split.VAL in phases:
        ensemble.set_evaluation_split(Split.VAL)
        input = torch.arange(len(metadata["test"]["id"])).to(device)
        gt_labels = torch.tensor(metadata["test"]["gt"]).to(device)
        # Run
        predictions = ensemble(input)  # Evaluate ensemble
        # Metrics
        eval.val['system'].accuracy = (torch.sum(torch.eq(predictions.argmax(dim=1), gt_labels)).float() / gt_labels.numel()).item()
        eval.val['system'].time = ensemble.get_processing_time()
        eval.val['system'].params = ensemble.get_num_parameters()

    return eval
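The per-split accuracy above is just the mean of an elementwise equality between argmax predictions and ground-truth labels; a minimal standalone version with toy tensors:

import torch

predictions = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # toy logits
gt_labels = torch.tensor([1, 0, 0])

accuracy = (torch.sum(torch.eq(predictions.argmax(dim=1), gt_labels)).float()
            / gt_labels.numel()).item()
print(accuracy)  # 2 of 3 correct -> 0.666...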
Example No. 18
def evaluate(work, records, tid):
    lock = Lock()
    while True:

        m, th, m_ = work.get()
        print(m, th, m_)

        small_dict = io.read_pickle(m)
        test_images = len(small_dict['test']['gt'])
        train_images = len(small_dict['train']['gt'])
        data_path = "../../Data/"
        trigger_train_dataset = os.path.join(data_path,
                                             "train_trigger_" + str(tid))
        test_train_dataset = os.path.join(data_path,
                                          "test_trigger_" + str(tid))

        sys = sb.SystemBuilder(verbose=False)

        # Complex classifier
        bigClassifier = make.make_classifier("big", m_)
        sys.add_classifier(bigClassifier)

        # Data
        source = make.make_source(trigger_train_dataset, test_train_dataset, 2)
        data = make.make_data("trigger_data",
                              train_images,
                              test_images,
                              source=source)
        sys.add_data(data)
        update_dataset(m, th, trigger_train_dataset, test_train_dataset)

        # Trigger
        trigger = make.make_trigger(
            "trigger",
            make.make_empty_classifier(data_id="trigger_data"), ["big"],
            model="probability")
        sys.add_trigger(trigger)

        # Simple classifier
        smallClassifier = make.make_classifier("small", m, "trigger")
        sys.add_classifier(smallClassifier)

        results = eval.evaluate(sys, "small", check_classifiers=False)
        records["system_" + m + ';' + m_ + ';' + str(th)] = results.test

        lock.acquire()
        if m_ not in records:  # Evaluate individual models in order to plot
            records[m_] = eval.evaluate(sys, 'big').test
        lock.release()

        work.task_done()
Example No. 19
    def test_accuracy(self):
        chain = create_chain()
        extend_chain(chain)
        replace(chain)
        R = evaluate(chain, chain.get_start())

        # Accuracy classifier by hand
        dict_classifier = io.read_pickle(
            os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                         'sota_models_cifar10-32-dev_validation',
                         'V001_DenseNet169_ref_0.pkl'))
        acc_net0 = np.sum(
            np.argmax(dict_classifier['test']['logits'], 1) ==
            dict_classifier['test']['gt']) / len(dict_classifier['test']['gt'])

        self.assertEqual(acc_net0, R.test['system'].accuracy)
Example No. 20
    def test_predictions_threshold_1(self):

        self.t1.update_threshold(1.1)
        self.t2.update_threshold(1.1)

        # Dataset info
        metadata = read_pickle(
            os.environ["FCM"] +
            "/Definitions/Classifiers/sota_models_cifar10-32-dev_validation/V001_DenseNet121_ref_0.pkl"
        )
        # time = metadata["test"]["gt"]
        ids = metadata["test"]["id"]
        input = torch.arange(len(ids)).long()
        predictions = self.ensemble(input)
        predictions_classifier_c3 = self.c3(input)
        equal = torch.equal(predictions, predictions_classifier_c3)
        self.assertEqual(True, equal)
Example No. 21
def update_dataset(c_file, train_path, test_path, val_path, th):
    # Create dataset
    model = io.read_pickle(c_file)

    # Test
    dataset_test = model['test']
    if th >= 0: dataset_test['th'] = th
    io.save_pickle(test_path, dataset_test)

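    # Train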
    dataset_train = model['train']
    if th >= 0: dataset_train['th'] = th
    io.save_pickle(train_path, dataset_train)

    # Validation
    dataset_val = model['val']
    if th >= 0: dataset_val['th'] = th
    io.save_pickle(val_path, dataset_val)
Example No. 22
    def forward(self, x=None):
        if len(self.get_classifiers()) == 0:
            raise ValueError("System empty of classifiers")

        if x is None:
            metadata = read_pickle(self.get_classifiers()[0].get_model())
            if self.split == Split.TRAIN:
                num_samples = len(metadata["train"]["id"])
                x = arange(num_samples).long()
            elif self.split == Split.TEST:
                num_samples = len(metadata["test"]["id"])
                x = arange(num_samples).long()
            elif self.split == Split.VAL:
                num_samples = len(metadata["val"]["id"])
                x = arange(num_samples).long()
            else:
                raise ValueError("Dataset split not recognized")

        return self.graph(x)
    def forward(self, ids: torch.LongTensor) -> torch.Tensor:
        """
        Receives the indices of a set of samples, and returns the precomputed predictions of those samples
        :param ids: Index of samples in the dataset
        :return: Predictions of the classifier for those indices
        """
        metadata = read_pickle(self.path)
        time_batch_128 = metadata['metrics']['time']
        if self.split == Split.TRAIN:
            precomputed_pred = metadata['train']['logits']
        elif self.split == Split.TEST:
            precomputed_pred = metadata['test']['logits']
        else:
            precomputed_pred = metadata['val']['logits']

        precomputed_pred = torch.from_numpy(precomputed_pred)
        self.update_processing_time(ids.numel() * time_batch_128 / 128.0)
        self.predictions = precomputed_pred[ids].to(ids.device)
        return self.predictions
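The forward pass above amounts to fancy-indexing a matrix of precomputed logits with the requested sample ids and charging a time proportional to the batch size; a small standalone sketch with toy data and an assumed batch-of-128 latency:

import numpy as np
import torch

precomputed = torch.from_numpy(np.random.rand(10, 5))  # 10 samples, 5 classes (toy)
ids = torch.tensor([3, 7, 1])                           # indices of the requested samples
time_batch_128 = 0.35                                   # hypothetical latency of a 128-image batch

batch_predictions = precomputed[ids]                    # one row of logits per requested sample
elapsed = ids.numel() * time_batch_128 / 128.0          # scale the batch latency to this batch size
print(batch_predictions.shape, elapsed)                 # torch.Size([3, 5]) 0.0082...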
Example No. 24
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    P = dataset['predictions']  # Models for voting protocol
    n = dataset['n']
    gt = dataset['gt']
    ids = dataset['ids']

    agree = np.apply_along_axis(np.bincount, 1, P, None, P.shape[2])
    agree_max = np.max(agree, axis=1)
    y = np.transpose(np.append(np.array([agree_max < n]), np.array([agree_max >= n]), axis=0))
    raw_data = make.make_classifier_raw_data(y, gt == np.argmax(P, axis=1), ids)
    return raw_data
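A simplified, two-dimensional sketch of the agreement count behind this voting trigger (the snippet above works on its own predictions layout; here votes[i, j] is simply the class predicted by model j for sample i):

import numpy as np

n_classes = 4
votes = np.array([[0, 0, 2],   # two of three models agree on class 0
                  [1, 2, 3]])  # no two models agree
counts = np.apply_along_axis(np.bincount, 1, votes, None, n_classes)
agree_max = counts.max(axis=1)  # size of the largest agreeing group per sample
print(agree_max)                # [2 1]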
Example No. 25
def __get_trigger_raw_data(data, phase):

    if phase == "test":
        data_source = data.source.test_path
    elif phase == "train":
        data_source = data.source.train_path
    else:
        data_source = data.source.val_path

    dataset = io.read_pickle(data_source)

    L = dataset['logits']
    P = softmax(L)
    th = dataset['th']
    gt = dataset['gt']
    ids = dataset['id']

    sort = np.sort(P, axis=1)
    diff = np.array(sort[:, -1] - sort[:, -2])
    y = np.column_stack((diff < th, diff >= th)).astype(int)
    raw_data = make.make_classifier_raw_data(y, gt == np.argmax(L, axis=1),
                                             ids)
    return raw_data
Example No. 26
sys = sb.SystemBuilder(verbose=True)

name2 = m_
bigClassifier = make.make_empty_classifier("big")
bigClassifier.classifier_file = name2
sys.add_classifier(bigClassifier)

trigger = make.make_trigger("probability_threshold_trigger",
                            make.make_empty_classifier(), ["big"])
sys.add_trigger(trigger)

name1 = m
smallClassifier = make.make_empty_classifier("small",
                                             "probability_threshold_trigger")
model1_dict = io.read_pickle(name1, suffix="", verbose=False)
smallClassifier.classifier_file = name1
sys.add_classifier(smallClassifier)

classifier_trigger = build_train_trigger(model1_dict, th)
trigger = make.make_trigger("probability_threshold_trigger",
                            classifier_trigger, ["big"])
trigger.id = "probability_threshold_trigger"
sys.replace(trigger.id, trigger)

metrics = eval.evaluate(sys, "small", check_classifiers=False)
eval.pretty_print(metrics)

# cl = sys.build_classifier_dict("small")
# io.save_pickle(Classifier_Path+"system_"+net1+"_"+net2+"th=0.7", cl)
Example No. 27
    X = [record[key].test['system'].params for key in ref_keys]
    Y = [record[key].test['system'].accuracy for key in ref_keys]
    plt.scatter(X, Y, color='black', s=20)


def show():
    plt.legend()
    plt.show()


def save(f):
    plt.savefig(f)


if __name__ == "__main__":
    method = 0
    plt.figure(0)
    toPlot_chain = io.read_pickle(
        "./probability_threshold/results/imagenet/R_all")

    plt.figure(0)
    plot_accuracy_parameters(toPlot_chain, system_color='blue')
    #plot_accuracy_parameters(toPlot, system_color='green')

    plt.figure(1)
    plot_accuracy_time(toPlot_chain, system_color='blue')
    #plot_accuracy_time(toPlot, system_color='green')

    plt.show()
    # plot_accuracy_time(result_AVERAGE_4_models)
    multinode_meta_file = os.path.join(
        os.environ['FCM'], 'Examples', 'compute',
        'genetic_algorithm_multinode', 'results',
        'sota_models_cifar10-40-dev_validation',
        'cifar10_8nodes_800population_400offspring_0',
        'multinode_metainfo.json')
    execution_time = 0
    n_nodes = 8
    R_all = {}

    with open(multinode_meta_file, 'r') as handle:
        results = json.load(handle)
        for i, iteration in enumerate(results):
            max_exec_offspring_time = sum(get_n_maximum(iteration, 1))
            selection_time = iteration['selection']['exec_time']
            execution_time += max_exec_offspring_time + selection_time

            # Gather the results
            R_iter = io.read_pickle(
                os.path.join(os.environ['FCM'], iteration['selection']['R']))
            R_all.update(R_iter)

            print("Iteration %d: Offspring+evaluate %f, Selection %f" %
                  (i, max_exec_offspring_time, selection_time))

    print(R_all.keys())
    print("Execution time: %f" % execution_time)

    # Plot solutions
    import Examples.study.plot as myplt
    myplt.plot_accuracy_time_old(R_all)
    myplt.show()
Example No. 29
def get_classifier_name(c_file):
    return io.read_pickle(c_file)['name']

if __name__ == "__main__":

    id_fastest = "__trigger_classifier_0.10000000000000003_V001_VGG13_ref_0____trigger_classifier_0" \
                 ".4_V001_ResNeXt29_32x4d_ref_0__V001_VGG11_ref_0"

    experiment_dir = os.path.join(os.environ['FCM'], 'Examples', 'compute',
                                  'chain_genetic_algorithm')
    metadata_file = os.path.join(experiment_dir, 'results', 'metadata.json')

    id = "7062152700584889"
    dataset = results_manager.get_fieldval_by_id(metadata_file, id, 'dataset')
    results_chain_path = os.path.join(
        experiment_dir, results_manager.get_results_by_id(metadata_file, id))
    results_chain = io.read_pickle(results_chain_path)
    fastest = results_chain[id_fastest]
    acc_result = fastest.test['system'].accuracy
    time_result = fastest.test['system'].time

    # Build same ensemble
    classifiers = [
        os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                     'sota_models_stl10-32-dev_validation',
                     'V001_VGG13_ref_0'),
        os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                     'sota_models_stl10-32-dev_validation',
                     'V001_ResNeXt29_32x4d_ref_0'),
        os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers',
                     'sota_models_stl10-32-dev_validation', 'V001_VGG11_ref_0')
    ]