Code example #1
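A queue-driven worker: each thread repeatedly pulls a (small model, threshold, big model) job, assembles a small/big classifier pair joined by a probability trigger, evaluates the combined system, and stores the test results in a shared records dict.
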
# Assumed imports for this excerpt; the project-specific helpers
# (io, make, sb, eval, update_dataset) come from the surrounding module.
import os
from threading import Lock

lock = Lock()  # shared by all worker threads; a per-thread Lock would not synchronize


def evaluate(work, records, tid):
    while True:

        m, th, m_ = work.get()  # job: (small-model file, threshold, big-model file)
        print(m, th, m_)

        small_dict = io.read_pickle(m)
        test_images = len(small_dict['test']['gt'])
        train_images = len(small_dict['train']['gt'])
        data_path = "../../Data/"
        trigger_train_dataset = os.path.join(data_path,
                                             "train_trigger_" + str(tid))
        test_train_dataset = os.path.join(data_path,
                                          "test_trigger_" + str(tid))

        sys = sb.SystemBuilder(verbose=False)

        # Complex classifier
        bigClassifier = make.make_classifier("big", m_)
        sys.add_classifier(bigClassifier)

        # Data
        source = make.make_source(trigger_train_dataset, test_train_dataset, 2)
        data = make.make_data("trigger_data",
                              train_images,
                              test_images,
                              source=source)
        sys.add_data(data)
        update_dataset(m, th, trigger_train_dataset, test_train_dataset)

        # Trigger
        trigger = make.make_trigger(
            "trigger",
            make.make_empty_classifier(data_id="trigger_data"), ["big"],
            model="probability")
        sys.add_trigger(trigger)

        # Simple classifier
        smallClassifier = make.make_classifier("small", m, "trigger")
        sys.add_classifier(smallClassifier)

        results = eval.evaluate(sys, "small", check_classifiers=False)
        records["system_" + m + ';' + m_ + ';' + str(th)] = results.test

        with lock:  # guard the check-then-set on the shared records dict
            if m_ not in records:  # Evaluate individual models in order to plot
                records[m_] = eval.evaluate(sys, 'big').test

        work.task_done()
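
For context, a minimal sketch of how a worker like this is typically driven, assuming work is a queue.Queue of (m, th, m_) tuples; the file names below are hypothetical:

import threading
from queue import Queue

work = Queue()
records = {}
# Hypothetical job list: one small/big pair swept over several thresholds.
for th in (0.5, 0.7, 0.9):
    work.put(("DenseNet121_cifar10.pkl", th, "DenseNet201_cifar10.pkl"))

for tid in range(4):
    threading.Thread(target=evaluate, args=(work, records, tid),
                     daemon=True).start()

work.join()  # returns once every job has been marked task_done()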
Code example #2
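Builds a classifier cascade: every classifier except the last is paired with a probability trigger that forwards low-confidence inputs to the next classifier in the chain. It relies on the same io/make/sb helpers as the example above.
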
def build_chain(classifiers, id_classifiers, thresholds, id_triggers, data_id):

    assert len(classifiers) == len(id_triggers) + 1, \
        "ERROR: Number of triggers in the chain is not consistent"
    assert len(id_triggers) == len(thresholds), \
        "ERROR: Each trigger should be assigned a threshold"
    assert len(classifiers) == len(id_classifiers), \
        "ERROR: Each classifier file should be assigned a classifier id"

    data_path = os.path.join(os.environ['FCM'], 'Data', data_id)

    os.makedirs(data_path, exist_ok=True)

    sys = sb.SystemBuilder(verbose=False)
    for i in range(len(classifiers) - 1):

        # Create data for the trigger
        test_path = os.path.join(data_path, id_triggers[i] + "_test.pkl")
        source = make.make_source(test_path, test_path, 2)
        data = make.make_data("data_" + id_triggers[i], 1, 1, source=source)
        update_dataset(classifiers[i], test_path, test_path, test_path,
                       thresholds[i])
        sys.add_data(data)

        # Build trigger attached to classifier
        trigger = make.make_trigger(
            id_triggers[i],
            make.make_empty_classifier(data_id="data_" + id_triggers[i]),
            [id_classifiers[i + 1]],
            model="probability")
        sys.add_trigger(trigger)

        # Build classifier
        c_file = classifiers[i]
        classifier = make.make_classifier(id_classifiers[i],
                                          c_file,
                                          component_id=id_triggers[i])
        sys.add_classifier(classifier)

        if i == 0:
            sys.set_start(id_classifiers[i])

    classifier = make.make_classifier(id_classifiers[-1], classifiers[-1])
    sys.add_classifier(classifier)
    return sys
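
A hypothetical call, assuming the FCM environment variable is set and the listed pickle files exist (all names here are made up for illustration):

chain = build_chain(
    classifiers=["DenseNet121_cifar10.pkl",
                 "DenseNet169_cifar10.pkl",
                 "DenseNet201_cifar10.pkl"],
    id_classifiers=["c0", "c1", "c2"],
    thresholds=[0.7, 0.8],
    id_triggers=["t0", "t1"],
    data_id="chain_demo")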
Code example #3
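The tail of a helper that pickles a trigger-classifier dict and wraps it as a classifier, followed by a script fragment that joins a 'small' and a 'big' pretrained CIFAR-10 model through a probability-threshold trigger.
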
    # (The enclosing function header is not part of this excerpt; from the body
    # it takes name, train, test and performance and returns a classifier.)
    classifier_trigger_dict = make.make_classifier_dict(
        "trigger_classifier", "cifar10", train, test, performance)
    io.save_pickle('../Definitions/Triggers/Tmp/' + name,
                   classifier_trigger_dict)
    classifier = make.make_classifier("trigger_classifier",
                                      "../Definitions/Triggers/Tmp/" + name)
    return classifier


net1 = 'system_DenseNet201_cifar10.pkl_ResNet152_cifar10.pklth=0.7'
net2 = 'GoogleNet_cifar10.pkl'
m = Classifier_Path + net1
m_ = Classifier_Path + net2
th = 0.7

sys = sb.SystemBuilder(verbose=True)

name2 = m_
bigClassifier = make.make_empty_classifier("big")
bigClassifier.classifier_file = name2
sys.add_classifier(bigClassifier)

trigger = make.make_trigger("probability_threshold_trigger",
                            make.make_empty_classifier(), ["big"])
sys.add_trigger(trigger)

name1 = m
smallClassifier = make.make_empty_classifier("small",
                                             "probability_threshold_trigger")
model1_dict = io.read_pickle(name1, suffix="", verbose=False)  # loaded but not used in this excerpt
smallClassifier.classifier_file = name1
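
The fragment is cut off here; by analogy with code example #1, the next steps would presumably be:

sys.add_classifier(smallClassifier)
results = eval.evaluate(sys, "small")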
Code example #4
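The tail of the update_dataset helper followed by the main script: a DenseNet121 'small' model and a DenseNet201 'big' model are connected by a probability trigger with threshold 0.9.
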
    # (Tail of the update_dataset helper; the excerpt begins mid-function.)
    dataset_train['th'] = th
    io.save_pickle(train_path, dataset_train)


if __name__ == "__main__":

    # Dict with the results of the evaluations
    records = {}
    # NOTE: assumes PYTHONPATH points at a single project root
    path = os.environ['PYTHONPATH']
    train_path = path + '/Data/train_trigger_threshold.pkl'
    test_path = path + '/Data/test_trigger_threshold.pkl'
    small_cfile = "../Definitions/Classifiers/DenseNet121_cifar10"
    big_cfile = "../Definitions/Classifiers/DenseNet201_cifar10"
    th = 0.9

    sys = sb.SystemBuilder(verbose=False)

    bigClassifier = make.make_classifier("big", big_cfile)
    sys.add_classifier(bigClassifier)

    source = make.make_source(train_path, test_path, fcm.Data.Source.NUMPY)
    data = make.make_data("trigger_data", int(5e4), int(1e4), source=source)  # CIFAR-10: 50k train, 10k test
    sys.add_data(data)
    update_dataset(small_cfile, th, train_path, test_path)

    trigger = make.make_trigger(
        "trigger",
        make.make_empty_classifier(data_id="trigger_data"), ["big"],
        model="probability")
    sys.add_trigger(trigger)
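
The listing ends before the small classifier is attached; following the pattern of code example #1, the remaining steps would look roughly like:

    smallClassifier = make.make_classifier("small", small_cfile, "trigger")
    sys.add_classifier(smallClassifier)
    results = eval.evaluate(sys, "small", check_classifiers=False)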