Example #1
def evaluate_all_datasets_combination():
    """Walk BASE_PATH_COMBINATION (origin/target/feature), compute HTER,
    APCER and BPCER for every feature folder and dump the table to CSV."""
    results = [['Origin', 'Target', 'Feature', 'HTER', 'APCER', 'BPCER']]

    datasets_origin = file_helper.get_dirs_from_folder(BASE_PATH_COMBINATION)

    for dataset_origin in datasets_origin:
        print('Origin: ', dataset_origin)
        datasets_target = file_helper.get_dirs_from_folder(
            os.path.join(BASE_PATH_COMBINATION, dataset_origin))
        for dataset_target in datasets_target:
            print('  Target: ', dataset_target)
            features = file_helper.get_dirs_from_folder(
                os.path.join(BASE_PATH_COMBINATION, dataset_origin,
                             dataset_target))
            for feature in features:
                full_path_features = os.path.join(BASE_PATH_COMBINATION,
                                                  dataset_origin,
                                                  dataset_target, feature)
                try:
                    hter, apcer, bpcer = evaluate_predictions(
                        full_path_features)

                    row = [
                        dataset_origin, dataset_target, feature, hter, apcer,
                        bpcer
                    ]
                    results.append(row)
                except Exception as e:
                    print(e)

    df = DataFrame(results)
    print(df)
    df.to_csv('results_hter_combinations.csv', sep=' ')
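The traversal above leans on two project helpers that are not shown in this listing: file_helper.get_dirs_from_folder and evaluate_predictions. A minimal sketch of the assumed contracts (the helper lists subdirectories, the evaluator returns the three error rates as floats); the real implementations live elsewhere in the repository:

import os

def get_dirs_from_folder(folder):
    # Presumed behaviour of file_helper.get_dirs_from_folder: return only
    # the subdirectory names of `folder`, which is all the traversal code
    # above appears to need.
    return [entry for entry in os.listdir(folder)
            if os.path.isdir(os.path.join(folder, entry))]

def evaluate_predictions(full_path_features):
    # Assumed contract only: read the stored predictions under
    # `full_path_features` and return (hter, apcer, bpcer) as floats.
    raise NotImplementedError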
Example #2
def classify_all_datasets(organized_attack_path, base_output_path):
    """Run the classification step over every dataset/attack-type/feature
    folder under organized_attack_path, writing under base_output_path."""
    datasets = file_helper.get_dirs_from_folder(organized_attack_path)
    for dataset in datasets:
        print("Dataset: %s" % dataset)
        dataset_path = os.path.join(organized_attack_path, dataset)
        types_attacks = file_helper.get_dirs_from_folder(dataset_path)

        for type_attack in types_attacks:
            print("Type attack: %s" % type_attack)
            attack_path = os.path.join(dataset_path, type_attack)
            features = file_helper.get_dirs_from_folder(attack_path)

            for feature in features:
                print("Feature: %s" % feature)

                if feature != 'features':  # this folder is where we keep the results from the extraction
                    feature_path = os.path.join(dataset_path, type_attack,
                                                feature)
                    output_path = os.path.join(base_output_path, dataset,
                                               type_attack, feature)
                    print(feature_path)
                    try:
                        run(feature_path, output_path, 'jpg')
                    except Exception as e:
                        print(e)
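A possible invocation of the snippet above, assuming hypothetical locations for the organized attack frames and the classification output (both paths are illustrative, not taken from the repository):

import os

# Hypothetical paths; adjust to the local directory layout.
organized_attack_path = os.path.expanduser('~/datasets/organized_attacks')
base_output_path = os.path.expanduser('~/output/classification')

classify_all_datasets(organized_attack_path, base_output_path)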
Example #3
def evaluate_all_cross(base_features, base_output):
    """Cross-dataset evaluation: pair every (origin dataset, attack, feature)
    folder with every matching feature folder of the target datasets."""
    datasets = file_helper.get_dirs_from_folder(base_features)
    for dataset_origin in datasets:

        origin_path = os.path.join(base_features, dataset_origin)

        # types_attack = all, cut, print, tablet, etc.
        types_attack = file_helper.get_dirs_from_folder(origin_path)

        for attack_origin in types_attack:
            full_path_origin = os.path.join(origin_path, attack_origin)
            features_origin = file_helper.get_dirs_from_folder(
                full_path_origin)

            for feature_origin in features_origin:
                full_path_origin = os.path.join(origin_path, attack_origin,
                                                feature_origin)
                print(full_path_origin)
                targets_datasets = file_helper.get_dirs_from_folder(
                    base_features)
                for dataset_target in targets_datasets:
                    if dataset_target != dataset_origin and attack_origin != 'all':
                        continue

                    attacks_target = os.path.join(base_features,
                                                  dataset_target)

                    for attack_target in os.listdir(attacks_target):
                        features_target = os.path.join(base_features,
                                                       dataset_target,
                                                       attack_target)

                        # if (dataset_target == dataset_origin and attack_target != attack_origin):
                        #     continue

                        for feature_target in os.listdir(features_target):

                            if feature_target == feature_origin:
                                full_path_target = os.path.join(
                                    features_target, feature_target)
                                output_path = os.path.join(
                                    base_output, dataset_origin, attack_origin,
                                    dataset_target, attack_target,
                                    feature_target)

                                print('  target: %s' % (full_path_target))
                                try:
                                    evaluate(full_path_origin,
                                             full_path_target, output_path)
                                except Exception as e:
                                    print(e)
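evaluate() is another project helper that is not shown here; the loop treats it as taking the origin feature folder, the matching target feature folder and an output folder. A stub of that assumed contract plus a hypothetical driver call:

import os

def evaluate(full_path_origin, full_path_target, output_path):
    # Assumed contract only: train/score using the origin features, evaluate
    # the target features and write the metrics under output_path.
    raise NotImplementedError

# Hypothetical roots; adjust to where the classification step wrote its
# per-feature folders.
evaluate_all_cross(os.path.expanduser('~/output/features'),
                   os.path.expanduser('~/output/cross_eval'))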
Example #4
def classify_all_datasets():
    """Run the classification step over every dataset/attack-type/feature
    folder under the module-level BASE_PATH."""
    datasets = file_helper.get_dirs_from_folder(BASE_PATH)
    for dataset in datasets:
        dataset_path = os.path.join(BASE_PATH, dataset)
        types_attacks = file_helper.get_dirs_from_folder(dataset_path)

        for type_attack in types_attacks:
            attack_path = os.path.join(dataset_path, type_attack)
            features = file_helper.get_dirs_from_folder(attack_path)

            for feature in features:
                if feature != 'features':  # this folder is where we keep the results from the extraction
                    feature_path = os.path.join(dataset_path, type_attack, feature)
                    print(feature_path)
                    run(feature_path, 'jpg')
Example #5
def evaluate_all_datasets():
    """Walk BASE_PATH_INTRA (origin/attack/target dataset/attack/feature),
    compute HTER, APCER and BPCER for every leaf and dump the table to CSV."""
    results = [[
        'Origin', 'Target', 'Origin Type', 'Target Type', 'Feature', 'HTER',
        'APCER', 'BPCER'
    ]]
    datasets_origin = file_helper.get_dirs_from_folder(BASE_PATH_INTRA)

    for dataset_origin in datasets_origin:
        attacks_origin = os.listdir(
            os.path.join(BASE_PATH_INTRA, dataset_origin))
        for attack_origin in attacks_origin:
            datasets_target = file_helper.get_dirs_from_folder(
                os.path.join(BASE_PATH_INTRA, dataset_origin, attack_origin))

            for dataset_target in datasets_target:
                attacks = file_helper.get_dirs_from_folder(
                    os.path.join(BASE_PATH_INTRA, dataset_origin,
                                 attack_origin, dataset_target))

                for attack_target in attacks:
                    features = os.listdir(
                        os.path.join(BASE_PATH_INTRA, dataset_origin,
                                     attack_origin, dataset_target,
                                     attack_target))

                    for feature in features:
                        full_path_features = os.path.join(
                            BASE_PATH_INTRA, dataset_origin, attack_origin,
                            dataset_target, attack_target, feature)
                        try:
                            hter, apcer, bpcer = evaluate_predictions(
                                full_path_features)

                            row = [
                                dataset_origin, dataset_target, attack_origin,
                                attack_target, feature, hter, apcer, bpcer
                            ]
                            results.append(row)
                        except Exception as e:
                            print(e)

    df = DataFrame(results)
    print(df)
    df.to_csv('results_hter.csv', sep=' ')
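One design note on the CSV dump: the metric names are kept as the first data row, so pandas also writes its default integer column header. A sketch of the same dump with the names passed as proper column labels instead, behaviour otherwise unchanged:

from pandas import DataFrame

# `results` as built above: results[0] holds the column names,
# results[1:] holds one row per evaluated folder.
df = DataFrame(results[1:], columns=results[0])
df.to_csv('results_hter.csv', sep=' ', index=False)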
Example #6
def run_pipeline(dataset_alias, pai, dataset_type, dataset_root, data_type,
                 output_path):
    """Process every item under dataset_root with the face aligner, skipping
    items that were already handled on a previous run."""
    l.log("Run pipeline (run_pipeline()) method invocation. Parameters below:")
    l.log("    Fetching items from dataset")
    l.log("    Dataset root: " + dataset_root)
    l.log("    Dataset alias: " + dataset_alias)
    l.log("    PAI: " + pai)
    l.log("    Data type: " + data_type)

    detector, fa = face_aligner.align_faces.make_face_aligner()
    items_dataset = file_helper.get_dirs_from_folder(dataset_root)

    for index, item in enumerate(items_dataset):
        try:
            if is_item_processed(output_path, dataset_alias, pai, dataset_type,
                                 dataset_root, data_type, item):
                print("Item % already processed" % item)
            else:
                print("Processing %s" % item)

                process_item(output_path, dataset_alias, pai, dataset_type,
                             dataset_root, data_type, item, detector, fa)
        except Exception as exception:
            l.logE(exception)
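run_pipeline depends on several project modules that are not shown (the l logger, face_aligner, is_item_processed and process_item). The resume-on-rerun behaviour hinges on is_item_processed; a minimal sketch under the assumption that an item counts as processed once its output folder exists (the real check may inspect the produced files instead):

import os

def is_item_processed(output_path, dataset_alias, pai, dataset_type,
                      dataset_root, data_type, item):
    # Assumption only: an item is considered processed if its output folder
    # already exists under output_path; the join order of the path parts is
    # also a guess.
    item_output = os.path.join(output_path, dataset_alias, dataset_type,
                               pai, item)
    return os.path.isdir(item_output)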