Example 1
def predict(face, sp, age_mp, gender_mp, cp, iap, age_path, gender_path,
            age_le, gender_le):
    # load the gender model from disk, then the age model with its custom one-off metric
    gender_model = load_model(gender_path)

    agh = AgeGenderHelper(config, deploy)
    one_off_mappings = agh.build_oneoff_mappings(age_le)
    one_off = OneOffAccuracy(one_off_mappings)
    custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}
    age_model = load_model(age_path, custom_objects=custom_objects)

    # resize, mean-subtract and crop the face for each model
    age_crops = cp.preprocess(age_mp.preprocess(sp.preprocess(face)))
    age_crops = np.array([iap.preprocess(c) for c in age_crops])

    gender_crops = cp.preprocess(gender_mp.preprocess(sp.preprocess(face)))
    gender_crops = np.array([iap.preprocess(c) for c in gender_crops])

    # predict age and gender from the extracted crops and average the predictions
    age_preds = age_model.predict(age_crops).mean(axis=0)
    gender_preds = gender_model.predict(gender_crops).mean(axis=0)

    del gender_model, age_model

    return age_preds, gender_preds
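
A quick note on the metric registered above: one_off_accuracy counts a prediction as correct when it lands in the true age bucket or an adjacent one. A minimal NumPy sketch of that idea (this is not the project's OneOffAccuracy class, and it assumes the label encoder orders the age buckets):

import numpy as np

def one_off_accuracy_np(y_true, y_pred_probs):
    # indices of the true and predicted classes
    true_idx = np.argmax(y_true, axis=1)
    pred_idx = np.argmax(y_pred_probs, axis=1)

    # a prediction is "one-off correct" if it is at most one bucket away
    return np.mean(np.abs(true_idx - pred_idx) <= 1)

# toy example: three samples, four ordered age buckets
y_true = np.eye(4)[[0, 2, 3]]
y_pred = np.array([[0.1, 0.7, 0.1, 0.1],    # off by one  -> counted
                   [0.8, 0.1, 0.05, 0.05],  # off by two  -> not counted
                   [0.0, 0.1, 0.2, 0.7]])   # exact match -> counted
print(one_off_accuracy_np(y_true, y_pred))  # 0.666...
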
Example 2
def calculate_score_adience(config, is_frontal=False):
    if is_frontal:
        print('Calculating accuracy for frontalized test set...')
    else:
        print('Calculating accuracy for test set...')

    score_mean, score_one_off_mean, score_crops_mean, score_one_off_crops_mean = 0, 0, 0, 0
    preds_cross, labels_cross = [], []

    # evaluate every fold on either the frontalized or the original test set
    test_hdf5s = config.TEST_HDF5S_FRONTAL if is_frontal else config.TEST_HDF5S

    for i in range(config.NUM_FOLD_PATHS):
        score, score_one_off, score_crops, score_one_off_crops = calculate_score(
            config.DATASET_MEANS[i],
            config.LABEL_ENCODER_PATHS[i],
            config.BEST_WEIGHTS[i],
            test_hdf5s[i],
            cross_val=i,
            preds_cross=preds_cross,
            labels_cross=labels_cross)

        score_mean += score
        score_crops_mean += score_crops

        if config.DATASET_TYPE == 'age':
            score_one_off_mean += score_one_off
            score_one_off_crops_mean += score_one_off_crops

    # load the LabelEncoder from the first fold
    le = pickle.loads(open(config.LABEL_ENCODER_PATHS[0], 'rb').read())
    cm_path = f'cm_{config.DATASET_TYPE}_frontal.png' if is_frontal else f'cm_{config.DATASET_TYPE}.png'
    AgeGenderHelper.plot_confusion_matrix_from_data(
        config,
        labels_cross,
        preds_cross,
        le,
        save_path=os.path.sep.join([config.OUTPUT_BASE, cm_path]))

    print(
        f'[INFO] rank-1 across {config.NUM_FOLD_PATHS} validations: {score_mean/config.NUM_FOLD_PATHS:.4f}'
    )
    print(
        f'[INFO] rank-1 across {config.NUM_FOLD_PATHS} validations with crops: {score_crops_mean/config.NUM_FOLD_PATHS:.4f}'
    )

    if config.DATASET_TYPE == 'age':
        print(
            f'[INFO] one-off across {config.NUM_FOLD_PATHS} validations: {score_one_off_mean/config.NUM_FOLD_PATHS:.4f}'
        )
        print(
            f'[INFO] one-off across {config.NUM_FOLD_PATHS} validations with crops: {score_one_off_crops_mean/config.NUM_FOLD_PATHS:.4f}'
        )
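
Pooling labels_cross and preds_cross across folds makes it possible to build one confusion matrix for the whole cross-validation run. A minimal scikit-learn sketch of that aggregation, independent of the AgeGenderHelper plotting code (the label values below are placeholders):

import numpy as np
from sklearn.metrics import confusion_matrix

# pooled ground truth and predictions gathered over all folds
labels_cross = [0, 1, 2, 2, 1, 0, 2]
preds_cross = [0, 2, 2, 1, 1, 0, 2]

cm = confusion_matrix(labels_cross, preds_cross)
print(cm)

# per-class recall read off the rows of the confusion matrix
recall = cm.diagonal() / cm.sum(axis=1)
print(recall)
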
Example 3
def training(aug, means_path, train_hdf5_path, val_hdf5_path, fig_path,
             json_path, label_encoder_path, best_weight_path, checkpoint_path,
             cross_val=None):
    # load RGB means
    means = json.loads(open(means_path).read())

    # initialize image preprocessors
    sp = SimplePreprocessor(227, 227)
    mp = MeanPreprocessor(means['R'], means['G'], means['B'])
    pp = PatchPreprocessor(227, 227)
    iap = ImageToArrayPreprocessor()

    # initialize training and validation image generator
    train_gen = HDF5DatasetGenerator(train_hdf5_path, config.BATCH_SIZE,
                                     preprocessors=[pp, mp, iap], aug=aug,
                                     classes=config.NUM_CLASSES)
    val_gen = HDF5DatasetGenerator(val_hdf5_path, config.BATCH_SIZE,
                                   preprocessors=[sp, mp, iap], aug=aug,
                                   classes=config.NUM_CLASSES)

    metrics = ['accuracy']
    if config.DATASET_TYPE == 'age':
        le = pickle.loads(open(label_encoder_path, 'rb').read())
        agh = AgeGenderHelper(config, deploy)
        one_off_mappings = agh.build_oneoff_mappings(le)

        one_off = OneOffAccuracy(one_off_mappings)
        metrics.append(one_off.one_off_accuracy)

    # construct callbacks
    callbacks = [
        TrainingMonitor(fig_path, json_path=json_path,
                        start_at=args['start_epoch']),
        EpochCheckpoint(checkpoint_path, every=5,
                        start_at=args['start_epoch']),
        ModelCheckpointsAdvanced(best_weight_path, json_path=json_path,
                                 start_at=args['start_epoch'])
    ]  # , LearningRateScheduler(decay)

    if cross_val is None:
        print('[INFO] compiling model...')
    else:
        print(f'[INFO] compiling model for cross validation {cross_val}...')
    
    if args['start_epoch'] == 0:
        if not os.path.exists(checkpoint_path):
            os.makedirs(checkpoint_path)
        model = AgeGenderNet.build(227, 227, 3, config.NUM_CLASSES, reg=5e-4)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)
    else:
        model_path = os.path.sep.join([checkpoint_path, f"epoch_{args['start_epoch']}.hdf5"])
        print(f"[INFO] loading {model_path}...")
        if config.DATASET_TYPE == 'age':
            model = load_model(model_path, custom_objects={'one_off_accuracy': one_off.one_off_accuracy})
        elif config.DATASET_TYPE == 'gender':
            model = load_model(model_path)

        # update learning rate
        print(f'[INFO] old learning rate: {K.get_value(model.optimizer.lr)}')
        K.set_value(model.optimizer.lr, INIT_LR)
        print(f'[INFO] new learning rate: {K.get_value(model.optimizer.lr)}')

    # train the network
    if cross_val is None:
        print('[INFO] training the network...')
    else:
        print(f'[INFO] training the network for cross validation {cross_val}...')
    model.fit_generator(train_gen.generator(),
                        steps_per_epoch=train_gen.num_images // config.BATCH_SIZE,
                        validation_data=val_gen.generator(),
                        validation_steps=val_gen.num_images // config.BATCH_SIZE,
                        epochs=MAX_EPOCH - args['start_epoch'],
                        verbose=2,
                        callbacks=callbacks)

    # close dataset
    train_gen.close()
    val_gen.close()
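
Since the network is compiled with a custom metric, resuming from a checkpoint only works when that metric is passed back through custom_objects, exactly as done above. A self-contained tf.keras sketch of that round trip on a throwaway model (the metric body and file name are illustrative, not the project's implementation):

import tensorflow as tf

def one_off_accuracy(y_true, y_pred):
    # toy metric: fraction of predictions within one class of the truth
    true_idx = tf.argmax(y_true, axis=-1)
    pred_idx = tf.argmax(y_pred, axis=-1)
    return tf.reduce_mean(tf.cast(tf.abs(true_idx - pred_idx) <= 1, tf.float32))

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(4, activation='softmax', input_shape=(8,))])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy', one_off_accuracy])
model.save('toy_model.h5')

# the model was compiled with a custom metric, so Keras needs custom_objects
# to resolve it by name when the checkpoint is reloaded
reloaded = tf.keras.models.load_model(
    'toy_model.h5', custom_objects={'one_off_accuracy': one_off_accuracy})
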
Example 4
# import the necessary packages
from config import age_gender_config as config
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from pyimagesearch.utils import AgeGenderHelper
import numpy as np
import progressbar
import pickle
import json
import cv2

# initialize our helper class, then build the set of image paths
# and class labels
print("[INFO] building paths and labels...")
agh = AgeGenderHelper(config)
(trainPaths, trainLabels) = agh.buildPathsAndLabels()

# now that we have the total number of images in the dataset that
# can be used for training, compute the number of images that
# should be used for validation and testing
numVal = int(len(trainPaths) * config.NUM_VAL_IMAGES)
numTest = int(len(trainPaths) * config.NUM_TEST_IMAGES)

# our class labels are represented as strings so we need to encode
# them
print("[INFO] encoding labels...")
le = LabelEncoder().fit(trainLabels)
trainLabels = le.transform(trainLabels)

# perform sampling from the training set to construct a validation set
Example 5
# load Label Encoder and mean files
print('[INFO] loading label encoders and mean files...')
age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODER, 'rb').read())
gender_le = pickle.loads(open(deploy.GENDER_LABEL_ENCODER, 'rb').read())
age_means = json.loads(open(deploy.AGE_MEANS).read())
gender_means = json.loads(open(deploy.GENDER_MEANS).read())

# load model from disk
custom_objects = None

age_path = deploy.AGE_NETWORK_PATH
gender_path = deploy.GENDER_NETWORK_PATH
gender_model = load_model(gender_path)

agh = AgeGenderHelper(config, deploy)
one_off_mappings = agh.build_oneoff_mappings(age_le)
one_off = OneOffAccuracy(one_off_mappings)
custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}
age_model = load_model(age_path, custom_objects=custom_objects)

# initialize image preprocessors
sp = SimplePreprocessor(256, 256, inter=cv2.INTER_CUBIC)
age_mp = MeanPreprocessor(age_means['R'], age_means['G'], age_means['B'])
gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                             gender_means['B'])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# initialize dlib's face detector (HOG-based), then create facial landmark predictor and face aligner
detector = dlib.get_frontal_face_detector()
Example 6
            val_loss = criterion(outputs, labels)
            running_val_loss += val_loss.item() * inputs.size(0)
            labels_val_len += labels.size(0)

    print(
        f'[INFO] Epoch {epoch+1}/{max_epoch}: Loss: {running_loss/labels_len:.5f}, Validation loss: {running_val_loss/labels_val_len:.5f}'
    )

# evaluate on the held-out test set; switch to eval mode so layers such as
# dropout and batch norm behave deterministically during inference
model.eval()
correct = 0
labels_len = 0
test_prediction, test_labels = [], []
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs = inputs.to(device)
        outputs = model(inputs)

        predicted = torch.argmax(outputs, 1)
        predicted = predicted.to('cpu')
        correct += (predicted == labels).sum().item()

        test_prediction.extend(predicted.tolist())
        test_labels.extend(labels.tolist())
        labels_len += labels.size(0)

print(
    f'[INFO] Accuracy of model over {labels_len} test images: {correct/labels_len:.5f}'
)

AgeGenderHelper.plot_confusion_matrix_from_transformed_data(
    classes[test_labels], classes[test_prediction], classes, 'cm_cifar10.jpg')
Example 7
                             arg_params=model.arg_params,
                             aux_params=model.aux_params)

# make predictions on the testing data
print("[INFO] predicting on '{}' test data...".format(config.DATASET_TYPE))
metrics = [mx.metric.Accuracy()]
acc = model.score(testIter, eval_metric=metrics)

# display the rank-1 accuracy
print("[INFO] rank-1: {:.2f}%".format(acc[0] * 100))

# check to see if the one-off accuracy callback should be used
if config.DATASET_TYPE == "age":
    # re-compile the model so that we can compute our custom one-off
    # evaluation metric
    arg = model.arg_params
    aux = model.aux_params
    model = mx.mod.Module(symbol=model.symbol, context=[mx.gpu(1)])
    model.bind(data_shapes=testIter.provide_data,
               label_shapes=testIter.provide_label)
    model.set_params(arg, aux)

    # load the label encoder, then build the one-off mappings for
    # computing accuracy
    le = pickle.loads(open(config.LABEL_ENCODER_PATH, "rb").read())
    agh = AgeGenderHelper(config)
    oneOff = agh.buildOneOffMappings(le)

    # compute and display the one-off evaluation metric
    acc = _compute_one_off(model, testIter, oneOff)
    print("[INFO] one-off: {:.2f}%".format(acc * 100))
Example 8
		sp.preprocess(image)))
	genderImage = iap.preprocess(genderMP.preprocess(
		sp.preprocess(image)))
	ageImage = np.expand_dims(ageImage, axis=0)
	genderImage = np.expand_dims(genderImage, axis=0)

	# pass the ROIs through their respective models
	agePreds = ageModel.predict(ageImage)[0]
	genderPreds = genderModel.predict(genderImage)[0]

	# sort the predictions according to their probability
	ageIdxs = np.argsort(agePreds)[::-1]
	genderIdxs = np.argsort(genderPreds)[::-1]

	# visualize the age and gender predictions
	ageCanvas = AgeGenderHelper.visualizeAge(agePreds, ageLE)
	genderCanvas = AgeGenderHelper.visualizeGender(genderPreds,
		genderLE)
	image = imutils.resize(image, width=400)

	# draw the actual prediction on the image
	gtLabel = ageLE.inverse_transform(int(gtLabel))
	text = "Actual: {}-{}".format(*gtLabel.split("_"))
	cv2.putText(image, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
		0.7, (0, 0, 255), 3)

	# show the output image
	cv2.imshow("Image", image)
	cv2.imshow("Age Probabilities", ageCanvas)
	cv2.imshow("Gender Probabilities", genderCanvas)
	cv2.waitKey(0)
Example 9
    # serialize means of R, G, B
    print('[INFO] serializing means...')
    D = {'R': np.mean(R), 'G': np.mean(G), 'B': np.mean(B)}
    f = open(dataset_mean_path, 'w')
    f.write(json.dumps(D))
    f.close()

    # serialize label encoder
    print('[INFO] serializing label encoder...')
    f = open(label_encoder_path, 'wb')
    f.write(pickle.dumps(le))
    f.close()


# initialize helper class
agh = AgeGenderHelper(config, deploy)

if config.DATASET == 'IOG':
    # build set of image paths and class labels
    print('[INFO] building paths and labels...')
    train_paths, train_labels, test_paths, test_labels = agh.build_paths_and_labels_iog_preprocessed()

    # define the number of validation images
    num_val = int(len(train_labels) * config.NUM_VAL_IMAGES)

    # our class labels are represented as strings, so encode them
    print(f'[INFO] encoding labels {config.SALIENCY_INFO}...')
    le = LabelEncoder()
    train_labels = le.fit_transform(train_labels)
    test_labels = le.transform(test_labels)
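
The R/G/B means serialized at the top of this example are consumed later by a MeanPreprocessor. The underlying operation is plain per-channel mean subtraction; a minimal OpenCV sketch of it (the file paths are only illustrative):

import json
import cv2

# load the serialized channel means written by the snippet above
means = json.loads(open('output/age_mean.json').read())

# OpenCV loads images in BGR order, so split accordingly before subtracting
image = cv2.imread('example.jpg').astype('float32')
(B, G, R) = cv2.split(image)
R -= means['R']
G -= means['G']
B -= means['B']
image = cv2.merge([B, G, R])
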
Example 10
        if config.DATASET == 'IOG':
            # load Label Encoder and mean files
            print('[INFO] loading label encoders and mean files...')
            age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODER, 'rb').read())
            gender_le = pickle.loads(open(deploy.GENDER_LABEL_ENCODER, 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEAN).read())
            gender_means = json.loads(open(deploy.GENDER_MEAN).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'],
                                      age_means['B'])
            gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                                         gender_means['B'])

            age_preds, gender_preds = predict(face, sp, age_mp, gender_mp, cp,
                                              iap, deploy.AGE_NETWORK_PATH,
                                              deploy.GENDER_NETWORK_PATH,
                                              age_le, gender_le)

            # visualize age and gender predictions
            age_canvas = AgeGenderHelper.visualize_age(age_preds, age_le)
            gender_canvas = AgeGenderHelper.visualize_gender(gender_preds,
                                                             gender_le)
            
        elif config.DATASET == 'ADIENCE':
            # age_preds_cross, gender_preds_cross = [], []

            i = 0
            # load Label Encoder and mean files
            print(f'[INFO] loading label encoders and mean files for cross validation {i}...')
            age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODERS[i], 'rb').read())
            gender_le = pickle.loads(open(deploy.GENDER_LABEL_ENCODERS[i], 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEANS[i]).read())
            gender_means = json.loads(open(deploy.GENDER_MEANS[i]).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'], age_means['B'])
Example 11
                             high=len(image_paths),
                             size=args['sample_size'])

    # load Label Encoder and mean files
    print('[INFO] loading label encoders and mean files...')
    age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODER, 'rb').read())
    gender_le = pickle.loads(open(deploy.GENDER_LABEL_ENCODER, 'rb').read())
    age_mean = json.loads(open(deploy.AGE_MEAN).read())
    gender_mean = json.loads(open(deploy.GENDER_MEAN).read())

    # load model from disk
    age_path = deploy.AGE_NETWORK_PATH
    gender_path = deploy.GENDER_NETWORK_PATH
    gender_model = load_model(gender_path)

    agh = AgeGenderHelper(config, deploy)
    one_off_mappings = agh.build_oneoff_mappings(age_le)
    one_off = OneOffAccuracy(one_off_mappings)
    custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}
    age_model = load_model(age_path, custom_objects=custom_objects)

    # initialize image preprocessors
    sp = SimplePreprocessor(256, 256, inter=cv2.INTER_CUBIC)
    age_mp = MeanPreprocessor(age_mean['R'], age_mean['G'], age_mean['B'])
    gender_mp = MeanPreprocessor(gender_mean['R'], gender_mean['G'],
                                 gender_mean['B'])
    cp = CropPreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE)
    iap = ImageToArrayPreprocessor()

    # initialize dlib's face detector (HOG-based), then create facial landmark predictor and face aligner
    detector = dlib.get_frontal_face_detector()
Example 12
def upload_file():
    file = request.files['image']

    image_path = os.path.sep.join([UPLOAD_FOLDER, file.filename])
    file.save(image_path)
    # image_url = uploader.upload(image_path)
    # image = AgeGenderHelper.url_to_image(image_url['url'])

    # initialize dlib's face detector (HOG-based), then create facial landmark predictor and face aligner
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(deploy.DLIB_LANDMARK_PATH)
    fa = FaceAligner(predictor)

    # initialize image preprocessors
    sp = SimplePreprocessor(256, 256, inter=cv2.INTER_CUBIC)
    cp = CropPreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE, horiz=False)
    iap = ImageToArrayPreprocessor()

    # load the uploaded image from disk, resize it and convert it to grayscale
    print(f'[INFO] processing {file.filename}')
    image = cv2.imread(image_path)
    image = imutils.resize(image, width=1024)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    clone = image.copy()

    # detect faces in grayscale image
    rects = detector(gray, 1)

    # loop over face detections
    for rect in rects:
        # determine facial landmarks for face region, then align face
        shape = predictor(gray, rect)
        face = fa.align(image, gray, rect)

        # draw bounding box around face
        x, y, w, h = face_utils.rect_to_bb(rect)
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if config.DATASET == 'IOG':
            # load Label Encoder and mean files
            print('[INFO] loading label encoders and mean files...')
            age_le = pickle.loads(open(deploy.AGE_LABEL_ENCODER, 'rb').read())
            gender_le = pickle.loads(
                open(deploy.GENDER_LABEL_ENCODER, 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEAN).read())
            gender_means = json.loads(open(deploy.GENDER_MEAN).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'],
                                      age_means['B'])
            gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                                         gender_means['B'])

            age_preds, gender_preds = predict(face, sp, age_mp, gender_mp, cp,
                                              iap, deploy.AGE_NETWORK_PATH,
                                              deploy.GENDER_NETWORK_PATH,
                                              age_le, gender_le)

        elif config.DATASET == 'ADIENCE':
            # age_preds_cross, gender_preds_cross = [], []

            i = 0
            # load Label Encoder and mean files
            print(
                f'[INFO] loading label encoders and mean files for cross validation {i}...'
            )
            age_le = pickle.loads(
                open(deploy.AGE_LABEL_ENCODERS[i], 'rb').read())
            gender_le = pickle.loads(
                open(deploy.GENDER_LABEL_ENCODERS[i], 'rb').read())
            age_means = json.loads(open(deploy.AGE_MEANS[i]).read())
            gender_means = json.loads(open(deploy.GENDER_MEANS[i]).read())

            # initialize image preprocessors
            age_mp = MeanPreprocessor(age_means['R'], age_means['G'],
                                      age_means['B'])
            gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'],
                                         gender_means['B'])

            age_preds, gender_preds = predict(face, sp, age_mp, gender_mp, cp,
                                              iap, deploy.AGE_NETWORK_PATHS[i],
                                              deploy.GENDER_NETWORK_PATHS[i],
                                              age_le, gender_le)
            # age_preds_cross.append(age_pred)
            # gender_preds_cross.append(gender_pred)

            # age_preds, gender_preds = np.mean(age_preds_cross, axis = 0), np.mean(gender_preds_cross, axis = 0)

        clone = AgeGenderHelper.visualize_video(age_preds, gender_preds,
                                                age_le, gender_le, clone,
                                                (x, y))

    # path = image_path.split('.')
    # pred_path = '.'.join([f'{path[0]}_predict', path[1]])
    # pred_filename = pred_path.split(os.path.sep)[-1]
    pred_path = '.'.join([f"{image_path.split('.')[0]}_1", 'jpg'])
    cv2.imwrite(pred_path, clone)
    # image_url = uploader.upload(pred_path)
    gc.collect()
    K.clear_session()

    return render_template('index.html',
                           filename=pred_path.split(os.path.sep)[-1])
Example 13
def calculate_score(means_path,
                    label_encoder_path,
                    best_weight_path,
                    test_hdf5_path,
                    cross_val=None,
                    preds_cross=None,
                    labels_cross=None,
                    is_mapped=False):
    # load RGB means for training set
    means = json.loads(open(means_path).read())

    # load LabelEncoder
    le = pickle.loads(open(label_encoder_path, 'rb').read())

    # initialize image preprocessors
    sp = SimplePreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE)
    mp = MeanPreprocessor(means['R'], means['G'], means['B'])
    cp = CropPreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE)
    iap = ImageToArrayPreprocessor()

    custom_objects = None
    agh = AgeGenderHelper(config, deploy)
    if config.DATASET_TYPE == 'age':
        one_off_mappings = agh.build_oneoff_mappings(le)
        one_off = OneOffAccuracy(one_off_mappings)
        custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}

    # load model
    print(f'[INFO] loading {best_weight_path}...')
    model = load_model(best_weight_path, custom_objects=custom_objects)

    # initialize testing dataset generator, then predict
    if cross_val is None:
        print(
            f'[INFO] predicting on testing data (no crops){config.SALIENCY_INFO}...'
        )
    else:
        print(
            f'[INFO] predicting on testing data (no crops) for cross validation {cross_val}{config.SALIENCY_INFO}...'
        )

    test_gen = HDF5DatasetGenerator(test_hdf5_path,
                                    batch_size=config.BATCH_SIZE,
                                    preprocessors=[sp, mp, iap],
                                    classes=config.NUM_CLASSES)
    preds = model.predict_generator(test_gen.generator(),
                                    steps=test_gen.num_images //
                                    config.BATCH_SIZE)

    # compute rank-1 and one-off accuracies
    labels = to_categorical(
        test_gen.db['labels'][0:config.BATCH_SIZE *
                              (test_gen.num_images // config.BATCH_SIZE)],
        num_classes=config.NUM_CLASSES)
    preds_mapped = preds.argmax(axis=1)

    if is_mapped:
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]

    if cross_val is None:
        print(
            '[INFO] serializing all images classified incorrectly for testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, config.DATASET_TYPE])

        agh.plot_confusion_matrix_from_data(config,
                                            labels.argmax(axis=1),
                                            preds_mapped,
                                            le=le,
                                            save_path=os.path.sep.join([
                                                config.OUTPUT_BASE,
                                                f'cm_{config.DATASET_TYPE}.png'
                                            ]))
    else:
        print(
            f'[INFO] serializing all images classified incorrectly for cross validation {cross_val} of testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, f'Cross{cross_val}', config.DATASET_TYPE])

        preds_cross.extend(preds_mapped.tolist())
        labels_cross.extend(labels.argmax(axis=1).tolist())

    if os.path.exists(prefix_path):
        shutil.rmtree(prefix_path)
    os.makedirs(prefix_path)

    for i, (pred, label) in enumerate(zip(preds_mapped,
                                          labels.argmax(axis=1))):
        if pred != label:
            image = test_gen.db['images'][i]

            if config.DATASET_TYPE == 'age':
                real_label, real_pred = le.classes_[label], le.classes_[pred]
                real_label = real_label.replace('_', '-')
                real_label = real_label.replace('-inf', '+')

                real_pred = real_pred.replace('_', '-')
                real_pred = real_pred.replace('-inf', '+')

            elif config.DATASET_TYPE == 'gender':
                real_label = 'Male' if label == 0 else 'Female'
                real_pred = 'Male' if pred == 0 else 'Female'

            cv2.putText(image, f'Actual: {real_label}, Predict: {real_pred}',
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0),
                        2)

            cv2.imwrite(os.path.sep.join([prefix_path, f'{i:05d}.jpg']), image)

    score = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score:.4f}')
    score_one_off = None
    if config.DATASET_TYPE == 'age':
        score_one_off = one_off.one_off_compute(
            labels, to_categorical(preds_mapped,
                                   num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off:.4f}')
    test_gen.close()

    # re-initialize testing generator, now excluding SimplePreprocessor
    test_gen = HDF5DatasetGenerator(test_hdf5_path,
                                    config.BATCH_SIZE,
                                    preprocessors=[mp],
                                    classes=config.NUM_CLASSES)
    preds = []

    labels = to_categorical(test_gen.db['labels'],
                            num_classes=config.NUM_CLASSES)

    print('[INFO] predicting on testing data (with crops)...')
    # initialize progress bar
    widgets = [
        'Evaluating: ',
        progressbar.Percentage(), ' ',
        progressbar.Bar(), ' ',
        progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(maxval=math.ceil(test_gen.num_images /
                                                    config.BATCH_SIZE),
                                   widgets=widgets).start()

    for i, (images, _) in enumerate(test_gen.generator(passes=1)):
        for image in images:
            crops = cp.preprocess(image)
            crops = np.array([iap.preprocess(c) for c in crops])

            pred = model.predict(crops)
            preds.append(pred.mean(axis=0))

        pbar.update(i)

    pbar.finish()
    test_gen.close()

    # compute rank-1 accuracy
    preds_mapped = np.argmax(preds, axis=1)
    if is_mapped:
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]

    score_crops = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score_crops:.4f}')
    score_one_off_crops = None
    if config.DATASET_TYPE == 'age':
        score_one_off_crops = one_off.one_off_compute(
            labels, to_categorical(preds_mapped,
                                   num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off_crops:.4f}')

    return score, score_one_off, score_crops, score_one_off_crops
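
The crop-based pass above averages the model's predictions over several crops of each image, a standard test-time augmentation. A minimal NumPy sketch of producing four corner crops, a centre crop and their horizontal mirrors (the crop size and the model call are placeholders, not the project's CropPreprocessor):

import numpy as np

def ten_crops(image, size=227):
    # image is an (H, W, C) array at least `size` pixels on each side
    h, w = image.shape[:2]
    coords = [
        (0, 0), (w - size, 0), (0, h - size), (w - size, h - size),  # corners
        ((w - size) // 2, (h - size) // 2),                          # centre
    ]
    crops = [image[y:y + size, x:x + size] for (x, y) in coords]

    # add a horizontally mirrored copy of every crop
    crops += [c[:, ::-1] for c in crops]
    return np.array(crops)

# usage sketch: average the predictions over all crops
# preds = model.predict(ten_crops(face)).mean(axis=0)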