def predict(face, sp, age_mp, gender_mp, cp, iap, age_path, gender_path, age_le, gender_le):
    """Predict age and gender class distributions for a single face image.

    Both networks are loaded from disk on every call, applied to oversampled
    crops of the (resized, mean-subtracted) face, and their per-crop
    predictions are averaged into one probability vector each.

    Parameters
    ----------
    face : input face image (format expected by the preprocessors).
    sp, age_mp, gender_mp, cp, iap : project preprocessor objects
        (resize, per-dataset mean subtraction, cropping, array conversion).
    age_path, gender_path : paths to the serialized Keras models.
    age_le : label encoder for the age classes (used to rebuild the
        one-off-accuracy mapping needed to deserialize the age model).
    gender_le : gender label encoder (unused here; kept for interface parity).

    Returns
    -------
    (age_preds, gender_preds) : averaged prediction vectors.
    """
    # The gender network deserializes without custom objects.
    net_gender = load_model(gender_path)

    # The age network was saved with the custom one-off accuracy metric, so
    # that metric must be re-registered for load_model to succeed.
    helper = AgeGenderHelper(config, deploy)
    oneoff = OneOffAccuracy(helper.build_oneoff_mappings(age_le))
    net_age = load_model(
        age_path,
        custom_objects={'one_off_accuracy': oneoff.one_off_accuracy})

    # Resize, mean-subtract (per-dataset means), and oversample into crops;
    # convert every crop to an array the networks accept.
    patches_age = cp.preprocess(age_mp.preprocess(sp.preprocess(face)))
    patches_age = np.array([iap.preprocess(p) for p in patches_age])
    patches_gender = cp.preprocess(gender_mp.preprocess(sp.preprocess(face)))
    patches_gender = np.array([iap.preprocess(p) for p in patches_gender])

    # Average over the crop axis so each output is one distribution.
    preds_age = net_age.predict(patches_age).mean(axis=0)
    preds_gender = net_gender.predict(patches_gender).mean(axis=0)

    # Drop the model references before returning (mirrors original cleanup).
    del net_gender, net_age
    return preds_age, preds_gender
def training(aug, means_path, train_hdf5_path, val_hdf5_path, fig_path, json_path, label_encoder_path, best_weight_path, checkpoint_path, cross_val=None):
    """Train (or resume training of) the age/gender network.

    Parameters
    ----------
    aug : image augmenter passed to the HDF5 generators.
    means_path : path to the JSON file with training-set RGB means.
    train_hdf5_path, val_hdf5_path : HDF5 datasets for training/validation.
    fig_path, json_path : training-monitor plot and history paths.
    label_encoder_path : pickled LabelEncoder (needed for the age one-off metric).
    best_weight_path : path used by the best-weights checkpoint callback.
    checkpoint_path : directory for periodic epoch checkpoints.
    cross_val : optional fold index, used only for log messages.

    Relies on module-level ``args['start_epoch']``, ``config``, ``deploy``,
    ``INIT_LR`` and ``MAX_EPOCH``.
    """
    # load RGB means (context manager: the original leaked the file handle)
    with open(means_path) as f:
        means = json.load(f)

    # initialize image preprocessors: patches for training, simple resize
    # for validation, plus mean subtraction and array conversion
    sp = SimplePreprocessor(227, 227)
    mp = MeanPreprocessor(means['R'], means['G'], means['B'])
    pp = PatchPreprocessor(227, 227)
    iap = ImageToArrayPreprocessor()

    # initialize training and validation image generators
    train_gen = HDF5DatasetGenerator(train_hdf5_path, config.BATCH_SIZE, preprocessors=[pp, mp, iap], aug=aug, classes=config.NUM_CLASSES)
    val_gen = HDF5DatasetGenerator(val_hdf5_path, config.BATCH_SIZE, preprocessors=[sp, mp, iap], aug=aug, classes=config.NUM_CLASSES)

    # age models additionally track the custom one-off accuracy metric
    metrics = ['accuracy']
    if config.DATASET_TYPE == 'age':
        with open(label_encoder_path, 'rb') as f:
            le = pickle.load(f)
        agh = AgeGenderHelper(config, deploy)
        one_off_mappings = agh.build_oneoff_mappings(le)
        one_off = OneOffAccuracy(one_off_mappings)
        metrics.append(one_off.one_off_accuracy)

    # construct callbacks: live plotting, periodic checkpoints every 5
    # epochs, and best-weights tracking
    callbacks = [
        TrainingMonitor(fig_path, json_path=json_path, start_at=args['start_epoch']),
        EpochCheckpoint(checkpoint_path, every=5, start_at=args['start_epoch']),
        ModelCheckpointsAdvanced(best_weight_path, json_path=json_path, start_at=args['start_epoch']),
    ]  # , LearningRateScheduler(decay)

    if cross_val is None:
        print('[INFO] compiling model...')
    else:
        print(f'[INFO] compiling model for cross validation {cross_val}...')

    if args['start_epoch'] == 0:
        # fresh run: build and compile a new network
        # (exist_ok avoids the check-then-create race of the original)
        os.makedirs(checkpoint_path, exist_ok=True)
        model = AgeGenderNet.build(227, 227, 3, config.NUM_CLASSES, reg=5e-4)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)
    else:
        # resume: reload the checkpointed model (age needs the custom metric)
        model_path = os.path.sep.join([checkpoint_path, f"epoch_{args['start_epoch']}.hdf5"])
        print(f"[INFO] loading {model_path}...")
        if config.DATASET_TYPE == 'age':
            model = load_model(model_path, custom_objects={'one_off_accuracy': one_off.one_off_accuracy})
        elif config.DATASET_TYPE == 'gender':
            model = load_model(model_path)
        # reset the learning rate to INIT_LR on resume
        print(f'[INFO] old learning rate: {K.get_value(model.optimizer.lr)}')
        K.set_value(model.optimizer.lr, INIT_LR)
        print(f'[INFO] new learning rate: {K.get_value(model.optimizer.lr)}')

    # train the network
    if cross_val is None:
        print('[INFO] training the network...')
    else:
        print(f'[INFO] training the network for cross validation {cross_val}...')
    model.fit_generator(train_gen.generator(),
                        steps_per_epoch=train_gen.num_images // config.BATCH_SIZE,
                        validation_data=val_gen.generator(),
                        validation_steps=val_gen.num_images // config.BATCH_SIZE,
                        epochs=MAX_EPOCH - args['start_epoch'],
                        verbose=2, callbacks=callbacks)

    # close datasets
    train_gen.close()
    val_gen.close()
# Deployment setup: load label encoders and RGB means, deserialize both
# trained networks, build the image preprocessors, and initialize dlib's
# face detector plus the landmark-based face aligner.
print('[INFO] loading label encoders and mean files...')
# context managers close the files (the original open(...).read() calls
# leaked all four handles)
with open(deploy.AGE_LABEL_ENCODER, 'rb') as f:
    age_le = pickle.load(f)
with open(deploy.GENDER_LABEL_ENCODER, 'rb') as f:
    gender_le = pickle.load(f)
with open(deploy.AGE_MEANS) as f:
    age_means = json.load(f)
with open(deploy.GENDER_MEANS) as f:
    gender_means = json.load(f)

# load models from disk; the age network was saved with the custom
# one-off accuracy metric, so it must be re-registered for load_model
age_path = deploy.AGE_NETWORK_PATH
gender_path = deploy.GENDER_NETWORK_PATH
gender_model = load_model(gender_path)
agh = AgeGenderHelper(config, deploy)
one_off_mappings = agh.build_oneoff_mappings(age_le)
one_off = OneOffAccuracy(one_off_mappings)
custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}
age_model = load_model(age_path, custom_objects=custom_objects)

# initialize image preprocessors: resize to 256x256, per-dataset mean
# subtraction, 227x227 cropping, array conversion
sp = SimplePreprocessor(256, 256, inter=cv2.INTER_CUBIC)
age_mp = MeanPreprocessor(age_means['R'], age_means['G'], age_means['B'])
gender_mp = MeanPreprocessor(gender_means['R'], gender_means['G'], gender_means['B'])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# initialize dlib's face detector (HOG-based), then create the facial
# landmark predictor and face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(deploy.DLIB_LANDMARK_PATH)
fa = FaceAligner(predictor)
def calculate_score(means_path, label_encoder_path, best_weight_path, test_hdf5_path, cross_val=None, preds_cross=None, labels_cross=None, is_mapped=False):
    """Evaluate the best model on the test set in two passes.

    Pass 1 predicts on simply-resized images, saves every misclassified
    image (annotated) to disk, and plots a confusion matrix (non-CV runs).
    Pass 2 re-predicts using oversampled crops averaged per image.

    Parameters
    ----------
    means_path : JSON file with training-set RGB means.
    label_encoder_path : pickled LabelEncoder for the dataset's classes.
    best_weight_path : serialized Keras model to evaluate.
    test_hdf5_path : HDF5 test dataset.
    cross_val : optional fold index; switches log messages/output paths and
        accumulates results into preds_cross/labels_cross.
    preds_cross, labels_cross : lists extended in-place in CV mode
        (must be provided when cross_val is not None).
    is_mapped : when True, model outputs are remapped to IoG labels via
        agh.build_mapping_to_iog_labels() before scoring.

    Returns
    -------
    (score, score_one_off, score_crops, score_one_off_crops) — rank-1 and
    one-off accuracies for both passes; one-off values are None unless
    config.DATASET_TYPE == 'age'.
    """
    # load RGB means for training set
    means = json.loads(open(means_path).read())
    # load LabelEncoder
    le = pickle.loads(open(label_encoder_path, 'rb').read())
    # initialize image preprocessors
    sp, mp, cp, iap = SimplePreprocessor(
        config.IMAGE_SIZE, config.IMAGE_SIZE), MeanPreprocessor(
        means['R'], means['G'], means['B']), CropPreprocessor(
        config.IMAGE_SIZE, config.IMAGE_SIZE), ImageToArrayPreprocessor()
    # only age models were saved with the custom one-off accuracy metric,
    # which must be re-registered for load_model to deserialize them
    custom_objects = None
    agh = AgeGenderHelper(config, deploy)
    if config.DATASET_TYPE == 'age':
        one_off_mappings = agh.build_oneoff_mappings(le)
        one_off = OneOffAccuracy(one_off_mappings)
        custom_objects = {'one_off_accuracy': one_off.one_off_accuracy}
    # load model
    print(f'[INFO] loading {best_weight_path}...')
    model = load_model(best_weight_path, custom_objects=custom_objects)
    # initialize testing dataset generator, then predict
    if cross_val is None:
        print(
            f'[INFO] predicting in testing data (no crops){config.SALIENCY_INFO}...'
        )
    else:
        print(
            f'[INFO] predicting in testing data (no crops) for cross validation {cross_val}{config.SALIENCY_INFO}...'
        )
    test_gen = HDF5DatasetGenerator(test_hdf5_path, batch_size=config.BATCH_SIZE, preprocessors=[sp, mp, iap], classes=config.NUM_CLASSES)
    preds = model.predict_generator(test_gen.generator(), steps=test_gen.num_images // config.BATCH_SIZE)
    # compute rank-1 and one-off accuracies; labels are truncated to the
    # number of whole batches so they align with predict_generator's output
    labels = to_categorical(
        test_gen.db['labels'][0:config.BATCH_SIZE * (test_gen.num_images // config.BATCH_SIZE)],
        num_classes=config.NUM_CLASSES)
    preds_mapped = preds.argmax(axis=1)
    if is_mapped == True:
        # remap network class indices to IoG label indices before scoring
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]
    if cross_val is None:
        print(
            '[INFO] serializing all images classified incorrectly for testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, config.DATASET_TYPE])
        agh.plot_confusion_matrix_from_data(config, labels.argmax(axis=1), preds_mapped, le=le, save_path=os.path.sep.join([
            config.OUTPUT_BASE, f'cm_{config.DATASET_TYPE}.png'
        ]))
    else:
        print(
            f'[INFO] serializing all images classified incorrectly for cross validation {cross_val} of testing dataset...'
        )
        prefix_path = os.path.sep.join(
            [config.WRONG_BASE, f'Cross{cross_val}', config.DATASET_TYPE])
        # accumulate fold results into the caller-provided lists
        preds_cross.extend(preds_mapped.tolist())
        labels_cross.extend(labels.argmax(axis=1).tolist())
    # start with a clean directory of misclassified images
    if os.path.exists(prefix_path):
        shutil.rmtree(prefix_path)
    os.makedirs(prefix_path)
    # write each misclassified test image annotated with actual/predicted labels
    for i, (pred, label) in enumerate(zip(preds_mapped, labels.argmax(axis=1))):
        if pred != label:
            image = test_gen.db['images'][i]
            if config.DATASET_TYPE == 'age':
                # prettify age-bucket names, e.g. '60_inf' -> '60+'
                real_label, real_pred = le.classes_[label], le.classes_[pred]
                real_label = real_label.replace('_', '-')
                real_label = real_label.replace('-inf', '+')
                real_pred = real_pred.replace('_', '-')
                real_pred = real_pred.replace('-inf', '+')
            elif config.DATASET_TYPE == 'gender':
                # gender encoding: class 0 is Male, otherwise Female
                real_label = 'Male' if label == 0 else 'Female'
                real_pred = 'Male' if pred == 0 else 'Female'
            cv2.putText(image, f'Actual: {real_label}, Predict: {real_pred}', (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
            cv2.imwrite(os.path.sep.join([prefix_path, f'{i:05d}.jpg']), image)
    score = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score:.4f}')
    score_one_off = None
    if config.DATASET_TYPE == 'age':
        score_one_off = one_off.one_off_compute(
            labels, to_categorical(preds_mapped, num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off:.4f}')
    test_gen.close()
    # --- pass 2: oversampled-crop evaluation ---
    # re-initialize testing generator, now excluding SimplePreprocessor
    # (cropping is applied manually per image below)
    test_gen = HDF5DatasetGenerator(test_hdf5_path, config.BATCH_SIZE, preprocessors=[mp], classes=config.NUM_CLASSES)
    preds = []
    # this pass uses ALL labels (no batch truncation — generator runs passes=1)
    labels = to_categorical(test_gen.db['labels'], num_classes=config.NUM_CLASSES)
    print('[INFO] predicting in testing data (with crops)...')
    # initialize progress bar
    widgets = [
        'Evaluating: ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(maxval=math.ceil(test_gen.num_images / config.BATCH_SIZE), widgets=widgets).start()
    for i, (images, _) in enumerate(test_gen.generator(passes=1)):
        for image in images:
            # predict on every crop of the image, then average the crop
            # predictions into a single distribution
            crops = cp.preprocess(image)
            crops = np.array([iap.preprocess(c) for c in crops])
            pred = model.predict(crops)
            preds.append(pred.mean(axis=0))
        pbar.update(i)
    pbar.finish()
    test_gen.close()
    # compute rank-1 accuracy
    preds_mapped = np.argmax(preds, axis=1)
    if is_mapped == True:
        preds_mapped = agh.build_mapping_to_iog_labels()[preds_mapped]
    score_crops = accuracy_score(labels.argmax(axis=1), preds_mapped)
    print(f'[INFO] rank-1: {score_crops:.4f}')
    score_one_off_crops = None
    if config.DATASET_TYPE == 'age':
        score_one_off_crops = one_off.one_off_compute(
            labels, to_categorical(preds_mapped, num_classes=config.NUM_CLASSES))
        print(f'[INFO] one-off: {score_one_off_crops:.4f}')
    return score, score_one_off, score_crops, score_one_off_crops