def POST(self):
    """Handle an uploaded image and run face recognition on it.

    Reads the uploaded image from the multipart form field
    'imagefile', stores it in the configured directory, recognizes
    the face and renders the results page.

    :returns: rendered page with the recognition output
    """
    # Read user input (uploaded file comes in as 'imagefile')
    person = web.input(imagefile={})
    # Directory to store the file in.
    # Raw string so the backslashes of the Windows path are never
    # interpreted as escape sequences.
    filedir = r'C:\Documents and Settings\Rigs\Desktop\RESTOpenCV\other_faces'
    new_filename = copy_file(filedir, person)
    # Call the recognizing function from the face_recognizer module
    # and store its results
    result = fc.recognize_face(new_filename)
    # Render the page with the output.
    return render.rec(result)
def fr_test(params, show_results):
    """
    Execute software test on face_recognition module.

    :type params: dictionary
    :param params: configuration parameters to be used for the test
                   (may override the default test image path via
                   ce.SOFTWARE_TEST_FILE_PATH_KEY)

    :type show_results: boolean
    :param show_results: show (True) or do not show (False)
                         image with assigned tag

    :rtype: boolean
    :returns: True if test was successful, False otherwise
    """
    # Default test image, relative to the working directory
    image_path = ('..' + os.sep + 'test_files' + os.sep +
                  'face_recognition' + os.sep + 'Test.pgm')
    if params is not None and ce.SOFTWARE_TEST_FILE_PATH_KEY in params:
        image_path = params[ce.SOFTWARE_TEST_FILE_PATH_KEY]
    test_passed = True
    if os.path.isfile(image_path):
        try:
            image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            recognition_results = recognize_face(
                image, None, params, show_results)
            if recognition_results is not None:
                error = recognition_results[c.ERROR_KEY]
                if not error:
                    tag = recognition_results[c.ASSIGNED_TAG_KEY]
                    confidence = recognition_results[c.CONFIDENCE_KEY]
                    # An empty tag or a negative confidence means
                    # recognition did not produce a usable answer
                    if len(tag) == 0:
                        test_passed = False
                    if confidence < 0:
                        test_passed = False
                else:
                    test_passed = False
            else:
                test_passed = False
        except IOError as e:
            # Was the py2-only form 'except IOError, (errno, strerror)';
            # this spelling works on both Python 2.6+ and Python 3
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            test_passed = False
        except Exception:
            # Was a bare 'except:'; narrowed and reported instead of
            # silently swallowing the failure
            print("Unexpected error: {0}".format(sys.exc_info()[0]))
            test_passed = False
    else:
        # Test image is missing: the test cannot pass
        test_passed = False
    return test_passed
def fr_experiments(params, show_results):
    """
    Execute face recognition experiments over a test set of images.

    :type params: dictionary
    :param params: configuration parameters for the experiment.
        Recognized keys (defaults in parentheses) include:

        - dataset layout: ``dataset_already_divided`` (False),
          ``dataset_path``, ``training_set_path``, ``test_set_path``,
          ``training_images_nr``, ``face_recognition_results_path``
        - face detection: ``classifiers_dir_path``,
          ``face_detection_algorithm`` ('HaarCascadeFrontalFaceAlt2'),
          ``flags`` ('DoCannyPruning'), ``min_neighbors`` (5),
          ``min_size_height``/``min_size_width`` (20),
          ``scale_factor`` (1.1), ``use_face_detection_in_training`` (False)
        - eye/nose handling: ``check_eye_positions`` (True),
          ``eye_detection_classifier`` ('haarcascade_mcs_lefteye.xml'),
          ``nose_detection_classifier`` ('haarcascade_mcs_nose.xml'),
          ``max_eye_angle`` (0.125), ``min_eye_distance`` (0.25),
          ``use_nose_pos_in_detection`` (False)
        - alignment/cropping: ``aligned_faces_path``,
          ``cropped_face_height`` (400), ``cropped_face_width`` (200),
          ``offset_pct_x`` (0.20), ``offset_pct_y`` (0.50),
          ``use_eyes_position`` (True),
          ``use_eyes_position_in_training`` (True),
          ``use_eye_detection`` (True),
          ``use_eye_detection_in_training`` (True),
          ``use_resizing`` (True)
        - recognition model: ``db_name``, ``db_models_path``,
          ``face_model_algorithm`` ('LBP'), ``LBP_grid_x`` (4),
          ``LBP_grid_y`` (8), ``LBP_neighbors`` (8), ``LBP_radius`` (1),
          ``use_NBNN`` (False), ``use_one_file_for_face_models`` (True),
          ``use_weighted_regions`` (False), ``software_test_file``

    :type show_results: boolean
    :param show_results: show (True) or do not show (False)
                         images with detected faces
    """
    rec_images_nr = 0  # Number of correctly recognized images
    test_images_nr = 0  # Number of total test images
    # Accumulated recognition CPU time (presumably divided by the image
    # count after the part of the function shown here — TODO confirm)
    mean_rec_time = 0
    # List of confidence values for true positives
    true_pos_confidence_list = []
    # List of confidence values for false positives
    false_pos_confidence_list = []
    fm = FaceModels(params)
    training_images_nr = ce.TRAINING_IMAGES_NR
    if params is not None:
        # Number of images for each person to be used for the training
        training_images_nr = params[ce.TRAINING_IMAGES_NR_KEY]
    # Number of people
    people_nr = fm.get_people_nr()
    rec_dict = {}  # Dictionary containing all results for this experiment
    # List used for creating YAML file with list of images
    images_list_for_YAML = []
    # List used for creating YAML file with list of people
    people_list_for_YAML = []
    # List containing recognition rates
    rec_rate_list = []
    # Initialize dictionaries with people
    people_true_positives_dict = {}
    people_false_positives_dict = {}
    people_test_images_nr_dict = {}
    tags = fm.get_tags()
    for tag in tags:
        people_true_positives_dict[tag] = 0
        people_false_positives_dict[tag] = 0
        people_test_images_nr_dict[tag] = 0
    dataset_already_divided = ce.DATASET_ALREADY_DIVIDED
    # directory with test set
    test_set_path = ce.FACE_RECOGNITION_TEST_SET_PATH
    if not dataset_already_divided:
        # Whole dataset is used; training/test split is done per person
        # below by taking the first training_images_nr images
        test_set_path = ce.FACE_RECOGNITION_DATASET_PATH
    results_path = ce.FACE_RECOGNITION_RESULTS_PATH
    if params is not None:
        # Get path of directories with used files from params
        if ce.DATASET_ALREADY_DIVIDED_KEY in params:
            dataset_already_divided = params[ce.DATASET_ALREADY_DIVIDED_KEY]
        if dataset_already_divided:
            if ce.TEST_SET_PATH_KEY in params:
                test_set_path = params[ce.TEST_SET_PATH_KEY]
        else:
            if ce.DATASET_PATH_KEY in params:
                test_set_path = params[ce.DATASET_PATH_KEY]
        # directory with results
        if ce.FACE_RECOGNITION_RESULTS_PATH_KEY in params:
            results_path = params[ce.FACE_RECOGNITION_RESULTS_PATH_KEY]
    # Iterate over all directories with images
    # (one sub-directory per annotated person)
    images_dirs = os.listdir(test_set_path)
    total_test_images_nr = 0
    for images_dir in images_dirs:
        # Directory name is the annotated (ground-truth) tag
        ann_face_tag = images_dir
        images_dir_complete_path = os.path.join(test_set_path, images_dir)
        # Iterate over all images in this directory
        image_counter = 0
        person_test_images = 0
        person_rec_images = 0
        for image in os.listdir(images_dir_complete_path):
            # If dataset is not already divided,
            # first training_images_nr images are used for training,
            # the remaining for test
            if(dataset_already_divided or
                    (image_counter >= training_images_nr)):
                total_test_images_nr += 1
                person_test_images += 1
                # Complete path of image
                image_complete_path = os.path.join(
                    images_dir_complete_path, image)
                try:
                    assigned_tag = 'Undefined'
                    confidence = -1
                    if USE_FACEEXTRACTOR:
                        # Detection + recognition via the FaceExtractor
                        # pipeline; only the first detected face is used
                        fe = FaceExtractor(fm, params)
                        handle = fe.extract_faces_from_image(
                            image_complete_path)
                        results = fe.get_results(handle)
                        faces = results[c.FACES_KEY]
                        if len(faces) != 0:
                            face = faces[0]
                            mean_rec_time = (
                                mean_rec_time +
                                results[c.ELAPSED_CPU_TIME_KEY])
                            assigned_tag = face[c.ASSIGNED_TAG_KEY]
                            confidence = face[c.CONFIDENCE_KEY]
                    else:
                        # Manual pre-processing path: load, optionally
                        # align by eye position and/or resize, then
                        # recognize
                        face = cv2.imread(
                            image_complete_path, cv2.IMREAD_GRAYSCALE)
                        sz = None
                        use_resizing = ce.USE_RESIZING
                        use_eyes_position = c.USE_EYES_POSITION
                        use_eye_detection = ce.USE_EYE_DETECTION
                        offset_pct_x = c.OFFSET_PCT_X
                        offset_pct_y = c.OFFSET_PCT_Y
                        if params is not None:
                            if ce.USE_RESIZING_KEY in params:
                                use_resizing = params[ce.USE_RESIZING_KEY]
                            if c.USE_EYES_POSITION_KEY in params:
                                use_eyes_position = (
                                    params[c.USE_EYES_POSITION_KEY])
                            if ce.USE_EYE_DETECTION_KEY in params:
                                use_eye_detection = (
                                    params[ce.USE_EYE_DETECTION_KEY])
                            if c.OFFSET_PCT_X_KEY in params:
                                offset_pct_x = params[c.OFFSET_PCT_X_KEY]
                            if c.OFFSET_PCT_Y_KEY in params:
                                offset_pct_y = params[c.OFFSET_PCT_Y_KEY]
                        if use_resizing:
                            width = c.CROPPED_FACE_WIDTH
                            height = c.CROPPED_FACE_HEIGHT
                            if params is not None:
                                if c.CROPPED_FACE_WIDTH_KEY in params:
                                    width = params[c.CROPPED_FACE_WIDTH_KEY]
                                if c.CROPPED_FACE_HEIGHT_KEY in params:
                                    height = params[c.CROPPED_FACE_HEIGHT_KEY]
                            sz = (width, height)
                        if use_eyes_position:
                            align_path = c.ALIGNED_FACES_PATH
                            if params is not None:
                                align_path = params[c.ALIGNED_FACES_PATH_KEY]
                            if use_eye_detection:
                                face = fd.get_cropped_face(
                                    image_complete_path, align_path,
                                    params, return_always_face=False)
                            else:
                                face = fd.get_cropped_face_using_fixed_eye_pos(
                                    image_complete_path, align_path,
                                    offset_pct=(offset_pct_x, offset_pct_y),
                                    dest_size=sz)
                            # Cropping helpers return a dict; extract
                            # the image itself
                            if face is not None:
                                face = face[c.FACE_KEY]
                        else:
                            if sz is not None:
                                face = cv2.resize(face, sz)
                        if face is not None:
                            rec_results = recognize_face(
                                face, fm, params, show_results)
                            assigned_tag = rec_results[c.ASSIGNED_TAG_KEY]
                            confidence = rec_results[c.CONFIDENCE_KEY]
                            # Add recognition time to total
                            mean_rec_time = (
                                mean_rec_time +
                                rec_results[c.ELAPSED_CPU_TIME_KEY])
                    # Record the outcome for this image (TP when the
                    # assigned tag matches the directory/annotated tag)
                    image_dict = {ce.IMAGE_KEY: image,
                                  c.ANN_TAG_KEY: images_dir,
                                  c.ASSIGNED_TAG_KEY: assigned_tag,
                                  c.CONFIDENCE_KEY: confidence}
                    if assigned_tag == ann_face_tag:
                        image_dict[ce.PERSON_CHECK_KEY] = 'TP'
                        people_true_positives_dict[assigned_tag] += 1
                        rec_images_nr += 1
                        true_pos_confidence_list.append(confidence)
                        person_rec_images += 1
                    else:
                        image_dict[ce.PERSON_CHECK_KEY] = 'FP'
                        # 'Undefined' means no face was recognized at
                        # all, so it is not counted against any person
                        if assigned_tag != 'Undefined':
                            people_false_positives_dict[assigned_tag] += 1
                            false_pos_confidence_list.append(confidence)
                    image_dict_extended = {ce.IMAGE_KEY: image_dict}
                    images_list_for_YAML.append(image_dict_extended)
                except IOError, (errno, strerror):
                    print "I/O error({0}): {1}".format(errno, strerror)
                except:
                    print "Unexpected error:", sys.exc_info()[0]
                    raise
# Interactive entry point: register a new face or recognize one.
print("Welcome to Face Recognition System")
print("Choose your option")
print("(1) Register your Face.")
print("(2) Recognize your Face.")

choice = input("Your option: ")
# 'is' compared string identity, not equality, in the original;
# '==' is the correct comparison for user input.
if choice == "1":
    # do registration
    username = input("Username: ")
    # NOTE(review): the face-capture step here was redacted in the
    # original source ('******'); restore the call that captures and
    # detects the face for `username` before training.
    print("Detected your face.")
    print("Training your face...")
    from train_faces import train_faces
    train_faces()
    print("Training successful")
elif choice == "2":
    from face_recognition import recognize_face
    recognize_face()
else:
    print("Your choice is invalid.")
def extract_faces_from_image(self, resource_path):
    """
    Launch the face extractor on one image resource.
    This method returns a task handle; pass it to get_results()
    to retrieve the stored results dictionary.

    :type resource_path: string
    :param resource_path: resource file path

    :rtype: float
    :returns: handle for getting results
    """
    # Save processing time
    start_time = cv2.getTickCount()
    error = None
    # Face detection: align faces using the configured directory
    align_path = c.ALIGNED_FACES_PATH
    if ((self.params is not None) and
            (c.ALIGNED_FACES_PATH_KEY in self.params)):
        align_path = self.params[c.ALIGNED_FACES_PATH_KEY]
    detection_result = detect_faces_in_image(
        resource_path, align_path, self.params, False)
    detection_error = detection_result[c.ERROR_KEY]
    if not detection_error:
        face_images = detection_result[c.FACES_KEY]
        # Face recognition: classify each detected face
        faces = []
        for det_face_dict in face_images:
            face_dict = {}
            face = det_face_dict[c.FACE_KEY]
            bbox = det_face_dict[c.BBOX_KEY]
            # Resize face if required by the configuration
            resize_face = ce.USE_RESIZING
            if ((self.params is not None) and
                    (ce.USE_RESIZING_KEY in self.params)):
                resize_face = self.params[ce.USE_RESIZING_KEY]
            if resize_face:
                face_width = c.CROPPED_FACE_WIDTH
                face_height = c.CROPPED_FACE_HEIGHT
                # Both dimensions must be given to override the defaults
                if ((self.params is not None) and
                        (c.CROPPED_FACE_WIDTH_KEY in self.params) and
                        (c.CROPPED_FACE_HEIGHT_KEY in self.params)):
                    face_width = self.params[c.CROPPED_FACE_WIDTH_KEY]
                    face_height = self.params[c.CROPPED_FACE_HEIGHT_KEY]
                new_size = (face_width, face_height)
                face = cv2.resize(face, new_size)
            rec_result = recognize_face(
                face, self.face_models, self.params, False)
            tag = rec_result[c.ASSIGNED_TAG_KEY]
            confidence = rec_result[c.CONFIDENCE_KEY]
            face_dict[c.ASSIGNED_TAG_KEY] = tag
            face_dict[c.CONFIDENCE_KEY] = confidence
            face_dict[c.BBOX_KEY] = bbox
            face_dict[c.FACE_KEY] = face
            faces.append(face_dict)
        processing_time_in_clocks = cv2.getTickCount() - start_time
        processing_time_in_seconds = (
            processing_time_in_clocks / cv2.getTickFrequency())
        # Populate dictionary with results
        results = {c.ELAPSED_CPU_TIME_KEY: processing_time_in_seconds,
                   c.ERROR_KEY: error,
                   c.FACES_KEY: faces}
    else:
        results = {c.ERROR_KEY: detection_error}
    self.progress = 100
    # The current timestamp serves as the task handle keying the
    # per-image result store
    handle = time.time()
    self.db_result4image[handle] = results
    return handle