Example #1
File: fr_test.py Project: crs4/ACTIVE
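# Note: this snippet omits the module-level imports of fr_test.py; it relies
# on os, sys, cv2 and numpy (np), on the project's constants modules (c, ce),
# on the face-detection helpers (fd), and on FaceModels, FaceExtractor,
# recognize_face and USE_FACEEXTRACTOR defined elsewhere in the project.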
def fr_experiments(params, show_results):
    """
    Execute face recognition experiments

    :type params: dictionary
    :param params: configuration parameters
                   to be used for the experiment (see table)

    :type show_results: boolean
    :param show_results: show (True) or do not show (False)
                         images with detected faces

    ============================================  ========================================  ==============================
    Key                                           Value                                     Default value
    ============================================  ========================================  ==============================
    check_eye_positions                           If True, check eye positions              True
    classifiers_dir_path                          Path of directory with OpenCV
                                                  cascade classifiers
    eye_detection_classifier                      Classifier for eye detection              'haarcascade_mcs_lefteye.xml'
    face_detection_algorithm                      Classifier for face detection             'HaarCascadeFrontalFaceAlt2'
                                                  ('HaarCascadeFrontalFaceAlt',
                                                  'HaarCascadeFrontalFaceAltTree',
                                                  'HaarCascadeFrontalFaceAlt2',
                                                  'HaarCascadeFrontalFaceDefault',
                                                  'HaarCascadeProfileFace',
                                                  'HaarCascadeFrontalAndProfileFaces',
                                                  'HaarCascadeFrontalAndProfileFaces2',
                                                  'LBPCascadeFrontalface',
                                                  'LBPCascadeProfileFace' or
                                                  'LBPCascadeFrontalAndProfileFaces')
    flags                                         Flags used in face detection              'DoCannyPruning'
                                                  ('DoCannyPruning', 'ScaleImage',
                                                  'FindBiggestObject', 'DoRoughSearch').
                                                  If 'DoCannyPruning' is used, regions
                                                  that do not contain lines are discarded.
                                                  If 'ScaleImage' is used, image instead
                                                  of the detector is scaled
                                                  (it can be advantageous in terms of
                                                  memory and cache use).
                                                  If 'FindBiggestObject' is used,
                                                  only the biggest object is returned
                                                  by the detector.
                                                  'DoRoughSearch', used together with
                                                  'FindBiggestObject',
                                                  terminates the search as soon as
                                                  the first candidate object is found
    min_neighbors                                 Minimum number of neighbor bounding       5
                                                  boxes for retaining face detection
    min_size_height                               Minimum height of face detection          20
                                                  bounding box (in pixels)
    min_size_width                                Minimum width of face detection           20
                                                  bounding box (in pixels)
    scale_factor                                  Scale factor between two scans            1.1
                                                  in face detection
    max_eye_angle                                 Maximum inclination of the line           0.125
                                                  connecting the eyes
                                                  (in % of pi radians)
    min_eye_distance                              Minimum distance between eyes             0.25
                                                  (in % of the width of the face
                                                  bounding box)
    nose_detection_classifier                     Classifier for nose detection             'haarcascade_mcs_nose.xml'
    use_nose_pos_in_detection                     If True, detections with no good          False
                                                  nose position are discarded
    aligned_faces_path                            Default path of directory
                                                  for aligned faces
    cropped_face_height                           Height of aligned faces (in pixels)       400
    cropped_face_width                            Width of aligned faces (in pixels)        200
    dataset_already_divided                       If True, dataset is already divided       False
                                                  between training and test set
    dataset_path                                  Path of whole dataset, used if dataset
                                                  is not already divided between
                                                  training and test set
    db_name                                       Name of single file
                                                  containing face models
    db_models_path                                Path of directory containing face models
    face_model_algorithm                          Algorithm for face recognition            'LBP'
                                                  ('Eigenfaces', 'Fisherfaces' or 'LBP')
    face_recognition_results_path                 Path of directory where
                                                  test results will be saved
    test_set_path                                 Path of directory containing
                                                  test set
    training_set_path                             Path of directory containing
                                                  training set
    LBP_grid_x                                    Number of columns in grid                 4
                                                  used for calculating LBP
    LBP_grid_y                                    Number of rows in grid                    8
                                                  used for calculating LBP
    LBP_neighbors                                 Number of neighbors                       8
                                                  used for calculating LBP
    LBP_radius                                    Radius used                               1
                                                  for calculating LBP (in pixels)
    offset_pct_x                                  % of the image to keep next to            0.20
                                                  the eyes in the horizontal direction
    offset_pct_y                                  % of the image to keep next to            0.50
                                                  the eyes in the vertical direction
    software_test_file                            Path of image to be used for
                                                  software test
    training_images_nr                            Number of images per person used in
                                                  training set
    use_eye_detection                             If True, use eye detection for detecting  True
                                                  eye position for aligning faces in
                                                  test images
    use_eye_detection_in_training                 If True, use eye detection for detecting  True
                                                  eye position for aligning faces in
                                                  training images
    use_eyes_position                             If True, align faces in test images       True
                                                  by using eye positions
    use_eyes_position_in_training                 If True, align faces in training images   True
                                                  by using eye positions
    use_face_detection_in_training                If True, use face detection               False
                                                  for images in training set
    use_NBNN                                      If True,                                  False
                                                  use Naive Bayes Nearest Neighbor
    use_one_file_for_face_models                  If True, use one file for face models     True
    use_resizing                                  If True, resize images                    True
    use_weighted_regions                          If True, use weighted LBP                 False
    ============================================  ========================================  ==============================
    """

    rec_images_nr = 0  # Number of correctly recognized images
    test_images_nr = 0  # Number of total test images
    mean_rec_time = 0  # Accumulated recognition CPU time (used to compute the mean)

    # List of confidence values for true positives
    true_pos_confidence_list = []
    # List of confidence values for false positives
    false_pos_confidence_list = []

    fm = FaceModels(params)

    training_images_nr = ce.TRAINING_IMAGES_NR

    if params is not None:
        # Number of images for each person to be used for the training
        training_images_nr = params[ce.TRAINING_IMAGES_NR_KEY]

    # Number of people
    people_nr = fm.get_people_nr()

    rec_dict = {}  # Dictionary containing all results for this experiment
    # List used for creating YAML file with list of images
    images_list_for_YAML = []
    # List used for creating YAML file with list of people
    people_list_for_YAML = []

    # List containing recognition rates
    rec_rate_list = []

    # Initialize dictionaries with people
    people_true_positives_dict = {}
    people_false_positives_dict = {}
    people_test_images_nr_dict = {}
    
    tags = fm.get_tags()  # Person identifiers known to the face models
    
    for tag in tags:
        
        people_true_positives_dict[tag] = 0
        people_false_positives_dict[tag] = 0
        people_test_images_nr_dict[tag] = 0

    dataset_already_divided = ce.DATASET_ALREADY_DIVIDED

    # directory with test set
    test_set_path = ce.FACE_RECOGNITION_TEST_SET_PATH
    
    if not dataset_already_divided:
        test_set_path = ce.FACE_RECOGNITION_DATASET_PATH
   
    results_path = ce.FACE_RECOGNITION_RESULTS_PATH

    if params is not None:
        # Get path of directories with used files from params
        if ce.DATASET_ALREADY_DIVIDED_KEY in params:
            dataset_already_divided = params[ce.DATASET_ALREADY_DIVIDED_KEY]
        
        if dataset_already_divided:
            if ce.TEST_SET_PATH_KEY in params:
                test_set_path = params[ce.TEST_SET_PATH_KEY]
        else:
            if ce.DATASET_PATH_KEY in params:
                test_set_path = params[ce.DATASET_PATH_KEY]
        
        # directory with results
        if ce.FACE_RECOGNITION_RESULTS_PATH_KEY in params:
            results_path = params[ce.FACE_RECOGNITION_RESULTS_PATH_KEY]

    # Iterate over all directories with images
    images_dirs = os.listdir(test_set_path)

    total_test_images_nr = 0
    for images_dir in images_dirs:

        ann_face_tag = images_dir
        
        images_dir_complete_path = os.path.join(test_set_path, images_dir)

        # Iterate over all images in this directory
        image_counter = 0  # Index of the current image within this directory
        person_test_images = 0
        person_rec_images = 0
        
        for image in os.listdir(images_dir_complete_path):

            # If dataset is not already divided,
            # first training_images_nr images are used for training,
            # the remaining for test
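            # Illustrative example: with training_images_nr = 5 and an
            # undivided dataset, images 0-4 of each person's directory are
            # skipped here (they were used for training) and the remaining
            # images are tested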
            if (dataset_already_divided
                    or (image_counter >= training_images_nr)):

                total_test_images_nr += 1
                person_test_images += 1
                
                # Complete path of image
                image_complete_path = os.path.join(
                    images_dir_complete_path, image)

                try:

                    assigned_tag = 'Undefined'  # Default when no face is recognized
                    confidence = -1  # Default confidence when no face is recognized

                    if USE_FACEEXTRACTOR:

                        fe = FaceExtractor(fm, params)

                        handle = fe.extract_faces_from_image(
                            image_complete_path)

                        results = fe.get_results(handle)

                        faces = results[c.FACES_KEY]

                        if len(faces) != 0:
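                            # Only the first detected face in the image
                            # is used for recognition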
                            face = faces[0]

                            mean_rec_time = (
                                mean_rec_time +
                                results[c.ELAPSED_CPU_TIME_KEY])
                            
                            assigned_tag = face[c.ASSIGNED_TAG_KEY]

                            confidence = face[c.CONFIDENCE_KEY]
                    else:
                        face = cv2.imread(
                            image_complete_path, cv2.IMREAD_GRAYSCALE)

                        sz = None
                        
                        use_resizing = ce.USE_RESIZING
                        use_eyes_position = c.USE_EYES_POSITION
                        use_eye_detection = ce.USE_EYE_DETECTION
                        offset_pct_x = c.OFFSET_PCT_X
                        offset_pct_y = c.OFFSET_PCT_Y
                    
                        if params is not None:
                            if ce.USE_RESIZING_KEY in params:
                                use_resizing = params[ce.USE_RESIZING_KEY]
                            if c.USE_EYES_POSITION_KEY in params:
                                use_eyes_position = (
                                    params[c.USE_EYES_POSITION_KEY])
                            if ce.USE_EYE_DETECTION_KEY in params:
                                use_eye_detection = (
                                    params[ce.USE_EYE_DETECTION_KEY])
                            if c.OFFSET_PCT_X_KEY in params:
                                offset_pct_x = params[c.OFFSET_PCT_X_KEY]
                            if c.OFFSET_PCT_Y_KEY in params:
                                offset_pct_y = params[c.OFFSET_PCT_Y_KEY]  
                        
                        if use_resizing:
                            
                            width = c.CROPPED_FACE_WIDTH
                            height = c.CROPPED_FACE_HEIGHT
                    
                            if params is not None:
                                
                                if c.CROPPED_FACE_WIDTH_KEY in params:
                                    width = params[c.CROPPED_FACE_WIDTH_KEY]
                                
                                if c.CROPPED_FACE_HEIGHT_KEY in params:
                                    height = params[c.CROPPED_FACE_HEIGHT_KEY]
                                
                            sz = (width, height)
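                            # (width, height) matches the dsize argument order
                            # expected by cv2.resize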

                        if use_eyes_position:
                            
                            align_path = c.ALIGNED_FACES_PATH
                            
                            if params is not None:
                                
                                align_path = params[c.ALIGNED_FACES_PATH_KEY]
                            
                            if use_eye_detection:
                                face = fd.get_cropped_face(
                                    image_complete_path, align_path, params,
                                    return_always_face=False)
                            else:
                                face = fd.get_cropped_face_using_fixed_eye_pos(
                                    image_complete_path, align_path, 
                                    offset_pct=(offset_pct_x, offset_pct_y), 
                                    dest_size=sz)

                            if face is not None:
                                face = face[c.FACE_KEY]
                        else:
                            if sz is not None:
                                face = cv2.resize(face, sz)
                        
                        if face is not None:
                        
                            rec_results = recognize_face(
                                face, fm, params, show_results)

                            assigned_tag = rec_results[c.ASSIGNED_TAG_KEY]
                            confidence = rec_results[c.CONFIDENCE_KEY]
                        
                            # Add recognition time to total
                            mean_rec_time = (
                                mean_rec_time +
                                rec_results[c.ELAPSED_CPU_TIME_KEY])

                    image_dict = {ce.IMAGE_KEY: image,
                                  c.ANN_TAG_KEY: images_dir,
                                  c.ASSIGNED_TAG_KEY: assigned_tag,
                                  c.CONFIDENCE_KEY: confidence}
                    
                    if assigned_tag == ann_face_tag:
                        image_dict[ce.PERSON_CHECK_KEY] = 'TP'
                        people_true_positives_dict[assigned_tag] += 1
                        rec_images_nr += 1
                        true_pos_confidence_list.append(confidence)
                        person_rec_images += 1
                    else:
                        image_dict[ce.PERSON_CHECK_KEY] = 'FP'
                        if assigned_tag != 'Undefined':
                            people_false_positives_dict[assigned_tag] += 1
                            false_pos_confidence_list.append(confidence)

                    image_dict_extended = {ce.IMAGE_KEY: image_dict}

                    images_list_for_YAML.append(image_dict_extended)
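                    # The accumulated per-image records are presumably dumped
                    # to a YAML report under results_path later in the full
                    # fr_test.py (not shown in this snippet)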

                except IOError, (errno, strerror):
                    print "I/O error({0}): {1}".format(errno, strerror)
                except:
                    print "Unexpected error:", sys.exc_info()[0]
                    raise
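            # Assumption: the full fr_test.py advances the per-directory image
            # counter here; without it the training/test split above would
            # never skip the training images
            image_counter += 1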
    def __read_images(self, path, sz=None):

        l = 0  # Current integer label (one per subject directory)
        X, y = [], []  # X: face images as uint8 arrays, y: corresponding labels
        
        # Set parameters
        align_path = c.ALIGNED_FACES_PATH
        use_eyes_pos_in_training = ce.USE_EYES_POSITION_IN_TRAINING
        use_eye_det_in_training = ce.USE_EYE_DETECTION_IN_TRAINING
        use_face_det_in_training = ce.USE_FACE_DETECTION_IN_TRAINING
        offset_pct_x = c.OFFSET_PCT_X
        offset_pct_y = c.OFFSET_PCT_Y
        
        if self._params is not None:
            
            align_path = self._params[c.ALIGNED_FACES_PATH_KEY]
            use_eyes_pos_in_training = (
                self._params[ce.USE_EYES_POSITION_IN_TRAINING_KEY])
            use_eye_det_in_training = (
                self._params[ce.USE_EYE_DETECTION_IN_TRAINING_KEY])
            use_face_det_in_training = (
                self._params[ce.USE_FACE_DETECTION_IN_TRAINING_KEY])
            offset_pct_x = self._params[c.OFFSET_PCT_X_KEY]
            offset_pct_y = self._params[c.OFFSET_PCT_Y_KEY]
        
        for dirname, dirnames, filenames in os.walk(path):
            for subdirname in dirnames:
                # print "creating model for", subdirname
                subject_path = os.path.join(dirname, subdirname)
                # print "subject path:", subject_path
                file_counter = 0
                for filename in os.listdir(subject_path):
                    # print "image path", os.path.join(subject_path, filename)
                    try:
                        
                        if use_face_det_in_training:
                            im = fd.get_detected_cropped_face(
                                os.path.join(subject_path, filename),
                                align_path, self._params,
                                return_always_face=False)

                        elif use_eyes_pos_in_training:
                            
                            if use_eye_det_in_training:
                                im = None
                                crop_result = fd.get_cropped_face(
                                    os.path.join(subject_path, filename),
                                    align_path, self._params,
                                    return_always_face=False)
                                if crop_result:
                                    im = crop_result[c.FACE_KEY]

                            else:
                                im = fd.get_cropped_face_using_fixed_eye_pos(
                                    os.path.join(subject_path, filename),
                                    align_path,
                                    offset_pct=(offset_pct_x, offset_pct_y),
                                    dest_size=sz)
                        
                        else:
                            im = cv2.imread(
                                os.path.join(subject_path, filename),
                                cv2.IMREAD_GRAYSCALE)
                            # resize to given size (if given)
                            if (im is not None) and (sz is not None):
                                im = cv2.resize(im, sz)

                        if im is not None:

                            X.append(np.asarray(im, dtype=np.uint8))
                            y.append(l)
                            self._tags[l] = str(subdirname)

                        else:
                            print "Image", os.path.join(subject_path, filename), "not considered"

                    except IOError, (errno, strerror):
                        print "I/O error({0}): {1}".format(errno, strerror)
                    except:
                        print "Unexpected error:", sys.exc_info()[0]
                        raise
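                # Assumption: in the full source the label advances after each
                # subject directory so that every person gets a distinct y
                # value; the method presumably returns X and y afterwards
                l = l + 1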