    def test_find_person_by_clothes(self):
        """The bounding box returned by find_person_by_clothes
        must lie within the searched image"""
        image_path = os.path.join(
            '..', 'test_files', 'person_tracking', 'Test.jpg')

        ref_image_path = os.path.join(
            '..', 'test_files', 'person_tracking', 'Reference.jpg')

        align_path = c.ALIGNED_FACES_PATH

        # Detect faces in the reference image and take the first result

        result_dict = detect_faces_in_image(
            ref_image_path, align_path, None, False)

        if c.FACES_KEY in result_dict:
            faces = result_dict[c.FACES_KEY]
            if len(faces) > 0:
                face_dict = faces[0]
                bbox = face_dict[c.BBOX_KEY]

                params = None
                show_results = True

                bbox = pt.find_person_by_clothes(
                    image_path, ref_image_path, bbox, params, show_results)

                if bbox:
                    x0 = bbox[0]
                    y0 = bbox[1]
                    width = bbox[2]
                    height = bbox[3]
                    x1 = x0 + width
                    y1 = y0 + height
                    # The returned bounding box refers to the searched image,
                    # so check it against that image's dimensions
                    im = cv2.imread(image_path, cv2.IMREAD_COLOR)
                    im_height, im_width, channels = im.shape
                    self.assertGreaterEqual(x0, 0)
                    self.assertGreaterEqual(y0, 0)
                    self.assertLessEqual(x1, im_width)
                    self.assertLessEqual(y1, im_height)
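
For comparison, the same flow outside the test harness; the commented import lines and the file names below are assumptions, while the calls mirror the test above:

# Standalone sketch (module names assumed to match the project layout):
# import constants as c
# import person_tracking as pt
# from face_detection import detect_faces_in_image
import os
import cv2

image_path = 'Test.jpg'            # image to search (placeholder path)
ref_image_path = 'Reference.jpg'   # image of the known person (placeholder path)

# Detect a face in the reference image to seed the clothes-based search
result_dict = detect_faces_in_image(
    ref_image_path, c.ALIGNED_FACES_PATH, None, False)
faces = result_dict.get(c.FACES_KEY, [])
if faces:
    seed_bbox = faces[0][c.BBOX_KEY]
    person_bbox = pt.find_person_by_clothes(
        image_path, ref_image_path, seed_bbox, None, False)
    if person_bbox:
        x, y, w, h = person_bbox
        print('Person found at', (x, y, w, h))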
Example #3
def main(argv):
    # Set paths of images to be analyzed
    image_path_1 = argv[1]
    print('image_path_1 ', image_path_1)
    image_path_2 = argv[2]
    print('image_path_2 ', image_path_2)

    # Set path of directory where aligned faces will be saved
    align_path = '/tmp'

    # Set parameters
    params = {
        c.CHECK_EYE_POSITIONS_KEY: True,
        c.CLASSIFIERS_DIR_PATH_KEY: '/home/active/gitactive/ACTIVE/face_extractor/examples/haarcascades',
        c.CROPPED_FACE_HEIGHT_KEY: 250,
        c.CROPPED_FACE_WIDTH_KEY: 200,
        c.EYE_DETECTION_CLASSIFIER_KEY: 'haarcascade_mcs_lefteye.xml',
        c.FACE_DETECTION_ALGORITHM_KEY: 'HaarCascadeFrontalFaceAlt2',
        c.FLAGS_KEY: 'DoCannyPruning',
        c.MIN_NEIGHBORS_KEY: 5,
        c.MIN_SIZE_HEIGHT_KEY: 20,
        c.MIN_SIZE_WIDTH_KEY: 20,
        c.OFFSET_PCT_X_KEY: 0.30,
        c.OFFSET_PCT_Y_KEY: 0.42,
        c.SCALE_FACTOR_KEY: 1.1,
        c.MAX_EYE_ANGLE_KEY: 0.125,
        c.MIN_EYE_DISTANCE_KEY: 0.25,
        c.USE_EYES_POSITION_KEY: True,
        c.USE_NOSE_POS_IN_DETECTION_KEY: False,
        c.ALIGNED_FACES_PATH_KEY: align_path,
        c.GLOBAL_FACE_REC_DATA_DIR_PATH_KEY: '/home/active/gitactive/ACTIVE/face_extractor/examples/face_rec_data2',
        c.LBP_GRID_X_KEY: 4,
        c.LBP_GRID_Y_KEY: 5,
        c.LBP_NEIGHBORS_KEY: 8,
        c.LBP_RADIUS_KEY: 1
    }
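
    # Note (interpretation assumed from the key names, not project docs):
    # MIN_NEIGHBORS, MIN_SIZE_*, SCALE_FACTOR and FLAGS correspond to
    # OpenCV's standard cascade-detection parameters; the eye/offset keys
    # drive face alignment and the LBP_* keys configure the LBP recognizer.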

    fm = FaceModels(params)

    # Delete any already existing models
    fm.delete_models()

    # Add face from first image to face models
    label = 0
    tag = ''
    fm.add_face(label, tag, image_path_1)

    # Detect face in second image
    result = detect_faces_in_image(
        image_path_2, align_path, params, show_results=False)

    # Recognize aligned face detected in second image
    if result and c.FACES_KEY in result:
        faces = result[c.FACES_KEY]
        if len(faces) == 1:

            # Get aligned face and calculate distance between faces
            file_name = faces[0][c.ALIGNED_FACE_FILE_NAME_KEY]
            file_name_complete = file_name + '_gray.png'
            file_path = os.path.join(align_path, file_name_complete)
            aligned_face = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
            (label, conf) = fm.recognize_face(aligned_face)

            # Draw bounding box around face
            img = cv2.imread(image_path_2)
            (x, y, w, h) = faces[0][c.BBOX_KEY]
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3, 8, 0)

            # Write distance from reference image
            cv2.putText(img, str(conf), (x, y + h + 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            print('distance ', conf)
            cv2.imshow('Image', img)
            cv2.waitKey(0)

            # Save annotated image next to the original
            image_path_2 = os.path.splitext(image_path_2)[0]
            image_path_2 = image_path_2 + '_comparison.png'
            cv2.imwrite(image_path_2, img)
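
The snippet defines main(argv) but omits the entry point; a conventional guard (assumed, not shown in the original) would be:

import sys

if __name__ == '__main__':
    # The script expects two image paths as command-line arguments
    if len(sys.argv) < 3:
        print('Usage: python <script> <image_path_1> <image_path_2>')
        sys.exit(1)
    main(sys.argv)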
Example #4
# Placeholder paths (assumed; not included in the snippet)
image_path = 'Test.jpg'
align_path = '/tmp'

params = {
    c.CHECK_EYE_POSITIONS_KEY: True,
    c.EYE_DETECTION_CLASSIFIER_KEY: 'haarcascade_mcs_lefteye.xml',
    c.FACE_DETECTION_ALGORITHM_KEY: 'HaarCascadeFrontalFaceAlt2',
    c.FLAGS_KEY: 'DoCannyPruning',
    c.MIN_NEIGHBORS_KEY: 5,
    c.MIN_SIZE_HEIGHT_KEY: 20,
    c.MIN_SIZE_WIDTH_KEY: 20,
    c.OFFSET_PCT_X_KEY: 0.30,
    c.OFFSET_PCT_Y_KEY: 0.42,
    c.SCALE_FACTOR_KEY: 1.1,
    c.MAX_EYE_ANGLE_KEY: 0.125,
    c.MIN_EYE_DISTANCE_KEY: 0.25,
    c.USE_EYES_POSITION_KEY: True,
    c.USE_NOSE_POS_IN_DETECTION_KEY: False
}

result = detect_faces_in_image(
    image_path, align_path, params, show_results=True)

# Show aligned faces
if result and c.FACES_KEY in result:
    faces = result[c.FACES_KEY]
    for face in faces:

        # Show RGB image
        file_name = face[c.ALIGNED_FACE_FILE_NAME_KEY] + '.png'
        file_path = os.path.join(align_path, file_name)
        img = cv2.imread(file_path)
        cv2.imshow('RGB aligned face', img)
        cv2.waitKey(0)

        # Show gray-level image
        file_name = face[c.ALIGNED_FACE_FILE_NAME_KEY] + '_gray.png'
        file_path = os.path.join(align_path, file_name)
        img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
        cv2.imshow('Gray-level aligned face', img)
        cv2.waitKey(0)
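
The display blocks above rely on a fixed naming convention for aligned faces; a small helper (hypothetical, for illustration only) makes it explicit:

def aligned_face_paths(align_path, base_name):
    # Aligned faces are saved as '<base>.png' (RGB) and '<base>_gray.png'
    # (gray-level) inside align_path, as used in the examples above
    rgb_path = os.path.join(align_path, base_name + '.png')
    gray_path = os.path.join(align_path, base_name + '_gray.png')
    return rgb_path, gray_path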
    def extract_faces_from_image(self, resource_path):
        """
        Launch the face extractor on one image resource.
        This method returns a task handle.

        :type  resource_path: string
        :param resource_path: resource file path

        :rtype: float
        :returns: handle (a timestamp) for retrieving results
        """
        # Record start time to measure processing time
        start_time = cv2.getTickCount()

        error = None

        # Face detection
        align_path = c.ALIGNED_FACES_PATH
        if ((self.params is not None) and
                (c.ALIGNED_FACES_PATH_KEY in self.params)):
            align_path = self.params[c.ALIGNED_FACES_PATH_KEY]

        detection_result = detect_faces_in_image(resource_path, align_path,
                                                 self.params, False)

        detection_error = detection_result[c.ERROR_KEY]

        if not detection_error:

            face_images = detection_result[c.FACES_KEY]

            # Face recognition
            faces = []
            for det_face_dict in face_images:

                face_dict = {}

                face = det_face_dict[c.FACE_KEY]
                bbox = det_face_dict[c.BBOX_KEY]

                # Resize face
                resize_face = ce.USE_RESIZING

                if ((self.params is not None) and
                        (ce.USE_RESIZING_KEY in self.params)):
                    resize_face = self.params[ce.USE_RESIZING_KEY]

                if resize_face:

                    face_width = c.CROPPED_FACE_WIDTH
                    face_height = c.CROPPED_FACE_HEIGHT

                    if ((self.params is not None) and
                            (c.CROPPED_FACE_WIDTH_KEY in self.params) and
                            (c.CROPPED_FACE_HEIGHT_KEY in self.params)):
                        face_width = self.params[c.CROPPED_FACE_WIDTH_KEY]
                        face_height = self.params[c.CROPPED_FACE_HEIGHT_KEY]

                    new_size = (face_width, face_height)
                    face = cv2.resize(face, new_size)

                rec_result = recognize_face(
                    face, self.face_models, self.params, False)

                tag = rec_result[c.ASSIGNED_TAG_KEY]
                confidence = rec_result[c.CONFIDENCE_KEY]
                face_dict[c.ASSIGNED_TAG_KEY] = tag
                face_dict[c.CONFIDENCE_KEY] = confidence
                face_dict[c.BBOX_KEY] = bbox
                face_dict[c.FACE_KEY] = face
                faces.append(face_dict)

            processing_time_in_clocks = cv2.getTickCount() - start_time
            processing_time_in_seconds = (
                processing_time_in_clocks / cv2.getTickFrequency())

            # Populate dictionary with results
            results = {c.ELAPSED_CPU_TIME_KEY: processing_time_in_seconds,
                       c.ERROR_KEY: error, c.FACES_KEY: faces}

        else:

            results = {c.ERROR_KEY: detection_error}

        self.progress = 100
        handle = time.time()
        self.db_result4image[handle] = results

        return handle
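
A minimal usage sketch (the enclosing class name and constructor are assumptions; the handle and result mechanics follow the method above):

extractor = FaceExtractor(face_models, params)  # class/constructor assumed
handle = extractor.extract_faces_from_image('/path/to/image.jpg')

# Results are stored under the returned handle, as in the method above
results = extractor.db_result4image[handle]
if not results[c.ERROR_KEY]:
    for face in results[c.FACES_KEY]:
        print(face[c.ASSIGNED_TAG_KEY], face[c.CONFIDENCE_KEY], face[c.BBOX_KEY])
    print('elapsed CPU time:', results[c.ELAPSED_CPU_TIME_KEY])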