Example #1
import cv2
import numpy as np


class AgePredictor:
    def __init__(self):
        # age estimation model (DEX); outputs a distribution over ages 0-100
        # model structure: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/age.prototxt
        # pre-trained weights: https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/dex_chalearn_iccv2015.caffemodel
        self.age_model = cv2.dnn.readNetFromCaffe(
            "data/age.prototxt", "data/dex_chalearn_iccv2015.caffemodel")
        self.fd = FaceDetector()

    # Given an image, extract the face ROI with the face detector and predict its age
    # with the age model.
    # Returns 3 values:
    ## apparent_age: the model's predicted age for the face (-1 if no face is found)
    ## roi: the region of the image that contains the face (None if no face is found)
    ## angle: the rotation angle (CCW) applied to the roi
    def predict_age(self, img):
        # extract roi and resize it to the desired dimensions for the age model
        roi, angle = self.fd.detect_face(img)
        if roi is None:
            return -1, None, 0
        roi_resized = cv2.resize(roi, (224, 224))
        img_blob = cv2.dnn.blobFromImage(roi_resized)
        # run it through the model and return predicted age
        self.age_model.setInput(img_blob)
        age_dist = self.age_model.forward()[0]
        output_indexes = np.arange(0, 101)
        apparent_age = round(np.sum(age_dist * output_indexes), 2)
        return apparent_age, roi, angle
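
A minimal usage sketch for AgePredictor, assuming a FaceDetector implementation is available in the same scope and the two model files exist under data/; the input image path is hypothetical:

import cv2

predictor = AgePredictor()
img = cv2.imread("data/sample.jpg")  # hypothetical input image
apparent_age, roi, angle = predictor.predict_age(img)
if roi is None:
    print("no face detected")
else:
    print("apparent age:", apparent_age, "- roi rotated", angle, "degrees CCW")

The predicted age is the expected value of the network's 101-bin output distribution (ages 0-100), which is why the forward pass is followed by a weighted sum over the output indexes.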
Example #2
import os

import cv2


class FaceDetectorProcessor(ImageProcessor):
	"""FaceDetector processing class extending the Image processing
	base class, attempts to determine if there is a face in the image

	Parameters:
	-----------
	cascade_file: path to the xml cascade classifier parameters

	"""

	def __init__(self, cascade_file='haarcascade_frontalface_default.xml'):
		self.detector = FaceDetector(cascade_file)
		self.preprocessor = GrayscaleProcessor()


	def process_image(self, image):
		"""Process the image by determining if there is a face

		:param image: image as numpy array
		:return: cropped face image, and boolean indicating whether 
		there is a face
		"""
		gray = self.preprocessor.process_image(image)

		face = self.detector.detect_face(gray)

		if len(face) == 0:
			return image, False

		x, y, w, h = face

		cropped_face = image[y:y+h, x:x+w] 

		return cropped_face, True


	def save_image(self, image, user_id, photo_id):
		"""Saves the image to a temporary directory in the current working 
		folder with a concatentation of user_id and photo_id as the filename

		:param image: image as a numpy array
		:param user_id: user_id of the face image
		:param photo_id: photo_id of the original Instagram image (includes the .jpg suffix)
		:return: file path name
		"""
		path = os.path.dirname(__file__)
		path = os.path.join(path, 'tmp')
		if not os.path.exists(path):
			os.mkdir(path)
		fname = os.path.join(path, str(user_id) + str(photo_id))
		cv2.imwrite(fname, image)
		return fname
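
A brief usage sketch for FaceDetectorProcessor, assuming the ImageProcessor, FaceDetector, and GrayscaleProcessor classes from this codebase are importable and that haarcascade_frontalface_default.xml can be found; the image path, user_id, and photo_id are hypothetical:

import cv2

processor = FaceDetectorProcessor()
image = cv2.imread("sample.jpg")  # hypothetical input image
face_img, found = processor.process_image(image)
if found:
    saved_path = processor.save_image(face_img, user_id=42, photo_id="abc.jpg")
    print("cropped face written to", saved_path)

save_image writes the crop into a tmp/ directory next to the module and returns the resulting file path.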
Example #3
        # clamp the new_width x new_width crop to the image bounds and return it as a BBox
        bottom = min(top + new_width, img_height)
        right = min(left + new_width, img_width)
        return BBox(int(left), int(right), int(top), int(bottom))

    # Returns the 7-dimensional vector specified by self.emotions as the estimate.
    #
    # input
    #   input_tensor: input tensor
    # output
    #   vector of estimated values
    def _tensor2emotion(self, input_tensor):
        # run the emotion recognizer on the input tensor and return its raw predictions
        preds = self.recognizer.predict(input_tensor)
        return preds

if __name__ == "__main__":
    output_dir = "../data/outputs"
    face_detect_conf_dir = "../conf/face-detect"
    erecognizer_conf_dir = "../conf/emotion-recognizer"
    image_path = "../data/inputs/meeting_11_304.jpg"

    # detect faces in the input image
    face_detector = FaceDetector(face_detect_conf_dir)
    box_faces = face_detector.detect_face(image_path)

    # classify the emotion of each detected face and caption the image with the results
    emotion_recognizer = EmotionRecognizer(erecognizer_conf_dir, 416, output_dir)
    ret_emotions = emotion_recognizer.classify_emotion(image_path, box_faces)
    emotion_recognizer.put_caption(image_path, box_faces, ret_emotions)