def update(self, dt):
        return_value, frame = self.capture.read()
        if return_value:
            texture = self.texture
            w, h = frame.shape[1], frame.shape[0]

            if not texture or texture.width != w or texture.height != h:
                self.texture = texture = Texture.create(size=(w, h))
                texture.flip_vertical()

            global detector
            global predictor

            # NOTE: imutils.resize() with no target size is a no-op, and any
            # real resize here would desync the frame from the texture above
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            rects = detector(gray, 1)
            for (i, rect) in enumerate(rects):
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                output = face_utils.visualize_facial_landmarks(frame, shape)
                texture.blit_buffer(output.tobytes(), colorfmt='bgr')

            # texture.blit_buffer(frame.tobytes(), colorfmt='bgr')
            self.canvas.ask_update()
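The update() method above reads module-level detector and predictor objects through global. A minimal setup sketch, assuming the standard dlib 68-point model (the file path is an assumption):

import cv2
import dlib
import imutils
from imutils import face_utils

# module-level objects that update() reads via `global`
detector = dlib.get_frontal_face_detector()
# hypothetical path; point this at wherever the model file actually lives
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")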
def face_landmark_detect(img, bg_img, res, blend, *args):

    # Ask the detector to find the bounding boxes of each face. The second
    # argument is how many times to upsample the image before detecting:
    # 0 means no upsampling, while 1 enlarges the image and finds smaller faces.

    dets = detector(img, 0)
    det_num = len(dets)
    #print("Number of faces detected: {}".format(det_num))

    if det_num >= 1:
        out_img = bg_img
        for k, d in enumerate(dets):
            #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            #print("Part 0: {}, Part 1: {} ...".format(shape.part(0),shape.part(1)))
            shape = face_utils.shape_to_np(shape)

            # draw onto the running output so every detected face stays visible
            out_img = face_utils.visualize_facial_landmarks(out_img, shape)

        if blend == 1:
            #blend in original
            out_img = cv2.addWeighted(img, 0.3, out_img, 0.7, 0)

    else:
        out_img = img

    #return
    return out_img
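For reference, cv2.addWeighted(a, alpha, b, beta, gamma) computes alpha*a + beta*b + gamma per pixel, so blend == 1 mixes 30% of the raw frame into the landmark overlay. A hypothetical driver, assuming detector and predictor are initialized as in the sketch above:

frame = cv2.imread("face.jpg")  # hypothetical test image (BGR)
vis = face_landmark_detect(frame, frame.copy(), None, 1)
cv2.imwrite("landmarks.jpg", vis)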
Example #3
def getPoints(path, width, height):
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    # load the input image and convert it to grayscale (resize is disabled)
    image = cv2.imread(path)
    #image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale image
    rects = detector(gray, 1)
    # loop over the face detections

    points = []

    for (i, rect) in enumerate(rects):
        # determine the facial landmarks for the face region, then
        # convert the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # loop over the face parts individually
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image.copy()
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                points.append((x, y))
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
            # extract the ROI of the face region as a separate image
            (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
            roi = image[y:y + h, x:x + w]
            roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

            # show the particular face part
            #cv2.imshow("ROI", roi)
            #cv2.imshow("Image", clone)
            cv2.waitKey(0)

        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)
        #cv2.imshow("Image", output)
        cv2.waitKey(0)

    points += [(0, 0), (math.floor(width / 3), 0),
               (math.floor(width * 2 / 3), 0), (width - 1, 0),
               (0, math.floor(height / 3)), (0, math.floor(height * 2 / 3)),
               (width - 1, math.floor(height / 3)),
               (width - 1, math.floor(height * 2 / 3)), (0, height - 1),
               (math.floor(width / 3), height - 1),
               (math.floor(width * 2 / 3), height - 1),
               (width - 1, height - 1)]
    return points
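The border and edge points appended at the end suggest a downstream Delaunay triangulation of the full image (e.g. for morphing or warping). A sketch under that assumption:

# hypothetical follow-up: triangulate the returned points with OpenCV
def triangulate(points, width, height):
    subdiv = cv2.Subdiv2D((0, 0, width, height))
    for (x, y) in points:
        subdiv.insert((float(x), float(y)))
    # each row is one triangle as (x1, y1, x2, y2, x3, y3)
    return subdiv.getTriangleList()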
Example #4
File: utils.py Project: AminHP/RTFER-old
def draw_landmarks(image, landmarks, visualize=True, draw_dots=False):
    if not isinstance(landmarks, list):
        landmarks = [landmarks]
    for lm in landmarks:
        if visualize:
            image[:] = face_utils.visualize_facial_landmarks(image, lm)
        if draw_dots:
            for x, y in lm:
                cv2.circle(image, (x, y), 1, (255, 0, 0), 2)
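Because draw_landmarks assigns through image[:], it mutates the caller's array in place rather than returning a copy. A hypothetical call, assuming frame is a BGR image and detector/predictor are set up as earlier:

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
lms = [face_utils.shape_to_np(predictor(gray, r)) for r in rects]
draw_landmarks(frame, lms, visualize=True, draw_dots=True)  # frame is modified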
Example #5
def detect(gray, frame):
    """ Input = greyscale image or frame from video stream
        Output = Image with rectangle box in the face
    """
    # Now get the tuples that detect the faces using the cascade above
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # each face is a tuple of 4 numbers:
    # x, y => upper-left corner coordinates of the face
    # w => width of the face rectangle
    # h => height of the face rectangle
    # gray is the input image for the detector
    # 1.3 is the scale factor: how much the image shrinks at each pyramid step
    # 5 is the minimum number of neighbor detections required to accept a face

    # visualize the rectangles
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        eyes = eyes_cascade.detectMultiScale(roi_gray)
        eyes_red_percentages = []
        for (ex, ey, ew, eh) in eyes:
            eyes_red_percentages.append(red_eye_test(roi_color, ex, ey, ew, eh))
            centre = (ex+(ew//2), ey+(eh//2))
            new_ex = centre[0] - (ew//4)
            new_ey = centre[1] - (eh//4)
            cv2.rectangle(roi_color, (new_ex, new_ey), ((new_ex+ew//2), (new_ey+eh//2)), (0, 255, 0), 2)
            cv2.imshow('img', frame)

        mean = sum(eyes_red_percentages) / max(len(eyes_red_percentages), 1)  # mean over detected eyes
        tests.append(["Test_14", f(mean)])
        # visualize_facial_landmarks expects a landmark array, not an image
        # ROI, so show the annotated frame directly instead
        cv2.imshow("Image", frame)
        cv2.waitKey(0)

    # Now iterate over the faces and detect eyes
    eye_center = {0: (0, 0), 1: (0, 0)}  # defaults in case no face is found
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Arguments => image, top-left corner, bottom-right corner, color, rectangle border thickness
        # we need two regions of interest (ROI): gray to detect the eyes and color to draw on
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        # Detect eyes now
        eyes = eyes_cascade.detectMultiScale(roi_gray, 1.1, 3)
        eye_center = {}
        for counter, (ex, ey, ew, eh) in enumerate(eyes):
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
            eye_center[counter] = (ex+(ew/2), ey+(eh/2))
        if len(eye_center) < 2:
            # keep any eye that was found; only fill in the missing entries
            eye_center.setdefault(0, (0, 0))
            eye_center.setdefault(1, (0, 0))
    return eye_center[0], eye_center[1], frame
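red_eye_test, tests, and f are external to this snippet. A plausible stand-in for red_eye_test returns the percentage of eye-box pixels whose red channel dominates; the thresholds and the whole function are assumptions:

import numpy as np

def red_eye_test(roi_color, ex, ey, ew, eh):
    # hypothetical: % of pixels in the eye box where red clearly dominates
    eye = roi_color[ey:ey + eh, ex:ex + ew].astype("int")
    b, g, r = eye[:, :, 0], eye[:, :, 1], eye[:, :, 2]
    red_mask = (r > 100) & (r > b + g)  # guessed thresholds
    return 100.0 * red_mask.sum() / red_mask.size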
Example #6
    def detect_face_part(self):
        # loop over the face detections
        # i : name
        # 0 : mouth, 1 : right_eyebrow, 2 : left_eyebrow
        # 3 : right_eye, 4 : left_eye, 5 : nose, 6 : jaw
        face_parts = [[],[],[],[],[],[],[]]
        for (i, rect) in enumerate(self.rects):
            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = self.predictor(self.gray, rect)
            shape = face_utils.shape_to_np(shape)

            idx = 0
            # loop over the face parts individually
            for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
                # clone the original image so we can draw on it, then
                # display the name of the face part on the image
                clone = self.img.copy()
                cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                # loop over the subset of facial landmarks, drawing the
                # specific face part
                for (x, y) in shape[i:j]:
                    cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)


                face_parts[idx] = shape[i:j]
                idx += 1

                # extract the ROI of the face region as a separate image
                (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
                roi = self.img[y:y + h, x:x + w]
                roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
                '''
                # show the particular face part
                cv2.imshow("ROI", roi)
                cv2.imshow("Image", clone)
                cv2.waitKey(0)
                '''
            # visualize all facial landmarks with a transparent overlay
            output = face_utils.visualize_facial_landmarks(self.img, shape)


        # set the variables
        # Caution: these coordinates correspond to the RESIZED image.
        self.mouth = face_parts[0]
        self.right_eyebrow = face_parts[1]
        self.left_eyebrow = face_parts[2]
        self.right_eye = face_parts[3]
        self.left_eye = face_parts[4]
        self.nose = face_parts[5]
        self.jaw = face_parts[6]
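Caution: newer imutils releases add an inner_mouth entry to FACIAL_LANDMARKS_IDXS, which would misalign the hard-coded seven-slot list above. A name-keyed dict sidesteps the positional assumption (a sketch, not the original author's code):

face_parts = {name: shape[i:j]
              for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items()}
mouth = face_parts["mouth"]  # look parts up by name instead of position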
def facialDetector(image):
    # resize the input image and convert it to grayscale
    # image = cv2.imread(args["image"])
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale image
    rects = detector(gray, 1)

    # loop over the face detections
    for (i, rect) in enumerate(rects):
        # determine the facial landmarks for the face region, then
        # convert the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # loop over the face parts individually
        # for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
        # 	# clone the original image so we can draw on it, then
        # 	# display the name of the face part on the image
        # 	clone = image.copy()
        # 	cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
        # 		0.7, (0, 0, 255), 2)

        # 	# loop over the subset of facial landmarks, drawing the
        # 	# specific face part
        # 	for (x, y) in shape[i:j]:
        # 		cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

        # 	# extract the ROI of the face region as a separate image
        # 	(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
        # 	roi = image[y:y + h, x:x + w]
        # 	roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

        # 	# show the particular face part
        # 	cv2.imshow("ROI", roi)
        # 	cv2.imshow("Image", clone)
        # 	cv2.waitKey(0)

        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)
        # cv2.imshow("Image", output)
        # cv2.waitKey(0)
        return shape
Example #8
def facialDetector(image):
    # resize the input image and convert it to grayscale
    # image = cv2.imread(args["image"])
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale image
    rects = detector(gray, 1)

    # loop over the face detections
    for (i, rect) in enumerate(rects):
        # determine the facial landmarks for the face region, then
        # convert the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)
        return output, shape
Example #9
def detect_parts(image):
    # resize the image (imutils.resize keeps the aspect ratio, so only the
    # width is honored) and convert it to grayscale
    image = imutils.resize(image, width=200)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image

    rects = detector(gray, 1)
    distances = None  # stays None if no face is detected

    # loop over the face detections
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        print(image.shape)
        distances = euclidean_all(shape)

        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape, colors=colors)
        print(image.shape)
        plt.imshow(output)
        plt.show()

    return distances
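euclidean_all and colors are defined elsewhere. A plausible euclidean_all computes every pairwise distance between the 68 landmarks; this is an assumption about the author's intent:

from scipy.spatial.distance import pdist

def euclidean_all(shape):
    # condensed vector of all pairwise Euclidean distances between landmarks
    return pdist(shape.astype("float"))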
Example #10
def extract_features(image):
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	rects = detector(gray, 1)
	landmarks = None  # stays None if no face is detected

	for (i, rect) in enumerate(rects):
		shape = predictor(gray, rect)
		shape = face_utils.shape_to_np(shape)

		landmarks = face_utils.FACIAL_LANDMARKS_IDXS.items()

		for (name, (i, j)) in landmarks:

			clone = image.copy()
			cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
				0.7, (0, 0, 255), 2)

			for (x, y) in shape[i:j]:
				cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

			(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
			roi = image[y:y + h, x:x + w]
			roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

			cv2.imshow("ROI", roi)
			cv2.imshow("Image", clone)
			cv2.waitKey(0)

		output = face_utils.visualize_facial_landmarks(image, shape)
		cv2.imshow("Image", output)
		cv2.waitKey(0)
	return landmarks
Example #11
    # Get frame
    frame = vs.read()
    frame = imutils.resize(frame, 1300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces
    rects = detector(gray, 0)

    # Loop over the face detections
    for rect in rects:
        # Facial landmarks FACS
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        if args['face']:
            frame = face_utils.visualize_facial_landmarks(frame, shape)
        else:
            pass

        for (i, name) in enumerate(face.keys()):
            # For all points in face

            if name in ['eyebrow']:
                # if this face part is in the skip list, continue
                continue

            # All points in this face part
            (j, k) = face[name]['pts']
            for idx in range(j, k):  # idx avoids shadowing the outer i
                face[name]['pts_list'][idx + 1] = shape[idx]
Example #12
            img_ori = cv2.imread(dataroot + row['subDirectory_filePath'])
            (height, width, _) = img_ori.shape  # numpy shape is (rows, cols, channels)

            ### LOAD LANDMARKS
            landmarks = []
            for lm in row['facial_landmarks'].split(';'):
                landmarks.append(float(lm))
            landmarks = np.array(landmarks).reshape((68, 2))

            for i in range(68):
                landmarks[i, 0] = min(landmarks[i, 0] / width * IMG_SIZE, 223)
                landmarks[i, 1] = min(landmarks[i, 1] / height * IMG_SIZE, 223)

            ## CREATE LABEL
            background = np.zeros((IMG_SIZE, IMG_SIZE))
            colors = [(255, 255, 255)] * 8  # white for all eight face regions
            output = face_utils.visualize_facial_landmarks(
                background, landmarks.astype('int64'), colors=colors, alpha=1)

            cv2.imwrite(os.path.join(out_path, row['subDirectory_filePath']),
                        output)

        except Exception:
            import pdb
            pdb.set_trace()
            print("Error en", row['subDirectory_filePath'])
            errors = errors.append(row)

    print(len(errors), "errors")
    errors.to_csv('errors.csv', columns=columns)
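In the snippet above, visualize_facial_landmarks draws BGR contours, so the label canvas is usually allocated as a 3-channel uint8 image; a sketch of that variant (an assumption about the intended label format):

# assumption: a 3-channel uint8 canvas avoids dtype/channel surprises
background = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype="uint8")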
Example #13
            clone = image.copy()
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

    # extract the ROI of the face region as a separate image
    (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
    roi = image[y:y + h, x:x + w]
    roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

    # visualize all facial landmarks with a transparent overlay
    #output = face_utils.visualize_facial_landmarks(image, shape) # these two lines were moved below so the face is drawn OVER the matte
    #cv2.imwrite(str(frame), output)
    ####

    fm.createMatte("frame%d.jpg" % count, "matte_frame%d.jpg" % count,
                   720)  # 720 is the video resolution; change as needed

    output = face_utils.visualize_facial_landmarks(
        cv2.imread("matte_frame%d.jpg" % count),
        shape)  # moved here so the face is drawn OVER the matte
    cv2.imwrite("matte_frame%d.jpg" % count, output)

    count += 1

cap.release()
cv2.destroyAllWindows()  # destroy all the opened windows
def facefeature(image):
    rects = detector(image, 1)

    # loop over the face detections
    for (i, rect) in enumerate(rects):
        # determine the facial landmarks for the face region, then
        # convert the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(image, rect)
        shape = face_utils.shape_to_np(shape)
        #print shape

        #face_locations = face_recognition.face_locations(image)
        #print 'face locations', face_locations
        #print 'rects', rects
        #print 'rect', rect
        new_rect = rect.top(), rect.right(), rect.bottom(), rect.left()
        face_encodings = face_recognition.face_encodings(image, [new_rect], num_jitters=10)
        #print face_encodings

        if len(face_encodings) == 0:
            continue

        dist = face_recognition.face_distance(face_encodings, encoding)

        use_dist = None
        if len(dist) > 0:
            use_dist = min(dist)


        """
        # loop over the face parts individually
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image.copy()
            print name
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
            0.7, (0, 0, 255), 2)

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

                # extract the ROI of the face region as a separate image
                (x, y, w, h) = cv2.boundingRect(numpy.array([shape[i:j]]))
                roi = image[y:y + h, x:x + w]
                print roi.shape
                #roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

                # show the particular face part
                cv2.imshow("ROI", roi)
                cv2.imshow("Image", clone)
                cv2.waitKey(200)
        """
        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)

        cv2.rectangle(output, (rect.tl_corner().x, rect.tl_corner().y), (rect.br_corner().x, rect.br_corner().y), (0, 255, 0), 4)

        if use_dist is not None:
            draw_xcentered_text(output, str(round(use_dist, 3)), 20)

        cv2.imshow("Image", output)
        cv2.waitKey(1)
Example #15
        shape = face_utils.shape_to_np(shape)

        # loop over the face parts individually
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = frame.copy()
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

        output = face_utils.visualize_facial_landmarks(
            cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR), shape)
        cv2.imshow("output", output)

        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes

        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)

        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0

        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
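eye_aspect_ratio is not shown in this snippet; the standard definition (Soukupová and Čech, 2016) over the six eye landmarks is EAR = (||p2-p6|| + ||p3-p5||) / (2 * ||p1-p4||). A sketch:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    A = dist.euclidean(eye[1], eye[5])  # vertical distance p2-p6
    B = dist.euclidean(eye[2], eye[4])  # vertical distance p3-p5
    C = dist.euclidean(eye[0], eye[3])  # horizontal distance p1-p4
    return (A + B) / (2.0 * C)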
    for (x, y) in shape[i:j]:
        cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

    # Extract ROI of the face region separately
    (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
    roi = img[y:y + h, x:x + w]
    roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

    # Show the overlayed image as well as ROI
    print('Press any key on the image to close')
    cv2.imshow('ROI', roi)
    cv2.imshow('Image', clone)
    cv2.waitKey(0)

    if args.all_facial_parts:
        # show all facial landmarks
        all_face = face_utils.visualize_facial_landmarks(img, shape)
        cv2.imshow("Image", all_face)
        cv2.waitKey(0)

    if args.save:
        # save the specific part
        _, img_file = os.path.split(args.image)
        fid, ext = img_file.rsplit('.', 1)  # tolerate dots in the filename
        cv2.imwrite('result/{}_{}.{}'.format(fid, args.facial_parts, ext),
                    clone)

        # save all facial parts
        if args.all_facial_parts:
            cv2.imwrite('result/{}_all.{}'.format(fid, ext), all_face)
Example #17
def start_webcam(model_emotion,
                 model_gender,
                 window_size,
                 window_name='live',
                 update_time=50):
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    video_feed = cv2.VideoCapture(0)
    if window_size:
        # request the capture resolution (property 3 = width, 4 = height)
        video_feed.set(3, width)
        video_feed.set(4, height)
    read_value, webcam_image = video_feed.read()

    delay = 0
    init = True

    # parse the predictor path and build the detector once, not per frame
    ap = argparse.ArgumentParser()
    ap.add_argument("-p",
                    "--shape-predictor",
                    required=True,
                    help="path to facial landmark predictor")
    args = vars(ap.parse_args())
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    while read_value:
        read_value, webcam_image = video_feed.read()
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            rects = detector(webcam_image, 1)
            for (i, rect) in enumerate(rects):
                # rect is in full-image coordinates, so run the predictor
                # on the full frame rather than on the cropped face
                shape = predictor(webcam_image, rect)
                shape = face_utils.shape_to_np(shape)
                output = face_utils.visualize_facial_landmarks(
                    webcam_image, shape)
                #if init or delay == 0:
                #init = False
                emotion_prediction = model_emotion.predict(normalized_face)
                gender_prediction = model_gender.predict(normalized_face)
                if gender_prediction[0] == 0:
                    cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)
                    cv2.imshow('video', output)
                    cv2.putText(webcam_image, "Gender predictor: Female",
                                (x, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 255, 255), 2)
                else:
                    cv2.rectangle(webcam_image, (x, y), (x + w, y + h),
                                  (255, 0, 0), 2)
                    cv2.imshow('video', output)
                    cv2.putText(webcam_image, "Gender predictor: Male",
                                (x, y - 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 255, 255), 2)

                cv2.putText(webcam_image, emotions[emotion_prediction[0]],
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                            (255, 0, 0), 2)
        #delay += 1
        #delay %= 20
        cv2.imshow(window_name, webcam_image)
        key = cv2.waitKey(update_time)
        if key == ESC:
            break

    cv2.destroyWindow(window_name)
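find_faces, emotions, and ESC are external to this snippet. Minimal stand-ins (all assumptions) could look like:

ESC = 27  # assumed key code for the Escape key
emotions = {0: "neutral", 1: "happy"}  # hypothetical label map

# hypothetical find_faces: Haar-cascade detection yielding (face_crop, box)
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def find_faces(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):
        yield gray[y:y + h, x:x + w], (x, y, w, h)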
Example #18
def faceDetection(img):
	path = os.path.abspath(img)
	PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
	# initialize dlib's face detector (HOG-based) and then create
	# the facial landmark predictor
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor(PREDICTOR_PATH)

	# load the input image, resize it, and convert it to grayscale
	image = cv2.imread(path)
	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

	# detect faces in the grayscale image
	rects = detector(gray, 1)

	k = 0
	
	features = []
	# loop over the face detections
	for (i, rect) in enumerate(rects):
		# determine the facial landmarks for the face region, then
		# convert the landmark (x, y)-coordinates to a NumPy array
		shape = predictor(gray, rect)
		shape = face_utils.shape_to_np(shape)

		# loop over the face parts individually
		for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
			# To display output to the console
			arr = ["Mouth", "Right Eyebrow", "Left Eyebrow", "Right Eye", "Left Eye", "Nose", "Jaw", "Complete Face w/ Mapping"]
			#print("Conducting Facial Feature Detection For " + arr[k] + "...\n")
			k += 1

			# clone the original image so we can draw on it, then
			# display the name of the face part on the image
			clone = image.copy()
			cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
				0.7, (0, 0, 255), 2)

			# loop over the subset of facial landmarks, drawing the
			# specific face part
			for (x, y) in shape[i:j]:
				cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

			# extract the ROI of the face region as a separate image
			(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
			roi = image[y:y + h, x:x + w]
			roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

			# Variable to create the feature vector
			feat_vec = roi
			# Use the mean of the ROI for each region
			features.append(np.mean(feat_vec))

			# show the particular face part
			#cv2.imshow("ROI", roi)
			#cv2.imshow("Image", clone)
			#cv2.waitKey(0)

		# visualize all facial landmarks with a transparent overlay
		output = face_utils.visualize_facial_landmarks(image, shape)
		#cv2.imshow("Image", output)
		#cv2.waitKey(0)

		#Adds gender for the CSV File
		'''
	
		gender = re.findall(r"[\w']+", img)
		gender = gender[1]
		gender = gender.split('_')[1]

		features.append(int(gender))
		'''
		
		return features
Example #19
    # determine the facial landmarks for the face region, then
    # convert the landmark (x, y)-coordinates to a NumPy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)
    # loop over the face parts individually
    for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
        # clone the original image so we can draw on it, then
        # display the name of the face part on the image
        clone = image.copy()
        cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 255), 2)

        # loop over the subset of facial landmarks, drawing the
        #  specific face part
        for (x, y) in shape[i:j]:
            cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

        # extract the ROI of the face region as a separate image
        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
        roi = image[y:y + h, x:x + w]
        roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
        # show the particular face part
        cv2.imshow("ROI", roi)
        cv2.imshow("Image", clone)
        cv2.waitKey(500)

    # visualize all facial landmarks with a transparent overlay
    output = face_utils.visualize_facial_landmarks(image, shape)
    cv2.imshow("Image", output)
    cv2.waitKey(0)
Example #20
def dlib_testing():
    dlib_detector = dlib.get_frontal_face_detector()
    dlib_predictor = dlib.shape_predictor(
        'src/dlib/shape_predictor_68_face_landmarks.dat')

    ## delete this
    img_list_file = 'img/FDDB-folds/FDDB-fold-02.txt'
    with open(img_list_file, 'r') as f:
        file_list = [x.rstrip() for x in f.readlines()]

    ## and this
    rectangle_file = 'img/FDDB-folds/FDDB-fold-02-rectangleList.pkl'
    with open(rectangle_file, 'rb') as f:
        face_list = pickle.load(f)

    index_num = 9
    image = cv2.imread('img/FDDB-pics/{}.jpg'.format(file_list[index_num]))
    # cv2.imshow("Image", image)
    # cv2.waitKey(0)
    # return
    faces = face_list[index_num]
    # round all face values to integers
    faces = [[int(round(x)) for x in face] for face in faces]

    # rects = []
    # for (x, y, w, h) in faces:
    # rect = dlib.rectangle(int(round(x)), int(round(y)), int(round(x+w)), int(round(y+h)))
    # rects.append(rect)
    # cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)

    # cv2.imshow("Image", image)
    # cv2.waitKey(0)
    # return

    # image = cv2.imread(args["image"])
    # image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    rects = dlib_detector(gray, 1)

    # comparing face detectors
    clone = image.copy()
    for (x, y, w, h) in faces:
        cv2.rectangle(clone, (x, y), (x + w, y + h), (255, 0, 0), 1)
        # rect = dlib.rectangle(int(round(x)), int(round(y)), int(round(x+w)), int(round(y+h)))
        rect = dlib.rectangle(x, y, x + w, y + h)
        shape = dlib_predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        i, j = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']
        (x1, y1, w1, h1) = cv2.boundingRect(np.array([shape[i:j]]))
        cv2.rectangle(clone, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 1)

    for rect in rects:
        cv2.rectangle(clone, (rect.left(), rect.top()),
                      (rect.right(), rect.bottom()), (0, 255, 0), 1)
        shape = dlib_predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        i, j = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']
        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.imshow("Image", clone)
    cv2.waitKey(0)
    return

    for (i, rect) in enumerate(rects):
        shape = dlib_predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        clone = image.copy()
        # NOTE: The right eye is the person's right eye, which appears as the left eye in photo
        i, j = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']

        for (x, y) in shape[i:j]:
            cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 1)

        cv2.imshow("Image", clone)
        cv2.waitKey(0)
        return  # early return for debugging; the loops below never run

        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image.copy()
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)

            (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
            cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 1)

            M = cv2.moments(shape[i:j])
            # print(M)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.circle(clone, (cx, cy), 2, (0, 255, 0), 1)
            # print(x,y,w,h)
            roi = image[y:y + h, x:x + w]
            roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)

            # show the particular face part
            # cv2.imshow("ROI", roi)
            cv2.imshow("Image", clone)
            cv2.waitKey(0)

        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)
        cv2.imshow("Image", output)
        cv2.waitKey(0)