    def __get_neutral_feature(self):
        # Build a per-person cache of neutral-expression features from the
        # "<person>.NE*" images in the face folder.
        for f in glob.glob(os.path.join(self.__faceFolder, "*.tiff")):
            personName = f[30:32]
            if personName not in self.__neutralFeatures:
                neutralImagePath = glob.glob(
                    os.path.join(self.__faceFolder,
                                 "{0}.NE*.tiff".format(personName)))[0]
                image = cv2.imread(neutralImagePath)
                vec, center, face_bool = self.__faceUtil.get_vec(image)
                if face_bool:
                    feat = facialActions(vec, image)
                    neutralFeatures = feat.detectFeatures()
                    self.__neutralFeatures[personName] = neutralFeatures
    def process(self):
        # Walk every face image, compute its motion features relative to the
        # person's neutral expression, and collect feature/label pairs.
        for f in glob.glob(os.path.join(self.__faceFolder, "*.tiff")):
            print("Processing file: {0}".format(f))
            personName = f[30:32]
            emotion = self.filename2emotion(f)
            neutralFeatures = self.__neutralFeatures[personName]
            image = cv2.imread(f)
            vec, center, faceBool = self.__faceUtil.get_vec(image)
            if faceBool:
                feat = facialActions(vec, image)
                newFeatures = feat.detectFeatures()
                facialMotion = np.asarray(feat.FaceFeatures(neutralFeatures, newFeatures), dtype="float64").tolist()
                self.images_.append(facialMotion)
                self.labels_.append(emotion)

        return self.images_, self.labels_
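
# Hedged usage sketch (not from the original source): turn the lists returned
# by process() above into arrays and split them for model fitting. The sklearn
# dependency and the 80/20 split are assumptions, not part of the original.
def to_training_arrays(images_, labels_, test_size=0.2):
    from sklearn.model_selection import train_test_split
    X = np.asarray(images_, dtype="float64")  # one facial-motion vector per image
    y = np.asarray(labels_)                   # one emotion label per image
    return train_test_split(X, y, test_size=test_size, random_state=42)
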
def main():
    global neutralFeatures
    ap = argparse.ArgumentParser()

    ap.add_argument(
        "-m",
        "--model",
        type=int,
        default=0,
        help="Model selection: 0 = SVM, 1 = Gaussian naive Bayes, "
             "other = decision tree")

    args = vars(ap.parse_args())

    # Labels for the basic emotions; index 0 is the idle/default state.
    dict_emotion = [
        "Thinking...", "Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise"
    ]

    # Font size for text on video
    font_size = 0.6
    font = cv2.FONT_HERSHEY_SIMPLEX

    # Initialize Dlib
    face_op = face_helper.faceUtil()

    tol = 5  # Tolerance for setting neutral expression profile. Verifies eye and ear separation

    # Reduce image size to speed up processing.
    scaleFactor = 0.4

    # Scale up at the end for viewing.
    scaleUp = 3 / 4

    # Position of text on video when face is detected.
    pos_emotion = (np.arange(25, 225, 25) * scaleUp).astype(int)

    model = joblib.load("model/svm_rbf_model.sav")

    # Initialize the video stream and allow the camera sensor to warm up.
    print("[INFO] camera sensor warming up...")
    vs = VideoStream(usePiCamera=False).start()

    time.sleep(2.0)
    neutralBool = False

    emotion = 0
    # loop over the frames from the video stream
    while True:
        frame = vs.read()
        small_frame = cv2.resize(frame, (0, 0), fx=scaleFactor, fy=scaleFactor)
        # Get facial landmarks and position of face on image.
        vec, point, face_bool = face_op.get_vec(small_frame)

        if face_bool:
            # Get facial features.
            feat = facs_helper.facialActions(vec, small_frame)
            newFeatures = feat.detectFeatures()
            if not neutralBool:
                neutralBool, neutralFeatures = face_op.set_neutral(
                    feat, newFeatures, neutralBool, tol)
            else:
                facialMotion = np.asarray(feat.FaceFeatures(
                    neutralFeatures, newFeatures),
                                          dtype="float64").tolist()
                predict_single = model.predict([facialMotion])
                emotion = predict_single[0]

        # Increase size of frame for viewing.
        big_frame = cv2.resize(small_frame, (0, 0),
                               fx=scaleUp * 1 / scaleFactor,
                               fy=scaleUp * 1 / scaleFactor)

        for idxJ, dd in enumerate(dict_emotion):
            cv2.putText(big_frame, dd, (380, pos_emotion[idxJ]), font,
                        font_size, (255, 255, 255), 2, cv2.LINE_AA)

        cv2.putText(big_frame, dict_emotion[emotion],
                    (380, pos_emotion[emotion]), font, font_size, (255, 0, 0),
                    2, cv2.LINE_AA)
        cv2.imshow("Frame", big_frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
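
The main() above loads a pre-trained model/svm_rbf_model.sav, but the training
step is not shown. A minimal sketch of how such a model could be produced,
assuming the feature/label lists come from a process() call like the one in the
first snippet (the helper name and the SVC hyperparameters are assumptions):

import joblib
import numpy as np
from sklearn.svm import SVC

def train_and_save(features, labels, path="model/svm_rbf_model.sav"):
    # RBF-kernel SVM, matching the filename loaded in main().
    clf = SVC(kernel="rbf", gamma="scale")
    clf.fit(np.asarray(features, dtype="float64"), np.asarray(labels))
    joblib.dump(clf, path)
    return clf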
Example #4
                oriented_bool = False
                centered_bool = False
                go_bool = True
                face_seek_bool = False
                second_seek_bool = False

    elif face_bool:  # Face is detected.
        face_bool_count += 1
        # Get facial landmarks and position of face on image.
        vec, point, face_bool = face_op.get_vec(small_frame, centerFixed,
                                                face_bool)
        # Debug visualization (disabled):
        # cv2.circle(small_frame, (point[0], point[1]), 5, (0, 255, 0), -1)
        # time.sleep(5)

        # Get facial features.
        feat = facs_helper.facialActions(vec, small_frame)
        newFeaturesUpper = feat.detectFeatures()
        newFeaturesLower = feat.detectLowerFeatures()
        if not go_no_go:  # Testing flag: the robot persists in facial sentiment recognition.
            face_bool = True
        # Facial location is known and the face has been seen for at least 5 frames.
        if np.any(point) and face_bool_count > 5:
            face_bool_count = 6  # Keep the counter from growing without bound.
            # Check whether the robot is pointing right at the person's face.
            centered_face_bool, xShift_face, yShift_face = blob.check_centered(point)
            straighten_bool = blob.check_straighten(cameraPos)
            print('centered', centered_face_bool, 'straighten', straighten_bool)
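
blob.check_centered(point) is not defined in this excerpt. A plausible sketch
of such a centering check, assuming point is the face center in pixels; the
frame size and tolerance below are assumptions:

def check_centered(point, frame_w=320, frame_h=240, tol=20):
    # Pixel offset of the face center from the frame center.
    x_shift = point[0] - frame_w // 2   # positive: face is right of center
    y_shift = point[1] - frame_h // 2   # positive: face is below center
    centered = abs(x_shift) <= tol and abs(y_shift) <= tol
    return centered, x_shift, y_shift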
Example #5
    def get_features(self):
        _seq_count = 0
        for f in glob.glob(os.path.join(self.faces_folder_path, "*.png")):
            #    print("Processing file: {}".format(f))

            # Number of frames in this file's CK+ image sequence.
            self._finalLen = len(glob.glob(os.path.join(
                '/Users/joshualamstein/Desktop/CK+/cohn-kanade-images/',
                f[53:62], "*png")))

            self._imCount += 1
            img = io.imread(f)

            # Ask the detector to find the bounding boxes of each face. The 1 in the
            # second argument indicates that we should upsample the image 1 time. This
            # will make everything bigger and allow us to detect more faces.
            dets = self.detector(img, 1)
            #    print("Number of faces detected: {}".format(len(dets)))
            for k, d in enumerate(dets):
                #        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                #            k, d.left(), d.top(), d.right(), d.bottom()))
                # Get the landmarks/parts for the face in box d.
                shape = self.predictor(img, d)
                for i in range(shape.num_parts):
                    self.vec[i][0] = shape.part(i).x
                    self.vec[i][1] = shape.part(i).y

                # Save landmarks
                if len(self.landmarks) == 0:
                    self.landmarks = self.vec
#                    self.landmarksFlat = self.vec.flatten()
                else:
                    self.landmarks = np.hstack((self.landmarks, self.vec))
#                    self.landmarksFlat = np.vstack((self.landmarksFlat, vec.flatten()))
                if len(self._hotLand) == 0:
                    self._hotLand = self.vec
                else:
                    self._hotLand = np.dstack((self._hotLand, self.vec))

            if f[53:62] != self._old_f:
                _seq_count += 1  # Next sequence, new emotion
                self.seqBool = True
            else:
                self.seqBool = False  # Boolean for troubleshooting

            self._old_f = f[53:62]
            # Half-widths (in pixels) of the square ROIs sampled below.
            dist = 20
            dist_eye = 15
            dist_shift = 15
            dist_shift_brow = 15

            # Last frame of the sequence reached: process the stacked landmarks.
            if self._finalLen == self._imCount:
                self._imCount = 0
                # Reshape to (frame, landmark, coordinate).
                self._hotLand = np.asarray(self._hotLand)
                self._hotLand2 = np.transpose(self._hotLand)
                self._hotLand = np.swapaxes(self._hotLand2, 1, 2)
                # Get key facial distances
                for idx, land in enumerate(self._hotLand):
                    self.brow_ell = land[17:22, :]
                    self.brow_r = land[22:27, :]
                    self.eye_ell = land[36:42, :]
                    self.eye_r = land[42:48, :]
                    self.nose_line = land[27:31, :]
                    self.nose_arc = land[31:36, :]
                    self.lip_tu = land[48:54, :]
                    self.lip_bl = land[54:60, :]
                    self.lip_tl = land[60:64, :]
                    self.lip_bu = land[64:68, :]

                    # Regions of interest can detect wrinkles between the brow
                    # and on the corner of the eye. These are transient
                    # features as young people do not have as many wrinkles as
                    # older people. The Canny edge detector finds lines, and the
                    # algorithm computes the density over the sample area.
                    roi = img[self.nose_line[0, 1] -
                              dist:self.nose_line[0, 1] + dist,
                              self.nose_line[0, 0] -
                              dist:self.nose_line[0, 0] + dist]
                    roi_ell = img[self.eye_ell[0, 1] -
                                  dist_eye:self.eye_ell[0, 1] + dist_eye,
                                  self.eye_ell[0, 0] - dist_eye -
                                  dist_shift:self.eye_ell[0, 0] + dist_eye -
                                  dist_shift]
                    roi_r = img[self.eye_r[3, 1] - dist_eye:self.eye_r[3, 1] +
                                dist_eye, self.eye_r[3, 0] - dist_eye +
                                dist_shift:self.eye_r[3, 0] + dist_eye +
                                dist_shift]
                    roi_brow_ri = img[self.brow_r[0, 1] - dist -
                                      dist_shift_brow:self.brow_r[0, 1] +
                                      dist - dist_shift_brow,
                                      self.brow_r[0, 0] -
                                      dist:self.brow_r[0, 0] + dist]
                    roi_brow_li = img[self.brow_ell[4, 1] - dist -
                                      dist_shift_brow:self.brow_ell[4, 1] +
                                      dist - dist_shift_brow,
                                      self.brow_ell[4, 0] -
                                      dist:self.brow_ell[4, 0] + dist]
                    roi_brow_ro = img[self.brow_r[4, 1] - dist -
                                      dist_shift_brow:self.brow_r[4, 1] +
                                      dist - dist_shift_brow,
                                      self.brow_r[4, 0] -
                                      dist:self.brow_r[4, 0] + dist]
                    roi_brow_lo = img[self.brow_ell[0, 1] - dist -
                                      dist_shift_brow:self.brow_ell[0, 1] +
                                      dist - dist_shift_brow,
                                      self.brow_ell[0, 0] -
                                      dist:self.brow_ell[0, 0] + dist]
                    canny = cv2.Canny(roi, 100, 200)
                    canny_eye_r = cv2.Canny(roi_r, 100, 200)
                    canny_eye_ell = cv2.Canny(roi_ell, 100, 200)
                    canny_brow_ri = cv2.Canny(roi_brow_ri, 100, 200)
                    canny_brow_li = cv2.Canny(roi_brow_li, 100, 200)
                    canny_brow_ro = cv2.Canny(roi_brow_ro, 100, 200)
                    canny_brow_lo = cv2.Canny(roi_brow_lo, 100, 200)
                    self.furrow = np.sum(canny / 255) / dist**2
                    self.wrinkle_ell = np.sum(
                        canny_eye_ell / 255) / dist_eye**2
                    self.wrinkle_r = np.sum(canny_eye_r / 255) / dist_eye**2
                    self.brow_ri = np.sum(canny_brow_ri / 255) / dist**2
                    self.brow_li = np.sum(canny_brow_li / 255) / dist**2
                    self.brow_ro = np.sum(canny_brow_ro / 255) / dist**2
                    self.brow_lo = np.sum(canny_brow_lo / 255) / dist**2

                    # Used for visualizing Canny edge detection in the ROIs (disabled):
                    # if self._imCount % 8 == 0:
                    #     cv2.imshow('ro_%i' % _seq_count, roi_brow_ro)
                    #     cv2.imshow('ri_%i' % _seq_count, roi_brow_ri)
                    #     cv2.imshow('lo_%i' % _seq_count, roi_brow_lo)
                    #     cv2.imshow('li_%i' % _seq_count, roi_brow_li)
                    #     cv2.imshow('canny_ro_%i' % _seq_count, canny_brow_ro)
                    #     cv2.imshow('canny_ri_%i' % _seq_count, canny_brow_ri)
                    #     cv2.imshow('canny_lo_%i' % _seq_count, canny_brow_lo)
                    #     cv2.imshow('canny_li_%i' % _seq_count, canny_brow_li)
                    #     cv2.imshow('image_%i' % _seq_count, img)
                    #     cv2.imshow('roi_%i' % _seq_count, roi)
                    #     cv2.imshow('roi_ell_%i' % _seq_count, roi_ell)
                    #     cv2.imshow('roi_r_%i' % _seq_count, roi_r)
                    #     cv2.imshow('canny_%i' % _seq_count, canny)
                    #     cv2.imshow('canny_eye_r_%i' % _seq_count, canny_eye_r)
                    #     cv2.imshow('canny_eye_ell_%i' % _seq_count, canny_eye_ell)

                    feat = facs_helper.facialActions(
                        self.brow_r, self.brow_ell, self.eye_ell, self.eye_r,
                        self.lip_tu, self.lip_bu, self.lip_tl, self.lip_bl,
                        self.nose_line, self.nose_arc, self.furrow,
                        self.wrinkle_ell, self.wrinkle_r, self.brow_ri,
                        self.brow_li, self.brow_ro, self.brow_lo)
                    self.newFeatures = feat.detectFeatures()
                    self.newFeaturesLower = feat.detectLowerFeatures()
                    self.newFeatures = np.array(self.newFeatures)
                    self.newFeaturesLower = np.array(self.newFeaturesLower)
                    #                    print('newFeaturesInit', self.newFeatures)

                    if idx == 0:
                        self.firstFeatures = self.newFeatures
                        self.firstFeaturesLower = self.newFeaturesLower

                        # Flag files whose features (excluding the last three
                        # entries) are essentially zero, and nudge exact zeros
                        # to avoid dividing by zero later.
                        if np.any(abs(self.newFeatures[:len(self.newFeatures) - 3]) < 1E-3):
                            self.breakFolder.append(f)
                            if len(self.holdFeatures) == 0:
                                self.holdFeatures = self.newFeatures
                            else:
                                self.holdFeatures = np.vstack(
                                    (self.holdFeatures, self.newFeatures))
                            self.newFeatures[self.newFeatures == 0] = 1E-5
                        if np.any(abs(self.newFeaturesLower) < 1E-3):
                            self.breakFolderLower.append(f)
                            self.newFeaturesLower[self.newFeaturesLower == 0] = 1E-5
                    self._hotFeatures = np.concatenate(
                        [self._hotFeatures,
                         self.newFeatures])  # just for debugging
                    self._hotFeaturesLower = np.concatenate(
                        [self._hotFeaturesLower, self.newFeaturesLower])
                    self.oldFeatures = self.newFeatures
                    self.oldFeaturesLower = self.newFeaturesLower
                self.lastFeatures = self.newFeatures
                self.lastFeaturesLower = self.newFeaturesLower
                # Find changes from motion in facial features.
                if len(self.facialMotion) == 0:
                    self.newFeatures_Array = self.newFeatures
                    self.facialMotion = feat.UpperFaceFeatures(
                        self.firstFeatures, self.lastFeatures)
                    self.facialMotionLower = feat.LowerFaceFeatures(
                        self.firstFeaturesLower, self.lastFeaturesLower)
                else:
                    self.newFeatures_Array = np.vstack(
                        (self.newFeatures_Array, self.newFeatures))
                    self.facialMotion = np.vstack(
                        (self.facialMotion,
                         feat.UpperFaceFeatures(self.firstFeatures,
                                                self.lastFeatures)))
                    self.facialMotionLower = np.vstack(
                        (self.facialMotionLower,
                         feat.LowerFaceFeatures(self.firstFeaturesLower,
                                                self.lastFeaturesLower)))

                if len(self.features) == 0:
                    self.features = self._hotFeatures
                else:
                    self.features = np.hstack(
                        (self.features, self._hotFeatures))
                self._hotLand = []
                self._hotFeatures = []

        print("Finished features")