def showScreenAndDectect(capture):
    print("Face dececting...")
    cnt = 5
    while cnt:
        flag, frame = capture.read()
        faceCoordinates = fdu.getFaceCoordinates(frame)
        refreshFrame(frame, faceCoordinates)

        if faceCoordinates is not None:
            cnt -= 1
            face_img = fdu.preprocess(frame,
                                      faceCoordinates,
                                      face_shape=FACE_SHAPE)
            #cv2.imshow(windowsName, face_img)

            # Add the batch and channel dimensions the model expects.
            input_img = np.expand_dims(face_img, axis=0)
            input_img = np.expand_dims(input_img, axis=0)

            result = model.predict(input_img)[0]
            if cnt == 4:
                tot_result = result
            else:
                tot_result += result
            index = np.argmax(result)
            print('Frame', 5 - cnt, ':', emo[index], 'prob:', max(result))
    # Pick the emotion with the highest accumulated probability.
    index = np.argmax(tot_result)
    print('Final decision:', emo[index], 'prob:', max(tot_result))
    return emo[index]
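For context, a minimal driver for these snippets might look like the sketch below; `model`, `emo`, `FACE_SHAPE`, and the `fdu` helper module are assumed to be defined elsewhere in the repository, as in the snippets themselves.

import cv2

# Hypothetical driver sketch; showScreenAndDectect and its globals come
# from the surrounding repository.
if __name__ == '__main__':
    capture = cv2.VideoCapture(0)  # open the default webcam
    if not capture.isOpened():
        raise RuntimeError('Could not open video capture device')
    try:
        emotion = showScreenAndDectect(capture)
        print('Detected emotion:', emotion)
    finally:
        capture.release()
        cv2.destroyAllWindows()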
Example 2
def showScreenAndDectect(capture):
    while True:
        flag, frame = capture.read()
        faceCoordinates = fdu.getFaceCoordinates(frame)
        refreshFrame(frame, faceCoordinates)

        if faceCoordinates is not None:
            face_img = fdu.preprocess(frame, faceCoordinates, face_shape=FACE_SHAPE)
            #cv2.imshow(windowsName, face_img)

            input_img = np.expand_dims(face_img, axis=0)
            input_img = np.expand_dims(input_img, axis=0)

            result = model.predict(input_img)[0]
            index = np.argmax(result)
            # Cast numpy floats to plain Python floats for serialization.
            jsonObj = {
                "Angry": float(result[0]),
                "Fear": float(result[1]),
                "Happy": float(result[2]),
                "Sad": float(result[3]),
                "Surprise": float(result[4]),
                "Neutral": float(result[5])
            }

            # POST the scores; a GET request is not meant to carry a body.
            r = requests.post(url=API_ENDPOINT, data=jsonObj)

            time.sleep(1)  # was pause(1); throttle to roughly one request per second

            print(emo[index], 'prob:', max(result))
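If the endpoint expects a JSON body rather than form fields, `requests` can serialize the dict directly. A minimal sketch, assuming `API_ENDPOINT` accepts `application/json`:

import requests

# Hypothetical JSON variant of the request above.
resp = requests.post(API_ENDPOINT, json=jsonObj, timeout=5)
resp.raise_for_status()  # surface HTTP errors instead of ignoring them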
Example 3
def showScreenAndDectect(capture):
    while True:
        flag, frame = capture.read()
        faceCoordinates = fdu.getFaceCoordinates(frame)
        refreshFrame(frame, faceCoordinates)
        
        if faceCoordinates is not None:
            face_img = fdu.preprocess(frame, faceCoordinates, face_shape=FACE_SHAPE)
            #cv2.imshow(windowsName, face_img)

            input_img = np.expand_dims(face_img, axis=0)
            input_img = np.expand_dims(input_img, axis=0)

            result = model.predict(input_img)[0]
            index = np.argmax(result)
            print(emo[index], 'prob:', max(result))
Example 4
    def getEmotion(self):
        i = 0
        capture = self.getCapture()
        seq = []
        # Collect predictions for 5 frames in which a face is detected.
        while i < 5:
            flag, frame = capture.read()
            faceCoordinates = fdu.getFaceCoordinates(frame)
            if faceCoordinates is not None:
                face_img = fdu.preprocess(frame,
                                          faceCoordinates,
                                          face_shape=FACE_SHAPE)
                input_img = np.expand_dims(face_img, axis=0)
                input_img = np.expand_dims(input_img, axis=0)
                result = model.predict(input_img)[0]
                index = np.argmax(result)
                print(emo[index], 'prob:', max(result))
                seq.append(emo[index])
                i += 1
        # Return the middle of the five predicted labels.
        return seq[2]
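Returning only the middle element discards most of the collected sequence; a majority vote over all five labels would be a more robust reduction. A minimal sketch, assuming `seq` holds the predicted labels as above:

from collections import Counter

def majority_label(seq):
    # Most frequent label among the collected predictions.
    return Counter(seq).most_common(1)[0][0]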
Example 5
def showScreenAndDectect(capture):
    while True:
        '''
        Capture frame-by-frame.
        read() returns two values:
            1. A return code, which tells us whether we have run out of
               frames (this happens when reading from a file; it does not
               matter for a webcam, which can record indefinitely, so we
               ignore it).
            2. The actual video frame read (one frame per loop iteration).
        '''

        flag, frame = capture.read()
        faceCoordinates = fdu.getFaceCoordinates(frame)
        # refreshFrame(frame, faceCoordinates)

        # Alternative solution: detect and display multiple faces here.
        '''
        flag, frame = capture.read()
        faceCoordinates = fdu.getFaceCoordinates2(frame)
        refreshFrame2(frame, faceCoordinates)
        '''

        if faceCoordinates is not None:
            face_img = fdu.preprocess(frame,
                                      faceCoordinates,
                                      face_shape=FACE_SHAPE)
            #cv2.imshow(windowsName, face_img)

            input_img = np.expand_dims(face_img, axis=0)
            input_img = np.expand_dims(input_img, axis=0)

            result = model.predict(input_img)[0]
            index = np.argmax(result)
            print(emo[index], 'probability is: ', max(result))

            refreshFrame(frame, faceCoordinates, emo[index])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    capture.release()
    cv2.destroyAllWindows()
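`refreshFrame` is defined elsewhere in the repository; a plausible stand-in that draws the detection box and label with OpenCV might look like the following. The signature and the (x, y, w, h) coordinate layout are inferred from the calls above, not confirmed by the source.

import cv2

def refreshFrame(frame, faceCoordinates, label=None):
    # Hypothetical stand-in: draw the face box and optional emotion label.
    if faceCoordinates is not None:
        x, y, w, h = faceCoordinates  # assumed (x, y, w, h) layout
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if label is not None:
            cv2.putText(frame, label, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow('Emotion', frame)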
Example 6
    def sendCertifyResult(self, dataURL, identity):
        head = "data:image/jpeg;base64,"
        assert dataURL.startswith(head)
        imgdata = base64.b64decode(dataURL[len(head):])
        # The decoded payload is bytes, so wrap it in io.BytesIO
        # (StringIO only holds text and fails on binary data in Python 3).
        img = Image.open(io.BytesIO(imgdata))
        # Preprocess frame: mirror horizontally and copy into an RGB buffer.
        buf = np.fliplr(np.asarray(img))
        rgbFrame = np.zeros((300, 400, 3), dtype=np.uint8)
        rgbFrame[:, :, 0] = buf[:, :, 0]
        rgbFrame[:, :, 1] = buf[:, :, 1]
        rgbFrame[:, :, 2] = buf[:, :, 2]
        # Alternative channel order (BGR swap) if the source is not RGB:
        '''
        rgbFrame[:, :, 0] = buf[:, :, 2]
        rgbFrame[:, :, 1] = buf[:, :, 1]
        rgbFrame[:, :, 2] = buf[:, :, 0]
        '''
        fcoord = fdu.getFaceCoordinates(rgbFrame)
        print(fcoord)
        if fcoord is None:
            return
        alignedFace = fdu.preprocess(rgbFrame, fcoord, (64, 64))
        cv2.imwrite("test.png", alignedFace)
        # Add a trailing channel axis so the shape matches the model input.
        alignedFace = np.expand_dims(alignedFace, axis=-1)
        print(alignedFace.shape)
        # Earlier variant: align the face with openface/dlib landmarks instead.
        '''
        bb = align.getLargestFaceBoundingBox(rgbFrame)
        bbs = [bb] if bb is not None else []
        for bb in bbs:
            # print(len(bbs))
            landmarks = align.findLandmarks(rgbFrame, bb)
            alignedFace = align.align(args.imgDim, rgbFrame, bb,
                                      landmarks=landmarks,
                                      landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        alignedFace = cv2.cvtColor(alignedFace, cv2.COLOR_RGB2GRAY)
        alignedFace = cv2.resize(alignedFace, (48, 48))
        cv2.imwrite("test.png", alignedFace)
        alignedFace = np.expand_dims(alignedFace, axis=0)
        #print (alignedFace.shape)
        '''
        if alignedFace is None:
            return

        # Normalize pixel values to [0, 1] before buffering the frame.
        self.certifying_buf.append(alignedFace / 255.0)

        if len(self.certifying_buf) >= 15:
            frame_to_be_tested = np.asarray(self.certifying_buf)
            result = self.emo_model.predict(frame_to_be_tested)
            print(result)
            print(result.sum(0))
            # Sum the per-class probabilities over the 15 buffered frames and
            # take the argmax: an average-probability vote across the batch.
            emotion_type = np.argmax(result.sum(0))

            # Certification succeeds only when the expected emotion class
            # (index 3 in the model's label order) co-occurs with a
            # recognized identity.
            if emotion_type == 3 and identity != -1:
                # send success signal
                print("successful")
                msg = {"type": "CERTIFIED_SUCCESS"}
            elif identity == -1:
                msg = {"type": "CERTIFIED_FAIL", "val": 1}
            else:
                msg = {"type": "CERTIFIED_FAIL", "val": 0}

            self.sendMessage(json.dumps(msg))
            # empty buffer
            self.certifying_buf = []
            # reset certifying signal
            self.certifying = False
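To exercise the data-URL path in isolation, the payload this method expects can be built from a JPEG on disk. A minimal sketch (the file name is hypothetical):

import base64

# Build the "data:image/jpeg;base64,..." payload sendCertifyResult expects.
with open('sample.jpg', 'rb') as f:  # hypothetical test image
    payload = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode('ascii')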