Example #1
0
# Face-collection preview loop: read frames from the camera, draw a box
# around each detected face, and raise a spoken/printed warning (at most
# once per `limit_time`-second window) when no face is visible.
# NOTE(review): `cam`, `faceutil`, `audioplayer`, `audio_dir`, `error`,
# `start_time` and `limit_time` are assumed to be defined earlier in the
# script (that part is not shown here) — confirm before running standalone.
frame_count = 0
while True:
    frame_count += 1
    _, image = cam.read()
    # Drop the first 10 frames (camera warm-up / auto-exposure settling).
    if frame_count <= 10:
        continue
    image = cv2.flip(image, 1)

    # A "no face" warning is active: clear it once limit_time seconds pass.
    if error == 1:
        elapsed = time.time() - start_time
        print(elapsed)
        if elapsed >= limit_time:
            error = 0

    face_location_list = faceutil.get_face_location(image)
    for (left, top, right, bottom) in face_location_list:
        cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)

    cv2.imshow('Collecting Faces', image)  # show the annotated frame
    # Press 'ESC' to exit the video loop.
    key = cv2.waitKey(100) & 0xff
    if key == 27:
        break

    # No face detected in this frame and no warning currently pending:
    # warn once and start the cool-down timer.
    if error == 0 and not face_location_list:
        print('[WARNING] 没有检测到人脸')
        audioplayer.play_audio(os.path.join(audio_dir, 'no_face_detected.mp3'))
        error = 1
        start_time = time.time()
Example #2
0
    # --- per-frame body of an enclosing capture loop (header not shown) ---
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame, then we
    # have reached the end of the video
    if input_video and not grabbed:
        break

    # live camera input: mirror the frame so it behaves like a mirror
    if not input_video:
        frame = cv2.flip(frame, 1)

    # resize the frame, convert it to grayscale, and then clone the
    # original frame so we can draw on it later in the program
    frame = imutils.resize(frame, width=600)

    face_location_list = faceutil.get_face_location(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # loop over the face bounding boxes
    for (left, top, right, bottom) in face_location_list:
        # extract the ROI of the face from the grayscale image,
        # resize it to a fixed 28x28 pixels, and then prepare the
        # ROI for classification via the CNN
        # NOTE(review): the actual size comes from the
        # FACIAL_EXPRESSION_TARGET_WIDTH/HEIGHT constants (defined
        # elsewhere) — confirm they really are 28x28.
        roi = gray[top:bottom, left:right]
        roi = cv2.resize(
            roi,
            (FACIAL_EXPRESSION_TARGET_WIDTH, FACIAL_EXPRESSION_TARGET_HEIGHT))
        # scale pixels to [0, 1], then add a leading batch axis —
        # presumably yielding shape (1, H, W, 1) for the CNN; verify
        # against the model's expected input.
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
Example #3
0
File: SmileDetect.py  Project: Yimyl/Care
        # Tail of an enclosing classification function (header outside this
        # view — `sess`, `logits`, `x`, `image_arr` and `lists` are defined
        # there).  Runs the model on the prepared batch and returns the
        # label with the highest score.
        prediction = sess.run(logits, feed_dict={x: image_arr})
        max_index = np.argmax(prediction)
        print('预测的标签为:', max_index, lists[max_index])
        print('预测的结果为:', prediction)
        return lists[max_index]


if __name__ == '__main__':
    # Smoke test: run face detection on every image in `img_dir` and crop
    # each detected face, resized to 48x48 (the expression model's input).
    import time
    from oldcare.facial import FaceUtil
    imgs = os.listdir(img_dir)
    for img in imgs:
        print(img)
        image = cv2.imread(img_dir + '/' + img)
        # NOTE(review): FaceUtil is re-instantiated for every image; if its
        # constructor loads a detection model this is wasteful — consider
        # hoisting it above the loop.
        faceutil = FaceUtil()
        face_location_list = faceutil.get_face_location(image)
        print(len(face_location_list))
        # NOTE(review): the names suggest (x, y, w, h), but the slice below
        # treats the tuple as (left, top, right, bottom) — consistent with
        # how get_face_location results are unpacked elsewhere in this file.
        # The names are misleading; the slicing itself looks intentional.
        for (x, y, w, h) in face_location_list:
            face = image[y:h, x:w]
            face = cv2.resize(face, (48, 48))