Code example #1
def test_registry():
    """Exercise the Knob registry.

    After clearing, the defaults string is empty; after registering three
    knobs, ``get_knob_defaults`` lists each as a commented NAME=default
    pair preceded by its description.
    """
    Knob.clear_registry()
    assert Knob.get_knob_defaults() == ''

    # One reference per knob. The original code rebound ``k1`` for the
    # third knob, silently shadowing the first knob's reference.
    k1 = Knob('K1', 'First knob', description='Omi')
    k2 = Knob('K2', 'Second knob', description='Padre')
    k3 = Knob('K3', 'Third knob', description='Magnifici')

    print(Knob.get_knob_defaults())
    print(repr(Knob.get_knob_defaults()))

    envout = '# Omi\n# K1=First knob\n\n# Padre\n# K2=Second knob\n\n# Magnifici\n# K3=Third knob\n'
    assert Knob.get_knob_defaults() == envout
Code example #2
File: cli.py  Project: sthysel/dedrowse
def cli(
    shape_predictor,
    blink_ratio,
    trigger,
    set_alarm,
    alarm_sound,
    alert_msg,
    webcam,
    frame_width,
    print_knobs,
):
    """ Dedrowse drowsines detector """

    # Short-circuit: dump the knob defaults and stop.
    if print_knobs:
        print(Knob.get_knob_defaults())
        sys.exit(1)

    drowsiness_alarm = AlarmDetector(blink_ratio, trigger=trigger, alert_message=alert_msg)

    # Set up dlib's HOG-based face detector and the facial-landmark model.
    click.echo('Loading facial landmark predictor...')
    face_finder = dlib.get_frontal_face_detector()
    landmark_model = dlib.shape_predictor(shape_predictor)

    # Landmark index ranges covering the left and right eye respectively.
    left_lo, left_hi = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']
    right_lo, right_hi = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']

    # Threaded video capture from the chosen webcam.
    click.echo('Starting video stream thread')
    stream = VideoStream(webcam).start()

    # Main loop: one iteration per captured frame.
    while True:
        frame = stream.read()
        if frame is None:
            # Camera not delivering yet; back off briefly and retry.
            click.echo('No frame')
            time.sleep(1)
            continue

        # Normalise the frame size and work on a grayscale copy.
        frame = imutils.resize(frame, width=frame_width)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        for face in face_finder(gray, 0):
            # Facial landmarks as a NumPy array of (x, y) points.
            landmarks = face_utils.shape_to_np(landmark_model(gray, face))

            # Eye aspect ratio per eye, averaged into one openness score.
            left_eye = landmarks[left_lo:left_hi]
            right_eye = landmarks[right_lo:right_hi]
            ear = (eye_aspect_ratio(left_eye) + eye_aspect_ratio(right_eye)) / 2.0

            drowsiness_alarm.check(ear, frame)
            draw_eyes(ear, frame, left_eye, right_eye)

        # Display the annotated frame; quit when 'q' is pressed.
        cv2.imshow('Dedrowser is looking out for you', frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    # Tear down the display window and the capture thread.
    cv2.destroyAllWindows()
    stream.stop()