Example #1
    def test_inference_state(self):
        with InferenceEngine() as engine:
            state = engine.get_inference_state()
            self.assertFalse(state.loaded_models)
            self.assertFalse(state.processing_models)

            model_name = engine.load_model(fd.model())
            state = engine.get_inference_state()
            self.assertEqual(set(state.loaded_models), {model_name})
            self.assertFalse(state.processing_models)

            engine.reset()
            state = engine.get_inference_state()
            self.assertFalse(state.loaded_models)
            self.assertFalse(state.processing_models)

            model_name = engine.load_model(fd.model())

            with PiCamera(sensor_mode=4):
                engine.start_camera_inference(model_name)
                state = engine.get_inference_state()
                self.assertEqual(set(state.loaded_models), {model_name})
                self.assertEqual(set(state.processing_models), {model_name})

                engine.reset()
                state = engine.get_inference_state()
                self.assertFalse(state.loaded_models)
                self.assertFalse(state.processing_models)
Example #3
def detect(num_frames):
    """Face detection camera inference example"""

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    x, y, width, height = face.bounding_box
                    annotator.bounding_box(
                        (scale_x * x, scale_y * y, scale_x *
                         (x + width), scale_y * (y + height)),
                        fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
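Note: several of these examples call an avg_joy_score helper that is not shown. A minimal sketch consistent with Example #9, which averages joy_score over all detected faces (the helper name is taken from the calls above; the body is an assumption):

def avg_joy_score(faces):
    # Average joy_score across detected faces; 0.0 when there are none.
    if faces:
        return sum(face.joy_score for face in faces) / len(faces)
    return 0.0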
Example #4
 def run(self):
     while self._running:
         try:
             self.logger.debug('loading inference model ...')
             with CameraInference(face_detection.model()) as inference:
                 self.logger.debug('running inference ...')
                 for result in inference.run():
                     faces = face_detection.get_faces(result)
                     if faces:
                         self.logger.debug('found {} faces'.format(
                             len(faces)))
                     outgoing_signals = []
                     for face in faces:
                         signal_dict = {
                             'bounding_box': face.bounding_box,
                             'face_score': face.face_score,
                             'joy_score': face.joy_score,
                         }
                         outgoing_signal = Signal(signal_dict)
                         outgoing_signals.append(outgoing_signal)
                     if not self._running:
                         break
                     self.notify_signals(outgoing_signals)
         except Exception:
             self.logger.exception('failed to get inference result!')
             self.reset_camera()
     self.release_camera()
Example #5
def facedetect():
    with PiCamera() as camera, Leds() as leds:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()
        leds.update(Leds.privacy_on())

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture(
                        'faces_' + str(datetime.datetime.now()) + '.jpg')
                    # `led` is assumed to be a gpiozero LED defined elsewhere
                    # in this module; blink it once to signal the capture.
                    print(led.is_active)
                    led.on()
                    print(led.is_active)
                    led.off()
                    print(led.is_active)
                    break

        # Stop preview and turn the privacy LED back off.
        camera.stop_preview()
        leds.update(Leds.privacy_off())
Example #6
def run_inference(num_frames, on_loaded):
    """Yields (faces, (frame_width, frame_height)) tuples."""
    with CameraInference(face_detection.model()) as inference:
        on_loaded()
        for result in inference.run(num_frames):
            yield face_detection.get_faces(result), (result.width,
                                                     result.height)
Example #7
 def model_selector(argument):
     options = {
         "object": object_detection.model(),
         "face": face_detection.model(),
         "class": image_classification.model()
     }
     return options.get(argument, "nothing")
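A hypothetical call site for model_selector. Note that an unknown argument returns the string "nothing" rather than raising, so callers must check for that sentinel (the error handling below is illustrative):

model = model_selector('face')
if model == "nothing":
    raise ValueError('expected one of: object, face, class')
with CameraInference(model) as inference:
    pass  # run inference as in the other examples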
Example #8
def detect_face():
    with CameraInference(face_detection.model()) as camera_inference:
        counter = 1
        face_box = None  # stays None if the stream ends before a stable face
        x_history, y_history, w_history, h_history = [], [], [], []
        for result in camera_inference.run():
            check_termination_trigger()
            faces = face_detection.get_faces(result)
            face = select_face(faces)
            if face:
                x, y, w, h = face.bounding_box
                x_err = error_update(x_history, x)
                y_err = error_update(y_history, y)
                w_err = error_update(w_history, w)
                h_err = error_update(h_history, h)

                if face_detection_is_stable(x_err,
                                            y_err,
                                            w_err,
                                            h_err,
                                            cutoff=0.03):
                    face_box = (int(sum(x_history) / len(x_history)),
                                int(sum(y_history) / len(y_history)),
                                int(sum(w_history) / len(w_history)),
                                int(sum(h_history) / len(h_history)))
                    break
                counter += 1
        return face_box
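error_update and face_detection_is_stable are not shown in this snippet. One plausible, purely illustrative sketch: error_update appends the new value to a bounded history and returns the relative spread of recent samples, and the detection counts as stable once every spread falls below the cutoff:

def error_update(history, value, window=5):
    # Keep a bounded history and return the relative spread of recent values.
    history.append(value)
    if len(history) > window:
        del history[0]
    mean = sum(history) / len(history)
    return (max(history) - min(history)) / mean if mean else 1.0

def face_detection_is_stable(x_err, y_err, w_err, h_err, cutoff):
    return all(err < cutoff for err in (x_err, y_err, w_err, h_err))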
Example #9
    def _run_detector(self):
        with PiCamera() as camera, PrivacyLED():
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution that inference runs on.
            camera.sensor_mode = 4
            camera.resolution = (1640, 1232)
            camera.framerate = 15
            # Blend the preview layer with the alpha value from the flags.
            camera.start_preview(alpha=self._preview_alpha)
            with CameraInference(face_detection.model()) as inference:
                self._play_sound(MODEL_LOAD_SOUND)
                self._animator.start()
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    # Calculate joy score as an average for all detected faces.
                    joy_score = 0.0
                    if faces:
                        joy_score = sum([face.joy_score for face in faces]) / len(faces)

                    # Append new joy score to the window and calculate mean value.
                    self._joy_score_window.append(joy_score)
                    self.joy_score = sum(self._joy_score_window) / len(
                        self._joy_score_window)
                    if self._num_frames == i or not self._run_event.is_set():
                        break
Example #10
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution that inference runs on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        faces = face_detection.get_faces(result)
                        photographer.update_faces(faces)

                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)

                        if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                            player.play(JOY_SOUND)
                        elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                            player.play(SAD_SOUND)

                        prev_joy_score = joy_score

                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
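MovingAverage is another helper these snippets rely on without showing it. A minimal sketch of a windowed mean matching its use here (MovingAverage(10) followed by repeated .next(value) calls):

import collections

class MovingAverage:
    def __init__(self, size):
        self._window = collections.deque(maxlen=size)

    def next(self, value):
        # Push the new sample and return the mean of the retained window.
        self._window.append(value)
        return sum(self._window) / len(self._window)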
Example #11
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution that inference runs on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                annotator.update()
                print('Iteration #%d: num_faces=%d' % (i, len(faces)))
                # Write the face count to a file.
                with open('../wwww/check_faces.txt', 'w') as f:
                    f.write(str(len(faces)))

        camera.stop_preview()
Example #12
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    leds = Leds()
    leds.reset()
    leds.update(Leds.privacy_on())

    noCustomerDelay = 0


    with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera:
    # with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
    # with PiCamera() as camera:
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    noCustomerDelay = 0
                    leds.update(Leds.rgb_on(GREEN))
                    # stream = io.BytesIO()
                    # camera.capture(stream, format='jpeg')
                    # stream.seek(0)
                    camera.capture('faces.jpg')

                    faces = GetFaceId('faces.jpg')
                    print(faces)
                    if len(faces) > 0:
                        result = GetUserId(faces[0])
                        print(result)

                        highestScore = 0
                        userId = ""
                        for face in result:
                            for candidate in face['candidates']:
                                # Track the best-scoring candidate; the original
                                # never updated highestScore, so the comparison
                                # always used 0 and the last candidate won.
                                if highestScore < candidate['confidence']:
                                    highestScore = candidate['confidence']
                                    userId = candidate['personId']


                        InfoVendingMachine("10", userId)

                        print(userId)
                    # break
                else:
                    if noCustomerDelay >= 30:
                        leds.update(Leds.rgb_on(WHITE))
                        InfoVendingMachine("10", '')
                        noCustomerDelay = 0
                    else:
                        noCustomerDelay += 1


        camera.stop_preview()

    leds.reset()
Example #13
 def test_face_detection(self):
     avg_end_to_end, avg_bonnet = self.benchmarkModel(
         face_detection.model(), face_detection.get_faces)
     # Latency depends on number of faces in the scene, which is
     # unpredictable when the test runs. Setting a larger variation value
     # here to accommodate this.
     self.assertLatency(avg_bonnet, 76.0, variation=0.6)
     self.assertLatency(avg_end_to_end, 91.0, variation=0.6)
Example #14
def _initialize_inference() -> CameraInference:
    '''
    One time, the process died without stopping inference, which
    made it impossible to start again. So if we get InferenceException
    when trying to initialize CameraInference, we retry after resetting
    the InferenceEngine.
    '''
    return CameraInference(face_detection.model())
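As written, the function does not implement the retry its docstring describes. A minimal sketch of what that retry could look like, reusing InferenceEngine.reset() from Example #1 and InferenceException from Example #31 (the wrapper below is an assumption, not the original author's code):

def _initialize_inference_with_retry() -> CameraInference:
    try:
        return CameraInference(face_detection.model())
    except InferenceException:
        # Reset the engine left in a bad state by a crashed process, then retry.
        with InferenceEngine() as engine:
            engine.reset()
        return CameraInference(face_detection.model())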
Example #16
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # pool = [ImageProcessor() for i in range(4)]

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()
        # time.sleep(2)

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        # annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            # camera.capture_sequence(streams(), use_video_port=True)
            # print("after capture_sequence")
            print(args.num_frames)
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                # annotator.clear()
                # for face in faces:
                #     annotator.bounding_box(transform(face.bounding_box), fill=0)
                # annotator.update()
                if len(faces) > 0:
                    # start to identify the person
                    print("Has Customer")
                    hasCustomer = True
                else:
                    print("No Customer")
                    hasCustomer = False

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
Example #17
    def test_camera_inference(self):
        with PiCamera(sensor_mode=4):
            with CameraInference(fd.model()) as inference:
                state = inference.engine.get_inference_state()
                self.assertEqual(len(state.loaded_models), 1)
                self.assertEqual(len(state.processing_models), 1)

                results = [fd.get_faces(result) for result in inference.run(10)]
                self.assertEqual(len(results), 10)
Example #18
    def run(self, num_frames, preview_alpha, image_format, image_folder, enable_streaming):
        logger.info('Starting...')
        leds = Leds()

        with contextlib.ExitStack() as stack:
            player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
            photographer = stack.enter_context(Photographer(image_format, image_folder))
            animator = stack.enter_context(Animator(leds))
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution that inference runs on.
            # Use half of that for video streaming (820x616).
            camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616)))
            stack.enter_context(PrivacyLed(leds))

            server = None
            if enable_streaming:
                server = stack.enter_context(StreamingServer(camera))
                server.run()

            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(camera)

            if preview_alpha > 0:
                camera.start_preview(alpha=preview_alpha)

            button = Button(BUTTON_GPIO)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)

                    joy_score = joy_score_moving_average.next(average_joy_score(faces))
                    animator.update_joy_score(joy_score)
                    if server:
                        data = server_inference_data(result.width, result.height, faces, joy_score)
                        server.send_inference_data(data)

                    if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        player.play(JOY_SOUND)
                    elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        player.play(SAD_SOUND)

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == num_frames:
                        break
Example #19
    def test_camera_inference(self):
        with PiCamera(sensor_mode=4):
            with CameraInference(fd.model()) as inference:
                state = inference.engine.get_inference_state()
                self.assertEqual(len(state.loaded_models), 1)
                self.assertEqual(len(state.processing_models), 1)

                results = [
                    fd.get_faces(result) for result in inference.run(10)
                ]
                self.assertEqual(len(results), 10)
Example #20
    def testFaceDetectionWithParams(self):
        with TestImage('faces.jpg') as image:
            with ImageInference(face_detection.model()) as inference:
                params = {'max_face_size': 500}
                faces = face_detection.get_faces(inference.run(image, params))
                self.assertEqual(1, len(faces))

                face0 = faces[0]
                self.assertAlmostEqual(face0.face_score, 0.884, delta=0.001)
                self.assertAlmostEqual(face0.joy_score, 0.073, delta=0.001)
                self.assertEqual((748.0, 1063.0, 496.0, 496.0), face0.bounding_box)
Example #21
def main():
    with PiCamera() as camera:
        camera.resolution = (1640, 922)
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture('faces.jpg')
                    break

        camera.stop_preview()
Example #22
    def _run(self):
        logger.info('Starting...')
        leds = Leds()

        with contextlib.ExitStack() as stack:
            player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
            photographer = stack.enter_context(
                Photographer(self.args.image_format, self.args.image_folder))
            animator = stack.enter_context(Animator(leds))
            stack.enter_context(PrivacyLed(leds))

            server = None
            if self.args.enable_streaming:
                server = stack.enter_context(StreamingServer(self.camera))
                server.run()

            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(self.camera)

            button = Button(BUTTON_GPIO)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)
                    avg_joy_score = average_joy_score(faces)
                    joy_score = joy_score_moving_average.next(avg_joy_score)
                    animator.update_joy_score(joy_score)
                    if server:
                        data = server_inference_data(result.width,
                                                     result.height, faces,
                                                     joy_score)
                        server.send_inference_data(data)
                    if avg_joy_score > JOY_SCORE_MIN:
                        photographer.shoot(self.camera)


                    # if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                    #     player.play(JOY_SOUND)
                    # elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                    #     player.play(SAD_SOUND)

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == self.args.num_frames:
                        break
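server_inference_data is not shown in these snippets. A purely hypothetical sketch of a payload built from the arguments passed here, reusing the per-face fields serialized in Example #4 (all field names are illustrative):

def server_inference_data(width, height, faces, joy_score):
    # Hypothetical payload shape for the streaming overlay.
    return {
        'frame_size': (width, height),
        'joy_score': joy_score,
        'faces': [{'bounding_box': face.bounding_box,
                   'face_score': face.face_score,
                   'joy_score': face.joy_score} for face in faces],
    }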
Example #23
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                print(faces)
                annotator.clear()
                for index, face in enumerate(faces):

                    sio.emit('movement', {
                        'index': index,
                        'score': face.face_score
                    })

                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
Example #24
def main():
    parser = argparse.ArgumentParser('Face detection using raspivid.')
    parser.add_argument('--num_frames', '-n', type=int, default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with Process(raspivid_cmd(sensor_mode=4)), \
         CameraInference(face_detection.model()) as inference:
        for result in inference.run(args.num_frames):
            faces = face_detection.get_faces(result)
            print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                (inference.count, inference.rate, len(faces), avg_joy_score(faces)))
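raspivid_cmd is not shown. A hypothetical version that launches raspivid in the requested sensor mode and keeps it running until killed (the exact flags are assumptions):

def raspivid_cmd(sensor_mode):
    # -md forces the sensor mode, -t 0 runs indefinitely, -n disables preview.
    return ['raspivid', '-md', str(sensor_mode), '-t', '0', '-n']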
Example #25
def main():
    with PiCamera() as camera:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture('faces.jpg')
                    break

        # Stop preview
        camera.stop_preview()
Example #26
    def testFaceDetection(self):
        with TestImage('faces.jpg') as image:
            with ImageInference(face_detection.model()) as inference:
                faces = face_detection.get_faces(inference.run(image))
                self.assertEqual(2, len(faces))

                face0 = faces[0]
                self.assertAlmostEqual(face0.face_score, 1.0, delta=0.001)
                self.assertAlmostEqual(face0.joy_score, 0.969, delta=0.001)
                self.assertEqual((812.0, 44.0, 1000.0, 1000.0), face0.bounding_box)

                face1 = faces[1]
                self.assertAlmostEqual(face1.face_score, 0.884, delta=0.001)
                self.assertAlmostEqual(face1.joy_score, 0.073, delta=0.001)
                self.assertEqual((748.0, 1063.0, 496.0, 496.0), face1.bounding_box)
Example #27
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--input', '-i', dest='input', required=True)
  parser.add_argument('--output', '-o', dest='output')
  args = parser.parse_args()

  with ImageInference(face_detection.model()) as inference:
    image = Image.open(args.input)
    draw = ImageDraw.Draw(image)
    for i, face in enumerate(face_detection.get_faces(inference.run(image))):
      print('Face #%d: %s' % (i, str(face)))
      x, y, width, height = face.bounding_box
      draw.rectangle((x, y, x + width, y + height), outline='red')
    if args.output:
      image.save(args.output)
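A hypothetical invocation, assuming the snippet is saved as face_detection_image.py:

python3 face_detection_image.py --input faces.jpg --output faces_annotated.jpg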
Example #28
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', dest='input', required=True)
    parser.add_argument('--output', '-o', dest='output')
    args = parser.parse_args()

    with ImageInference(face_detection.model()) as inference:
        image = Image.open(args.input)
        draw = ImageDraw.Draw(image)
        faces = face_detection.get_faces(inference.run(image))
        for i, face in enumerate(faces):
            print('Face #%d: %s' % (i, face))
            x, y, width, height = face.bounding_box
            draw.rectangle((x, y, x + width, y + height), outline='red')
        if args.output:
            image.save(args.output)
Example #29
def main():
    parser = argparse.ArgumentParser('Face detection using raspivid.')
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with Process(raspivid_cmd(sensor_mode=4)), \
         CameraInference(face_detection.model()) as inference:
        for result in inference.run(args.num_frames):
            faces = face_detection.get_faces(result)
            print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                  (inference.count, inference.rate, len(faces),
                   avg_joy_score(faces)))
Example #30
def facedetector():
    """Face detection camera inference example."""
    global currentState
    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale each
        # component into annotator coordinates; unlike the other examples,
        # the box keeps the (x, y, width, height) form that checkSquat expects.
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * width,
                    scale_y * height)

        def checkSquat(bounding_box):
            x, y, width, height = bounding_box
            # Vertical center of the face box: a larger value means the face
            # sits lower in the frame. THRESHOLD_Y is a module-level constant.
            avgHeight = y + height / 2
            if avgHeight > THRESHOLD_Y:
                return 2
            else:
                return 1

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(None):
                faces = face_detection.get_faces(result)
                checkedFaces = []
                for face in faces:
                    checkedFaces.append(
                        checkSquat(transform(face.bounding_box)))

                if len(checkedFaces) == 0:
                    currentState = 1
                elif (2 in checkedFaces):
                    currentState = 2
                else:
                    currentState = 0
Example #31
    def test_load_unload(self):
        with InferenceEngine() as engine:
            state = engine.get_inference_state()
            self.assertFalse(state.loaded_models)
            self.assertFalse(state.processing_models)

            model_name = engine.load_model(fd.model())
            state = engine.get_inference_state()
            self.assertEqual(set(state.loaded_models), {model_name})
            self.assertFalse(state.processing_models)

            with self.assertRaises(InferenceException):
                engine.unload_model('invalid_model_name')

            engine.unload_model(model_name)
            state = engine.get_inference_state()
            self.assertFalse(state.loaded_models)
            self.assertFalse(state.processing_models)
Example #33
def main():
    with PiCamera(resolution=(1640, 922)) as camera:
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    print("face detected!")
                    h264_file_path = generate_filename(datetime.datetime.now())

                    leds = Leds()
                    with PrivacyLed(leds):
                        camera.start_recording(h264_file_path, format='h264')
                        sleep(5)
                        camera.stop_recording()
                    leds.reset()

                    output_file_path = h264_to_mp4(h264_file_path)

                    upload_video_to_slack(output_file_path, SLACK_TOKEN,
                                          SLACK_CHANNEL_ID)
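generate_filename, h264_to_mp4, and upload_video_to_slack are project helpers not shown here. A sketch of what h264_to_mp4 could do, assuming ffmpeg is installed (the helper body and flags are assumptions):

import subprocess

def h264_to_mp4(h264_file_path):
    # Re-wrap the raw H.264 stream in an MP4 container without re-encoding.
    mp4_path = h264_file_path.rsplit('.', 1)[0] + '.mp4'
    subprocess.run(['ffmpeg', '-y', '-framerate', '30', '-i', h264_file_path,
                    '-c', 'copy', mp4_path], check=True)
    return mp4_path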
Example #34
def main():
    global lastSaveTime
    with PiCamera() as camera:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(
                        result)) >= 1 and time.time() - lastSaveTime > 60:
                    print('yay, we got an image')
                    camera.capture('image.jpg')
                    discordcode.send_image('image.jpg')
                    lastSaveTime = time.time()

        # Stop preview
        camera.stop_preview()
Example #35
def main():
    print("Play tune")
    player = TonePlayer(gpio=BUZZER_GPIO_PIN, bpm=10)
    player.play(*START_SOUND)

    print("Initialize robot")
    robot = Robot()
    robot.resetPosition()

    print("Switch on leds")
    with Leds() as leds:
        leds.update(Leds.rgb_on(Color.GREEN))

        print("Switch on camera")
        with PiCamera(sensor_mode=4,
                      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                      framerate=30) as camera:

            if ENABLE_DISPLAY:
                camera.start_preview()
                annotator = Annotator(camera, dimensions=(320, 240))
            else:
                annotator = None

            print("Load model")
            with CameraInference(face_detection.model()) as inference:

                loop(inference=inference,
                     robot=robot,
                     annotator=annotator,
                     leds=leds)

            if ENABLE_DISPLAY:
                camera.stop_preview()

    player.play(*STOP_SOUND)

    # Give the user time to remove their finger.
    sleep(3)
    robot.resetPosition()
Example #36
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box), fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                    (inference.count, inference.rate, len(faces), avg_joy_score(faces)))

        camera.stop_preview()
Example #37
def run_inference(num_frames, on_loaded):
    """Yields (faces, (frame_width, frame_height)) tuples."""
    with CameraInference(face_detection.model()) as inference:
        on_loaded()
        for result in inference.run(num_frames):
            yield face_detection.get_faces(result), (result.width, result.height)
Example #38
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution that inference runs on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                #fb.delete('/', 'joy')
                time_interval = 1
                camera_id = 1
                t_ref = time.localtime()
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        t_begin = datetime.now()
                        faces = face_detection.get_faces(result)
                        num_faces = len(faces)
                        print("face num" + str(num_faces))
                        # for face in faces:
                        #     print(face.joy_score)
                        photographer.update_faces(faces)
                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)
                        print(joy_score)
                        t0 = time.localtime()
                        time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec) + " | " + str(
                                t0.tm_mon) + "/" + str(t0.tm_mday) + "/" + str(
                                    t0.tm_year)
                        # The original chained assignment here overwrote
                        # time_stamp, dropping the date portion built above.
                        time_up = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec)
                        elapsed_time = int(t0.tm_sec) - int(t_ref.tm_sec)

                        # print("Elapsed time: " + str(elapsed_time_milli))
                        if elapsed_time < 0:
                            elapsed_time += 60
                        if elapsed_time >= time_interval:
                            if joy_score != 0:
                                fb.put(
                                    '/joy_data/' + str(t0.tm_year) + "/" +
                                    str(t0.tm_mon) + "/" + str(t0.tm_mday) +
                                    "/" + str(t0.tm_hour) + "/" +
                                    str(t0.tm_min) + "/", time.ctime(), {
                                        'time': time_stamp,
                                        'cam_id': camera_id,
                                        'num_faces': num_faces,
                                        'joy_score': joy_score
                                    })
                            t_ref = t0
                        print(time_stamp)
                        #if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        #    player.play(JOY_SOUND)
                        #elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        #    player.play(SAD_SOUND)

                        prev_joy_score = joy_score
                        t_end = datetime.now()
                        elapsed_milli = t_end - t_begin
                        print("Elapsed time: ")
                        print(int(elapsed_milli.total_seconds() * 1000))
                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example #39
 def test_image_inference_raw(self):
     with ImageInference(fd.model()) as inference, TestImage('faces.jpg') as image:
         fd.get_faces(inference.run(image))
Example #40
 def test_image_inference_jpeg(self):
     with ImageInference(fd.model()) as inference, TestImageFile('faces.jpg') as f:
         fd.get_faces(inference.run(f.read()))
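Note: Examples #39 and #40 differ only in input type. TestImage yields a decoded image object that inference.run accepts directly, while TestImageFile yields an open file whose raw JPEG bytes (f.read()) are passed instead; as this pair of tests shows, ImageInference.run handles both forms.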