Example #1
    def run(self):
        while self._running:
            try:
                self.logger.debug('loading inference model ...')
                with CameraInference(face_detection.model()) as inference:
                    self.logger.debug('running inference ...')
                    for result in inference.run():
                        faces = face_detection.get_faces(result)
                        if faces:
                            self.logger.debug('found {} faces'.format(
                                len(faces)))
                        outgoing_signals = []
                        for face in faces:
                            signal_dict = {
                                'bounding_box': face.bounding_box,
                                'face_score': face.face_score,
                                'joy_score': face.joy_score,
                            }
                            outgoing_signal = Signal(signal_dict)
                            outgoing_signals.append(outgoing_signal)
                        if not self._running:
                            break
                        self.notify_signals(outgoing_signals)
            except Exception:
                self.logger.exception('failed to get inference result!')
                self.reset_camera()
        self.release_camera()
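This run loop assumes the enclosing block defines reset_camera() and release_camera() helpers. A minimal sketch of what they might look like, assuming the block keeps a picamera.PiCamera instance in self._camera (both method bodies are reconstructions, not the original source):

    def reset_camera(self):
        # Recreate the camera so the next loop iteration can restart
        # inference on a fresh stream.
        self.release_camera()
        self._camera = PiCamera(sensor_mode=4, resolution=(1640, 1232))

    def release_camera(self):
        if self._camera is not None:
            self._camera.close()
            self._camera = None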
Example #2
def facedetect():
    with PiCamera() as camera, Leds() as leds:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()
        leds.update(Leds.privacy_on())

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture(
                        'faces_' + str(datetime.datetime.now()) + '.jpg')
                    # 'led' is assumed to be a gpiozero LED created at module
                    # scope (see the sketch after this example).
                    print(led.is_active)
                    led.on()
                    print(led.is_active)
                    led.off()
                    print(led.is_active)
                    break

        # Stop preview
        camera.stop_preview()
        leds.update(Leds.privacy_off())
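The snippet above drives a led object that it never creates; it is presumably a gpiozero LED wired to one of the Vision Bonnet's GPIO pins. A sketch of the setup this code assumes (the pin choice is hypothetical):

from gpiozero import LED
from aiy.pins import PIN_A

led = LED(PIN_A)  # The external LED toggled in the loop above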
Example #3
    def iterator(self):
        for inference_result in self._inference.run():
            aiy_faces = face_detection.get_faces(inference_result)
            if not aiy_faces:
                yield None
                continue

            # Inference runs on the Vision Bonnet, which grabs images from the
            # camera directly. We capture the image separately on the Raspberry
            # Pi so dlib can be used for face recognition.
            image = MyImage.capture(self._camera, use_video_port=True)

            inference_size = Size(w=inference_result.width,
                                  h=inference_result.height)
            image_size = Size(w=image.width, h=image.height)

            yield InputOutput(
                image=image,
                faces=[
                    Face(
                        image_region=_get_image_region(aiy_face.bounding_box,
                                                       inference_size,
                                                       image_size),
                        face_score=aiy_face.face_score,
                        joy_score=aiy_face.joy_score,
                        person=self._person,
                    ) for aiy_face in aiy_faces
                ],
            )
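The iterator relies on a _get_image_region helper that rescales a bounding box from the inference resolution to the resolution of the separately captured image. A minimal sketch, assuming the region is an (x1, y1, x2, y2) tuple in image coordinates (the original may use a different return type):

def _get_image_region(bounding_box, inference_size, image_size):
    x, y, w, h = bounding_box
    scale_x = image_size.w / inference_size.w
    scale_y = image_size.h / inference_size.h
    # Scale the (x, y, w, h) box and convert it to (x1, y1, x2, y2).
    return (int(x * scale_x), int(y * scale_y),
            int((x + w) * scale_x), int((y + h) * scale_y))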
Example #4
def run_inference(num_frames, on_loaded):
    """Yields (faces, (frame_width, frame_height)) tuples."""
    with CameraInference(face_detection.model()) as inference:
        on_loaded()
        for result in inference.run(num_frames):
            yield face_detection.get_faces(result), (result.width,
                                                     result.height)
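Because run_inference is a generator, the caller just iterates over it. A usage sketch (the callback name is a placeholder):

def on_model_loaded():
    print('model loaded')

for faces, (width, height) in run_inference(100, on_model_loaded):
    print('%d face(s) in a %dx%d frame' % (len(faces), width, height))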
Example #5
def detect_face():
    with CameraInference(face_detection.model()) as camera_inference:
        counter = 1
        face_box = None  # Returned if inference ends before a stable box is found
        x_history, y_history, w_history, h_history = [], [], [], []
        for result in camera_inference.run():
            check_termination_trigger()
            faces = face_detection.get_faces(result)
            face = select_face(faces)
            if face:
                x, y, w, h = face.bounding_box
                x_err = error_update(x_history, x)
                y_err = error_update(y_history, y)
                w_err = error_update(w_history, w)
                h_err = error_update(h_history, h)

                if face_detection_is_stable(x_err,
                                            y_err,
                                            w_err,
                                            h_err,
                                            cutoff=0.03):
                    face_box = (int(sum(x_history) / len(x_history)),
                                int(sum(y_history) / len(y_history)),
                                int(sum(w_history) / len(w_history)),
                                int(sum(h_history) / len(h_history)))
                    break
                counter += 1
        return face_box
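detect_face() leans on helpers that are not shown here. Plausible sketches, assuming error_update appends to a short history and returns the newest sample's relative deviation, and select_face picks the most confident detection (all three are reconstructions):

def select_face(faces):
    # Most confident detection, or None when no face is visible.
    return max(faces, key=lambda face: face.face_score, default=None)

def error_update(history, value, max_len=10):
    history.append(value)
    if len(history) > max_len:
        history.pop(0)
    mean = sum(history) / len(history)
    # Relative deviation of the newest sample from the running mean.
    return abs(value - mean) / mean if mean else 1.0

def face_detection_is_stable(x_err, y_err, w_err, h_err, cutoff):
    return all(err < cutoff for err in (x_err, y_err, w_err, h_err))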
Example #6
def detect(num_frames):
    """Face detection camera inference example"""

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    x, y, width, height = face.bounding_box
                    annotator.bounding_box(
                        (scale_x * x, scale_y * y, scale_x *
                         (x + width), scale_y * (y + height)),
                        fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
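This example (and several below) prints avg_joy_score(faces) without defining it. In the AIY face-detection demos it is a small helper along these lines:

def avg_joy_score(faces):
    if faces:
        return sum(face.joy_score for face in faces) / len(faces)
    return 0.0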
Example #7
    def _run_detector(self):
        with PiCamera() as camera, PrivacyLED():
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            camera.sensor_mode = 4
            camera.resolution = (1640, 1232)
            camera.framerate = 15
            # Blend the preview layer with the alpha value from the flags.
            camera.start_preview(alpha=self._preview_alpha)
            with CameraInference(face_detection.model()) as inference:
                self._play_sound(MODEL_LOAD_SOUND)
                self._animator.start()
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    # Calculate joy score as an average for all detected faces.
                    joy_score = 0.0
                    if faces:
                        joy_score = sum([face.joy_score for face in faces]) / len(faces)

                    # Append new joy score to the window and calculate mean value.
                    self._joy_score_window.append(joy_score)
                    self.joy_score = sum(self._joy_score_window) / len(
                        self._joy_score_window)
                    if self._num_frames == i or not self._run_event.is_set():
                        break
Example #8
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                annotator.update()
                print('Iteration #%d: num_faces=%d' % (i, len(faces)))
                # Write the current face count to a file.
                with open('../wwww/check_faces.txt', 'w') as f:
                    f.write(str(len(faces)))

        camera.stop_preview()
Example #9
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        faces = face_detection.get_faces(result)
                        photographer.update_faces(faces)

                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)

                        if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                            player.play(JOY_SOUND)
                        elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                            player.play(SAD_SOUND)

                        prev_joy_score = joy_score

                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
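The joy-score smoothing here (and in Examples #13, #19, and #30) goes through a MovingAverage class whose definition is omitted; average_joy_score is the per-frame mean, as sketched after Example #6. A deque-based sketch that matches how MovingAverage is called, with next() feeding one sample and returning the current mean:

import collections

class MovingAverage:
    def __init__(self, size):
        self._window = collections.deque(maxlen=size)

    def next(self, value):
        self._window.append(value)
        return sum(self._window) / len(self._window)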
Example #10
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    leds = Leds()
    leds.reset()
    leds.update(Leds.privacy_on())

    noCustomerDelay = 0

    with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera:
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    noCustomerDelay = 0
                    leds.update(Leds.rgb_on(GREEN))
                    camera.capture('faces.jpg')

                    faces = GetFaceId('faces.jpg')
                    print(faces)
                    if len(faces) > 0:
                        result = GetUserId(faces[0])
                        print(result)

                        highestScore = 0
                        userId = ""
                        for face in result:
                            for candidate in face['candidates']:
                                if highestScore < candidate['confidence']:
                                    # Track the best-scoring candidate.
                                    highestScore = candidate['confidence']
                                    userId = candidate['personId']

                        InfoVendingMachine("10", userId)

                        print(userId)
                else:
                    if noCustomerDelay >= 30:
                        leds.update(Leds.rgb_on(WHITE))
                        InfoVendingMachine("10", '')
                        noCustomerDelay = 0
                    else:
                        noCustomerDelay += 1


        camera.stop_preview()

    leds.reset()
Example #11
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # pool = [ImageProcessor() for i in range(4)]

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()
        # time.sleep(2)

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        # annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            # camera.capture_sequence(streams(), use_video_port=True)
            # print("after capture_sequence")
            print(args.num_frames)
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                # annotator.clear()
                # for face in faces:
                #     annotator.bounding_box(transform(face.bounding_box), fill=0)
                # annotator.update()
                if len(faces) > 0:
                    # start to identify the person
                    print("Has Customer")
                    hasCustomer = True
                else:
                    print("No Customer")
                    hasCustomer = False

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
Example #12
    def test_camera_inference(self):
        with PiCamera(sensor_mode=4):
            with CameraInference(fd.model()) as inference:
                state = inference.engine.get_inference_state()
                self.assertEqual(len(state.loaded_models), 1)
                self.assertEqual(len(state.processing_models), 1)

                results = [fd.get_faces(result) for result in inference.run(10)]
                self.assertEqual(len(results), 10)
Example #13
    def run(self, num_frames, preview_alpha, image_format, image_folder, enable_streaming):
        logger.info('Starting...')
        leds = Leds()

        with contextlib.ExitStack() as stack:
            player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
            photographer = stack.enter_context(Photographer(image_format, image_folder))
            animator = stack.enter_context(Animator(leds))
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            # Use half of that for video streaming (820x616).
            camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616)))
            stack.enter_context(PrivacyLed(leds))

            server = None
            if enable_streaming:
                server = stack.enter_context(StreamingServer(camera))
                server.run()

            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(camera)

            if preview_alpha > 0:
                camera.start_preview(alpha=preview_alpha)

            button = Button(BUTTON_GPIO)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)

                    joy_score = joy_score_moving_average.next(average_joy_score(faces))
                    animator.update_joy_score(joy_score)
                    if server:
                        data = server_inference_data(result.width, result.height, faces, joy_score)
                        server.send_inference_data(data)

                    if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        player.play(JOY_SOUND)
                    elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        player.play(SAD_SOUND)

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == num_frames:
                        break
Example #14
    def test_camera_inference(self):
        with PiCamera(sensor_mode=4):
            with CameraInference(fd.model()) as inference:
                state = inference.engine.get_inference_state()
                self.assertEqual(len(state.loaded_models), 1)
                self.assertEqual(len(state.processing_models), 1)

                results = [
                    fd.get_faces(result) for result in inference.run(10)
                ]
                self.assertEqual(len(results), 10)
Example #15
    def testFaceDetectionWithParams(self):
        with TestImage('faces.jpg') as image:
            with ImageInference(face_detection.model()) as inference:
                params = {'max_face_size': 500}
                faces = face_detection.get_faces(inference.run(image, params))
                self.assertEqual(1, len(faces))

                face0 = faces[0]
                self.assertAlmostEqual(face0.face_score, 0.884, delta=0.001)
                self.assertAlmostEqual(face0.joy_score, 0.073, delta=0.001)
                self.assertEqual((748.0, 1063.0, 496.0, 496.0), face0.bounding_box)
Example #16
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                print(faces)
                annotator.clear()
                for index, face in enumerate(faces):

                    sio.emit('movement', {
                        'index': index,
                        'score': face.face_score
                    })

                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                      (inference.count, inference.rate, len(faces),
                       avg_joy_score(faces)))

        camera.stop_preview()
Example #17
def main():
    with PiCamera() as camera:
        camera.resolution = (1640, 922)
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture('faces.jpg')
                    break

        camera.stop_preview()
Example #18
def main():
    parser = argparse.ArgumentParser('Face detection using raspivid.')
    parser.add_argument('--num_frames', '-n', type=int, default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with Process(raspivid_cmd(sensor_mode=4)), \
         CameraInference(face_detection.model()) as inference:
        for result in inference.run(args.num_frames):
            faces = face_detection.get_faces(result)
            print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                (inference.count, inference.rate, len(faces), avg_joy_score(faces)))
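This variant feeds the Vision Bonnet from raspivid instead of picamera; Process and raspivid_cmd come from the surrounding module. A hedged sketch of the command builder (the exact flag set is an assumption):

def raspivid_cmd(sensor_mode):
    # Stream indefinitely with no preview window so inference keeps
    # receiving frames while the terminal stays readable.
    return ['raspivid', '--mode', str(sensor_mode), '--timeout', '0',
            '--nopreview']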
Example #19
    def _run(self):
        logger.info('Starting...')
        leds = Leds()

        with contextlib.ExitStack() as stack:
            player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
            photographer = stack.enter_context(
                Photographer(self.args.image_format, self.args.image_folder))
            animator = stack.enter_context(Animator(leds))
            stack.enter_context(PrivacyLed(leds))

            server = None
            if self.args.enable_streaming:
                server = stack.enter_context(StreamingServer(self.camera))
                server.run()

            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(self.camera)

            button = Button(BUTTON_GPIO)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)
                    avg_joy_score = average_joy_score(faces)
                    joy_score = joy_score_moving_average.next(avg_joy_score)
                    animator.update_joy_score(joy_score)
                    if server:
                        data = server_inference_data(result.width,
                                                     result.height, faces,
                                                     joy_score)
                        server.send_inference_data(data)
                    if avg_joy_score > JOY_SCORE_MIN:
                        photographer.shoot(self.camera)

                    # if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                    #     player.play(JOY_SOUND)
                    # elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                    #     player.play(SAD_SOUND)

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == self.args.num_frames:
                        break
Example #20
    def testFaceDetection(self):
        with TestImage('faces.jpg') as image:
            with ImageInference(face_detection.model()) as inference:
                faces = face_detection.get_faces(inference.run(image))
                self.assertEqual(2, len(faces))

                face0 = faces[0]
                self.assertAlmostEqual(face0.face_score, 1.0, delta=0.001)
                self.assertAlmostEqual(face0.joy_score, 0.969, delta=0.001)
                self.assertEqual((812.0, 44.0, 1000.0, 1000.0), face0.bounding_box)

                face1 = faces[1]
                self.assertAlmostEqual(face1.face_score, 0.884, delta=0.001)
                self.assertAlmostEqual(face1.joy_score, 0.073, delta=0.001)
                self.assertEqual((748.0, 1063.0, 496.0, 496.0), face1.bounding_box)
Example #21
def main():
    with PiCamera() as camera:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture('faces.jpg')
                    break

        # Stop preview
        camera.stop_preview()
Example #22
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--input', '-i', dest='input', required=True)
  parser.add_argument('--output', '-o', dest='output')
  args = parser.parse_args()

  with ImageInference(face_detection.model()) as inference:
    image = Image.open(args.input)
    draw = ImageDraw.Draw(image)
    for i, face in enumerate(face_detection.get_faces(inference.run(image))):
      print('Face #%d: %s' % (i, str(face)))
      x, y, width, height = face.bounding_box
      draw.rectangle((x, y, x + width, y + height), outline='red')
    if args.output:
      image.save(args.output)
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', dest='input', required=True)
    parser.add_argument('--output', '-o', dest='output')
    args = parser.parse_args()

    with ImageInference(face_detection.model()) as inference:
        image = Image.open(args.input)
        draw = ImageDraw.Draw(image)
        faces = face_detection.get_faces(inference.run(image))
        for i, face in enumerate(faces):
            print('Face #%d: %s' % (i, face))
            x, y, width, height = face.bounding_box
            draw.rectangle((x, y, x + width, y + height), outline='red')
        if args.output:
            image.save(args.output)
Example #24
def loop(inference, robot, annotator, leds):
    for inference_result in inference.run(None):

        # Get all the faces.
        faces = face_detection.get_faces(inference_result)

        # Pick the face with the highest score.
        stronger_face = selectFaceWithHigherScore(faces)

        if stronger_face is None:
            print("No face detected")
            continue

        # Angular difference between the face and the center of the camera.
        face_x_center = (stronger_face.bounding_box[0] +
                         stronger_face.bounding_box[2] / 2)
        face_y_center = (stronger_face.bounding_box[1] +
                         stronger_face.bounding_box[3] / 2)

        face_x_normalized = face_x_center / CAMERA_WIDTH
        face_y_normalized = face_y_center / CAMERA_HEIGHT

        face_x_angle = (face_x_normalized * 2 - 1) * (CAMERA_FOV_WIDTH / 2)
        # The screen and motor axis are reversed.
        face_y_angle = -(face_y_normalized * 2 - 1) * (CAMERA_FOV_HEIGHT / 2)

        print("Face delta: %s %s" % (face_x_angle, face_y_angle))
        robot.deltaYaw(controller(face_x_angle))
        robot.deltaPitch(controller(face_y_angle))

        leds.update(
            Leds.rgb_on((255 * (1 - stronger_face.joy_score),
                         255 * stronger_face.joy_score, 0)))

        if SLEEP > 0:
            sleep(SLEEP)

        if PRINT_LOGS:
            for face_idx, face in enumerate(faces):
                print("Face %i -> %s" % (face_idx, face))

        if ENABLE_DISPLAY:
            annotator.clear()
            for face in faces:
                annotator.bounding_box(transform_annotator(face.bounding_box),
                                       fill=0)
            annotator.update()
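The yaw and pitch deltas pass through a controller() function that is not shown. A minimal proportional-controller sketch (the gain and clamp are illustrative values, not the original tuning):

def controller(angle_error, gain=0.4, limit=10.0):
    # Map an angular error to a bounded motor delta.
    delta = gain * angle_error
    return max(-limit, min(limit, delta))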
Example #25
def main():
    parser = argparse.ArgumentParser('Face detection using raspivid.')
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with Process(raspivid_cmd(sensor_mode=4)), \
         CameraInference(face_detection.model()) as inference:
        for result in inference.run(args.num_frames):
            faces = face_detection.get_faces(result)
            print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                  (inference.count, inference.rate, len(faces),
                   avg_joy_score(faces)))
Example #26
def facedetector():
    """Face detection camera inference example."""
    global currentState
    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale them
        # into the annotator's coordinate space, keeping the same form.
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * width,
                    scale_y * height)

        def checkSquat(bounding_box):
            x, y, width, height = bounding_box
            # Vertical center of the face box; larger values sit lower in frame.
            avgHeight = y + height / 2
            if avgHeight > THRESHOLD_Y:
                return 2
            else:
                return 1

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(None):
                faces = face_detection.get_faces(result)
                checkedFaces = []
                for face in faces:
                    checkedFaces.append(
                        checkSquat(transform(face.bounding_box)))

                if len(checkedFaces) == 0:
                    currentState = 1
                elif 2 in checkedFaces:
                    currentState = 2
                else:
                    currentState = 0
Example #27
def main():
    with PiCamera(resolution=(1640, 922)) as camera:
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    print("face detected!")
                    h264_file_path = generate_filename(datetime.datetime.now())

                    leds = Leds()
                    with PrivacyLed(leds):
                        camera.start_recording(h264_file_path, format='h264')
                        sleep(5)
                        camera.stop_recording()
                    leds.reset()

                    output_file_path = h264_to_mp4(h264_file_path)

                    upload_video_to_slack(output_file_path, SLACK_TOKEN,
                                          SLACK_CHANNEL_ID)
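generate_filename, h264_to_mp4, and upload_video_to_slack are defined elsewhere in this project. For the container conversion, a subprocess-based sketch using ffmpeg (assuming ffmpeg is installed; the original may use a different tool or flags):

import subprocess

def h264_to_mp4(h264_file_path):
    output_file_path = h264_file_path.replace('.h264', '.mp4')
    # Copy the H.264 stream into an MP4 container without re-encoding.
    subprocess.run(['ffmpeg', '-y', '-framerate', '30', '-i', h264_file_path,
                    '-c', 'copy', output_file_path], check=True)
    return output_file_path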
Example #28
def main():
    global lastSaveTime
    with PiCamera() as camera:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                faces = face_detection.get_faces(result)
                if len(faces) >= 1 and time.time() - lastSaveTime > 60:
                    print('yay, we got an image')
                    camera.capture('image.jpg')
                    discordcode.send_image('image.jpg')
                    lastSaveTime = time.time()

        # Stop preview
        camera.stop_preview()
Example #29
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box), fill=0)
                annotator.update()

                print('#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                    (inference.count, inference.rate, len(faces), avg_joy_score(faces)))

        camera.stop_preview()
Example #30
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                #fb.delete('/', 'joy')
                time_interval = 1
                camera_id = 1
                t_ref = time.localtime()
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        t_begin = datetime.now()
                        faces = face_detection.get_faces(result)
                        num_faces = len(faces)
                        print("face num" + str(num_faces))
                        # for face in faces:
                        #     print(face.joy_score)
                        photographer.update_faces(faces)
                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)
                        print(joy_score)
                        t0 = time.localtime()
                        time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec) + " | " + str(
                                t0.tm_mon) + "/" + str(t0.tm_mday) + "/" + str(
                                    t0.tm_year)
                        time_up = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec)
                        elapsed_time = int(t0.tm_sec) - int(t_ref.tm_sec)

                        # print("Elapsed time: " + str(elapsed_time_milli))
                        if elapsed_time < 0:
                            elapsed_time += 60
                        if elapsed_time >= time_interval:
                            if joy_score != 0:
                                fb.put(
                                    '/joy_data/' + str(t0.tm_year) + "/" +
                                    str(t0.tm_mon) + "/" + str(t0.tm_mday) +
                                    "/" + str(t0.tm_hour) + "/" +
                                    str(t0.tm_min) + "/", time.ctime(), {
                                        'time': time_stamp,
                                        'cam_id': camera_id,
                                        'num_faces': num_faces,
                                        'joy_score': joy_score
                                    })
                            t_ref = t0
                        print(time_stamp)
                        #if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        #    player.play(JOY_SOUND)
                        #elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        #    player.play(SAD_SOUND)

                        prev_joy_score = joy_score
                        t_end = datetime.now()
                        elapsed_milli = t_end - t_begin
                        print("Elapsed time: %d ms" %
                              int(elapsed_milli.total_seconds() * 1000))
                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example #31
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--label',
        '-lbl',
        type=str,
        dest='label',
        required=True,
        help='Specifies the class (label) of training images (e.g. no_hangs).')

    parser.add_argument('--num_images',
                        '-nimg',
                        type=int,
                        dest='num_images',
                        default=10,
                        help='Sets the number of training images to make.')

    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Stage #1: Capture and store raw images
        # Create a folder to store raw images
        path_to_raw_img_folder = path_to_training_folder + 'raw/'
        if not os.path.exists(path_to_raw_img_folder):
            os.makedirs(path_to_raw_img_folder)
        time.sleep(2)

        # Create list to store hand boxes location for each image
        hand_boxes_locations = []

        with CameraInference(face_detection.model()) as inference:
            leds.update(Leds.rgb_on(RED))
            time.sleep(3)
            counter = 1
            start = time.time()

            for result in inference.run():
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if counter > args.num_images:
                        break
                    face_box = transform(face.bounding_box)
                    hands = hand_box(face_box)

                    # Capture raw image
                    img_name = path_to_raw_img_folder + 'img' + str(
                        counter) + '.jpg'
                    camera.capture(img_name)
                    time.sleep(0.2)

                    # Record position of hands
                    hand_boxes_locations.append([counter, hands])

                    print('Captured ', str(counter), " out of ",
                          str(args.num_images))
                    counter += 1
            print('Stage #1: It took', str(round(time.time() - start, 1)),
                  'sec to record', str(args.num_images), 'raw images')
        camera.stop_preview()

        # Stage #2: Crop training images from the raw ones and store them in class (label) subfolder
        leds.update(Leds.rgb_on(BLUE))
        start = time.time()
        for i, entry in enumerate(hand_boxes_locations):
            img_number = entry[0]
            hands = entry[1]
            raw_img_name = path_to_raw_img_folder + 'img' + str(
                img_number) + '.jpg'
            if os.path.isfile(raw_img_name):
                raw_image = Image.open(raw_img_name)
                crop_and_store_images(args.label, hands, raw_image)
                raw_image.close()
                time.sleep(0.5)
                os.remove(raw_img_name)
            print('Processed ', str(i + 1), " out of ", str(args.num_images))
        print('Stage #2: It took ', str(round(time.time() - start, 1)),
              'sec to process', str(args.num_images), 'images')
        time.sleep(3)
        # Delete empty folder for raw images
        if not os.listdir(path_to_raw_img_folder):
            os.rmdir(path_to_raw_img_folder)
        leds.update(Leds.rgb_off())
Example #32
    def test_image_inference_jpeg(self):
        with ImageInference(
                fd.model()) as inference, TestImageFile('faces.jpg') as f:
            fd.get_faces(inference.run(f.read()))
Example #33
    def test_image_inference_raw(self):
        with ImageInference(
                fd.model()) as inference, TestImage('faces.jpg') as image:
            fd.get_faces(inference.run(image))
Example #34
def main():

    #### Setup stepper motor ####
    # create a default object, no changes to I2C address or frequency
    mh = Adafruit_MotorHAT(addr=0x60)

    # recommended for auto-disabling motors on shutdown!
    def turnOffMotors():
        mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
        mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
        mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
        mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)

    atexit.register(turnOffMotors)

    myStepper = mh.getStepper(200, 1)  # 200 steps/rev, motor port #1
    myStepper.setSpeed(30)  # 30 RPM

    #### setup camera ####
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        # Comment out start_preview() here if you need to read logs in the
        # terminal while the demo runs.
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                    # Print the (x, y) location of face
                    print('X = %d, Y = %d' %
                          (face.bounding_box[0], face.bounding_box[1]))

                    # Move stepper motor

                    if face.bounding_box[0] > 1640 / 2 and abs(
                            face.bounding_box[0] - 1640 / 2) > 200:
                        print("Double coil step - right")
                        myStepper.step(10, Adafruit_MotorHAT.FORWARD,
                                       Adafruit_MotorHAT.DOUBLE)
                    elif face.bounding_box[0] < 1640 / 2 and abs(
                            face.bounding_box[0] - 1640 / 2) > 300:
                        print("Double coil step - left")
                        myStepper.step(10, Adafruit_MotorHAT.BACKWARD,
                                       Adafruit_MotorHAT.DOUBLE)

                annotator.update()
                # print('Iteration #%d: num_faces=%d' % (i, len(faces)))

        camera.stop_preview()
Example #35
    def test_image_inference_raw(self):
        with ImageInference(fd.model()) as inference, TestImage('faces.jpg') as image:
            fd.get_faces(inference.run(image))
Example #36
    def test_image_inference_jpeg(self):
        with ImageInference(fd.model()) as inference, TestImageFile('faces.jpg') as f:
            fd.get_faces(inference.run(f.read()))
Example #37
def run_inference(num_frames, on_loaded):
    """Yields (faces, (frame_width, frame_height)) tuples."""
    with CameraInference(face_detection.model()) as inference:
        on_loaded()
        for result in inference.run(num_frames):
            yield face_detection.get_faces(result), (result.width, result.height)