Example #1
def __init__(self, gpio, bpm):
    # Wrap a TonePlayer configured for the given buzzer GPIO pin and tempo.
    super().__init__()
    self._toneplayer = TonePlayer(gpio, bpm)
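The fragment above is only an initializer; a minimal sketch of a host class it could belong to (the class name and the announce() hook are assumptions for illustration, not part of the original):

from aiy.toneplayer import TonePlayer

class CatDetectionAnnouncer:
    # Hypothetical host class for the __init__ fragment above.
    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)

    def announce(self):
        # Two-note beep, mirroring BEEP_SOUND in the next example.
        self._toneplayer.play('E6q', 'C6q')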
Example #2
import argparse

from picamera import PiCamera
from time import time, strftime

from aiy.vision.leds import Leds, PrivacyLed
from aiy.toneplayer import TonePlayer

from aiy.vision.inference import CameraInference
import aiy_cat_detection

# Sound setup
MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
BEEP_SOUND = ('E6q', 'C6q')
player = TonePlayer(gpio=22, bpm=30)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-f',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')

    parser.add_argument(
        '--num_pics',
        '-p',
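The two sound tuples defined above are played by unpacking them into TonePlayer.play(); a self-contained sketch using only values from this example (the points at which the sounds are triggered are an assumption):

from aiy.toneplayer import TonePlayer

MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
BEEP_SOUND = ('E6q', 'C6q')

player = TonePlayer(gpio=22, bpm=30)
player.play(*MODEL_LOAD_SOUND)  # chime once after the model loads
player.play(*BEEP_SOUND)        # short beep, e.g. per detected cat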
Example #3
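This snippet references several module-level names the listing omits. A hedged reconstruction follows; the module path for Leds/Color (the newer aiy.leds API, matching the Leds.rgb_on/Color.blend calls below), the values of LOAD_SOUND, LEFT_COLOR, focal_length, and real_face_width_inches, and the avg_joy_score helper are all assumptions, not from the original script:

import argparse

from picamera import PiCamera

from aiy.leds import Leds, Color
from aiy.toneplayer import TonePlayer
from aiy.vision.annotator import Annotator
from aiy.vision.inference import CameraInference
from aiy.vision.models import face_detection

BUZZER_GPIO = 22                    # matches the buzzer pin in Example #2
LOAD_SOUND = ('C6w', 'c6w', 'C6w')  # assumed to mirror MODEL_LOAD_SOUND above
LEFT_COLOR = Color.RED              # assumed; any Color constant works here
focal_length = 1320.0               # assumed focal length, in pixels
real_face_width_inches = 6.0        # assumed average human face width

def avg_joy_score(faces):
    # Mean joy_score across detected faces (0.0 when there are none).
    if faces:
        return sum(face.joy_score for face in faces) / len(faces)
    return 0.0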
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution that inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera, \
            Leds() as leds:
        leds.update(Leds.privacy_on())
        leds.update(Leds.rgb_on(Color.BLUE))
        camera.start_preview()
        tone_player = TonePlayer(BUZZER_GPIO, bpm=70)
        tone_player.play(*LOAD_SOUND)

        # Annotator renders in software, so use a smaller size and scale
        # results for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                    # Keep the last face's box for the logging below.
                    x, y, width, height = face.bounding_box

                annotator.update()

                if faces:
                    print(
                        '#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f, x=%.2f, y=%.2f, width=%.2f, height=%.2f'
                        % (inference.count, inference.rate, len(faces),
                           avg_joy_score(faces), x, y, width, height))
                    # Map the face's x position to a blend factor; 1200 px
                    # covers most of the 1640 px-wide frame.
                    if x > 0:
                        alpha = x / float(1200)
                    else:
                        alpha = .5
                    try:
                        leds.update(
                            Leds.rgb_on(
                                Color.blend(LEFT_COLOR, Color.GREEN, alpha)))
                    except Exception:
                        # Skip the LED update rather than crash the demo
                        # (e.g. if alpha falls outside the valid range).
                        pass
                    # Pinhole model: apparent width shrinks inversely with
                    # distance, so distance = focal_length * real_width / width.
                    distance = focal_length * real_face_width_inches / width
                    camera.annotate_text = '%d inches' % distance

        camera.stop_preview()
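The annotate_text line relies on the pinhole-camera relationship between apparent size and distance; a small worked sketch, reusing the assumed constants from the reconstruction above:

# Pinhole model: an object of known real width that appears `width`
# pixels wide lies at distance = focal_length * real_width / width.
focal_length = 1320.0            # assumed focal length, in pixels
real_face_width_inches = 6.0     # assumed average face width

for width in (110, 220, 440):    # apparent face width in pixels
    distance = focal_length * real_face_width_inches / width
    print('%3d px wide -> about %.0f inches away' % (width, distance))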