Example No. 1
    def _run(self):
        # Drive the button LED from the shared joy score until the done event is set.
        while not self._done.is_set():
            joy_score = self._joy_score.value
            if joy_score > 0:
                self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
            else:
                self._leds.update(Leds.rgb_off())
def capture():
    time.sleep(0.1)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    leds.update(Leds.rgb_on(GREEN))
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
Example No. 3
def record_start():
    # File name setting
    now = datetime.now(timezone('Asia/Seoul'))
    y = now.strftime('%Y-%m-%d_%H-%M-%S')
    filename = y + '.wav'

    # WAV recording parameters
    CHUNK = 1024  # 1024 frames per buffer
    FORMAT = pyaudio.paInt16  # 16-bit signed int
    CHANNELS = 1  # mono
    RATE = 16000  # 16 kHz
    #RECORD_SECONDS = 2
    WAVE_OUTPUT_FILENAME = filename

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    #Start recording
    print("Start to record the audio.")
    frames = []
    while GPIO.input(_GPIO_BUTTON) == 0:
        leds.update(Leds.rgb_on(MAGENTA))
        time.sleep(0.05)
        data = stream.read(CHUNK)
        frames.append(data)
        # record stream
        if GPIO.input(_GPIO_BUTTON) == 1:
            leds.update(Leds.rgb_off())
            break
    # stop stream
    stream.stop_stream()
    stream.close()
    # close PyAudio
    p.terminate()
    aiy.audio.say('Thank you')

    #Save recording and close
    wo = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wo.setnchannels(CHANNELS)
    wo.setsampwidth(p.get_sample_size(FORMAT))
    wo.setframerate(RATE)
    wo.writeframes(b''.join(frames))
    wo.close()

    #File length check
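    # Duration in seconds = number of frames / sample rate; clips shorter than 1 s are discarded.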
    f = sf.SoundFile(filename)
    file_len = len(f) / f.samplerate
    print(file_len)
    if file_len < 1:
        print("Recording is stopped.")
        leds.update(Leds.rgb_off())
        call(["rm", filename])
        main()
        return

    print("Recording is finished.")
    leds.update(Leds.rgb_off())
    print(WAVE_OUTPUT_FILENAME)
def shutdown():
    leds.update(Leds.privacy_off())
    camera.close()
    for i in range(3):
        leds.update(Leds.rgb_on(RED))
        time.sleep(0.2)
        leds.update(Leds.rgb_off())
        time.sleep(0.2)
    check_call(['sudo', 'poweroff'])
def capture():
    #print('button pressed')
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(0.5)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    print('captured {}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
Example No. 6
def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))

    try:
        while True:
            pass
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())
        pass
Example No. 7
    def _run(self):
        while not self._done.is_set():
            joy_score = self._joy_score.value
            if self._danger:
                self._leds.update(Leds.rgb_pattern(RED))
                self._danger = False
            elif joy_score > 0:
                self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
            else:
                self._leds.update(Leds.rgb_off())
def squirt(position):
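    # Map the normalized horizontal offset to a servo angle via arctangent;
    # the 2.9 and 2.5 factors look like calibration constants for this particular rig.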
    angle = math.degrees(math.atan((position / 2.9) * 2.5))
    bus.write_byte_data(slaveAddress, 2, angle)
    time.sleep(0.1)

    while bus.read_word_data(slaveAddress, 1) != 0:
        time.sleep(0.1)

    leds.update(Leds.rgb_on(RED))
    solenoid.on()
    time.sleep(1)
    solenoid.off()
    leds.update(Leds.rgb_pattern(RED))
Example No. 9
def main():

    status_ui = aiy.voicehat.get_status_ui()
    status_ui.status('starting')
    assistant = aiy.assistant.grpc.get_assistant()
    #button = aiy.voicehat.get_button()
    leds = Leds()
    with aiy.audio.get_recorder():
        while True:
            status_ui.status('ready')
            #print('Press the button and speak')
            #button.wait_for_press()
            leds.reset()
            status_ui.status('listening')
            print('Listening...')
            text, audio = assistant.recognize()
def main():

    print('Human detection')

    # Turn on the LED so we know the box is ready
    leds.pattern = Pattern.breathe(1000)
    leds.update(Leds.rgb_pattern(RED))

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4
        # Set camera to match
        camera.resolution = (1640, 1232)
        camera.framerate = 30

        with CameraInference(object_detection.model()) as inference:
            for result in inference.run():
                for i, obj in enumerate(
                        object_detection.get_objects(result, 0.3)):
                    if obj.score > 0.7 and obj.kind == 1:  # Person
                        print('Human detected #%d: %s' % (i, str(obj)))
                        x, y, width, height = obj.bounding_box
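                        # Normalized horizontal offset of the person's center from the
                        # middle of the 1640-pixel-wide frame (range roughly -0.5 to 0.5).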
                        squirt((x + (width / 2) - (1640 / 2)) / 1640)
Example No. 11
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        faces = face_detection.get_faces(result)
                        photographer.update_faces(faces)

                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)

                        if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                            player.play(JOY_SOUND)
                        elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                            player.play(SAD_SOUND)

                        prev_joy_score = joy_score

                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example No. 12
def main():
    with PiCamera(resolution=(1640, 922)) as camera:
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    print("face detected!")
                    h264_file_path = generate_filename(datetime.datetime.now())

                    leds = Leds()
                    with PrivacyLed(leds):
                        camera.start_recording(h264_file_path, format='h264')
                        sleep(5)
                        camera.stop_recording()
                    leds.reset()

                    output_file_path = h264_to_mp4(h264_file_path)

                    upload_video_to_slack(output_file_path, SLACK_TOKEN,
                                          SLACK_CHANNEL_ID)
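
generate_filename, h264_to_mp4 and upload_video_to_slack are not included in this snippet. A minimal sketch of what the first two might look like (the output directory and the MP4Box-based conversion are assumptions, not the original implementation):

import datetime
import os
import subprocess


def generate_filename(timestamp, folder='/home/pi/Videos'):
    # e.g. /home/pi/Videos/2018-01-01T12-00-00.h264
    return os.path.join(folder, timestamp.strftime('%Y-%m-%dT%H-%M-%S') + '.h264')


def h264_to_mp4(h264_file_path):
    # Wrap the raw H.264 stream in an MP4 container (requires the MP4Box tool from gpac).
    mp4_file_path = os.path.splitext(h264_file_path)[0] + '.mp4'
    subprocess.run(['MP4Box', '-add', h264_file_path, mp4_file_path], check=True)
    return mp4_file_path
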
Example No. 13
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            sleep(3.000)
            camera.capture(photo_filename)

        leds.update(Leds.rgb_on(GREEN))
        print("Dish classifier")
        with ImageInference(dish_classifier.model()) as inference:
            image = Image.open(photo_filename)
            classes = dish_classifier.get_classes(inference.run(image),
                                                  max_num_objects=5,
                                                  object_prob_threshold=0.1)
            dish_name = ''
            for i, (label, score) in enumerate(classes):
                dish_name += label + '/'
                print('Result %d: %s (prob=%f)' % (i, label, score))

        leds.update(Leds.rgb_on(BLUE))
        print("Post to slack")
        slack.files.upload(photo_filename,
                           channels='#food_diary',
                           title=dish_name)

        leds.update(Leds.rgb_on(WHITE))
Example No. 14
class MyLed:
    def __init__(self, led=(0x00, 0x00, 0x00)):
        self.logger = MyLogger(level=logging.INFO, get="LED")
        self.leds = Leds()
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("Init LED drivers")

    def set_color(self, led):
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("set LED colors")

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        led = (0x00, 0x00, 0x00)
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("exit LED drivers")
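
A minimal usage sketch for the MyLed wrapper above (assumes MyLogger and the AIY Leds import used by the class are available):

led = MyLed(led=(0x00, 0xFF, 0x00))   # start with a green LED
led.set_color((0x00, 0x00, 0xFF))     # switch to blue
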
Example No. 15
def main():
    parse = argparse.ArgumentParser()
    parse.add_argument('--image_format',
                       type=str,
                       dest='image_format',
                       default='jpeg',
                       choices=('jpeg', 'bmp', 'png'),
                       help='Format of captured images.')
    parse.add_argument('--image_folder',
                       type=str,
                       dest='image_folder',
                       default='~/Pictures',
                       help='Folder to save captured images')
    args = parse.parse_args()
    print('good')

    logger.info('starting')
    leds = Leds()
    photographer = Photographer(args.image_format, args.image_folder)
    button = Button(23)
    pir = MotionSensor(PIN_A)

    try:
        with PiCamera(sensor_mode=4,
                      resolution=(1640, 1232)) as camera, PrivacyLed(leds):
            while True:

                def take_photo():
                    logger.info('button pressed')
                    print('this is camera', camera)
                    photographer.shoot(camera)

                #button = Button(23)

                pir.when_motion = take_photo
                #pir.when_motion = take_photo
                button.when_pressed = take_photo
    finally:
        photographer.stop()
        photographer.join()
Example No. 16
class Led_module:
    from aiy.vision.leds import Leds
    from aiy.vision.leds import Pattern
    from aiy.vision.leds import PrivacyLed
    from aiy.vision.leds import RgbLeds

    RED = (0xFF, 0x00, 0x00)
    GREEN = (0x00, 0xFF, 0x00)
    YELLOW = (0xFF, 0xFF, 0x00)
    BLUE = (0x00, 0x00, 0xFF)
    PURPLE = (0xFF, 0x00, 0xFF)
    CYAN = (0x00, 0xFF, 0xFF)
    WHITE = (0xFF, 0xFF, 0xFF)
    OFF = (0x00, 0x00, 0x00)

    leds = Leds()

    def set_led_red(self):
        self.leds.update(self.Leds.rgb_on(self.RED))

    def set_led_yellow(self):
        self.leds.update(self.Leds.rgb_on(self.YELLOW))

    def set_led_green(self):
        self.leds.update(self.Leds.rgb_on(self.GREEN))

    def set_led_off(self):
        self.leds.update(self.Leds.rgb_off())

    def set_privacy_on(self):
        self.leds.update(self.Leds.privacy_on())

    def set_privacy_off(self):
        self.leds.update(self.Leds.privacy_off())

    def set_led_blink(self):
        self.leds.pattern = self.Pattern.blink(500)

    def reset_led(self):
        self.leds.reset()
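
A minimal usage sketch for the Led_module wrapper above (assuming it runs on an AIY Vision Kit where the aiy.vision.leds imports resolve):

led = Led_module()
led.set_led_green()   # solid green
led.set_led_blink()   # configure a 500 ms blink pattern for subsequent pattern updates
led.set_led_off()
led.reset_led()
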
Example No. 17
import math
import time

from aiy.vision.leds import Leds
from aiy.vision.leds import RgbLeds

RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
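    # Linear mix of two RGB colours: alpha=1.0 yields color_a, alpha=0.0 yields color_b.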
    return tuple([math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i]) for i in range(3)])


leds = Leds()

print('RGB: Solid RED for 1 second')
leds.update(Leds.rgb_on(RED))
time.sleep(1)

print('RGB: Solid GREEN for 1 second')
leds.update(Leds.rgb_on(GREEN))
time.sleep(1)

print('RGB: Solid YELLOW for 1 second')
leds.update(Leds.rgb_on(YELLOW))
time.sleep(1)

print('RGB: Solid BLUE for 1 second')
leds.update(Leds.rgb_on(BLUE))
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--label',
        '-lbl',
        type=str,
        dest='label',
        required=True,
        help='Specifies the class (label) of training images (e.g. no_hangs).')

    parser.add_argument('--num_images',
                        '-nimg',
                        type=int,
                        dest='num_images',
                        default=10,
                        help='Sets the number of training images to make.')

    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Stage #1: Capture and store raw images
        # Create folder to store raw images
        path_to_raw_img_folder = path_to_training_folder + 'raw/'
        if not os.path.exists(path_to_raw_img_folder):
            os.makedirs(path_to_raw_img_folder)
        time.sleep(2)

        # Create list to store hand boxes location for each image
        hand_boxes_locations = []

        with CameraInference(face_detection.model()) as inference:
            leds.update(Leds.rgb_on(RED))
            time.sleep(3)
            counter = 1
            start = time.time()

            for result in inference.run():
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if counter > args.num_images:
                        break
                    face_box = transform(face.bounding_box)
                    hands = hand_box(face_box)

                    # Capture raw image
                    img_name = path_to_raw_img_folder + 'img' + str(
                        counter) + '.jpg'
                    camera.capture(img_name)
                    time.sleep(0.2)

                    # Record position of hands
                    hand_boxes_locations.append([counter, hands])

                    print('Captured ', str(counter), " out of ",
                          str(args.num_images))
                    counter += 1
            print('Stage #1: It took', str(round(time.time() - start, 1)),
                  'sec to record', str(args.num_images), 'raw images')
        camera.stop_preview()

        # Stage #2: Crop training images from the raw ones and store them in class (label) subfolder
        leds.update(Leds.rgb_on(BLUE))
        start = time.time()
        for i, entry in enumerate(hand_boxes_locations):
            img_number = entry[0]
            hands = entry[1]
            raw_img_name = path_to_raw_img_folder + 'img' + str(
                img_number) + '.jpg'
            if os.path.isfile(raw_img_name):
                raw_image = Image.open(raw_img_name)
                crop_and_store_images(args.label, hands, raw_image)
                raw_image.close()
                time.sleep(0.5)
                os.remove(raw_img_name)
            print('Processed ', str(i + 1), " out of ", str(args.num_images))
        print('Stage #2: It took ', str(round(time.time() - start, 1)),
              'sec to process', str(args.num_images), 'images')
        time.sleep(3)
        # Delete empty folder for raw images
        if os.listdir(path_to_raw_img_folder) == []:
            os.rmdir(path_to_raw_img_folder)
        leds.update(Leds.rgb_off())
Example No. 19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-f',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')

    parser.add_argument(
        '--num_pics',
        '-p',
        type=int,
        dest='num_pics',
        default=-1,
        help='Sets the max number of pictures to take, otherwise runs forever.'
    )

    args = parser.parse_args()

    with PiCamera() as camera, PrivacyLed(Leds()):
        # See the Raspicam documentation for mode and framerate limits:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # Set to the highest resolution possible at 16:9 aspect ratio
        camera.sensor_mode = 4
        camera.resolution = (1640, 1232)
        camera.start_preview(fullscreen=True)

        with CameraInference(pikachu_object_detection.model()) as inference:
            print("Camera inference started")
            player.play(*MODEL_LOAD_SOUND)

            last_time = time()
            pics = 0
            save_pic = False
            enable_label = True

            # Annotator renders in software so use a smaller size and scale results
            # for increased performance.
            annotator = Annotator(camera, dimensions=(320, 240))
            scale_x = 320 / 1640
            scale_y = 240 / 1232

            # Incoming boxes are of the form (x, y, width, height). Scale and
            # transform to the form (x1, y1, x2, y2).
            def transform(bounding_box):
                x, y, width, height = bounding_box
                return (scale_x * x, scale_y * y, scale_x * (x + width),
                        scale_y * (y + height))

            def leftCorner(bounding_box):
                x, y, width, height = bounding_box
                return (scale_x * x, scale_y * y)

            def truncateFloat(value):
                return '%.3f' % (value)

            for f, result in enumerate(inference.run()):
                print("sono dentro al ciclo..")
                print(os.getcwd() + '/pikachu_detector.binaryproto')
                annotator.clear()
                detections = enumerate(
                    pikachu_object_detection.get_objects(result, 0.3))
                for i, obj in detections:
                    print("sono dentro al secondo ciclo..")
                    print('%s', obj.label)
                    annotator.bounding_box(transform(obj.bounding_box), fill=0)
                    if enable_label:
                        annotator.text(
                            leftCorner(obj.bounding_box),
                            obj.label + " - " + str(truncateFloat(obj.score)))
                    print('%s Object #%d: %s' %
                          (strftime("%Y-%m-%d-%H:%M:%S"), i, str(obj)))
                    x, y, width, height = obj.bounding_box
                    if obj.label == 'PIKACHU':
                        save_pic = True
                        #player.play(*BEEP_SOUND)

                # save the image if one or more Pikachu were detected

                if save_pic:
                    # save the clean image
                    #camera.capture("images/image_%s.jpg" % strftime("%Y%m%d-%H%M%S"))
                    pics += 1
                    save_pic = False

                #if f == args.num_frames or pics == args.num_pics:
                #    break

                now = time()
                duration = (now - last_time)
                annotator.update()

                # The Movidius chip runs at 35 ms per image.
                # Then there is some additional overhead for the object detector to
                # interpret the result and to save the image. If total process time is
                # running slower than 50 ms it could be a sign the CPU is getting overrun
                #if duration > 0.50:
                #    print("Total process time: %s seconds. Bonnet inference time: %s ms " %
                #          (duration, result.duration_ms))

                last_time = now

        camera.stop_preview()
Example No. 20
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')

        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                # initialize all local vars for computation
                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                #fb.delete('/', 'joy')
                time_interval = 1
                camera_id = 4
                filename_new = "Vision" + "_" + str(camera_id) + "_" + str(
                    datetime.now().strftime('%Y-%m-%d-%H-%M-%S')) + ".csv"
                t_ref = time.localtime()

                f = open(filename_new, 'w')
                #file_write_str = time_stamp_now + ", " + elapsed_time_total_str + ", " + str(camera_id) + ", " + str(num_faces) + ", " + str(joy_score)
                f.write(
                    "time_stamp, elapsed_time, camera_id, face_count, avg_FEQ, person1, person2, person3, person4, person5, person6, person7, person8, person9\n"
                )
                file_write_str = ""
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    print("ARGOS 4 - BEGIN!")
                    t_init = datetime.now()
                    for i, result in enumerate(inference.run()):
                        # t_begin = datetime.now()
                        # t_begin = datetime.now()
                        faces = face_detection.get_faces(result)
                        num_faces = len(faces)
                        #print("face num" + str(num_faces))
                        # for face in faces:
                        #     print(face.joy_score)
                        photographer.update_faces(faces)
                        #joy_score = joy_score_moving_average.next(average_joy_score(faces))
                        joy_score = average_joy_score(faces)
                        animator.update_joy_score(joy_score)
                        #print(joy_score)
                        t0 = time.localtime()
                        t_compare = datetime.now()
                        delta = t_compare - t_init
                        elapsed_time_total_str = str(delta.total_seconds())
                        #print(elapsed_time_total_str)
                        # delta_s = delta.seconds
                        # delta_m = float(delta.microseconds)/1000.
                        # elapsed_time_total_str = str(delta_s) + "." + str(delta_m)
                        # print(elapsed_time_total_str)
                        time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec) + " | " + str(
                                t0.tm_mon) + "/" + str(t0.tm_mday) + "/" + str(
                                    t0.tm_year)
                        time_stamp_now = datetime.now().strftime(
                            '%Y-%m-%d-%H:%M:%S.%f')[:-3]
                        time_up = time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec)
                        elapsed_time = int(t0.tm_sec) - int(t_ref.tm_sec)
                        time_diff = datetime.now()
                        # print("Elapsed time: " + str(elapsed_time_milli))
                        if elapsed_time < 0:
                            elapsed_time += 60
                        # if elapsed_time >= time_interval: #for time interval
                        if True:
                            if num_faces != 0:
                                file_write_str = time_stamp_now + ", " + elapsed_time_total_str + ", " + str(
                                    camera_id) + ", " + str(
                                        num_faces) + ", " + str(joy_score)
                                for face in faces:
                                    file_write_str += ", " + str(face.joy_score)
                                file_write_str += "\n"

                                f.write(file_write_str)

                            #fb.put('/joy_data/'+ str(t0.tm_year) + "/" + str(t0.tm_mon) + "/" + str(t0.tm_mday) + "/" + str(t0.tm_hour) + "/" + str(t0.tm_min) + "/" , time.ctime(), {'time':time_stamp, 'cam_id' : camera_id, 'num_faces': num_faces, 'joy_score': joy_score})
                            t_ref = t0
                        #print(time_stamp)
                        #if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        #    player.play(JOY_SOUND)
                        #elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        #    player.play(SAD_SOUND)

                        prev_joy_score = joy_score
                        # t_end = datetime.now()
                        # ellapsed_milli = t_end - t_begin
                        # print("Elapsed time: ")
                        # print(int(ellapsed_milli.total_seconds() * 1000))
                        # t_end = datetime.now()
                        # ellapsed_milli = t_end - t_begin
                        # print(int(ellapsed_milli.total_seconds() * 1000))
                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example No. 21
def run_inference(run_event,
                  model="face",
                  framerate=15,
                  cammode=5,
                  hres=1640,
                  vres=922,
                  stats=True):
    # See the Raspicam documentation for mode and framerate limits:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # Default to the highest resolution possible at 16:9 aspect ratio

    global socket_connected, time_log

    leds = Leds()

    with PiCamera() as camera, PrivacyLed(leds):
        camera.sensor_mode = cammode
        camera.resolution = (hres, vres)
        camera.framerate = framerate
        camera.video_stabilization = True
        camera.start_preview()  # fullscreen=True)

        def model_selector(argument):
            options = {
                "object": object_detection.model(),
                "face": face_detection.model(),
                "class": image_classification.model()
            }
            return options.get(argument, "nothing")

        tf_model = model_selector(model)

        # this is not needed because the function defaults to "face"
        if tf_model == "nothing":
            print("No tensorflow model or invalid model specified - exiting..")
            camera.stop_preview()
            os._exit(0)
            return

        with CameraInference(tf_model) as inference:
            print("%s model loaded" % model)

            last_time = time()  # measure inference time

            for result in inference.run():

                # exit on shutdown
                if not run_event.is_set():
                    camera.stop_preview()
                    return

                output = ApiObject()

                # handler for the AIY Vision object detection model
                if model == "object":
                    output.threshold = 0.3
                    objects = object_detection.get_objects(
                        result, output.threshold)

                    for obj in objects:
                        # print(object)
                        item = {
                            'name': 'object',
                            'class_name': obj._LABELS[obj.kind],
                            'score': obj.score,
                            'x': obj.bounding_box[0] / capture_width,
                            'y': obj.bounding_box[1] / capture_height,
                            'width': obj.bounding_box[2] / capture_width,
                            'height': obj.bounding_box[3] / capture_height
                        }

                        output.numObjects += 1
                        output.objects.append(item)

                # handler for the AIY Vision face detection model
                elif model == "face":
                    faces = face_detection.get_faces(result)

                    for face in faces:
                        # print(face)
                        item = {
                            'name': 'face',
                            'score': face.face_score,
                            'joy': face.joy_score,
                            'x': face.bounding_box[0] / capture_width,
                            'y': face.bounding_box[1] / capture_height,
                            'width': face.bounding_box[2] / capture_width,
                            'height': face.bounding_box[3] / capture_height,
                        }

                        output.numObjects += 1
                        output.objects.append(item)

                elif model == "class":
                    output.threshold = 0.3
                    classes = image_classification.get_classes(result)

                    s = ""

                    for (obj, prob) in classes:
                        if prob > output.threshold:
                            s += '%s=%1.2f\t|\t' % (obj, prob)

                            item = {
                                'name': 'class',
                                'class_name': obj,
                                'score': prob
                            }

                            output.numObjects += 1
                            output.objects.append(item)

                    # print('%s\r' % s)

                now = time()
                output.timeStamp = now
                output.inferenceTime = (now - last_time)
                last_time = now

                # No need to do anything else if there are no objects
                if output.numObjects > 0:
                    output_json = output.to_json()
                    print(output_json)

                    # Send the json object if there is a socket connection
                    if socket_connected is True:
                        q.put(output_json)

                # Additional data to measure inference time
                if stats is True:
                    time_log.append(output.inferenceTime)
                    time_log = time_log[-10:]  # just keep the last 10 times
                    print("Avg inference time: %s" %
                          (sum(time_log) / len(time_log)))
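
ApiObject is not defined in this snippet. A plausible minimal sketch consistent with how it is used above (the attribute names follow the calls in run_inference; the JSON serialization is an assumption):

import json


class ApiObject(object):
    def __init__(self):
        self.threshold = 0.0
        self.numObjects = 0
        self.objects = []
        self.timeStamp = 0.0
        self.inferenceTime = 0.0

    def to_json(self):
        # Serialize all public attributes for the socket consumer.
        return json.dumps(self.__dict__)
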
Example No. 22
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera, PrivacyLed(leds):
                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                def turn_off():
                    command = "/usr/bin/sudo /sbin/shutdown now"
                    import subprocess
                    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                    output = process.communicate()[0]
                    print(output)

                # Blend the preview layer with the alpha value from the flags.
                camera.start_preview(alpha=preview_alpha)

                def button_released():
                    if button.hold_time == 0:
                        turn_off()
                    else:
                        take_photo()
                def button_held():
                    player.play(SAD_SOUND)
                    button.hold_time = 0

                button = Button(23, hold_time=3.0, hold_repeat = False)
                button.when_held = button_held
                button.when_released = button_released

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        faces = face_detection.get_faces(result)
                        photographer.update_faces(faces)

                        joy_score = joy_score_moving_average.next(average_joy_score(faces))
                        animator.update_joy_score(joy_score)

                        if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                            player.play(JOY_SOUND)
                        elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                            player.play(SAD_SOUND)

                        prev_joy_score = joy_score

                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example No. 23
def blink_led(color=RED, period=1, num_blinks=5):
    for _ in range(num_blinks):
        leds.update(Leds.rgb_on(color))
        time.sleep(period / 2)
        leds.update(Leds.rgb_off())
        time.sleep(period / 2)
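
For example, blink_led(GREEN, period=1, num_blinks=3) would flash the LED green three times, half a second on and half a second off per blink (assuming the leds object and colour constants from the other snippets are in scope).
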
Example No. 24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height',
                        type=int,
                        required=True,
                        help='Input height.')
    parser.add_argument('--input_width',
                        type=int,
                        required=True,
                        help='Input width.')
    parser.add_argument('--input_layer',
                        required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer',
                        required=True,
                        help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.6,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help=
        'Enables camera preview in addition to printing result to terminal.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    parser.add_argument('--show_fps',
                        action='store_true',
                        default=False,
                        help='Shows end to end FPS.')
    args = parser.parse_args()

    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        while True:
            while True:
                long_buffer = []
                short_buffer = []
                pinStatus(pin_A, 'LOW', args.gpio_logic)
                pinStatus(pin_B, 'LOW', args.gpio_logic)
                pinStatus(pin_C, 'LOW', args.gpio_logic)
                leds.update(Leds.rgb_on(GREEN))
                face_box = detect_face()
                hand_box_params = determine_hand_box_params(face_box)
                if image_boundary_check(hand_box_params):
                    break

            # Start hand classifier
            is_active = False
            leds.update(Leds.rgb_on(PURPLE))
            start_timer = time.time()
            with ImageInference(model) as img_inference:
                while True:
                    check_termination_trigger()
                    if is_active:
                        leds.update(Leds.rgb_on(RED))
                    hands_image = capture_hands_image(camera, hand_box_params)
                    output = classify_hand_gestures(
                        img_inference,
                        hands_image,
                        model=model,
                        labels=labels,
                        output_layer=args.output_layer,
                        threshold=args.threshold)

                    short_guess, num_short_guess = buffer_update(
                        output, short_buffer, short_buffer_length)
                    long_guess, num_long_guess = buffer_update(
                        output, long_buffer, long_buffer_length)

                    # Activation of classifier
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and not is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = True
                        leds.update(Leds.rgb_on(RED))
                        send_signal_to_pins(activation_index, args.gpio_logic)
                        long_buffer = []
                        num_long_guess = 0
                        time.sleep(1)

                    # Deactivation of classifier (go back to stable face detection)
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = False
                        leds.update(Leds.rgb_off())
                        long_buffer = []
                        num_long_guess = 0
                        send_signal_to_pins(deactivation_index,
                                            args.gpio_logic)
                        time.sleep(1)
                        break

                    # If not activated within max_no_activity_period seconds, go back to stable face detection
                    if not is_active:
                        timer = time.time() - start_timer
                        if timer >= max_no_activity_period:
                            leds.update(Leds.rgb_off())
                            send_signal_to_pins(deactivation_index,
                                                args.gpio_logic)
                            time.sleep(1)
                            break
                    else:
                        start_timer = time.time()

                        # Displaying classified hand gesture commands
                        if num_short_guess >= (short_buffer_length -
                                               1) and is_active:
                            print_hand_command(short_guess)
                            send_signal_to_pins(short_guess, args.gpio_logic)

        camera.stop_preview()
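
buffer_update is not shown in these snippets. A plausible minimal sketch consistent with how it is called above (the exact original logic is an assumption): it keeps a rolling window of recent classifier outputs and returns the most frequent one together with its count.

def buffer_update(new_value, buffer, max_length):
    buffer.append(new_value)
    if len(buffer) > max_length:
        buffer.pop(0)
    guess = max(set(buffer), key=buffer.count)
    return guess, buffer.count(guess)
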
Example No. 25
def check_termination_trigger():
    if button.is_pressed:
        print('Terminating session...')
        leds.update(Leds.rgb_off())
        time.sleep(5)
        os.system("sudo shutdown -h now")
Example No. 26
# Initialize the buzzer
ready = [
    'C6q',
    'G5q',
    'E5q',
    'C5q',
]

player = aiy.toneplayer.TonePlayer(22)
player.play(*ready)

# Initialize the button (on the top of AIY Google Vision box)
button = Button(BUTTON_GPIO_PIN)

# Initialize LED (in the button on the top of AIY Google Vision box)
leds = Leds()
leds.update(Leds.rgb_off())

# Global variables
input_img_width = 1640
input_img_height = 1232
output_img_size = 160
faces_buffer_size = 40
hand_gesture_buffer_size = 5
threshold = 0.6

# Length of long buffer (to make a decision to de/activate app)
# and short buffer (to declare a specific hand gesture command)
long_buffer_length = 10
short_buffer_length = 3
Example No. 27
    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        led = (0x00, 0x00, 0x00)
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("exit LED drivers")
Example No. 28
#!/usr/bin/env python3
"""Monitor the button and perform shutdown 
when pressed for a certain amount of time
"""
import os
import time
import aiy.toneplayer
from aiy.vision.leds import Leds
from aiy.vision.leds import Pattern
from aiy.vision.leds import RgbLeds
from gpiozero import Button
from gpiozero import LED
from aiy.vision.pins import BUTTON_GPIO_PIN
elapsed_time = 5
button = Button(BUTTON_GPIO_PIN)
leds = Leds()
RED = (0xFF, 0x00, 0x00)


def shutdown_confirmation():
    player = aiy.toneplayer.TonePlayer(22)
    #print('Breathe RED')
    #leds.pattern = Pattern.breathe(1000)
    #start_time_breathe = time.time()
    #leds.update(Leds.rgb_pattern(RED))
    player.play(
        'E5q',
        'Be',
        'C5e',
        'D5e',
        'E5s',
Example No. 29
    def run(self, num_frames, preview_alpha, image_format, image_folder):
        logger.info('Starting...')
        leds = Leds()
        player = Player(gpio=22, bpm=10)
        photographer = Photographer(image_format, image_folder)
        animator = Animator(leds, self._done)

        try:
            # Forced sensor mode, 1640x1232, full FoV. See:
            # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
            # This is the resolution inference run on.
            with PiCamera(sensor_mode=4,
                          resolution=(1640, 1232)) as camera, PrivacyLed(leds):

                def take_photo():
                    logger.info('Button pressed.')
                    player.play(BEEP_SOUND)
                    photographer.shoot(camera)

                # Blend the preview layer with the alpha value from the flags.
                if preview_alpha > 0:
                    logger.info('Starting preview with alpha %d',
                                preview_alpha)
                    camera.start_preview(alpha=preview_alpha)
                else:
                    logger.info('Not starting preview, alpha 0')

                button = Button(23)
                button.when_pressed = take_photo

                joy_score_moving_average = MovingAverage(10)
                prev_joy_score = 0.0
                #fb.delete('/', 'joy')
                time_interval = 1
                camera_id = 1
                t_ref = time.localtime()
                with CameraInference(face_detection.model()) as inference:
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for i, result in enumerate(inference.run()):
                        t_begin = datetime.now()
                        faces = face_detection.get_faces(result)
                        num_faces = len(faces)
                        print("face num" + str(num_faces))
                        # for face in faces:
                        #     print(face.joy_score)
                        photographer.update_faces(faces)
                        joy_score = joy_score_moving_average.next(
                            average_joy_score(faces))
                        animator.update_joy_score(joy_score)
                        print(joy_score)
                        t0 = time.localtime()
                        time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec) + " | " + str(
                                t0.tm_mon) + "/" + str(t0.tm_mday) + "/" + str(
                                    t0.tm_year)
                        time_up = time_stamp = str(t0.tm_hour) + ":" + str(
                            t0.tm_min) + ":" + str(t0.tm_sec)
                        elapsed_time = int(t0.tm_sec) - int(t_ref.tm_sec)

                        # print("Elapsed time: " + str(elapsed_time_milli))
                        if elapsed_time < 0:
                            elapsed_time += 60
                        if elapsed_time >= time_interval:
                            if joy_score != 0:
                                fb.put(
                                    '/joy_data/' + str(t0.tm_year) + "/" +
                                    str(t0.tm_mon) + "/" + str(t0.tm_mday) +
                                    "/" + str(t0.tm_hour) + "/" +
                                    str(t0.tm_min) + "/", time.ctime(), {
                                        'time': time_stamp,
                                        'cam_id': camera_id,
                                        'num_faces': num_faces,
                                        'joy_score': joy_score
                                    })
                            t_ref = t0
                        print(time_stamp)
                        #if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        #    player.play(JOY_SOUND)
                        #elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        #    player.play(SAD_SOUND)

                        prev_joy_score = joy_score
                        t_end = datetime.now()
                        elapsed_milli = t_end - t_begin
                        print("Elapsed time: ")
                        print(int(elapsed_milli.total_seconds() * 1000))
                        if self._done.is_set() or i == num_frames:
                            break
        finally:
            player.stop()
            photographer.stop()

            player.join()
            photographer.join()
            animator.join()
Example No. 30
    def __init__(self, led=(0x00, 0x00, 0x00)):
        self.logger = MyLogger(level=logging.INFO, get="LED")
        self.leds = Leds()
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("Init LED drivers")