Example #1
def record_start():
    # File name setting
    now = datetime.now(timezone('Asia/Seoul'))
    y = now.strftime('%Y-%m-%d_%H-%M-%S')
    filename = y + '.wav'

    # WAV parameters
    CHUNK = 1024  # 1024 frames per buffer
    FORMAT = pyaudio.paInt16  # 16-bit signed integers
    CHANNELS = 1  # mono
    RATE = 16000  # 16 kHz
    #RECORD_SECONDS = 2
    WAVE_OUTPUT_FILENAME = filename

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    #Start recording
    print("Start to record the audio.")
    frames = []
    while GPIO.input(_GPIO_BUTTON) == 0:
        leds.update(Leds.rgb_on(MAGENTA))
        data = stream.read(CHUNK)  # blocks for one CHUNK (64 ms at 16 kHz)
        frames.append(data)
        if GPIO.input(_GPIO_BUTTON) == 1:  # button released: stop recording
            leds.update(Leds.rgb_off())
            break
    # stop stream
    stream.stop_stream()
    stream.close()
    # close PyAudio
    p.terminate()
    aiy.audio.say('Thank you')

    # Save the recording
    with wave.open(WAVE_OUTPUT_FILENAME, 'wb') as wo:
        wo.setnchannels(CHANNELS)
        wo.setsampwidth(p.get_sample_size(FORMAT))
        wo.setframerate(RATE)
        wo.writeframes(b''.join(frames))

    # Check the file length; discard clips shorter than one second and restart
    with sf.SoundFile(filename) as f:
        file_len = len(f) / f.samplerate
    print(file_len)
    if file_len < 1:
        print("Recording is stopped.")
        call(["rm", filename])
        main()
        return

    print("Recording is finished.")
    leds.update(Leds.rgb_off())
    print(WAVE_OUTPUT_FILENAME)
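This snippet relies on a number of module-level names that are not shown. A minimal setup sketch, assuming a pull-up button; the pin number and the MAGENTA value are assumptions, not taken from the original:

import time
import wave
from datetime import datetime
from subprocess import call

import pyaudio
import soundfile as sf
import RPi.GPIO as GPIO
from pytz import timezone
from aiy.leds import Leds

import aiy.audio  # legacy AIY Voice Kit API providing aiy.audio.say()

_GPIO_BUTTON = 23             # hypothetical BCM pin for the record button
MAGENTA = (0xFF, 0x00, 0xFF)  # Leds.rgb_on() takes an (r, g, b) tuple
leds = Leds()

GPIO.setmode(GPIO.BCM)
GPIO.setup(_GPIO_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)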
Example #2
def _run(self):
    while not self._done.is_set():
        joy_score = self._joy_score.value
        if joy_score > 0:
            self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
        else:
            self._leds.update(Leds.rgb_off())
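blend(), JOY_COLOR, and SAD_COLOR come from AIY's joy detection demo and are not defined in this snippet. A plausible sketch is a channel-wise linear interpolation between the two colors (the exact constants are assumptions):

import math

JOY_COLOR = (255, 70, 0)  # assumed values in the style of the joy demo
SAD_COLOR = (0, 0, 64)

def blend(color_a, color_b, alpha):
    """Mix two RGB tuples: alpha=1 gives color_a, alpha=0 gives color_b."""
    return tuple(math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i])
                 for i in range(3))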
Example #3
def capture():
    time.sleep(0.1)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    leds.update(Leds.rgb_on(GREEN))
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
Example #4
def shutdown():
    leds.update(Leds.privacy_off())
    camera.close()
    for i in range(3):
        leds.update(Leds.rgb_on(RED))
        time.sleep(0.2)
        leds.update(Leds.rgb_off())
        time.sleep(0.2)
    check_call(['sudo', 'poweroff'])
Example #5
def capture():
    #print('button pressed')
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(0.5)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    print('captured {}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
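These capture/shutdown handlers rely on globals defined elsewhere. A minimal wiring sketch; the pin number and the callback assignments are assumptions:

from datetime import datetime
from signal import pause
from subprocess import check_call

from picamera import PiCamera
from gpiozero import Button
from aiy.leds import Leds

GREEN = (0x00, 0xFF, 0x00)
RED = (0xFF, 0x00, 0x00)

leds = Leds()
camera = PiCamera()
button = Button(23)            # hypothetical pin (23 is the Vision Kit button)

button.when_pressed = capture  # take a photo on a short press
button.when_held = shutdown    # power off on a long press (assumption)
pause()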
Example #6
def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))

    try:
        while True:
            pass  # idle; run() is invoked by the button callback
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())
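The `while True: pass` loop keeps the process alive but spins a CPU core at 100%. If nothing else has to run in the main thread, signal.pause() is a lighter way to wait for the callback; a sketch, not the original code:

from signal import pause

def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))
    try:
        pause()  # sleep until a signal (e.g. Ctrl-C) arrives
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())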
Example #7
def _run(self):
    while not self._done.is_set():
        joy_score = self._joy_score.value
        if self._danger:
            self._leds.update(Leds.rgb_pattern(RED))
            self._danger = False
        elif joy_score > 0:
            self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
        else:
            self._leds.update(Leds.rgb_off())
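Leds.rgb_pattern(color) only selects the color; the blink/breathe pattern itself is configured through the pattern property. A sketch of the setup this presumably pairs with:

from aiy.leds import Leds, Pattern

RED = (0xFF, 0x00, 0x00)
leds = Leds()
leds.pattern = Pattern.blink(500)   # 500 ms blink cycle
leds.update(Leds.rgb_pattern(RED))  # blink red until the next update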
Example #8
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            sleep(3)  # let the sensor settle before capturing
            camera.capture(photo_filename)

        leds.update(Leds.rgb_on(GREEN))
        print("Dish classifier")
        with ImageInference(dish_classifier.model()) as inference:
            image = Image.open(photo_filename)
            classes = dish_classifier.get_classes(inference.run(image),
                                                  max_num_objects=5,
                                                  object_prob_threshold=0.1)
            dish_name = ''
            for i, (label, score) in enumerate(classes):
                dish_name += label + '/'
                print('Result %d: %s (prob=%f)' % (i, label, score))

        leds.update(Leds.rgb_on(BLUE))
        print("Post to slack")
        slack.files.upload(photo_filename,
                           channels='#food_diary',
                           title=dish_name)

        leds.update(Leds.rgb_on(WHITE))
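KeepWatchForSeconds is not defined in this snippet; a plausible reading is "return True if the button stays pressed for the whole interval". A hedged sketch using gpiozero (the pin number is an assumption):

import time
from gpiozero import Button

button = Button(23)  # hypothetical pin

def KeepWatchForSeconds(seconds):
    """True if the button is held down continuously for `seconds`."""
    deadline = time.monotonic() + seconds
    while time.monotonic() < deadline:
        if not button.is_pressed:
            return False
        time.sleep(0.05)
    return True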
Example #9
time.sleep(1)

print('RGB: Solid PURPLE for 1 second')
leds.update(Leds.rgb_on(PURPLE))
time.sleep(1)

print('RGB: Solid CYAN for 1 second')
leds.update(Leds.rgb_on(CYAN))
time.sleep(1)

print('RGB: Solid WHITE for 1 second')
leds.update(Leds.rgb_on(WHITE))
time.sleep(1)

print('RGB: Off for 1 second')
leds.update(Leds.rgb_off())
time.sleep(1)

for _ in range(3):
    print('Privacy: On (brightness=default)')
    leds.update(Leds.privacy_on())
    time.sleep(1)
    print('Privacy: Off')
    leds.update(Leds.privacy_off())
    time.sleep(1)

for _ in range(3):
    print('Privacy: On (brightness=5)')
    leds.update(Leds.privacy_on(5))
    time.sleep(1)
    print('Privacy: Off')
    leds.update(Leds.privacy_off())
    time.sleep(1)
Example #10
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--label',
        '-lbl',
        type=str,
        dest='label',
        required=True,
        help='Specifies the class (label) of training images (e.g. no_hands).')

    parser.add_argument('--num_images',
                        '-nimg',
                        type=int,
                        dest='num_images',
                        default=10,
                        help='Sets the number of training images to make.')

    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution that inference runs on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Stage #1: Capture and store raw images
        # Create a folder to store the raw images
        path_to_raw_img_folder = path_to_training_folder + 'raw/'
        if not os.path.exists(path_to_raw_img_folder):
            os.makedirs(path_to_raw_img_folder)
        time.sleep(2)

        # Create list to store hand boxes location for each image
        hand_boxes_locations = []

        with CameraInference(face_detection.model()) as inference:
            leds.update(Leds.rgb_on(RED))
            time.sleep(3)
            counter = 1
            start = time.time()

            for result in inference.run():
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if counter > args.num_images:
                        break
                    face_box = transform(face.bounding_box)
                    hands = hand_box(face_box)

                    # Capture raw image
                    img_name = path_to_raw_img_folder + 'img' + str(
                        counter) + '.jpg'
                    camera.capture(img_name)
                    time.sleep(0.2)

                    # Record position of hands
                    hand_boxes_locations.append([counter, hands])

                    print('Captured ', str(counter), " out of ",
                          str(args.num_images))
                    counter += 1
            print('Stage #1: It took', str(round(time.time() - start, 1)),
                  'sec to record', str(args.num_images), 'raw images')
        camera.stop_preview()

        # Stage #2: Crop training images from the raw ones and store them in class (label) subfolder
        leds.update(Leds.rgb_on(BLUE))
        start = time.time()
        for i, entry in enumerate(hand_boxes_locations):
            img_number = entry[0]
            hands = entry[1]
            raw_img_name = path_to_raw_img_folder + 'img' + str(
                img_number) + '.jpg'
            if os.path.isfile(raw_img_name):
                raw_image = Image.open(raw_img_name)
                crop_and_store_images(args.label, hands, raw_image)
                raw_image.close()
                time.sleep(0.5)
                os.remove(raw_img_name)
            print('Processed ', str(i + 1), " out of ", str(args.num_images))
        print('Stage #2: It took ', str(round(time.time() - start, 1)),
              'sec to process', str(args.num_images), 'images')
        time.sleep(3)
        # Delete the now-empty folder for raw images
        if not os.listdir(path_to_raw_img_folder):
            os.rmdir(path_to_raw_img_folder)
        leds.update(Leds.rgb_off())
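Helpers such as select_face, transform, hand_box, crop_and_store_images, and the path_to_training_folder constant are defined elsewhere in the project. As one illustration, select_face plausibly picks the most prominent detection; a hypothetical sketch:

def select_face(faces):
    # Hypothetical implementation: take the face with the largest bounding
    # box area; face.bounding_box is (x, y, width, height).
    if not faces:
        return None
    return max(faces, key=lambda face: face.bounding_box[2] * face.bounding_box[3])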
Example #11
def blink_led(color=RED, period=1, num_blinks=5):
    for _ in range(num_blinks):
        leds.update(Leds.rgb_on(color))
        time.sleep(period / 2)
        leds.update(Leds.rgb_off())
        time.sleep(period / 2)
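A usage sketch; leds and the color constants live at module level in the original:

from aiy.leds import Leds

RED = (0xFF, 0x00, 0x00)
leds = Leds()

blink_led(color=RED, period=0.5, num_blinks=3)  # three quick red blinks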
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height',
                        type=int,
                        required=True,
                        help='Input height.')
    parser.add_argument('--input_width',
                        type=int,
                        required=True,
                        help='Input width.')
    parser.add_argument('--input_layer',
                        required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer',
                        required=True,
                        help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.6,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help='Enables camera preview in addition to printing result to terminal.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    parser.add_argument('--show_fps',
                        action='store_true',
                        default=False,
                        help='Shows end to end FPS.')
    args = parser.parse_args()

    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution that inference runs on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        while True:  # outer loop: one detect-then-classify session per pass
            while True:  # inner loop: wait for a stable, in-frame face
                long_buffer = []
                short_buffer = []
                pinStatus(pin_A, 'LOW', args.gpio_logic)
                pinStatus(pin_B, 'LOW', args.gpio_logic)
                pinStatus(pin_C, 'LOW', args.gpio_logic)
                leds.update(Leds.rgb_on(GREEN))
                face_box = detect_face()
                hand_box_params = determine_hand_box_params(face_box)
                if image_boundary_check(hand_box_params):
                    break

            # Start hand classifier
            is_active = False
            leds.update(Leds.rgb_on(PURPLE))
            start_timer = time.time()
            with ImageInference(model) as img_inference:
                while True:
                    check_termination_trigger()
                    if is_active:
                        leds.update(Leds.rgb_on(RED))
                    hands_image = capture_hands_image(camera, hand_box_params)
                    output = classify_hand_gestures(
                        img_inference,
                        hands_image,
                        model=model,
                        labels=labels,
                        output_layer=args.output_layer,
                        threshold=args.threshold)

                    short_guess, num_short_guess = buffer_update(
                        output, short_buffer, short_buffer_length)
                    long_guess, num_long_guess = buffer_update(
                        output, long_buffer, long_buffer_length)

                    # Activation of classifier
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and not is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = True
                        leds.update(Leds.rgb_on(RED))
                        send_signal_to_pins(activation_index, args.gpio_logic)
                        long_buffer = []
                        num_long_guess = 0
                        time.sleep(1)

                    # Deactivation of classifier (go back to stable face detection)
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = False
                        leds.update(Leds.rgb_off())
                        long_buffer = []
                        num_long_guess = 0
                        send_signal_to_pins(deactivation_index,
                                            args.gpio_logic)
                        time.sleep(1)
                        break

                    # If not activated within max_no_activity_period seconds, go back to stable face detection
                    if not is_active:
                        timer = time.time() - start_timer
                        if timer >= max_no_activity_period:
                            leds.update(Leds.rgb_off())
                            send_signal_to_pins(deactivation_index,
                                                args.gpio_logic)
                            time.sleep(1)
                            break
                    else:
                        start_timer = time.time()

                        # Displaying classified hand gesture commands
                        if num_short_guess >= (short_buffer_length -
                                               1) and is_active:
                            print_hand_command(short_guess)
                            send_signal_to_pins(short_guess, args.gpio_logic)

        camera.stop_preview()
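pinStatus and send_signal_to_pins are project helpers that are not shown here. One plausible sketch, assuming the command index is written in binary across the three pins and that 'INVERSE' logic flips the levels; the pin numbers are hypothetical:

import RPi.GPIO as GPIO

pin_A, pin_B, pin_C = 26, 24, 27  # hypothetical BCM pins

GPIO.setmode(GPIO.BCM)
for _pin in (pin_A, pin_B, pin_C):
    GPIO.setup(_pin, GPIO.OUT)

def pinStatus(pin, level, gpio_logic):
    # Drive a single pin; 'INVERSE' logic swaps HIGH and LOW.
    high = (level == 'HIGH')
    if gpio_logic == 'INVERSE':
        high = not high
    GPIO.output(pin, GPIO.HIGH if high else GPIO.LOW)

def send_signal_to_pins(index, gpio_logic):
    # Encode a 3-bit index, most significant bit on pin_A.
    for pin, bit in zip((pin_A, pin_B, pin_C), (4, 2, 1)):
        pinStatus(pin, 'HIGH' if index & bit else 'LOW', gpio_logic)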
Example #13
def check_termination_trigger():
    if button.is_pressed:
        print('Terminating session...')
        leds.update(Leds.rgb_off())
        time.sleep(5)
        os.system("sudo shutdown -h now")
Example #14
def process(self, joy_score):
    if joy_score > 0:
        self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
    else:
        self._leds.update(Leds.rgb_off())