def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    leds = Leds()
    leds.reset()
    leds.update(Leds.privacy_on())

    no_customer_delay = 0

    with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera:
    # with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
    # with PiCamera() as camera:
        camera.start_preview()

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                # Honor --num_frames if given; otherwise run forever.
                if args.num_frames is not None and i >= args.num_frames:
                    break
                if len(face_detection.get_faces(result)) >= 1:
                    no_customer_delay = 0
                    leds.update(Leds.rgb_on(GREEN))
                    # stream = io.BytesIO()
                    # camera.capture(stream, format='jpeg')
                    # stream.seek(0)
                    camera.capture('faces.jpg')

                    faces = GetFaceId('faces.jpg')
                    print(faces)
                    if len(faces) > 0:
                        result = GetUserId(faces[0])
                        print(result)

                        highest_score = 0
                        user_id = ""
                        for face in result:
                            for candidate in face['candidates']:
                                # Track the best-scoring candidate, not just the last one seen.
                                if highest_score < candidate['confidence']:
                                    highest_score = candidate['confidence']
                                    user_id = candidate['personId']

                        InfoVendingMachine("10", user_id)
                        print(user_id)
                    # break
                else:
                    if no_customer_delay >= 30:
                        leds.update(Leds.rgb_on(WHITE))
                        InfoVendingMachine("10", '')
                        no_customer_delay = 0
                    else:
                        no_customer_delay += 1


        camera.stop_preview()

    leds.reset()
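
# main() above depends on three helpers that this snippet does not define.
# Hypothetical stubs, inferred only from the call sites (the
# candidates/personId/confidence shape suggests a cloud face-identification
# service); the real implementations are not part of this example.
def GetFaceId(image_path):
    """Assumed helper: detect faces in an image file and return their face IDs."""
    raise NotImplementedError

def GetUserId(face_id):
    """Assumed helper: identify a face ID, returning results shaped like
    [{'candidates': [{'personId': ..., 'confidence': ...}]}]."""
    raise NotImplementedError

def InfoVendingMachine(machine_id, user_id):
    """Assumed helper: tell vending machine machine_id which user is present."""
    raise NotImplementedError
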
def capture():
    time.sleep(0.1)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    leds.update(Leds.rgb_on(GREEN))
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
Example #3
    def _run(self):
        while not self._done.is_set():
            joy_score = self._joy_score.value
            if joy_score > 0:
                self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
            else:
                self._leds.update(Leds.rgb_off())
Example #4
def record_start():
    # File name setting
    now = datetime.now(timezone('Asia/Seoul'))
    y = now.strftime('%Y-%m-%d_%H-%M-%S')
    filename = y + '.wav'

    # WAV recording settings
    CHUNK = 1024  # 1024 frames per buffer
    FORMAT = pyaudio.paInt16  # 16-bit signed int samples
    CHANNELS = 1  # mono
    RATE = 16000  # 16 kHz
    #RECORD_SECONDS = 2
    WAVE_OUTPUT_FILENAME = filename

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    #Start recording
    print("Start to record the audio.")
    frames = []
    while GPIO.input(_GPIO_BUTTON) == 0:
        leds.update(Leds.rgb_on(MAGENTA))
        time.sleep(0.05)
        data = stream.read(CHUNK)
        frames.append(data)
        # record stream
        if GPIO.input(_GPIO_BUTTON) == 1:
            leds.update(Leds.rgb_off())
            break
    # stop stream
    stream.stop_stream()
    stream.close()
    # close PyAudio
    p.terminate()
    aiy.audio.say('Thank you')

    #Save recording and close
    wo = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wo.setnchannels(CHANNELS)
    wo.setsampwidth(p.get_sample_size(FORMAT))
    wo.setframerate(RATE)
    wo.writeframes(b''.join(frames))
    wo.close()

    # File length check
    with sf.SoundFile(filename) as f:
        file_len = len(f) / f.samplerate
    print(file_len)
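    # Worked example of the check below: 24000 frames at 16 kHz is
    # 24000 / 16000 = 1.5 s; clips shorter than one second are discarded.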
    if file_len < 1:
        print("Recording is stopped.")
        call(["rm", filename])
        # Discard the too-short clip and go back to waiting for a press.
        return

    print("Recording is finished.")
    leds.update(Leds.rgb_off())
    print(WAVE_OUTPUT_FILENAME)
def shutdown():
    leds.update(Leds.privacy_off())
    camera.close()
    for i in range(3):
        leds.update(Leds.rgb_on(RED))
        time.sleep(0.2)
        leds.update(Leds.rgb_off())
        time.sleep(0.2)
    check_call(['sudo', 'poweroff'])
def capture():
    #print('button pressed')
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(0.5)
    camera.resolution = (1920, 1080)
    timestamp = datetime.now().isoformat()
    camera.capture('/home/pi/Pictures/{}.jpg'.format(timestamp))
    print('captured {}.jpg'.format(timestamp))
    leds.update(Leds.rgb_off())
def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))

    try:
        while True:
            pass
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())
Example #8
    def _run(self):
        while not self._done.is_set():
            joy_score = self._joy_score.value
            if self._danger:
                self._leds.update(Leds.rgb_pattern(RED))
                self._danger = False
            elif joy_score > 0:
                self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
            else:
                self._leds.update(Leds.rgb_off())
def squirt(position):
    angle = math.degrees(math.atan((position / 2.9) * 2.5))
    # SMBus writes expect an integer byte value.
    bus.write_byte_data(slaveAddress, 2, int(angle))
    time.sleep(0.1)

    while bus.read_word_data(slaveAddress, 1) != 0:
        time.sleep(0.1)

    leds.update(Leds.rgb_on(RED))
    solenoid.on()
    time.sleep(1)
    solenoid.off()
    leds.update(Leds.rgb_pattern(RED))
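
# For scale: squirt(1.0) writes math.degrees(math.atan((1.0 / 2.9) * 2.5)),
# roughly 41 degrees; the 2.9 and 2.5 constants are presumably rig-specific
# calibration values mapping a normalized position onto the servo range.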
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            sleep(3.000)
            camera.capture(photo_filename)

        leds.update(Leds.rgb_on(GREEN))
        print("Dish classifier")
        with ImageInference(dish_classifier.model()) as inference:
            image = Image.open(photo_filename)
            classes = dish_classifier.get_classes(inference.run(image),
                                                  max_num_objects=5,
                                                  object_prob_threshold=0.1)
            dish_name = '/'.join(label for label, _ in classes)
            for i, (label, score) in enumerate(classes):
                print('Result %d: %s (prob=%f)' % (i, label, score))

        leds.update(Leds.rgb_on(BLUE))
        print("Post to slack")
        slack.files.upload(photo_filename,
                           channels='#food_diary',
                           title=dish_name)

        leds.update(Leds.rgb_on(WHITE))
Example #11
def main():
    leds.update(Leds.rgb_on(BLUE))
    while True:
        button.wait_for_press()
        record_start()
Example #12
def blink_led(color=RED, period=1, num_blinks=5):
    for _ in range(num_blinks):
        leds.update(Leds.rgb_on(color))
        time.sleep(period / 2)
        leds.update(Leds.rgb_off())
        time.sleep(period / 2)
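
# Usage sketch: three half-second blink cycles in the default color.
blink_led(period=0.5, num_blinks=3)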
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height',
                        type=int,
                        required=True,
                        help='Input height.')
    parser.add_argument('--input_width',
                        type=int,
                        required=True,
                        help='Input width.')
    parser.add_argument('--input_layer',
                        required=True,
                        help='Name of input layer.')
    parser.add_argument('--output_layer',
                        required=True,
                        help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.6,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help='Enables camera preview in addition to printing result to terminal.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    parser.add_argument('--show_fps',
                        action='store_true',
                        default=False,
                        help='Shows end to end FPS.')
    args = parser.parse_args()

    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)
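    # read_labels is an assumed helper; a plausible one-label-per-line reader:
    #   def read_labels(path):
    #       with open(path) as f:
    #           return [line.strip() for line in f]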

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        while True:
            while True:
                long_buffer = []
                short_buffer = []
                pinStatus(pin_A, 'LOW', args.gpio_logic)
                pinStatus(pin_B, 'LOW', args.gpio_logic)
                pinStatus(pin_C, 'LOW', args.gpio_logic)
                leds.update(Leds.rgb_on(GREEN))
                face_box = detect_face()
                hand_box_params = determine_hand_box_params(face_box)
                if image_boundary_check(hand_box_params):
                    break

            # Start hand classifier
            is_active = False
            leds.update(Leds.rgb_on(PURPLE))
            start_timer = time.time()
            with ImageInference(model) as img_inference:
                while True:
                    check_termination_trigger()
                    if is_active:
                        leds.update(Leds.rgb_on(RED))
                    hands_image = capture_hands_image(camera, hand_box_params)
                    output = classify_hand_gestures(
                        img_inference,
                        hands_image,
                        model=model,
                        labels=labels,
                        output_layer=args.output_layer,
                        threshold=args.threshold)

                    short_guess, num_short_guess = buffer_update(
                        output, short_buffer, short_buffer_length)
                    long_guess, num_long_guess = buffer_update(
                        output, long_buffer, long_buffer_length)

                    # Activation of classifier
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and not is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = True
                        leds.update(Leds.rgb_on(RED))
                        send_signal_to_pins(activation_index, args.gpio_logic)
                        long_buffer = []
                        num_long_guess = 0
                        time.sleep(1)

                    # Deactivation of classifier (go back to stable face detection)
                    if (long_guess == activation_index
                            or long_guess == deactivation_index
                        ) and is_active and num_long_guess >= (
                            long_buffer_length - 3):
                        is_active = False
                        leds.update(Leds.rgb_off())
                        long_buffer = []
                        num_long_guess = 0
                        send_signal_to_pins(deactivation_index,
                                            args.gpio_logic)
                        time.sleep(1)
                        break

                    # If not activated within max_no_activity_period seconds, go back to stable face detection
                    if not is_active:
                        timer = time.time() - start_timer
                        if timer >= max_no_activity_period:
                            leds.update(Leds.rgb_off())
                            send_signal_to_pins(deactivation_index,
                                                args.gpio_logic)
                            time.sleep(1)
                            break
                    else:
                        start_timer = time.time()

                        # Displaying classified hand gesture commands
                        if num_short_guess >= (short_buffer_length -
                                               1) and is_active:
                            print_hand_command(short_guess)
                            send_signal_to_pins(short_guess, args.gpio_logic)

        camera.stop_preview()
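
The buffer_update helper above is not shown. A minimal sketch, assuming it
appends the newest guess to a fixed-length sliding window and returns the most
frequent entry with its count:

from collections import Counter

def buffer_update(output, buffer, buffer_length):
    # Assumed semantics: sliding window of the most recent classifications.
    buffer.append(output)
    if len(buffer) > buffer_length:
        buffer.pop(0)
    # Majority vote over the window: the most common guess and its count.
    guess, count = Counter(buffer).most_common(1)[0]
    return guess, count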
Example #14
    def __init__(self, led=(0x00, 0x00, 0x00)):
        self.logger = MyLogger(level=logging.INFO, get="LED")
        self.leds = Leds()
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("Init LED drivers")
Example #15
    def __exit__(self, exc_type, exc_value, traceback):
        led = (0x00, 0x00, 0x00)
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("exit LED drivers")
Example #16
    def set_color(self, led):
        self.leds.update(Leds.rgb_on(led))
        self.logger.logger.debug("set LED colors")
# blink ready
leds.update(Leds.rgb_on(RED))
time.sleep(0.15)
leds.update(Leds.rgb_off())
time.sleep(0.15)

leds.update(Leds.rgb_on(YELLOW))
time.sleep(0.15)
leds.update(Leds.rgb_off())
time.sleep(0.15)

leds.update(Leds.rgb_on(GREEN))
time.sleep(0.15)
leds.update(Leds.rgb_off())
time.sleep(0.15)

while True:
    pass  # main loop body not shown in this snippet
Example #18
def light_on():
    for i in range(20):
        leds.update(Leds.rgb_on(blend(WHITE, BLUE, i / 20)))
        time.sleep(0.02)
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--label',
        '-lbl',
        type=str,
        dest='label',
        required=True,
        help='Specifies the class (label) of training images (e.g. no_hands).')

    parser.add_argument('--num_images',
                        '-nimg',
                        type=int,
                        dest='num_images',
                        default=10,
                        help='Sets the number of training images to make.')

    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Stage #1: Capture and store raw images
        # Create folder to store raw images
        path_to_raw_img_folder = path_to_training_folder + 'raw/'
        if not os.path.exists(path_to_raw_img_folder):
            os.makedirs(path_to_raw_img_folder)
        time.sleep(2)

        # Create list to store hand boxes location for each image
        hand_boxes_locations = []

        with CameraInference(face_detection.model()) as inference:
            leds.update(Leds.rgb_on(RED))
            time.sleep(3)
            counter = 1
            start = time.time()

            for result in inference.run():
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if counter > args.num_images:
                        break
                    face_box = transform(face.bounding_box)
                    hands = hand_box(face_box)

                    # Capture raw image
                    img_name = path_to_raw_img_folder + 'img' + str(
                        counter) + '.jpg'
                    camera.capture(img_name)
                    time.sleep(0.2)

                    # Record position of hands
                    hand_boxes_locations.append([counter, hands])

                    print('Captured ', str(counter), " out of ",
                          str(args.num_images))
                    counter += 1
            print('Stage #1: It took', str(round(time.time() - start, 1)),
                  'sec to record', str(args.num_images), 'raw images')
        camera.stop_preview()

        # Stage #2: Crop training images from the raw ones and store them in class (label) subfolder
        leds.update(Leds.rgb_on(BLUE))
        start = time.time()
        for i, entry in enumerate(hand_boxes_locations):
            img_number = entry[0]
            hands = entry[1]
            raw_img_name = path_to_raw_img_folder + 'img' + str(
                img_number) + '.jpg'
            if os.path.isfile(raw_img_name):
                raw_image = Image.open(raw_img_name)
                crop_and_store_images(args.label, hands, raw_image)
                raw_image.close()
                time.sleep(0.5)
                os.remove(raw_img_name)
            print('Processed ', str(i + 1), " out of ", str(args.num_images))
        print('Stage #2: It took ', str(round(time.time() - start, 1)),
              'sec to process', str(args.num_images), 'images')
        time.sleep(3)
        # Delete empty folder for raw images
        if not os.listdir(path_to_raw_img_folder):
            os.rmdir(path_to_raw_img_folder)
        leds.update(Leds.rgb_off())
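
select_face is likewise not defined in this snippet. A minimal sketch,
assuming the AIY face objects carry a face_score and that the most confident
face is wanted:

def select_face(faces):
    # Assumed helper: return the highest-scoring face, or None if none found.
    if not faces:
        return None
    return max(faces, key=lambda face: face.face_score)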
    def process(self, joy_score):
        if joy_score > 0:
            self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
        else:
            self._leds.update(Leds.rgb_off())
Example #21
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
    return tuple([math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i]) for i in range(3)])
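
# Worked example: blend(GREEN, BLUE, 0.5) weights each channel evenly, so the
# green and blue channels become math.ceil(127.5) = 128, giving (0, 128, 128).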


leds = Leds()

print('RGB: Solid RED for 1 second')
leds.update(Leds.rgb_on(RED))
time.sleep(1)

print('RGB: Solid GREEN for 1 second')
leds.update(Leds.rgb_on(GREEN))
time.sleep(1)

print('RGB: Solid YELLOW for 1 second')
leds.update(Leds.rgb_on(YELLOW))
time.sleep(1)

print('RGB: Solid BLUE for 1 second')
leds.update(Leds.rgb_on(BLUE))
time.sleep(1)

print('RGB: Solid PURPLE for 1 second')
leds.update(Leds.rgb_on(PURPLE))
time.sleep(1)

button = Button(23, hold_time=2, hold_repeat=False)
camera = PiCamera()
leds = Leds()

print('Script starting...')

camera.resolution = (512, 512)
sleep(2)
camera.start_preview()

while True:
    try:
        if button.is_held:
            raise KeyboardInterrupt
        else:
            leds.update(Leds.rgb_on(Color.GREEN))
            button.wait_for_press()
            leds.pattern = Pattern.blink(2000)
            leds.update(Leds.privacy_on(5))
            for i in camera.capture_continuous(
                    '/home/pi/images/' +
                    'tire{timestamp:%Y-%m-%d-%H-%M-%S}.jpg'):
                print('Captured %s' % i)
                if button.is_held:
                    leds.pattern = Pattern.blink(300)
                    leds.update(Leds.privacy_off())
                    leds.update(Leds.rgb_pattern(Color.RED))
                    sleep(2)
                    leds.update(Leds.rgb_on(Color.GREEN))
                    break
                else:
                    continue
    except KeyboardInterrupt:
        # Assumed ending for this truncated snippet: stop cleanly.
        leds.update(Leds.rgb_off())
        camera.close()
        break