Example #1
def blink_led(color=RED, period=1, n_blinks=3):
    for blink in range(n_blinks):
        leds.update(Leds.rgb_off())
        sleep(period / 2)
        leds.update(Leds.rgb_on(color))
        sleep(period / 2)
    leds.update(Leds.rgb_off())
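A minimal setup sketch for the snippet above, assuming the AIY Vision Kit's aiy.leds module; the RED constant and the module-level leds object are assumptions, not shown in the original:

from time import sleep
from aiy.leds import Leds, Color

RED = Color.RED   # assumed color constant used by blink_led()
leds = Leds()     # assumed module-level LED controller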
Example #2
    def _run(self):
        while not self._done.is_set():
            joy_score = self._joy_score.value
            if joy_score > 0:
                self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))
            else:
                self._leds.update(Leds.rgb_off())
Example #3
def send_signal_to_pins(result0, gpio_logic):
    if 'stop' in result0:
        pinStatus(pin_A, 'LOW', gpio_logic)
        pinStatus(pin_B, 'LOW', gpio_logic)
        pinStatus(pin_C, 'LOW', gpio_logic)
        leds.update(Leds.rgb_on(RED))
    elif 'left' in result0:
        pinStatus(pin_A, 'LOW', gpio_logic)
        pinStatus(pin_B, 'LOW', gpio_logic)
        pinStatus(pin_C, 'HIGH', gpio_logic)
        leds.update(Leds.rgb_on(BLUE))
    elif 'right' in result0:
        pinStatus(pin_A, 'LOW', gpio_logic)
        pinStatus(pin_B, 'HIGH', gpio_logic)
        pinStatus(pin_C, 'LOW', gpio_logic)
        leds.update(Leds.rgb_on(PURPLE))
    elif 'slow' in result0:
        pinStatus(pin_A, 'LOW', gpio_logic)
        pinStatus(pin_B, 'HIGH', gpio_logic)
        pinStatus(pin_C, 'HIGH', gpio_logic)
        leds.update(Leds.rgb_on(GREEN))
    else:
        pinStatus(pin_A, 'HIGH', gpio_logic)
        pinStatus(pin_B, 'LOW', gpio_logic)
        pinStatus(pin_C, 'LOW', gpio_logic)
        leds.update(Leds.rgb_off())
    time.sleep(1)
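pinStatus() is not defined in this snippet; a plausible sketch, assuming the pins are gpiozero output devices and that gpio_logic selects NORMAL or INVERSE polarity:

def pinStatus(pin, state, gpio_logic):
    # Hypothetical helper: drive the pin HIGH or LOW, inverting the level
    # when INVERSE logic is requested (e.g. for active-low driver boards).
    level = (state == 'HIGH')
    if gpio_logic == 'INVERSE':
        level = not level
    if level:
        pin.on()
    else:
        pin.off()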
Example #4
    def process(self, joy_score):
        if joy_score > 0:
            self._leds.update(
                Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))

        else:
            self._leds.update(Leds.rgb_off())
Example #5
class MultiColorLed:
    Config = namedtuple('Config', ['channels', 'pattern'])

    OFF         = Config(channels=lambda color: Leds.rgb_off(),
                         pattern=None)
    ON          = Config(channels=Leds.rgb_on,
                         pattern=None)
    BLINK       = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.blink(500))
    BLINK_3     = BLINK
    BEACON      = BLINK
    BEACON_DARK = BLINK
    DECAY       = BLINK
    PULSE_SLOW  = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.breathe(500))
    PULSE_QUICK = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.breathe(100))

    def _update(self, state, brightness):
        with self._lock:
            if state is not None:
                self._state = state
            if brightness is not None:
                self._brightness = brightness

            color = (int(255 * self._brightness), 0, 0)
            if self._state.pattern:
                self._leds.pattern = self._state.pattern
            self._leds.update(self._state.channels(color))

    def __init__(self, channel):
        self._lock = threading.Lock()
        self._brightness = 1.0  # Read and written atomically.
        self._state = self.OFF
        self._leds = Leds()

    def close(self):
        self._leds.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    @property
    def brightness(self):
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        if value < 0.0 or value > 1.0:
            raise ValueError('Brightness must be between 0.0 and 1.0.')
        self._update(state=None, brightness=value)

    def _set_state(self, state):
        self._update(state=state, brightness=None)
    state = property(None, _set_state)
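A short usage sketch for the class above, assuming Leds, Pattern, threading and namedtuple are imported; the channel argument is unused by this excerpt, so None is passed as a placeholder:

# Usage sketch: drive the on-board RGB LED through the MultiColorLed wrapper.
with MultiColorLed(channel=None) as led:
    led.state = MultiColorLed.ON          # solid red at full brightness
    led.brightness = 0.5                  # dim to 50%
    led.state = MultiColorLed.PULSE_SLOW  # breathe pattern, 500 ms period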
Example #6
def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))

    try:
        while True:
            pass
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())
        pass
def main():
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))
    manual_screen()

    while True:
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    leds.update(Leds.rgb_off())
                    sys.exit()
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("process")

        print("Done")
        leds.update(Leds.rgb_on(WHITE))
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        sh.cameraLoad()
        sh.shutter()
        sh.cameraSave()

        print("Done")
        leds.update(Leds.rgb_on(WHITE))
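KeepWatchForSeconds() is not shown in these snippets; a minimal sketch of what it might do, assuming a gpiozero-style button exposing is_pressed and the time module:

def KeepWatchForSeconds(seconds):
    # Hypothetical helper: return True if the button stays pressed for
    # `seconds` seconds, False if it is released earlier.
    start = time.time()
    while button.is_pressed:
        if time.time() - start >= seconds:
            return True
        time.sleep(0.1)
    return False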
Example #10
def send_signal_to_servos(result0):
    if 'stop' in result0:
        tuned_servoA.value = 0.5
        tuned_servoB.value = 0.5
        leds.update(Leds.rgb_on(RED))
    elif 'left' in result0:
        tuned_servoA.min()
        tuned_servoB.min()
        leds.update(Leds.rgb_on(BLUE))
    elif 'right' in result0:
        tuned_servoA.max()
        tuned_servoB.max()
        leds.update(Leds.rgb_on(PURPLE))
    elif 'slow' in result0:
        tuned_servoA.value = 0.6
        tuned_servoB.value = 0.3
        leds.update(Leds.rgb_on(GREEN))
    else:
        tuned_servoA.max()
        tuned_servoB.min()
        leds.update(Leds.rgb_off())
    time.sleep(0.002)
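The tuned servos are created elsewhere; a plausible setup sketch using gpiozero with the Vision Bonnet pins (the pulse widths are placeholder values that need per-servo tuning):

from gpiozero import Servo
from aiy.pins import PIN_A, PIN_B  # Vision Bonnet GPIO pins

# Hypothetical servo setup; min/max pulse widths below are placeholders.
tuned_servoA = Servo(PIN_A, min_pulse_width=0.0005, max_pulse_width=0.0019)
tuned_servoB = Servo(PIN_B, min_pulse_width=0.0005, max_pulse_width=0.0019)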
Example #11
def run():
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")

    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)

        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            sleep(3.000)
            camera.capture(photo_filename)

        leds.update(Leds.rgb_on(GREEN))
        print("Dish classifier")
        with ImageInference(dish_classification.model()) as inference:
            image = Image.open(photo_filename)
            classes = dish_classification.get_classes(
                inference.run(image),
                max_num_objects=5,
                object_prob_threshold=0.1)
            dish_name = ''
            for i, (label, score) in enumerate(classes):
                dish_name += label + '/'
                print('Result %d: %s (prob=%f)' % (i, label, score))

        leds.update(Leds.rgb_on(BLUE))
        print("Post to slack")
        slack.files.upload(photo_filename,
                           channels='#food_diary',
                           title=dish_name)

        leds.update(Leds.rgb_on(WHITE))
Example #12
def main():
    with Leds() as leds:

        print('Windows Up')
        tuned_servo.min()
        #               blueLED1.blink(.2,.2) # risk of servo burning if kept
        #               blueLED2.blink(.2,.2)
        leds.pattern = Pattern.blink(500)
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Windows Down')
        tuned_servo.max()
        interior.on()
        yellowLED.on()
        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.YELLOW))

        # Fade from yellow to red
        for i in range(32):
            color = Color.blend(Color.RED, Color.YELLOW, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)


#               leds.update({
#                       1: Leds.Channel(Leds.Channel.PATTERN, 64),
#                       2: Leds.Channel(Leds.Channel.OFF, 128),
#                       3: Leds.Channel(Leds.Channel.ON, 128),
#                       4: Leds.Channel(Leds.Channel.PATTERN, 64),
#               })

        time.sleep(5)
        leds.update(Leds.rgb_off())
        tuned_servo.close()
        yellowLED.close()
        interior.close()
        blueLED2.close()
Example #13
def main():
    parser = argparse.ArgumentParser(
        'Image classification camera inference example.')
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--num_objects',
                        '-c',
                        type=int,
                        default=3,
                        help='Sets the number of object inferences to print.')
    parser.add_argument('--nopreview',
                        dest='preview',
                        action='store_false',
                        default=True,
                        help='Disable camera preview.')
    args = parser.parse_args()

    with Leds() as leds:
        with PiCamera(sensor_mode=4, framerate=30) as camera, \
             CameraPreview(camera, enabled=args.preview), \
             CameraInference(image_classification.model()) as inference:
            for result in inference.run(args.num_frames):
                classes = image_classification.get_classes(
                    result, top_k=args.num_objects)
                print(classes_info(classes))
                #print("my class: "+classes[0][0])
                if classes:
                    camera.annotate_text = '%s (%.2f)' % classes[0]
                    if "mouse" in str(classes[0][0]):
                        leds.update(Leds.rgb_on(Color.RED))
                    else:
                        leds.update(Leds.rgb_off())
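classes_info() is not defined in the snippet above; one plausible implementation, matching the label/probability format it prints, would be:

def classes_info(classes):
    # Format each (label, probability) pair, e.g. "mouse (0.87), keyboard (0.10)".
    return ', '.join('%s (%.2f)' % pair for pair in classes)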
Example #14
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--label',
        '-lbl',
        type=str,
        dest='label',
        required=True,
        help='Specifies the class (label) of training images (e.g. no_hangs).')

    parser.add_argument('--num_images',
                        '-nimg',
                        type=int,
                        dest='num_images',
                        default=10,
                        help='Sets the number of training images to make.')

    args = parser.parse_args()

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        # Stage #1: Capture and store raw images
        # Create folder to store raw images
        path_to_raw_img_folder = path_to_training_folder + 'raw/'
        if not os.path.exists(path_to_raw_img_folder):
            os.makedirs(path_to_raw_img_folder)
        time.sleep(2)

        # Create list to store hand boxes location for each image
        hand_boxes_locations = []

        with CameraInference(face_detection.model()) as inference:
            leds.update(Leds.rgb_on(RED))
            time.sleep(3)
            counter = 1
            start = time.time()

            for result in inference.run():
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if counter > args.num_images:
                        break
                    face_box = transform(face.bounding_box)
                    hands = hand_box(face_box)

                    # Capture raw image
                    img_name = path_to_raw_img_folder + 'img' + str(
                        counter) + '.jpg'
                    camera.capture(img_name)
                    time.sleep(0.2)

                    # Record position of hands
                    hand_boxes_locations.append([counter, hands])

                    print('Captured ', str(counter), " out of ",
                          str(args.num_images))
                    counter += 1
            print('Stage #1: It took', str(round(time.time() - start, 1)),
                  'sec to record', str(args.num_images), 'raw images')
        camera.stop_preview()

        # Stage #2: Crop training images from the raw ones and store them in class (label) subfolder
        leds.update(Leds.rgb_on(BLUE))
        start = time.time()
        for i, entry in enumerate(hand_boxes_locations):
            img_number = entry[0]
            hands = entry[1]
            raw_img_name = path_to_raw_img_folder + 'img' + str(
                img_number) + '.jpg'
            if os.path.isfile(raw_img_name):
                raw_image = Image.open(raw_img_name)
                crop_and_store_images(args.label, hands, raw_image)
                raw_image.close()
                time.sleep(0.5)
                os.remove(raw_img_name)
            print('Processed ', str(i + 1), " out of ", str(args.num_images))
        print('Stage #2: It took ', str(round(time.time() - start, 1)),
              'sec to process', str(args.num_images), 'images')
        time.sleep(3)
        # Delete empty folder for raw images
        if os.listdir(path_to_raw_img_folder) == []:
            os.rmdir(path_to_raw_img_folder)
        leds.update(Leds.rgb_off())
Example #15
    def shutdown(self):
        self._leds.update(Leds.rgb_off())
Example #16
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)

        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)

        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)

        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)

        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)

        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)

        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)

        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)

        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),  # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),  # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),  # Privacy channel
        })
        time.sleep(5)

        print('Done')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument(
        '--input_height', type=int, required=True, help='Input height.')
    parser.add_argument(
        '--input_width', type=int, required=True, help='Input width.')
    parser.add_argument(
        '--input_layer', required=True, help='Name of input layer.')
    parser.add_argument(
        '--output_layer', required=True, help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument(
        '--input_mean', type=float, default=128.0, help='Input mean.')
    parser.add_argument(
        '--input_std', type=float, default=128.0, help='Input std.')
    parser.add_argument(
        '--input_depth', type=int, default=3, help='Input depth.')
    parser.add_argument(
        '--threshold', type=float, default=0.6,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help='Enables camera preview in addition to printing result to terminal.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    parser.add_argument(
        '--show_fps',
        action='store_true',
        default=False,
        help='Shows end to end FPS.')
    args = parser.parse_args()


    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)

        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        while True:
            while True:
                long_buffer = []
                short_buffer = []
                pinStatus(pin_A,'LOW',args.gpio_logic)
                pinStatus(pin_B,'LOW',args.gpio_logic)
                pinStatus(pin_C,'LOW',args.gpio_logic)
                leds.update(Leds.rgb_on(GREEN))
                face_box = detect_face()
                print("Entered the loop of face classifier")
                hand_box_params = determine_hand_box_params(face_box)
                if image_boundary_check(hand_box_params):
                    print("Hand gesture identified")
                    break

            # Start hand classifier
            is_active = False
            leds.update(Leds.rgb_on(PURPLE))
            start_timer = time.time()
            with ImageInference(model) as img_inference:
                while True:
                    print("Entered the loop of gesture classifier")
                    #check_termination_trigger()
                    if is_active:
                        leds.update(Leds.rgb_on(RED))
                    hands_image = capture_hands_image(camera,hand_box_params)
                    output = classify_hand_gestures(img_inference,hands_image,model=model,labels=labels,output_layer=args.output_layer,threshold = args.threshold)

                    short_guess, num_short_guess = buffer_update(output,short_buffer,short_buffer_length)
                    long_guess, num_long_guess = buffer_update(output,long_buffer,long_buffer_length)

                    # Activation of classifier                  
                    if (long_guess == activation_index or long_guess == deactivation_index) and not is_active and num_long_guess >= (long_buffer_length - 3):
                        is_active = True
                        leds.update(Leds.rgb_on(RED))
                        send_signal_to_pins(activation_index,args.gpio_logic)
                        long_buffer = []                      
                        num_long_guess = 0                     
                        time.sleep(1)

                    # Deactivation of classifier (go back to stable face detection)                  
                    if (long_guess == activation_index or long_guess == deactivation_index) and is_active and num_long_guess >= (long_buffer_length - 3):
                        is_active = False
                        leds.update(Leds.rgb_off())
                        long_buffer = []
                        num_long_guess = 0                     
                        send_signal_to_pins(deactivation_index,args.gpio_logic)                      
                        time.sleep(1)
                        break

                    # If not activated within max_no_activity_period seconds, go back to stable face detection
                    if not is_active:
                        timer = time.time()-start_timer
                        if timer >= max_no_activity_period:
                            leds.update(Leds.rgb_off())
                            send_signal_to_pins(deactivation_index,args.gpio_logic)                      
                            time.sleep(1)
                            break
                    else:
                        start_timer = time.time()  

                        # Displaying classified hand gesture commands
                        if num_short_guess >= (short_buffer_length-1) and is_active:
                            print_hand_command(short_guess)
                            send_signal_to_pins(short_guess,args.gpio_logic)
 
        camera.stop_preview()
    def shutdown(self):
        self._leds.update(Leds.rgb_off())
Example #19
def check_termination_trigger():
    if button.is_pressed:
        print('Terminating session...')
        leds.update(Leds.rgb_off())
        time.sleep(5)
        os.system("sudo shutdown -h now") 
def main():
    global video_thread
    global audio_thread
    global tmp_dir
    global final_dir

    # Creates tmp directory if does not exist
    tmp_dir = os.path.expanduser(tmp_dir)
    if not os.path.isdir(tmp_dir):
        print("Can't find tmp media directory, creating...")
        os.mkdir(tmp_dir)

    # Creates final media directory if does not exist
    final_dir = os.path.expanduser(final_dir)
    if not os.path.isdir(final_dir):
        print("Can't find media directory, creating...")
        os.mkdir(final_dir)

    # Initializes threads
    video_thread = VideoRecorder(timestamp_fontcolor=colors.WHITE, timestamp_pos=(10,50)) 
    # optional params (defaults) : 
        # res_x=640, 
        # res_y=480, 
        # framerate=25, 
        # timestamp=True, 
        # timestamp_pos=(50,50),
        # timestamp_fontcolor=(255, 255, 255), 
        # timestamp_fontthickness=1, 
        # timestamp_fontsize=1, 
        # timestamp_format="%d/%m/%Y, %H:%M:%S"
        
    audio_thread = AudioRecorder(device=1) 
    # optional params (defaults) : 
        # device=0, 
        # channels=2, 
        # samplerate=44100

    # Allows time for camera to boot up
    time.sleep(2)

    # button.when_pressed = record_ten_seconds
    button.when_pressed = toggle_recording
    print("ready for action!")
    
    with Leds() as leds:
        while True:
            if recording:
                leds.update(Leds.rgb_off())
                for i in range(8):
                    leds.update(Leds.rgb_on((2 * i, 0, 0)))
                    time.sleep(0.1)
                for i in reversed(range(8)):
                    leds.update(Leds.rgb_on((2 * i, 0, 0)))
                    time.sleep(0.1)
            else:
                leds.update(Leds.rgb_off())
                for i in range(8):
                    leds.update(Leds.rgb_on((0, 2 * i, 0)))
                    time.sleep(0.1)
                for i in reversed(range(8)):
                    leds.update(Leds.rgb_on((0, 2 * i, 0)))
                    time.sleep(0.1)
Example #21
def main():
    """Face detection camera inference example."""

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    blink_led(color=BLUE, period=0.5, n_blinks=10)

    # Initialize variables for automatic shutdown
    shuttdown_flag = False
    min_frames_w_arduino_power = 10
    max_frames_w_no_arduino_power = 10
    counter_frames_w_power = 0
    counter_frames_w_no_power = 0

    focal_length = 1320  # focal length in pixels for 1640 x 1232 resolution - found by calibration
    camera_resolution = (1640, 1232)
    x_center = int(camera_resolution[0] / 2)
    real_face_width_inch = 11  # width/height of bounding box of human face in inches
    min_angle = atan2(
        -x_center, focal_length
    )  # min angle where face can be detected (leftmost area) in radians
    max_angle = atan2(
        x_center, focal_length
    )  # max angle where face can be detected (rightmost area) in radians
    min_distance = 20  # min distance to detected face in inches
    max_distance = 200  # max distance to detected face in inches
    face_detected_on_prev_frame = False  # Flag indicating whether a face was detected on the previous frame
    a = (0.9 - 0.2) / (
        max_distance - min_distance
    )  # Coefficient a (slope) for coding distance value from range [min_distance, max_distance] to [0.2, 0.9]
    b = 0.9 - a * max_distance  # Coefficient b (intercept) for coding distance value from range [min_distance, max_distance] to [0.2, 0.9]

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        camera.sensor_mode = 4

        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = camera_resolution

        # Start the camera stream.
        camera.framerate = 30

        #camera.start_preview()

        # Calculate the face's horizontal angle and distance from its bounding box.
        def face_data(face):
            x, y, width, height = face.bounding_box
            x_mean = int(x + width / 2)
            angle = atan2(x_mean - x_center, focal_length)
            distance = 0
            if width > 0:
                distance = focal_length * real_face_width_inch / width
            return angle, distance

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                face = select_face(faces)
                if face:
                    if face_detected_on_prev_frame:
                        angle, distance = face_data(face)
                        if angle < min_angle:
                            angle = min_angle
                        if angle > max_angle:
                            angle = max_angle
                        angle_to_send = (angle - min_angle) / (max_angle -
                                                               min_angle)
                        pin_A.value = 0.8 * angle_to_send + 0.1
                        if distance < min_distance:
                            distance = min_distance
                        if distance > max_distance:
                            distance = max_distance
                        pin_B.value = a * distance + b
                        leds.update(Leds.rgb_on(GREEN))
                    face_detected_on_prev_frame = True
                else:
                    if not face_detected_on_prev_frame:
                        pin_A.value = 0.5
                        pin_B.value = 0.1
                        leds.update(Leds.rgb_off())
                    face_detected_on_prev_frame = False
                clock = i % 80 + 11
                pin_C.value = clock / 100
                print('Iteration #', str(i), ' A=', str(pin_A.value), ' B=',
                      str(pin_B.value), ' C=', str(pin_C.value), ' D=',
                      str(pin_D.value))
                print('face_detected_on_prev_frame = ',
                      face_detected_on_prev_frame)

                if pin_D.is_pressed:
                    counter_frames_w_no_power += 1
                else:
                    counter_frames_w_power += 1
                print('released, shuttdown_flag = ', shuttdown_flag,
                      ' frames with power=', str(counter_frames_w_power))
                print('pressed,  shuttdown_flag = ', shuttdown_flag,
                      ' frames with no power=', str(counter_frames_w_no_power))
                if counter_frames_w_power >= min_frames_w_arduino_power and not shuttdown_flag:
                    shuttdown_flag = True
                    counter_frames_w_no_power = 0
                if counter_frames_w_no_power >= max_frames_w_no_arduino_power and shuttdown_flag:
                    shut_aiy_kit_down()
                sleep(0.1)
    def process(self, joy_score):
        if joy_score > 0:
            self._leds.update(Leds.rgb_on(Color.blend(JOY_COLOR, SAD_COLOR, joy_score)))
        else:
            self._leds.update(Leds.rgb_off())
Example #23
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)

        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)

        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)

        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)

        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)

        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)

        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)

        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)

        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
        })
        time.sleep(5)

        print('Done')
Example #24
    def main_loop(self):
        while True:
            with Leds() as leds:
                leds.update(Leds.rgb_on(Color.RED))

                with Board() as board:
                    print("Waiting for input")
                    board.button.wait_for_press()
                    leds.update(Leds.rgb_on((0, 0, 250)))
                    #print('ON')
                    self.start = True
                    self.counter = 0
                    self.completed = False
                    self.stopwatch = time.time()
                    board.button.wait_for_release()
                    #print('OFF')
                    leds.update(Leds.rgb_off())

                while self.start:

                    classes = currentState
                    #print("current State: ", classes)
                    if classes == 0 and self.state != 0:
                        self.standing()
                    elif classes == 1 and self.state != 1:
                        self.empty()
                    elif classes == 2 and self.state != 2 and self.last_detected_state != 2:
                        self.squat()

                    # Selecting a State
                    if (time.time()-self.stopwatch) > 0.15:
                        print("State:\t ",states_names[self.state] , "\t| [selected]")

                        if self.state == 2 and self.last_detected_state != 2:  # Squat detected
                            self.counter += 1
                            leds.update(Leds.rgb_on((0, 0, 250)))
                            self._newSqaut()
                            #print("###  Current Score: ", self.counter,"###")

                        if self.state == 2 or self.state == 0:
                            #self.stopwatch = time.time()
                            leds.update(Leds.rgb_on(Color.WHITE))

                        if self.state == 1 and ((time.time()-self.stopwatch) > 1):
                            leds.update(Leds.rgb_off())
                        
                        self.last_detected_state = self.state
                        
                    # Resetting the counter if nobody is in the frame
                    if (time.time()-self.stopwatch) > 10:
                        if self.state == 1:  # if nobody is in the frame reset counter
                            print("###  Reset Score   ###")
                            self.counter = 0
                            self.start = False
                        
                            leds.pattern = Pattern.blink(500)
                            leds.update(Leds.rgb_pattern(Color.RED))
                            time.sleep(2)
                            leds.update(Leds.rgb_off())
                            self.stopwatch = time.time()

                    # Check whether the workout is finished
                    if self.counter >= TOTAL_SQUATS:
                        self.completed = True
                        self.output.on()
                        self.counter = 0

                        print("Completed Workout")
                        self.start = False
                        
                        leds.pattern = Pattern.blink(500)
                        leds.update(Leds.rgb_pattern(Color.GREEN))
                        time.sleep(2)
                        leds.update(Leds.rgb_on(Color.GREEN))

                        with Board() as board:
                            print("Waiting for input")
                            board.button.wait_for_press()
                            print('ON')
                            board.led.state = Led.ON
                            self.start = False
                            self.counter = 0
                            self.completed = False
                            self.stopwatch = time.time()
                            board.button.wait_for_release()
                            print('OFF')

                            self.output.off()
                            board.led.state = Led.OFF
                            leds.pattern = Pattern.blink(500)
                            leds.update(Leds.rgb_pattern(Color.RED))
                            time.sleep(2)
                            leds.update(Leds.rgb_off())
time.sleep(1)

print('RGB: Solid PURPLE for 1 second')
leds.update(Leds.rgb_on(PURPLE))
time.sleep(1)

print('RGB: Solid CYAN for 1 second')
leds.update(Leds.rgb_on(CYAN))
time.sleep(1)

print('RGB: Solid WHITE for 1 second')
leds.update(Leds.rgb_on(WHITE))
time.sleep(1)

print('RGB: Off for 1 second')
leds.update(Leds.rgb_off())
time.sleep(1)

for _ in range(3):
    print('Privacy: On (brightness=default)')
    leds.update(Leds.privacy_on())
    time.sleep(1)
    print('Privacy: Off')
    leds.update(Leds.privacy_off())
    time.sleep(1)

for _ in range(3):
    print('Privacy: On (brightness=5)')
    leds.update(Leds.privacy_on(5))
    time.sleep(1)
    print('Privacy: Off')
def blink_led(color=RED, period=1, num_blinks=5):
    for _ in range(num_blinks):
        leds.update(Leds.rgb_on(color))
        time.sleep(period / 2)
        leds.update(Leds.rgb_off())
        time.sleep(period / 2)