Example #1
def __init__(self, gpio, bpm):
    super().__init__()
    self._toneplayer = TonePlayer(gpio, bpm)
    self.JOY_SOUND = ('C5q', 'E5q', 'C6q')
    self.SAD_SOUND = ('C6q', 'E5q', 'C5q')
    self.MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
    self.BEEP_SOUND = ('E6q', 'C6q')
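These tuples use TonePlayer's simplified music notation: a pitch letter (it appears both upper- and lowercase in these examples, e.g. 'C6w' and 'c6w'), an octave digit, and a duration suffix ('w', 'h', 'q', 'e', 's' for whole down to sixteenth notes); an 'r' in place of the pitch is a rest, as in the 'rq' rests in later examples. A minimal sketch, assuming the buzzer is on GPIO 22 as in the other examples on this page:

from aiy.toneplayer import TonePlayer

player = TonePlayer(gpio=22, bpm=120)
player.play('C5q', 'E5q', 'C6q')  # the JOY_SOUND jingle: C5 and E5, then C6, all quarter notes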
Example #2
class Player(Service):
    """Controls buzzer."""
    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)

    def process(self, sound):
        self._toneplayer.play(*sound)

    def play(self, sound):
        self.submit(sound)
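The Service base class is not shown in these snippets; in the AIY joy detection demo it is a small queue-plus-worker-thread helper, so play() just enqueues the sound and returns while process() runs on a background thread. A minimal sketch of that pattern (an illustration, not the demo's exact code):

import queue
import threading

class Service:
    """Runs process() on submitted requests in a background thread."""

    def __init__(self):
        self._requests = queue.Queue()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while True:
            request = self._requests.get()
            if request is None:
                break  # sentinel value shuts the worker down
            self.process(request)

    def process(self, request):
        raise NotImplementedError

    def submit(self, request):
        self._requests.put(request)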
Example #4
class Player(Service):
    """Controls buzzer."""

    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)
        self.JOY_SOUND = ('C5q', 'E5q', 'C6q')
        self.SAD_SOUND = ('C6q', 'E5q', 'C5q')
        self.MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
        self.BEEP_SOUND = ('E6q', 'C6q')

    def process(self, sound):
        self._toneplayer.play(*sound)

    def play(self, sound):
        self.submit(sound)
Example #5
def startup(self):
    with Board() as board, Leds() as leds:
        colors = [Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
                  Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE]
        board.led.state = Led.ON
        for color in colors:
            leds.update(Leds.rgb_on(color))
            time.sleep(0.25)
        TonePlayer(22).play(*jingleBells(6))
        board.led.state = Led.OFF
Example #6
def __init__(self, num_frames, preview_alpha):
    self._rgbled = RGBLED(debug=False)
    self._num_frames = num_frames
    self._preview_alpha = preview_alpha
    self._toneplayer = TonePlayer(22, bpm=10)
    self._sound_played = False
    self._detector = threading.Thread(target=self._run_detector)
    self._animator = threading.Thread(target=self._run_animator)
    self._joy_score_lock = threading.Lock()
    self._joy_score = 0.0
    self._joy_score_window = collections.deque(maxlen=WINDOW_SIZE)
    self._run_event = threading.Event()
    signal.signal(signal.SIGINT, lambda signal, frame: self.stop())
    signal.signal(signal.SIGTERM, lambda signal, frame: self.stop())
Example #7
def startup():
    with Board() as board, Leds() as leds:
        colors = [Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
                  Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE]
        board.led.state = Led.ON
        for color in colors:
            leds.update(Leds.rgb_on(color))
            time.sleep(0.5)
        TonePlayer(22).play(*[
            'Be',
            'rs',
            'C5e',
            'rs',
            'D5e',
        ])
        board.led.state = Led.OFF
Example #8
    def menu(self):
        print('Press Arcade Button to begin photo shoot.' + '\n')
        with Board() as board, Leds() as leds:
            while True:
                # pulse LED to indicate ready state
                leds.pattern = Pattern.blink(1000)
                leds.update(Leds.rgb_pattern(Color.WHITE))
                board.button.wait_for_press()
                startTime = datetime.datetime.now()
                board.led.state = Led.ON
                print('LED is on...')
                # update LED to green indicating shoot is live
                leds.update(Leds.rgb_on((107, 255, 0)))
                self.shoot()
                leds.pattern = Pattern.blink(1000)
                leds.update(Leds.rgb_pattern(Color.WHITE))
                print('Press Arcade Button to start again' + '\n' + 'OR....' +
                      '\n' + 'Press and HOLD the Arcade Button for 5 seconds to quit')
                board.button.wait_for_press()
                pressTime = datetime.datetime.now()
                board.button.wait_for_release()
                releaseTime = datetime.datetime.now()
                board.led.state = Led.OFF
                print('OFF')

                pressDuration = releaseTime - pressTime
                sessionDuration = releaseTime - startTime
                if pressDuration.seconds >= 5:
                    leds.update(Leds.rgb_on(Color.PURPLE))
                    print('Photo booth session ran for ' +
                          str(sessionDuration.seconds) + ' seconds')
                    time.sleep(3)
                    TonePlayer(22).play(*[
                        'D5e',
                        'rq',
                        'C5e',
                        'rq',
                        'Be',
                        'rq',
                        'Be',
                        'C5e',
                        'D5e'
                    ])
                    break
                print('Done')
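The quit gesture here is a measured long press: record the time at wait_for_press(), record it again at wait_for_release(), and compare the delta against a threshold. Stripped of the LED and camera work, the pattern is just this (a sketch, with a hypothetical five-second threshold):

import datetime

press_time = datetime.datetime.now()     # taken when the button goes down
# ... button held ...
release_time = datetime.datetime.now()   # taken when it comes back up

if (release_time - press_time).seconds >= 5:
    print('long press: quit')            # Example #8 plays a jingle and breaks here
else:
    print('short press: keep going')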
Example #9
def main():
    startup()
    print('Press Button to start. Press Button to stop camera. '
          + 'Press Button again (or press Ctrl-C) to quit.')

    pressDuration = 0

    with Board() as board, Leds() as leds:
        while True:
            board.button.wait_for_press()
            pressTime = datetime.datetime.now()
            board.led.state = Led.ON
            print('ON')
            print('Running facedetect')
            facedetect()

            leds.update(Leds.rgb_on((107, 255, 0)))

            board.button.wait_for_release()
            releaseTime = datetime.datetime.now()
            board.led.state = Led.OFF
            print('OFF')

            pressDuration = releaseTime - pressTime
            print('Program ran for ' + str(pressDuration.seconds) + ' seconds')
            if pressDuration.seconds >= 5:
                leds.update(Leds.rgb_on(Color.PURPLE))
                time.sleep(3)
                TonePlayer(22).play(*[
                    'D5e',
                    'rq',
                    'C5e',
                    'rq',
                    'Be',
                    'rq',
                    'Be',
                    'C5e',
                    'D5e'
                ])
                break
            print('Done')
Example #10
def main():
    print("Play tune")
    player = TonePlayer(gpio=BUZZER_GPIO_PIN, bpm=10)
    player.play(*START_SOUND)

    print("Initialize robot")
    robot = Robot()
    robot.resetPosition()

    print("Switch on leds")
    with Leds() as leds:
        leds.update(Leds.rgb_on(Color.GREEN))

        print("Switch on camera")
        with PiCamera(sensor_mode=4,
                      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                      framerate=30) as camera:

            if ENABLE_DISPLAY:
                camera.start_preview()
                annotator = Annotator(camera, dimensions=(320, 240))
            else:
                annotator = None

            print("Load model")
            with CameraInference(face_detection.model()) as inference:

                loop(inference=inference,
                     robot=robot,
                     annotator=annotator,
                     leds=leds)

            if ENABLE_DISPLAY:
                camera.stop_preview()

    player.play(*STOP_SOUND)

    # Give time for the user to remove their finger.
    sleep(3)
    robot.resetPosition()
Example #11
def __init__(self, gpio, bpm):
    super().__init__()
    self._toneplayer = TonePlayer(gpio, bpm)
Example #12
import argparse

from picamera import PiCamera
from time import time, strftime

from aiy.vision.leds import Leds
from aiy.vision.leds import PrivacyLed
from aiy.toneplayer import TonePlayer

from aiy.vision.inference import CameraInference
from aiy.vision.annotator import Annotator
import pikachu_object_detection

# Sound setup
MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
BEEP_SOUND = ('E6q', 'C6q')
player = TonePlayer(gpio=22, bpm=30)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-f',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')

    parser.add_argument(
        '--num_pics',
        '-p',
Example #13
from aiy.vision.models import dish_classification
from aiy.toneplayer import TonePlayer
from aiy.leds import Leds

from gpiozero import Button
from time import sleep
import picamera
from slacker import Slacker
import time
import os

BUZZER_GPIO = 22
BUTTON_GPIO = 23

slack = Slacker('xxx')
toneplayer = TonePlayer(BUZZER_GPIO)
leds = Leds()
button = Button(BUTTON_GPIO)

photo_filename = 'dish.jpg'
BEEP_SOUND = ('E6q', 'C6q')
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def KeepWatchForSeconds(seconds):
    GoFlag = True
    while seconds > 0:
        time.sleep(0.1)
Example #14
def main():

    def face_data(face):
        x, y, width, height = face.bounding_box
        x_mean = int(x + width/2)
        angle = atan2(x_mean - x_center, focal_length)
        distance = 0
        if width > 0:
            distance = focal_length * real_face_width_inch / width
        return angle, distance

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    focal_length = 1320                             # focal length in pixels for 1640 x 1232 resolution - found by calibration
    camera_resolution = (1640, 1232)
    x_center = int(camera_resolution[0] / 2)
    real_face_width_inch = 11                       # width/height of bounding box of human face in inches
    min_angle = atan2(-x_center, focal_length)      # min angle where face can be detected (leftmost area) in radians
    max_angle = atan2(x_center, focal_length)
    face_detected_on_prev_frame = False
    previous_angle = 0

    LOAD_SOUND = ('G5e', 'f5e', 'd5e', 'A5e', 'g5e', 'E5e', 'g5e', 'C6e')
    BUZZER_GPIO = 22


    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera,\
                        Leds() as leds:
        leds.update(Leds.privacy_on())
        myCorrectionMin = 0.2
        myCorrectionMax = 0.2
        maxPW = (2.0 + myCorrectionMax) / 1000
        minPW = (1.0 - myCorrectionMin) / 1000
        camera.start_preview()
        tone_player = TonePlayer(BUZZER_GPIO, bpm=70)
        tone_player.play(*LOAD_SOUND)
        #servo = AngularServo(PIN_A, min_pulse_width=minPW, max_pulse_width=maxPW)
        servo = AngularServo(PIN_A, max_pulse_width=maxPW)
        #servo = AngularServo(PIN_A)

        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box), fill=0)
                annotator.update()
                print('Iteration #%d: num_faces=%d' % (i, len(faces)))

                if faces:
                    previous_angle = 0
                    leds.update(Leds.rgb_on(Color.BLUE))
                    if face_detected_on_prev_frame:
                        angle, distance = face_data(face)
                        #if angle < min_angle:
                        #    angle = min_angle
                        #if angle > max_angle:
                        #    angle = max_angle
                        servo.angle = angle*(-100)
                        previous_angle = angle*(-100)
                        print('Angle:' + str(angle))
                        sleep(.05)
                    face_detected_on_prev_frame = True
                else:
                    leds.update(Leds.rgb_on(Color.RED))
                    if not face_detected_on_prev_frame:
                        servo.angle = previous_angle
                        sleep(.05)
                    face_detected_on_prev_frame = False
        camera.stop_preview()
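face_data() is the pinhole-camera model: the horizontal offset of the face center from the image center, over the focal length in pixels, gives the bearing via atan2, and the distance follows from similar triangles (focal length times real-world face width over pixel width). A worked check with the constants above and a hypothetical bounding box:

from math import atan2, degrees

focal_length = 1320           # pixels at 1640 x 1232, from the calibration above
x_center = 1640 // 2          # 820
real_face_width_inch = 11

x, width = 900, 200           # hypothetical face box: left edge at 900, 200 px wide
x_mean = x + width // 2       # 1000
angle = atan2(x_mean - x_center, focal_length)
distance = focal_length * real_face_width_inch / width

print(degrees(angle))         # ~7.8 degrees right of center
print(distance)               # 1320 * 11 / 200 = 72.6 inches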
Example #15
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera,\
                        Leds() as leds:
        leds.update(Leds.privacy_on())
        leds.update(Leds.rgb_on(Color.BLUE))
        camera.start_preview()
        tone_player = TonePlayer(BUZZER_GPIO, bpm=70)
        #tone_player.play(*LOAD_SOUND)

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                    x, y, width, height = face.bounding_box

                annotator.update()

                if len(faces) >= 1:
                    print(
                        '#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f, x=%.2f, y=%.2f, width=%.2f, height=%.2f'
                        % (inference.count, inference.rate, len(faces),
                           avg_joy_score(faces), x, y, width, height))
                    distance = focal_length * real_face_width_inches / width

                    if x > 0:
                        alpha = x / float(1200)
                        brightness = 254 - (distance * 2)
                    else:
                        alpha = .5
                        brightness = 254
                    try:
                        leds.update(
                            Leds.rgb_on(
                                Color.blend(Color.BLUE, Color.RED, alpha)))
                        b.set_light(2, 'bri', brightness)
                    except:
                        pass
                    camera.annotate_text = '%d inches' % distance
                else:
                    pass

        camera.stop_preview()
Example #16
def main():
    model_path = '/opt/aiy/models/retrained_graph.binaryproto'
    #model_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    #label_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet_labels.txt'
    model_path = '/opt/aiy/models/rg_v3_new.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    input_height = 160
    input_width = 160
    input_layer = 'input'
    output_layer = 'final_result'
    threshold = 0.8
    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, input_height, input_width, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(model_path))
    labels = read_labels(label_path)
    new_labels = []
    for eachLabel in labels:
        if len(eachLabel)>1:
            new_labels.append(eachLabel)
    labels = new_labels
    #print(labels)
    s = xmlrpc.client.ServerProxy("http://aiy.mdzz.info:8000/")
    player = TonePlayer(BUZZER_GPIO, 10)
    player.play(*MODEL_LOAD_SOUND)
    while True:
        while True:
            if s.camera() == 1:
                print('vision kit is woken up')
                with Leds() as leds:
                    leds.pattern = Pattern.blink(100)
                    leds.update(Leds.rgb_pattern(Color.RED))
                    time.sleep(2.0)
                start_time = round(time.time())
                break
            time.sleep(0.2)
            print('no signal, sleeping...')

        with PiCamera() as camera:
            # Configure camera
            camera.sensor_mode = 4
            camera.resolution = (1664, 1232)  # Full Frame, 16:9 (Camera v2)
            camera.framerate = 30
            camera.start_preview()
            while True:
                # Do inference on VisionBonnet
                #print('Start capturing')
                with CameraInference(face_detection.model()) as inference:
                    for result in inference.run():
                        #print(type(result))
                        faces = face_detection.get_faces(result)
                        if len(faces) >= 1:
                            #print('camera captures...')
                            extension = '.jpg'
                            filename = time.strftime('%Y-%m-%d %H:%M:%S') + extension
                            camera.capture(filename)
                            image_npp = np.empty((1664 * 1232 * 3,), dtype=np.uint8)
                            camera.capture(image_npp, 'rgb')
                            image_npp = image_npp.reshape((1232, 1664, 3))
                            image_npp = image_npp[:1232, :1640, :]
                            # image = Image.open('jj.jpg')
                            # draw = ImageDraw.Draw(image)
                            faces_data = []
                            faces_cropped = []
                            for i, face in enumerate(faces):
                                # print('Face #%d: %s' % (i, face))
                                x, y, w, h = face.bounding_box
                                #print(x,y,w,h)
                                w_rm = int(0.3 * w / 2)
                                face_cropped = crop_np((x, y, w, h), w_rm, image_npp)
                                if face_cropped is None:
                                    continue
                                # faces_data.append(image[y: y + h, x + w_rm: x + w - w_rm])
                                # image[y: y + h, x + w_rm: x + w - w_rm].save('1.jpg')
                                face_cropped.save('face_cropped_'+str(i)+'.jpg')
                                faces_cropped.append(face_cropped)
                                #break
                            break
                        # else:
                        #     tt = round(time.time()) - start_time
                        #     if tt > 10:
                        #         break
                    #print('face cutting finishes')

                #print(type(faces_cropped), len(faces_cropped))
                player.play(*BEEP_SOUND)
                flag = 0
                for eachFace in faces_cropped:
                    #print(type(eachFace))
                    if eachFace is None: flag = 1
                if (len(faces_cropped)) <= 0: flag = 1
                if flag == 1: continue
                with ImageInference(model) as img_inference:
                #with CameraInference(model) as img_inference:
                    print('Entering classify_hand_gestures()')
                    output = classify_hand_gestures(img_inference, faces_cropped, model=model, labels=labels,
                                                    output_layer=output_layer, threshold=threshold)
                #print(output)
                if (output == 3):
                    player.play(*JOY_SOUND)
                    print('Yani face detected')
                    print(s.result("Owner", filename))
                else:
                    player.play(*SAD_SOUND)
                    print('Suspicious face detected')
                    print(s.result("Unknown Face", filename))
                upload(filename)
                # Stop preview #
                #break
                while (s.camera()==0):
                    print('sleeping')
                    time.sleep(.2)
                print('Woken up')
Example #17
def __init__(self, gpio, bpm):
    super().__init__()
    self._toneplayer = TonePlayer(gpio, bpm)