Code example #1
File: joy_fire.py  Project: ritwikbiswas/argos
class Player(Service):
    """Controls buzzer."""
    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)

    def process(self, sound):
        self._toneplayer.play(*sound)

    def play(self, sound):
        self.submit(sound)
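
A minimal usage sketch for the class above, assuming the Service base class and aiy.toneplayer.TonePlayer from the AIY Vision Kit examples are importable; the GPIO pin and tune below are illustrative assumptions, not values taken from this project.

# Illustrative only: Player is the class above; pin and tune are assumed values.
BUZZER_GPIO_PIN = 22                    # typical Vision Bonnet buzzer pin (assumption)
JOY_SOUND = ('C5q', 'E5q', 'C6q')

player = Player(gpio=BUZZER_GPIO_PIN, bpm=10)
player.play(JOY_SOUND)                  # hands the tuple to Service.submit() for background playback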
Code example #2
File: oseimeter.py  Project: llealgt/AIY_experiments
class Player(Service):
    """Controls buzzer."""

    def __init__(self, gpio, bpm):
        super().__init__()
        self._toneplayer = TonePlayer(gpio, bpm)
        self.JOY_SOUND = ('C5q', 'E5q', 'C6q')
        self.SAD_SOUND = ('C6q', 'E5q', 'C5q')
        self.MODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')
        self.BEEP_SOUND = ('E6q', 'C6q')

    def process(self, sound):
        self._toneplayer.play(*sound)

    def play(self, sound):
        self.submit(sound)
Code example #3
def main():
    print("Play tune")
    player = TonePlayer(gpio=BUZZER_GPIO_PIN, bpm=10)
    player.play(*START_SOUND)

    print("Initialize robot")
    robot = Robot()
    robot.resetPosition()

    print("Switch on leds")
    with Leds() as leds:
        leds.update(Leds.rgb_on(Color.GREEN))

        print("Switch on camera")
        with PiCamera(sensor_mode=4,
                      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                      framerate=30) as camera:

            if ENABLE_DISPLAY:
                camera.start_preview()
                annotator = Annotator(camera, dimensions=(320, 240))
            else:
                annotator = None

            print("Load model")
            with CameraInference(face_detection.model()) as inference:

                loop(inference=inference,
                     robot=robot,
                     annotator=annotator,
                     leds=leds)

            if ENABLE_DISPLAY:
                camera.stop_preview()

    player.play(*STOP_SOUND)

    # Give the user time to remove their finger.
    sleep(3)
    robot.resetPosition()
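
The main() above leans on module-level imports and constants defined elsewhere in the project. A hypothetical reconstruction of that setup is sketched below; Robot and loop() are project-specific, and the concrete pin, resolution, and tune values are assumptions rather than values from the source.

# Hypothetical module-level setup for the excerpt above (values are assumptions).
from time import sleep

from picamera import PiCamera
from aiy.leds import Color, Leds
from aiy.toneplayer import TonePlayer
from aiy.vision.annotator import Annotator
from aiy.vision.inference import CameraInference
from aiy.vision.models import face_detection

BUZZER_GPIO_PIN = 22                   # assumed Vision Bonnet buzzer pin
CAMERA_WIDTH, CAMERA_HEIGHT = 1640, 1232
ENABLE_DISPLAY = True                  # set to False when running headless
START_SOUND = ('C5q', 'E5q', 'C6q')    # illustrative tunes
STOP_SOUND = ('C6q', 'E5q', 'C5q')
# Robot and loop() are defined elsewhere in the project.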
Code example #4
def main():

    def face_data(face):
        x, y, width, height = face.bounding_box
        x_mean = int(x + width/2)
        angle = atan2(x_mean - x_center,focal_length)
        distance = 0
        if width > 0:
            distance = focal_length * real_face_width_inch / width
        return angle, distance

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    focal_length = 1320                             # focal length in pixels for 1640 x 1232 resolution - found by calibration
    camera_resolution = (1640, 1232)
    x_center = int(camera_resolution[0] / 2)
    real_face_width_inch = 11                       # width/height of bounding box of human face in inches
    min_angle = atan2(-x_center,focal_length)       # min angle where face can be detected (leftmost area) in radians
    max_angle = atan2(x_center,focal_length)
    face_detected_on_prev_frame = False
    previous_angle = 0

    LOAD_SOUND = ('G5e', 'f5e', 'd5e', 'A5e', 'g5e', 'E5e', 'g5e', 'C6e')
    BUZZER_GPIO = 22


    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera,\
                        Leds() as leds:
        leds.update(Leds.privacy_on())
        myCorrectionMin = 0.2
        myCorrectionMax = 0.2
        maxPW = (2.0 + myCorrectionMax) / 1000   # servo max pulse width in seconds
        minPW = (1.0 - myCorrectionMin) / 1000   # servo min pulse width in seconds
        camera.start_preview()
        tone_player = TonePlayer(BUZZER_GPIO, bpm=70)
        tone_player.play(*LOAD_SOUND)
        #servo = AngularServo(PIN_A, min_pulse_width=minPW, max_pulse_width=maxPW)
        servo = AngularServo(PIN_A, max_pulse_width = maxPW)
        #servo = AngularServo(PIN_A)

        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for i, result in enumerate(inference.run()):
                if i == args.num_frames:
                    break
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box), fill=0)
                annotator.update()
                print('Iteration #%d: num_faces=%d' % (i, len(faces)))

                if faces:
                    previous_angle = 0
                    leds.update(Leds.rgb_on(Color.BLUE))
                    if face_detected_on_prev_frame:
                        # 'face' here is the last detection from the annotation loop above
                        angle, distance = face_data(face)
                        #if angle < min_angle:
                        #    angle = min_angle
                        #if angle > max_angle:
                        #    angle = max_angle
                        servo.angle = angle*(-100)
                        previous_angle = angle*(-100)
                        print('Angle:' + str(angle))
                        sleep(.05)
                    face_detected_on_prev_frame = True
                else:
                    leds.update(Leds.rgb_on(Color.RED))
                    if not face_detected_on_prev_frame:
                        servo.angle = previous_angle
                        sleep(.05)
                    face_detected_on_prev_frame = False
        camera.stop_preview()
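
face_data() above is a standard pinhole-camera estimate: the bearing to the face is atan2(x_mean - x_center, focal_length), and by similar triangles the distance is focal_length * real_face_width_inch / width. A quick standalone check using the calibration values from this example and a made-up detection:

from math import atan2, degrees

focal_length = 1320             # px, calibrated for 1640 x 1232 frames (value from the example above)
real_face_width_inch = 11
x_center = 1640 // 2

# Hypothetical detection: a face 200 px wide whose center sits 300 px right of the image center.
width = 200
x_mean = x_center + 300

angle = atan2(x_mean - x_center, focal_length)            # ~0.223 rad (~12.8 degrees)
distance = focal_length * real_face_width_inch / width    # ~72.6 inches
print('angle=%.1f deg, distance=%.1f in' % (degrees(angle), distance))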
Code example #5
def main():
    model_path = '/opt/aiy/models/retrained_graph.binaryproto'
    #model_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    #label_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet_labels.txt'
    model_path = '/opt/aiy/models/rg_v3_new.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    input_height = 160
    input_width = 160
    input_layer = 'input'
    output_layer = 'final_result'
    threshold = 0.8
    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, input_height, input_width, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(model_path))
    labels = read_labels(label_path)
    new_labels = []
    for eachLabel in labels:
        if len(eachLabel)>1:
            new_labels.append(eachLabel)
    labels = new_labels
    #print(labels)
    s = xmlrpc.client.ServerProxy("http://aiy.mdzz.info:8000/")
    player = TonePlayer(BUZZER_GPIO, 10)
    player.play(*MODEL_LOAD_SOUND)
    while True:
        while True:
            if s.camera() == 1:
                print('vision kit is woken up')
                with Leds() as leds:
                    leds.pattern = Pattern.blink(100)
                    leds.update(Leds.rgb_pattern(Color.RED))
                    time.sleep(2.0)
                start_time = round(time.time())
                break
            time.sleep(0.2)
            print('no signal, sleeping...')

        with PiCamera() as camera:
            # Configure camera
            camera.sensor_mode = 4
            camera.resolution = (1664, 1232)  # Full frame, 4:3 aspect (Camera v2)
            camera.framerate = 30
            camera.start_preview()
            while True:
                # Do inference on VisionBonnet
                #print('Start capturing')
                with CameraInference(face_detection.model()) as inference:
                    for result in inference.run():
                        #print(type(result))
                        faces = face_detection.get_faces(result)
                        if len(faces) >= 1:
                            #print('camera captures...')
                            extension = '.jpg'
                            filename = time.strftime('%Y-%m-%d %H:%M:%S') + extension
                            camera.capture(filename)
                            image_npp = np.empty((1664 * 1232 * 3,), dtype=np.uint8)
                            camera.capture(image_npp, 'rgb')
                            image_npp = image_npp.reshape((1232, 1664, 3))
                            image_npp = image_npp[:1232, :1640, :]
                            # image = Image.open('jj.jpg')
                            # draw = ImageDraw.Draw(image)
                            faces_data = []
                            faces_cropped = []
                            for i, face in enumerate(faces):
                                # print('Face #%d: %s' % (i, face))
                                x, y, w, h = face.bounding_box
                                #print(x,y,w,h)
                                w_rm = int(0.3 * w / 2)
                                face_cropped = crop_np((x, y, w, h), w_rm, image_npp)
                                if face_cropped is None:
                                    continue
                                # faces_data.append(image[y: y + h, x + w_rm: x + w - w_rm])
                                # image[y: y + h, x + w_rm: x + w - w_rm].save('1.jpg')
                                face_cropped.save('face_cropped_'+str(i)+'.jpg')
                                faces_cropped.append(face_cropped)
                                #break
                            break
                        # else:
                        #     tt = round(time.time()) - start_time
                        #     if tt > 10:
                        #         break
                    #print('face cutting finishes')

                #print(type(faces_cropped), len(faces_cropped))
                player.play(*BEEP_SOUND)
                flag = 0
                for eachFace in faces_cropped:
                    #print(type(eachFace))
                    if eachFace is None: flag = 1
                if (len(faces_cropped)) <= 0: flag = 1
                if flag == 1: continue
                with ImageInference(model) as img_inference:
                #with CameraInference(model) as img_inference:
                    print('Entering classify_hand_gestures()')
                    output = classify_hand_gestures(img_inference, faces_cropped, model=model, labels=labels,
                                                    output_layer=output_layer, threshold=threshold)
                #print(output)
                if (output == 3):
                    player.play(*JOY_SOUND)
                    print('Yani face detected')
                    print(s.result("Owner", filename))
                else:
                    player.play(*SAD_SOUND)
                    print('Suspicious face detected')
                    print(s.result("Unknown Face", filename))
                upload(filename)
                # Stop preview #
                #break
                while s.camera() == 0:
                    print('sleeping')
                    time.sleep(.2)
                print('Woken up')
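
The crop_np() helper called above is not shown in this example. Judging from the commented-out slice image[y: y + h, x + w_rm: x + w - w_rm] and the .save() call on its result, it plausibly cuts the face out of the RGB array with a horizontal margin removed and returns a PIL image. A guess at its shape, not the project's actual implementation:

from PIL import Image

def crop_np(bounding_box, w_rm, image_np):
    # Hypothetical reconstruction: crop the face region from the RGB numpy
    # frame, trimming w_rm pixels from each horizontal edge, and return a
    # PIL image (or None when the crop is empty).
    x, y, w, h = (int(round(v)) for v in bounding_box)
    crop = image_np[max(y, 0):y + h, max(x + w_rm, 0):x + w - w_rm]
    if crop.size == 0:
        return None
    return Image.fromarray(crop)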
Code example #6
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera,\
                        Leds() as leds:
        leds.update(Leds.privacy_on())
        leds.update(Leds.rgb_on(Color.BLUE))
        camera.start_preview()
        tone_player = TonePlayer(BUZZER_GPIO, bpm=70)
        tone_player.play(*LOAD_SOUND)

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(face_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                faces = face_detection.get_faces(result)
                annotator.clear()
                for face in faces:
                    annotator.bounding_box(transform(face.bounding_box),
                                           fill=0)
                    x, y, width, height = face.bounding_box

                annotator.update()

                if len(faces) >= 1:
                    print(
                        '#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f, x=%.2f, y=%.2f, width=%.2f, height=%.2f'
                        % (inference.count, inference.rate, len(faces),
                           avg_joy_score(faces), x, y, width, height))
                    if x > 0:
                        alpha = x / float(1200)
                    else:
                        alpha = .5
                    try:
                        leds.update(
                            Leds.rgb_on(
                                Color.blend(LEFT_COLOR, Color.GREEN, alpha)))
                    except:
                        pass
                    distance = focal_length * real_face_width_inches / width
                    camera.annotate_text = '%d inches' % distance
                else:
                    pass

        camera.stop_preview()
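
Like the other excerpts, this main() relies on module-level definitions that are not shown: the buzzer pin and load tune, LEFT_COLOR for the LED blend, the distance-estimation constants, and an avg_joy_score() helper. A hedged sketch of what they might look like; the concrete values are assumptions.

# Hypothetical module-level definitions assumed by the excerpt above.
BUZZER_GPIO = 22                       # assumed Vision Bonnet buzzer pin
LOAD_SOUND = ('C5q', 'E5q', 'C6q')     # illustrative tune
LEFT_COLOR = (255, 0, 0)               # illustrative RGB tuple for the Color.blend() call
focal_length = 1320                    # px, same calibration as the face-tracking example above
real_face_width_inches = 11            # assumed real face width (inches) for the distance estimate

def avg_joy_score(faces):
    # Mean joy_score over the detected faces; 0.0 when the list is empty.
    return sum(face.joy_score for face in faces) / len(faces) if faces else 0.0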