Code Example #1
import argparse

from PIL import Image, ImageDraw

from aiy.vision.inference import ImageInference
from aiy.vision.models import object_detection


def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input', '-i', dest='input', required=True,
                        help='Input image file.')
    parser.add_argument('--output', '-o', dest='output',
                        help='Output image file with bounding boxes.')
    parser.add_argument('--sparse', '-s', action='store_true', default=False,
                        help='Use sparse tensors.')
    parser.add_argument('--threshold', '-t', type=float, default=0.3,
                        help='Detection probability threshold.')
    args = parser.parse_args()

    with ImageInference(object_detection.model()) as inference:
        image = Image.open(args.input)
        image_center, offset = crop_center(image)

        if args.sparse:
            result = inference.run(image_center,
                                   sparse_configs=object_detection.sparse_configs(args.threshold))
            objects = object_detection.get_objects_sparse(result, offset)
        else:
            result = inference.run(image_center)
            objects = object_detection.get_objects(result, args.threshold, offset)

        for i, obj in enumerate(objects):
            print('Object #%d: %s' % (i, obj))

        if args.output:
            draw = ImageDraw.Draw(image)
            for i, obj in enumerate(objects):
                x, y, width, height = obj.bounding_box
                draw.rectangle((x, y, x + width, y + height), outline='red')
            image.save(args.output)
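This and most of the later snippets rely on imports and a crop_center (or _crop_center) helper that the excerpts do not show. A minimal sketch of the helper, assuming the square center crop used by the AIY Vision Kit samples:

# Square center crop; returns the cropped image plus the (x, y) offset of the
# crop within the original image, which get_objects() uses to map bounding
# boxes back into full-image coordinates.
def crop_center(image):
    width, height = image.size
    size = min(width, height)
    x, y = (width - size) / 2, (height - size) / 2
    return image.crop((x, y, x + size, y + size)), (x, y)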
Code Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        camera.start_preview()
        last_time = datetime.now()
        with CameraInference(object_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                objects = object_detection.get_objects(result)
                #print('#%05d (%5.2f fps): num_objects=%d, objects=%s' %
                #       (inference.count, inference.rate, len(objects), objects))
                if len(objects) > 0:
                    print(f"num_objects={len(objects)}, objects={[objectLabel(obj.kind) for obj in objects]}")
                    if hasPerson(objects):
                        diff_time = datetime.now() - last_time
                        if diff_time.seconds > 3: 
                            print(diff_time)
                            last_time = datetime.now()
                            camera.capture('/home/pi/Pictures/person_%d%02d%02d-%02d%02d%02d.jpg' % 
                                    (last_time.year, last_time.month, last_time.day, last_time.hour, last_time.minute, last_time.second))

        camera.stop_preview()
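objectLabel and hasPerson are project helpers that the excerpt does not show. A plausible sketch, assuming the model's standard kind codes (1 = person, 2 = cat, 3 = dog):

# Hypothetical helpers matching how the snippet uses them.
_KIND_LABELS = {1: 'person', 2: 'cat', 3: 'dog'}

def objectLabel(kind):
    return _KIND_LABELS.get(kind, 'other')

def hasPerson(objects):
    return any(obj.kind == 1 for obj in objects)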
Code Example #3
def main():

    print('Human detection')

    # Turn on the LED so we know the box is ready
    leds.pattern = Pattern.breathe(1000)
    leds.update(Leds.rgb_pattern(RED))

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference runs on.
        camera.sensor_mode = 4
        # Set camera to match
        camera.resolution = (1640, 1232)
        camera.framerate = 30

        with CameraInference(object_detection.model()) as inference:
            for result in inference.run():
                for i, obj in enumerate(
                        object_detection.get_objects(result, 0.3)):
                    if obj.score > 0.7 and obj.kind == 1:  # Person
                        print('Human detected #%d: %s' % (i, str(obj)))
                        x, y, width, height = obj.bounding_box
                        squirt((x + (width / 2) - (1640 / 2)) / 1640)
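This snippet depends on module-level state (leds, RED) and helpers (signal_handler, squirt) defined elsewhere in the project. A hedged sketch of what they might look like; squirt is a placeholder for the project's actuator:

import signal
import sys

from aiy.leds import Leds, Pattern

RED = (0xFF, 0x00, 0x00)
leds = Leds()

def signal_handler(signum, frame):
    # Turn the LED off and exit cleanly on SIGINT/SIGTERM.
    leds.update(Leds.rgb_off())
    sys.exit(0)

def squirt(offset):
    # Hypothetical actuator hook; offset is the person's horizontal distance
    # from the image center, normalized to roughly [-0.5, 0.5].
    print('squirt at offset %+.2f' % offset)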
Code Example #4
 def testDog(self):
     with TestImage('dog.jpg') as image:
         image_center, offset = _crop_center(image)
         with ImageInference(object_detection.model()) as inference:
             objects = object_detection.get_objects(
                 inference.run(image_center), 0.3, offset)
             self.assertEqual(1, len(objects))
             self.assertEqual(object_detection.Object.DOG, objects[0].kind)
             self.assertAlmostEqual(0.914, objects[0].score, delta=0.001)
             self.assertEqual((52, 116, 570, 485), objects[0].bounding_box)
Code Example #5
 def test_detection(self):
     with TestImage(self.image_file) as image:
         image_center, offset = crop_center(image)
         with ImageInference(od.model()) as inference:
             if self.sparse:
                 sparse_configs = od.sparse_configs(threshold=self.THRESHOLD)
                 result = inference.run(image_center, sparse_configs=sparse_configs)
                 objects = od.get_objects_sparse(result, offset)
             else:
                 result = inference.run(image_center)
                 objects = od.get_objects(result, self.THRESHOLD, offset)
             self.check(objects)
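The od name is presumably an import alias for the object detection model module, along the lines of:

from aiy.vision.inference import ImageInference
from aiy.vision.models import object_detection as od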
Code Example #6
 def testCat(self):
     with TestImage('cat.jpg') as image:
         image_center, offset = _crop_center(image)
         with ImageInference(object_detection.model()) as inference:
             objects = object_detection.get_objects(
                 inference.run(image_center), 0.3, offset)
             print(objects[0])
             self.assertEqual(1, len(objects))
             self.assertEqual(object_detection.Object.CAT, objects[0].kind)
             self.assertAlmostEqual(0.672, objects[0].score, delta=0.001)
             self.assertEqual((575, 586, 2187, 1758),
                              objects[0].bounding_box)
Code Example #7
    def write(self, b):
        '''
        Here is where the image data is received and made available at self.output
        '''

        try:
            # b is the raw frame buffer: input_height * input_width * 3 bytes (RGB)
            self.output = np.reshape(
                np.frombuffer(b, dtype=np.uint8),
                (self.args.input_height, self.args.input_width, 3))

            image_center, offset = crop_center(Image.fromarray(self.output))

            if self.args.sparse:
                result = self.inference.run(
                    image_center,
                    sparse_configs=object_detection.sparse_configs(
                        self.args.threshold))
                objects = object_detection.get_objects_sparse(result, offset)
            else:
                result = self.inference.run(image_center)
                objects = object_detection.get_objects(result,
                                                       self.args.threshold,
                                                       offset)

            if self.DEBUG:
                for i, obj in enumerate(objects):
                    x, y, width, height = obj.bounding_box

                    x = x + int(width / 2)
                    y = y + int(height / 2)
                    print(
                        'ImgCap - Object #{}: kind={} score={:.3f} pos(x,y)=({},{})'
                        .format(i, obj.kind, obj.score, x, y))
            if len(objects):
                with self.lock:
                    self.new_obj = True  # indicates a new object is available
                    self.last_objects = deepcopy(objects)
                    self.last_image = np.copy(self.output)

            if self.DEBUG:
                print("ImgCap - Image.shape {}".format(self.output.shape))
                print("ImgCap - Running at {:2.2f} Hz".format(
                    1 / (time.time() - self.prev_time)))

            self.prev_time = time.time()

        except Exception as e:
            print("ImgCap error: {}".format(e))

        # Mimic a file-like object: report the number of bytes consumed.
        return len(b)
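For context, a write() method like this belongs to a custom output object handed to picamera, which calls write(b) once per raw RGB frame. A sketch of the wiring, with hypothetical names for the surrounding class and its constructor (the debug prints suggest the class is called ImgCap):

with picamera.PiCamera(resolution=(1640, 1232), framerate=30) as camera:
    img_cap = ImgCap(args, inference, lock)  # hypothetical constructor
    camera.start_recording(img_cap, format='rgb',
                           resize=(args.input_width, args.input_height))
    camera.wait_recording(30)
    camera.stop_recording()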
Code Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', dest='input', required=True)
    parser.add_argument('--output', '-o', dest='output')
    args = parser.parse_args()

    with ImageInference(object_detection.model()) as inference:
        image = Image.open(args.input)
        image_center, offset = _crop_center(image)
        draw = ImageDraw.Draw(image)
        result = inference.run(image_center)
        for i, obj in enumerate(object_detection.get_objects(result, 0.3, offset)):
            print('Object #%d: %s' % (i, str(obj)))
            x, y, width, height = obj.bounding_box
            draw.rectangle((x, y, x + width, y + height), outline='red')
        if args.output:
            image.save(args.output)
Code Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=30) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
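        # For example, a sensor-space box (820, 616, 410, 308) maps to
        # (160.0, 120.0, 240.0, 180.0) on the 320x240 overlay.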
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        with CameraInference(object_detection.model()) as inference:
            for result in inference.run(args.num_frames):
                objects = object_detection.get_objects(result)
                annotator.clear()
                for obj in objects:
                    rect = transform(obj.bounding_box)
                    annotator.bounding_box(rect, fill=0)
                    loc = (rect[0] + 4, rect[1])
                    annotator.text(loc, objectLabel(obj.kind))
                annotator.update()
                #print('#%05d (%5.2f fps): num_objects=%d, objects=%s' %
                #       (inference.count, inference.rate, len(objects), objects))
                if len(objects) > 0:
                    print(
                        f"num_objects={len(objects)}, objects={[objectLabel(obj.kind) for obj in objects]}"
                    )

        camera.stop_preview()
Code Example #10
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=10) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        def textXYTransform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y)

        with CameraInference(object_detection.model()) as inference:
            for result in inference.run():
                objs = object_detection.get_objects(result, 0.3)
                annotator.clear()
                for obj in objs:
                    # blue for person, green for cat, purple for dog, red for anything else
                    outlineColor = ("blue" if obj.kind == 1 else
                                    "green" if obj.kind == 2 else
                                    "purple" if obj.kind == 3 else "red")
                    print(obj.kind)
                    annotator.bounding_box(transform(obj.bounding_box), fill=0,
                                           outline=outlineColor)
                    annotator.text(
                        textXYTransform(obj.bounding_box),
                        "person" if obj.kind == 1 else "cat" if obj.kind == 2
                        else "dog" if obj.kind == 3 else "other",
                        color=outlineColor)

                annotator.update()

        camera.stop_preview()
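The chained conditionals above work, but a lookup table reads more easily. An equivalent sketch for the loop body:

# Map kind codes to (label, color); anything unknown renders red as "other".
KIND_STYLE = {1: ('person', 'blue'), 2: ('cat', 'green'), 3: ('dog', 'purple')}

label, color = KIND_STYLE.get(obj.kind, ('other', 'red'))
annotator.bounding_box(transform(obj.bounding_box), fill=0, outline=color)
annotator.text(textXYTransform(obj.bounding_box), label, color=color)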
Code Example #11
def main():

    # run forever
    while True:

        # Google object detection code adapted to use an image from the camera
        # instead of one given on the command line
        with ImageInference(object_detection.model()) as inference:

            # take a photo, overwriting the last image
            with picamera.PiCamera() as camera:
                camera.capture('test.jpg')

            # open the captured image and check it for objects
            image = Image.open('test.jpg')

            image_center, offset = _crop_center(image)
            draw = ImageDraw.Draw(image)
            result = inference.run(image_center)

            # draw boxes around any cats, dogs or humans
            count_objects = 0
            for i, obj in enumerate(
                    object_detection.get_objects(result, 0.3, offset)):

                print('Object #%d: %s' % (i, str(obj)))
                x, y, width, height = obj.bounding_box
                draw.rectangle((x, y, x + width, y + height), outline='red')

                # count detections; the image is saved below if any were found
                count_objects += 1

            # save the image with a timestamp if a cat, dog or human is in it
            if count_objects > 0:

                time_string = time.strftime("%m%d-%H%M%S")
                filename = '/home/pi/security_camera/images/' + time_string + '.jpg'
                #print(filename)
                image.save(filename)

            # take a 15 second break between photos
            time.sleep(15)
Code Example #12
File: awscat.py  Project: guoshanglin/Smart-Pet-Care
def recognize(inputfile, outputfile, outputfile_detected):
    threshold = 0.3
    if inputfile is None:
        # camera capture
        with PiCamera() as camera:
            camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
            camera.start_preview()

            while True:
                camera.capture(outputfile)
                image = Image.open(outputfile)
                image_center, offset = crop_center(image)
                draw = ImageDraw.Draw(image)

                is_pet = False
                with ImageInference(object_detection.model()) as inference:
                    result = inference.run(image_center)
                    objects = object_detection.get_objects(result, threshold, offset)
                    for i, obj in enumerate(objects):
                        print('Object #%d kind=%d: %s' % (i, obj.kind, obj))
                        if obj.kind > 1:  # kind 1 is a person; 2 and 3 are cat and dog
                            is_pet = True
                        x0, y0, width, height = obj.bounding_box
                        x1 = x0+width
                        y1 = y0+height
                        d = 5  # frame thickness in pixels
                        draw.rectangle((x0, y0, x0+d, y1), fill='red', outline='red')
                        draw.rectangle((x0, y0, x1, y0+d), fill='red', outline='red')
                        draw.rectangle((x0, y1-d, x1, y1), fill='red', outline='red')
                        draw.rectangle((x1-d, y0, x1, y1), fill='red', outline='red')
                image.save(outputfile)
                time.sleep(1)
                # if a pet was detected, update the pet image in AWS S3
                if is_pet:
                    s3.meta.client.upload_file(outputfile, 'iot6765project', 
                        'webapp/'+outputfile_detected, ExtraArgs={'ACL':'public-read'})
                else:
                    print('No Pet Detected')
                # update result image in AWS S3
                s3.meta.client.upload_file(outputfile, 'iot6765project', 
                    'webapp/'+outputfile, ExtraArgs={'ACL':'public-read'})
            camera.stop_preview()
Code Example #13
def run_inference(run_event,
                  model="face",
                  framerate=15,
                  cammode=5,
                  hres=1640,
                  vres=922,
                  stats=True):
    # See the Raspicam documentation for mode and framerate limits:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # Default to the highest resolution possible at 16:9 aspect ratio

    global socket_connected, time_log

    leds = Leds()

    with PiCamera() as camera, PrivacyLed(leds):
        camera.sensor_mode = cammode
        camera.resolution = (hres, vres)
        camera.framerate = framerate
        camera.video_stabilization = True
        camera.start_preview()  # fullscreen=True)

        def model_selector(argument):
            options = {
                "object": object_detection.model(),
                "face": face_detection.model(),
                "class": image_classification.model()
            }
            return options.get(argument, "nothing")

        tf_model = model_selector(model)

        # this is not needed because the function defaults to "face"
        if tf_model == "nothing":
            print("No tensorflow model or invalid model specified - exiting..")
            camera.stop_preview()
            os._exit(0)
            return

        with CameraInference(tf_model) as inference:
            print("%s model loaded" % model)

            last_time = time()  # measure inference time

            for result in inference.run():

                # exit on shutdown
                if not run_event.is_set():
                    camera.stop_preview()
                    return

                output = ApiObject()

                # handler for the AIY Vision object detection model
                if model == "object":
                    output.threshold = 0.3
                    objects = object_detection.get_objects(
                        result, output.threshold)

                    for obj in objects:
                        # print(object)
                        item = {
                            'name': 'object',
                            'class_name': obj._LABELS[obj.kind],
                            'score': obj.score,
                            'x': obj.bounding_box[0] / capture_width,
                            'y': obj.bounding_box[1] / capture_height,
                            'width': obj.bounding_box[2] / capture_width,
                            'height': obj.bounding_box[3] / capture_height
                        }

                        output.numObjects += 1
                        output.objects.append(item)

                # handler for the AIY Vision face detection model
                elif model == "face":
                    faces = face_detection.get_faces(result)

                    for face in faces:
                        # print(face)
                        item = {
                            'name': 'face',
                            'score': face.face_score,
                            'joy': face.joy_score,
                            'x': face.bounding_box[0] / capture_width,
                            'y': face.bounding_box[1] / capture_height,
                            'width': face.bounding_box[2] / capture_width,
                            'height': face.bounding_box[3] / capture_height,
                        }

                        output.numObjects += 1
                        output.objects.append(item)

                elif model == "class":
                    output.threshold = 0.3
                    classes = image_classification.get_classes(result)

                    s = ""

                    for (obj, prob) in classes:
                        if prob > output.threshold:
                            s += '%s=%1.2f\t|\t' % (obj, prob)

                            item = {
                                'name': 'class',
                                'class_name': obj,
                                'score': prob
                            }

                            output.numObjects += 1
                            output.objects.append(item)

                    # print('%s\r' % s)

                now = time()
                output.timeStamp = now
                output.inferenceTime = (now - last_time)
                last_time = now

                # No need to do anything else if there are no objects
                if output.numObjects > 0:
                    output_json = output.to_json()
                    print(output_json)

                    # Send the json object if there is a socket connection
                    if socket_connected:
                        q.put(output_json)

                # Additional data to measure inference time
                if stats:
                    time_log.append(output.inferenceTime)
                    time_log = time_log[-10:]  # just keep the last 10 times
                    print("Avg inference time: %s" %
                          (sum(time_log) / len(time_log)))
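ApiObject, capture_width/capture_height, and the queue q are defined elsewhere in this project. A minimal sketch of what ApiObject might look like, assuming it simply collects results and serializes its fields:

import json

class ApiObject(object):
    def __init__(self):
        self.objects = []
        self.numObjects = 0

    def to_json(self):
        return json.dumps(self.__dict__)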
Code Example #14
def main():
    parser = argparse.ArgumentParser(
        description='Object detection camera inference example.')
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--num_objects',
                        '-c',
                        type=int,
                        default=3,
                        help='Sets the number of object inferences to print.')
    parser.add_argument('--nopreview',
                        dest='preview',
                        action='store_false',
                        default=True,
                        help='Disable camera preview.')
    args = parser.parse_args()

    #    servo.standup()

    with PiCamera(sensor_mode=4, framerate=30, resolution=(1640, 1232)) as camera, \
         CameraPreview(camera, enabled=args.preview), \
         CameraInference(object_detection.model()) as inference:
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        for result in inference.run(args.num_frames):
            objs = object_detection.get_objects(result,
                                                threshold=0.3,
                                                offset=(0, 0))
            print(objs)
            #            annotator.clear()
            #            for obj in objs:
            #                annotator.bounding_box(transform(obj.bounding_box), fill=0)
            #            annotator.update()
            if objs:
                obj = objs[0]
                print(obj.bounding_box[0])
                x0, y0, width, height = obj.bounding_box
                x = float((x0 + width / 2) - 1640 / 2)  #x range is -820 to 820
                y = float(1232 / 2 -
                          (y0 + height / 2))  #y range is -616 to 616
                print(obj.kind)
                print(obj.score)

                if obj.kind == 1 and obj.score > 0.5:
                    LED.color(0, 0, 255)
                    if -400 < x < 400:
                        t = Thread(target=servo.triwalk, args=(x / 41, ))
                    else:
                        t = Thread(target=servo.rotate, args=(x / 18.222, ))
                        print('rotate!')
                    t.start()
                    time.sleep(.3)

                    #                    servo.triwalk(x/41) #820/20
                    #                    print('xval: %f', x/41)
                    #                    servo.rotate(x/18.222) #820/45
                    #                    print('xval: %f', x/18.222)
                    if -50 < x < 50:
                        LED.color(0, 255, 0)  #Green = I'm aiming at you
                else:
                    LED.color(255, 0, 0)  #Red = where are you?
Code Example #15
def main():
    """Face detection camera inference example."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--sparse',
                        '-s',
                        action='store_true',
                        default=False,
                        help='Use sparse tensors.')
    parser.add_argument('--threshold',
                        '-t',
                        type=float,
                        default=0.3,
                        help='Detection probability threshold.')
    parser.add_argument('--cam_width',
                        type=int,
                        default=1640,
                        help='Camera Width')
    parser.add_argument('--cam_height',
                        type=int,
                        default=1232,
                        help='Camera Height')
    parser.add_argument('--fps',
                        type=int,
                        default=30,
                        help='Camera Frames Per Second')
    args = parser.parse_args()

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference runs on.
    with PiCamera(sensor_mode=4,
                  resolution=(args.cam_width, args.cam_height),
                  framerate=args.fps) as camera:
        camera.start_preview()

        width = args.cam_width
        height = args.cam_height

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / width
        scale_y = 240 / height

        size = min(width, height)
        offset = (((width - size) / 2), ((height - size) / 2))

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        while True:
            # Note: with --num_frames unset the face loop below never ends, so
            # the object detection loop is only reached when -n is given.
            with CameraInference(face_detection.model()) as inference, \
                 CameraInference(object_detection.model()) as inference2:

                for result in inference.run(args.num_frames):
                    faces = face_detection.get_faces(result)
                    annotator.clear()
                    for face in faces:
                        annotator.bounding_box(transform(face.bounding_box),
                                               fill=0)
                    #annotator.update()

                    print(
                        '#%05d (%5.2f fps): num_faces=%d, avg_joy_score=%.2f' %
                        (inference.count, inference.rate, len(faces),
                         avg_joy_score(faces)))

                for result in inference2.run(args.num_frames):
                    objects = object_detection.get_objects(
                        result, args.threshold, offset)

                    #annotator.clear()
                    for i, obj in enumerate(objects):
                        annotator.bounding_box(transform(obj.bounding_box),
                                               fill=0)
                        print('Object #%d: %s' % (i, obj))

                    annotator.update()

        camera.stop_preview()
Code Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-f',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')

    parser.add_argument(
        '--num_pics',
        '-p',
        type=int,
        dest='num_pics',
        default=-1,
        help='Sets the max number of pictures to take, otherwise runs forever.'
    )

    args = parser.parse_args()

    with PiCamera() as camera, PrivacyLed(Leds()):
        # See the Raspicam documentation for mode and framerate limits:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # Set to the highest resolution possible at 16:9 aspect ratio
        camera.sensor_mode = 5
        camera.resolution = (1640, 922)
        camera.start_preview(fullscreen=True)

        with CameraInference(object_detection.model()) as inference:
            print("Camera inference started")
            player.play(*MODEL_LOAD_SOUND)

            last_time = time()
            pics = 0
            save_pic = False

            for f, result in enumerate(inference.run()):

                for i, obj in enumerate(
                        object_detection.get_objects(result, score_threshold)):

                    print('%s Object #%d: %s' %
                          (strftime("%Y-%m-%d-%H:%M:%S"), i, str(obj)))
                    x, y, width, height = obj.bounding_box
                    # if obj.label == 'CAT':
                    if obj.label == '2m':
                        save_pic = True
                        player.play(*BEEP_SOUND)

                # save the image if there was 1 or more cats detected
                if save_pic:
                    # save the clean image
                    camera.capture("images/image_%s.jpg" %
                                   strftime("%Y%m%d-%H%M%S"))
                    pics += 1
                    save_pic = False

                if f == args.num_frames or pics == args.num_pics:
                    break

                now = time()
                duration = (now - last_time)

                # The Movidius chip runs at 35 ms per image.
                # Then there is some additional overhead for the object detector to
                # interpret the result and to save the image. If the total process
                # time exceeds 500 ms it could be a sign the CPU is getting overrun
                if duration > 0.50:
                    print(
                        "Total process time: %s seconds. Bonnet inference time: %s ms "
                        % (duration, result.duration_ms))

                last_time = now

        camera.stop_preview()
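score_threshold, player, and the sound tuples are module-level names the excerpt does not show. A hedged sketch, assuming the aiy.toneplayer API and placeholder values:

from aiy.toneplayer import TonePlayer

score_threshold = 0.3                      # placeholder detection threshold
MODEL_LOAD_SOUND = ('C6q', 'E6q', 'G6q')   # placeholder jingle
BEEP_SOUND = ('E6q',)
player = TonePlayer(gpio=22, bpm=10)       # buzzer pin on the Vision Bonnet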
Code Example #17
def main():
    """object detection camera inference example."""
    parser = argparse.ArgumentParser()
    countdown = 20
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    args = parser.parse_args()

    servoX = AngularServo(PIN_B)
    servoY = AngularServo(PIN_A)
    relay = DigitalOutputDevice(PIN_C, active_high=True, initial_value=True)
    #relay.blink(n=1)
    relay.blink(on_time=0.05, n=1)

    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference runs on.
    with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                  framerate=10) as camera:
        camera.start_preview()

        # Annotator renders in software so use a smaller size and scale results
        # for increased performance.
        annotator = Annotator(camera, dimensions=(320, 240))
        scale_x = 320 / 1640
        scale_y = 240 / 1232

        # Incoming boxes are of the form (x, y, width, height). Scale and
        # transform to the form (x1, y1, x2, y2).
        def transform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y, scale_x * (x + width),
                    scale_y * (y + height))

        def textXYTransform(bounding_box):
            x, y, width, height = bounding_box
            return (scale_x * x, scale_y * y)

        with CameraInference(object_detection.model()) as inference:
            for result in inference.run():
                objs = object_detection.get_objects(result, 0.3)
                annotator.clear()
                for obj in objs:
                    # blue for person, green for cat, purple for dog, red for anything else
                    outlineColor = ("blue" if obj.kind == 1 else
                                    "green" if obj.kind == 2 else
                                    "purple" if obj.kind == 3 else "red")
                    print(obj.kind)
                    tBoundingBox = transform(obj.bounding_box)
                    annotator.bounding_box(tBoundingBox,
                                           fill=0,
                                           outline=outlineColor)
                    annotator.text(
                        textXYTransform(obj.bounding_box),
                        "person" if obj.kind == 1 else "cat" if obj.kind == 2
                        else "dog" if obj.kind == 3 else "other",
                        color=outlineColor)

                    if len(objs) == 1:
                        x1, y1, x2, y2 = transform(obj.bounding_box)
                        midX = ((x2 - x1) / 2) + x1
                        midY = ((y2 - y1) / 2) + y1
                        servoPosX = remap(midX, 0, 320, 75, -75)
                        servoPosY = remap(midY, 0, 240, -90,
                                          80)  # 90 is low, -90 is high
                        servoPosX = min(90, servoPosX)
                        servoPosX = max(-90, servoPosX)
                        servoPosY = min(90, servoPosY)
                        servoPosY = max(-90, servoPosY)
                        print("x", midX, servoPosX)
                        print("y", midY, servoPosY)
                        servoX.angle = servoPosX
                        servoY.angle = servoPosY

                        countdown -= 1

                        if countdown == -1:
                            # squirt
                            annotator.text((midX, midY),
                                           "Squirt!!",
                                           color=outlineColor)
                            relay.blink(on_time=0.5, n=1)
                            countdown = 20
                        else:
                            annotator.text((midX, midY),
                                           str(countdown),
                                           color=outlineColor)
                if len(objs) == 0:
                    countdown = 20
                annotator.update()

        camera.stop_preview()
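remap is a project helper that the excerpt does not show; a likely definition, linearly mapping a value from one range onto another:

def remap(value, in_min, in_max, out_min, out_max):
    # Linear interpolation from [in_min, in_max] onto [out_min, out_max].
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

With this definition, remap(midX, 0, 320, 75, -75) sends a target at the left edge of the 320-pixel overlay to +75 degrees and one at the right edge to -75, matching how the servo angles are used above.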
Code Example #18
def process_inference(model, result, params):

    output = ApiObject()

    # handler for the AIY Vision object detection model
    if model == "object":
        output.threshold = 0.3
        objects = object_detection.get_objects(result, output.threshold)

        for obj in objects:
            # print(object)
            item = {
                'name': 'object',
                'class_name': obj._LABELS[obj.kind],
                'score': obj.score,
                'x': obj.bounding_box[0] / params['width'],
                'y': obj.bounding_box[1] / params['height'],
                'width': obj.bounding_box[2] / params['width'],
                'height': obj.bounding_box[3] / params['height']
            }

            output.numObjects += 1
            output.objects.append(item)

    # handler for the AIY Vision face detection model
    elif model == "face":
        faces = face_detection.get_faces(result)

        for face in faces:
            # print(face)
            item = {
                'name': 'face',
                'score': face.face_score,
                'joy': face.joy_score,
                'x': face.bounding_box[0] / params['width'],
                'y': face.bounding_box[1] / params['height'],
                'width': face.bounding_box[2] / params['width'],
                'height': face.bounding_box[3] / params['height']
            }

            output.numObjects += 1
            output.objects.append(item)

    elif model == "class":
        output.threshold = 0.3
        classes = image_classification.get_classes(result)

        s = ""

        for (obj, prob) in classes:
            if prob > output.threshold:
                s += '%s=%1.2f\t|\t' % (obj, prob)

                item = {'name': 'class', 'class_name': obj, 'score': prob}

                output.numObjects += 1
                output.objects.append(item)

        # print('%s\r' % s)

    return output