Example #1

import argparse
import os
import random
import time
from contextlib import ExitStack

from PIL import Image, ImageDraw
from picamera import PiCamera

from aiy.vision import inference
from aiy.vision.models import utils
from aiy.vision.streaming import svg
from aiy.vision.streaming.server import StreamingServer

# NOTE: The imports above are assumed from the AIY Vision Kit project layout.
# Helpers referenced below (LOCATIONS, SVG_SCALE_FACTOR, read_labels,
# draw_rectangle, _make_filename, commit_data_to_long_term, get_cropped_images,
# process, get_message) are defined elsewhere in the original module.

def svg_overlay(faces, frame_size, joy_score):
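    """Return an SVG overlay (as a string) with a box and joy score per face."""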
    width, height = frame_size
    doc = svg.Svg(width=width, height=height)

    for face in faces:
        x, y, w, h = face.bounding_box
        doc.add(
            svg.Rect(x=int(x),
                     y=int(y),
                     width=int(w),
                     height=int(h),
                     rx=10,
                     ry=10,
                     fill_opacity=0.3 * face.face_score,
                     style='fill:red;stroke:white;stroke-width:4px'))

        doc.add(
            svg.Text('Joy: %.2f' % face.joy_score,
                     x=x,
                     y=y - 10,
                     fill='red',
                     font_size=30))

    doc.add(
        svg.Text('Faces: %d Avg. joy: %.2f' % (len(faces), joy_score),
                 x=10,
                 y=50,
                 fill='red',
                 font_size=40))
    return str(doc)
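
# Note: svg_overlay() is not called from main() below. It mirrors the AIY
# joy-detector overlay and was presumably driven from a separate face-inference
# loop, e.g. (hypothetical): server.send_overlay(svg_overlay(faces, (820, 616), joy))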


def plot_svg_chart(svg_doc, location, data):
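    """Add a bar chart of the last 20 samples for a location to svg_doc."""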
    x, y, _, _ = LOCATIONS[location]  # Only the top-left corner is needed here.
    # Keep the 20 most recent timestamps recorded for this location.
    plotted_data = sorted(data[location].keys())[-20:]

    for pos, sample_time in enumerate(plotted_data):
        # Each sample becomes a 4px-wide bar on an 8px pitch, anchored at the
        # location's top-left corner; bar height scales with the stored value.
        start_x = (x + 8 * pos) * SVG_SCALE_FACTOR
        width = 4 * SVG_SCALE_FACTOR
        start_y = y - 20 * data[location][sample_time]
        height = (y - start_y) * SVG_SCALE_FACTOR
        start_y = start_y * SVG_SCALE_FACTOR

        # The highest activity level (2) is drawn red, anything lower green.
        color = 'red' if data[location][sample_time] == 2 else 'green'

        svg_doc.add(
            svg.Rect(x=int(start_x),
                     y=int(start_y),
                     width=int(width),
                     height=int(height),
                     fill_opacity=0.3,
                     style='fill:' + color + ';stroke:' + color +
                     ';stroke-width:4px'))
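
# plot_svg_chart() is likewise not called directly in this listing; it is
# presumably invoked from commit_data_to_long_term() (defined elsewhere) against
# the svg_doc that main() passes through data_generator.send().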


def main():
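    """Parse arguments, set up the models and camera, and run the monitoring loop."""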
    parser = argparse.ArgumentParser()
    parser.add_argument('--dog_park_model_path',
                        help='Path to the model file for the dog park.')
    parser.add_argument('--vb1_model_path',
                        help='Path to the model file for volleyball court 1.')
    parser.add_argument('--vb2_model_path',
                        help='Path to the model file for volleyball court 2.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_mean',
                        type=float,
                        default=128.0,
                        help='Input mean.')
    parser.add_argument('--input_std',
                        type=float,
                        default=128.0,
                        help='Input std.')
    parser.add_argument('--input_depth',
                        type=int,
                        default=3,
                        help='Input depth.')
    parser.add_argument('--enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate',
                        type=int,
                        default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name',
                        default='',
                        help='Streaming server mDNS name')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help='Enables camera preview in addition to printing results to the terminal.')
    parser.add_argument(
        '--time_interval',
        type=int,
        default=10,
        help='Time interval at which to store data in seconds.')
    parser.add_argument(
        '--gather_data',
        action='store_true',
        default=False,
        help='Also save images according to the assigned category.')
    parser.add_argument(
        '--timelapse',
        action='store_true',
        default=False,
        help='Also save timelapse frames of the entire scene every 120 seconds.')
    parser.add_argument('--image_folder',
                        default='/home/pi/Pictures/Data',
                        help='Folder to save captured images')
    args = parser.parse_args()
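
    # Hypothetical invocation (script name and paths are illustrative only):
    #   python3 monitor.py --dog_park_model_path ~/models/dog_park.binaryproto \
    #       --label_path ~/models/labels.txt --enable_streaming --preview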

    labels = read_labels(args.label_path)

    # At least one model needs to be passed in.
    assert args.dog_park_model_path or args.vb1_model_path or args.vb2_model_path, \
        'At least one model path is required.'

    # Check that the expected per-location subfolders exist in the image folder.
    if args.gather_data:
        expected_subfolders = ['dog_park', 'court_one', 'court_two']
        subfolders = os.listdir(args.image_folder)
        for folder in expected_subfolders:
            assert folder in subfolders, 'Missing subfolder: ' + folder

    # ExitStack lets us enter a variable number of contexts (one per model,
    # plus the camera and an optional streaming server) and unwind them together.
    with ExitStack() as stack:

        dog_park = {
            'location_name': 'dog_park',
            'path': args.dog_park_model_path,
        } if args.dog_park_model_path else None
        vb1 = {
            'location_name': 'court_one',
            'path': args.vb1_model_path,
        } if args.vb1_model_path else None
        vb2 = {
            'location_name': 'court_two',
            'path': args.vb2_model_path,
        } if args.vb2_model_path else None

        # Keep only the models whose paths were actually passed in.
        models = [model for model in (dog_park, vb1, vb2) if model]

        # Initialize models and add them to the context
        for model in models:
            print('Initializing {model_name}...'.format(
                model_name=model["location_name"]))
            descriptor = inference.ModelDescriptor(
                name='mobilenet_based_classifier',
                input_shape=(1, 160, 160, args.input_depth),
                input_normalizer=(args.input_mean, args.input_std),
                compute_graph=utils.load_compute_graph(model['path']))

            model['descriptor'] = descriptor

        # Enter an inference context for each configured model.
        for model in models:
            model['image_inference'] = stack.enter_context(
                inference.ImageInference(model['descriptor']))

        # sensor_mode 4 is the V2 camera's full-field-of-view binned mode
        # (1640x1232); 820x616 is exactly half of that.
        camera = stack.enter_context(
            PiCamera(sensor_mode=4, resolution=(820, 616), framerate=30))

        server = None
        if args.enable_streaming:
            server = stack.enter_context(
                StreamingServer(camera,
                                bitrate=args.streaming_bitrate,
                                mdns_name=args.mdns_name))

        if args.preview:
            # Draw static bounding boxes around the monitored locations on a
            # blank image matching the preview resolution.
            img = Image.new('RGB', (820, 616))
            draw = ImageDraw.Draw(img)

            for location in LOCATIONS.values():
                x1, y1, x2, y2 = location
                draw_rectangle(draw, x1, y1, x2, y2, 3, outline='white')

            # Pad to the overlay buffer size picamera requires: width rounded up
            # to a multiple of 32, height to a multiple of 16.
            pad = Image.new('RGB', (
                ((img.size[0] + 31) // 32) * 32,
                ((img.size[1] + 15) // 16) * 16,
            ))
            # Paste the original image into the padded one
            pad.paste(img, (0, 0))

            # Add the overlay with the padded image as the source,
            # but the original image's dimensions
            camera.add_overlay(pad.tobytes(), alpha=64, layer=3, size=img.size)

            camera.start_preview()

        data_filename = _make_filename(args.image_folder, 'data', None, 'json')
        data_generator = commit_data_to_long_term(args.time_interval,
                                                  data_filename)
        # Prime the coroutine so it is ready to receive values via send().
        data_generator.send(None)

        # Capture one picture of the entire scene each time the program starts.
        time.sleep(2)
        date = time.strftime('%Y-%m-%d')
        scene_filename = _make_filename(args.image_folder, date, None)
        camera.capture(scene_filename)

        # Draw bounding box on image showing the crop locations
        with Image.open(scene_filename) as scene:
            draw = ImageDraw.Draw(scene)

            for location in LOCATIONS.values():
                x1, y1, x2, y2 = location
                draw_rectangle(draw, x1, y1, x2, y2, 3, outline='white')

            scene.save(scene_filename)

        # Main loop: continuously process cropped images of each location.
        for cropped_images in get_cropped_images(camera, args.timelapse):

            svg_doc = None
            if args.enable_streaming:
                width = 820 * SVG_SCALE_FACTOR
                height = 616 * SVG_SCALE_FACTOR
                svg_doc = svg.Svg(width=width, height=height)

                for location in LOCATIONS.values():
                    # Scale each location's box from camera pixels to SVG units.
                    x, y, x2, y2 = location
                    w = (x2 - x) * SVG_SCALE_FACTOR
                    h = (y2 - y) * SVG_SCALE_FACTOR
                    x = x * SVG_SCALE_FACTOR
                    y = y * SVG_SCALE_FACTOR
                    svg_doc.add(
                        svg.Rect(
                            x=int(x),
                            y=int(y),
                            width=int(w),
                            height=int(h),
                            rx=10,
                            ry=10,
                            fill_opacity=0.3,
                            style='fill:none;stroke:white;stroke-width:4px'))

            # Run each model's inference on its own cropped region.
            for model in models:
                location_name = model['location_name']
                image_inference = model['image_inference']

                cropped_image = cropped_images[location_name]

                # TODO: (Image comparison) If False, return no activity.
                if cropped_image:
                    # Run image inference on the cropped image.
                    result = image_inference.run(cropped_image)
                    processed_result = process(result, labels, 'final_result')
                    data_generator.send(
                        (location_name, processed_result, svg_doc))
                    message = get_message(processed_result)

                    # Print the message
                    # print('\n')
                    # print('{location_name}:'.format(location_name=location_name))
                    # print(message)
                else:
                    # No crop available: report a fabricated 'inactive' result.
                    processed_result = [('inactive', 1.00), ('active', 0.00)]
                    data_generator.send(
                        (location_name, processed_result, svg_doc))

                label = processed_result[0][0]
                timestamp = time.strftime('%Y-%m-%d_%H.%M.%S')
                # print(timestamp)
                # print('\n')

                if args.gather_data and cropped_image:
                    # Sample images for training data. The active rule keeps
                    # ~10% of frames; earlier strategies (1% of 'no activity'
                    # frames, 0.1% of all frames) are kept for reference:
                    #   (label == 'no activity' and random.random() > 0.99)
                    #   random.random() > 0.999
                    #   (location_name != 'dog_park' and random.random() > 0.99)
                    if random.random() > 0.9:
                        subdir = '{location_name}/{label}'.format(
                            location_name=location_name, label=label)
                        filename = _make_filename(args.image_folder, timestamp,
                                                  subdir)
                        cropped_image.save(filename)

                # if svg_doc:
                #     ## Plot points out
                #     ## 160 x 80 grid
                #     ## 16px width
                #     ## 20, 40, 60 for 0, 1, 2
                #     lines = message.split('\n')
                #     y_correction = len(lines) * 20
                #     for line in lines:
                #         svg_doc.add(svg.Text(line,
                #         x=(LOCATIONS[location_name][0]) * SVG_SCALE_FACTOR,
                #         y=(LOCATIONS[location_name][1] - y_correction) * SVG_SCALE_FACTOR,
                #         fill='white', font_size=20))

                #         y_correction = y_correction - 20

                # TODO: Figure out how to annotate at specific locations.
                # if args.preview:
                #     camera.annotate_foreground = Color('black')
                #     camera.annotate_background = Color('white')
                #     # PiCamera text annotation only supports ascii.
                #     camera.annotate_text = '\n %s' % message.encode(
                #         'ascii', 'backslashreplace').decode('ascii')

            if server:
                # Push the assembled SVG overlay to connected stream clients.
                server.send_overlay(str(svg_doc))

        if args.preview:
            camera.stop_preview()
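

# Entry point (assumed; the original file may define this elsewhere).
if __name__ == '__main__':
    main()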