Code example #1
File: app.py Project: 26medias/rpi_vision
import cv2
import edgeiq


def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Model:\n{}\n".format(semantic_segmentation.model_id))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            image = cv2.imread(image_path)

            results = semantic_segmentation.segment_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(semantic_segmentation.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Legend:")
            text.append(semantic_segmentation.build_legend())

            mask = semantic_segmentation.build_image_mask(results.class_map)
            blended = edgeiq.blend_images(image, mask, alpha=0.5)

            streamer.send_data(blended, text)
            streamer.wait()

        print("Program Ending")
Code example #2
    # Module-level requirements: edgeiq, imgkit, and PIL's Image
    def semantic_base(self, model, image_array):
        semantic_segmentation = edgeiq.SemanticSegmentation(model)
        semantic_segmentation.load(engine=edgeiq.Engine.DNN)

        # Render the legend HTML to an image file and crop off the whitespace
        legend_html = semantic_segmentation.build_legend()

        config = imgkit.config(
            wkhtmltoimage="wkhtmltopdf/bin/wkhtmltoimage.exe")
        options = {"quiet": ""}
        imgkit.from_string(legend_html,
                           "data/legend.png",
                           config=config,
                           options=options)

        legend_image = Image.open("data/legend.png")
        width, height = legend_image.size
        legend_image.crop(
            (0, 0, int(0.61 * width), height)).save("data/legend.png")

        # Apply the semantic segmentation mask onto the given image
        results = semantic_segmentation.segment_image(image_array)
        mask = semantic_segmentation.build_image_mask(results.class_map)
        image = edgeiq.blend_images(image_array, mask, 0.5)

        return image, results
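
A hypothetical caller for semantic_base, for context; the wrapper class name and image path below are illustrative assumptions, not part of the original project:

import cv2

app = SegmentationApp()  # hypothetical class that defines semantic_base
image_array = cv2.imread("images/example.jpg")  # assumed input path
blended, results = app.semantic_base("alwaysai/enet", image_array)
print("Inference time: {:1.3f} s".format(results.duration))
cv2.imwrite("data/output.png", blended)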
Code example #3
import time

import edgeiq


def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
        "alwaysai/fcn_resnet18_pascal_voc_512x320")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA,
                               accelerator=edgeiq.Accelerator.NVIDIA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    class_list = ['bottle']

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Detection loop
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=320)
                results = semantic_segmentation.segment_image(frame)

                object_map = semantic_segmentation.build_object_map(
                    results.class_map, class_list)

                object_mask = semantic_segmentation.build_image_mask(
                    object_map)

                # Optionally turn the black background white; requires numpy
                # imported as np:
                # object_mask[np.where((object_mask == [0, 0, 0]).all(axis=2))] = [255, 255, 255]

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                blended = edgeiq.blend_images(frame, object_mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Code example #4
import time

import edgeiq


def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        # play_realtime simulates a live camera feed from the video file
        with edgeiq.FileVideoStream('toronto.mp4', play_realtime=True) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow the video stream to warm up
            time.sleep(2.0)
            fps.start()

            # Detection loop
            while True:
                frame = video_stream.read()
                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                mask = semantic_segmentation.build_image_mask(
                    results.class_map)
                blended = edgeiq.blend_images(frame, mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Code example #5
File: app.py Project: johancc/AromaV2
import edgeiq


def runSegmentationModel(segmentator, frame):
    results = segmentator.segment_image(frame)

    color_mask = segmentator.build_image_mask(results.class_map)
    blended_image = edgeiq.blend_images(frame, color_mask, alpha=0.5)
    return blended_image, ""