Example #1
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Model:\n{}\n".format(semantic_segmentation.model_id))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    image_paths = sorted(edgeiq.list_images("images/"))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            image = cv2.imread(image_path)

            results = semantic_segmentation.segment_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(semantic_segmentation.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Legend:")
            text.append(semantic_segmentation.build_legend())

            mask = semantic_segmentation.build_image_mask(results.class_map)
            blended = edgeiq.blend_images(image, mask, alpha=0.5)

            streamer.send_data(blended, text)
            streamer.wait()

        print("Program Ending")
Example #2
    def semantic_base(self, model, image_array):
        semantic_segmentation = edgeiq.SemanticSegmentation(model)
        semantic_segmentation.load(engine=edgeiq.Engine.DNN)

        # Build legend into image, save it to a file and crop the whitespace
        legend_html = semantic_segmentation.build_legend()

        config = imgkit.config(
            wkhtmltoimage="wkhtmltopdf/bin/wkhtmltoimage.exe")
        options = {"quiet": ""}
        imgkit.from_string(legend_html,
                           "data/legend.png",
                           config=config,
                           options=options)

        legend_image = Image.open("data/legend.png")
        width, height = legend_image.size
        legend_image.crop((0, 0, int(0.61 * width), height)).save("data/legend.png")

        # Apply the semantic segmentation mask onto the given image
        results = semantic_segmentation.segment_image(image_array)
        mask = semantic_segmentation.build_image_mask(results.class_map)
        image = edgeiq.blend_images(image_array, mask, 0.5)

        return image, results
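Beyond edgeiq, this method depends on imgkit (an HTML-to-image wrapper around wkhtmltoimage) and Pillow. A sketch of the imports it assumes:

import edgeiq
import imgkit
from PIL import Image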
Example #3
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
        "alwaysai/fcn_resnet18_pascal_voc_512x320")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA,
                               accelerator=edgeiq.Accelerator.NVIDIA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    class_list = ['bottle']

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # detection loop
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=320)
                results = semantic_segmentation.segment_image(frame)

                object_map = semantic_segmentation.build_object_map(
                    results.class_map, class_list)

                object_mask = semantic_segmentation.build_image_mask(
                    object_map)

                # object_mask[np.where((object_mask==[0,0,0]).all(axis=2))] = [255,255,255]
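                # (If uncommented, the line above recolors the mask's black
                # "background" pixels to white: the test
                # (object_mask == [0, 0, 0]).all(axis=2) is True wherever all
                # three BGR channels are zero. It requires numpy imported as np.)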

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                blended = edgeiq.blend_images(frame, object_mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
            "alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)
    print('Outside of try')
    try:
        with edgeiq.FileVideoStream("driving_downtown.mp4", play_realtime=False) as video_stream, \
            edgeiq.VideoWriter(output_path="output.avi") as video_writer:
            print('Inside of try')

            if video_stream is None:
                print('no video stream!')
            else:
                print('video stream available of type {}'.format(type(video_stream)))

            if video_writer is None:
                print('no video writer!')
            else:
                print('video writer available of type {}'.format(type(video_writer)))

            more = getattr(video_stream, "more", None)
            if callable(more):
                print('video_stream has an attribute called more')
            else:
                print('video_stream has no MORE function!?')
            if video_stream.more():
                print('At least one video frame available before we begin')

            while video_stream.more():
                image = video_stream.read()
                if image is None:
                    print('no image')
                    break

                results = semantic_segmentation.segment_image(image)
                mask = semantic_segmentation.build_image_mask(results.class_map)
                blended = edgeiq.blend_images(image, mask, alpha=0.5)
                # write the blended frame to output.avi
                video_writer.write_frame(blended)

    finally:
        print("Program Ending")
Example #5
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.FileVideoStream('toronto.mp4', play_realtime=True) as video_stream, \
                edgeiq.Streamer() as streamer:  # play_realtime simulates video feed from a camera
            # Allow the stream to warm up
            time.sleep(2.0)
            fps.start()

            # detection loop
            while True:
                frame = video_stream.read()
                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                mask = semantic_segmentation.build_image_mask(
                    results.class_map)
                blended = edgeiq.blend_images(frame, mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #6
def main():
    # load the configuration data from config.json
    config = load_json(CONFIG_FILE)
    labels_to_mask = config.get(TARGETS)
    model_id = config.get(MODEL_ID)
    background_image = config.get(BACKGROUND_IMAGES) + config.get(IMAGE)
    blur = config.get(BLUR)
    use_background_image = config.get(USE_BACKGROUND_IMAGE)
    blur_level = config.get(BLUR_LEVEL)

    semantic_segmentation = edgeiq.SemanticSegmentation(model_id)
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Model:\n{}\n".format(semantic_segmentation.model_id))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # detection loop
            while True:
                # read in the video stream
                frame = video_stream.read()

                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                # color every class black; target labels are marked white below
                semantic_segmentation.colors = [
                    (0, 0, 0) for _ in semantic_segmentation.colors
                ]

                # color each target label white in the mask palette
                for label in labels_to_mask:
                    index = semantic_segmentation.labels.index(label)
                    semantic_segmentation.colors[index] = (255, 255, 255)

                # build the color mask
                mask = semantic_segmentation.build_image_mask(
                    results.class_map)

                # Enlarge the mask
                dilatation_size = 15
                # Options: cv.MORPH_RECT, cv.MORPH_CROSS, cv.MORPH_ELLIPSE
                dilatation_type = cv.MORPH_CROSS
                element = cv.getStructuringElement(
                    dilatation_type,
                    (2 * dilatation_size + 1, 2 * dilatation_size + 1),
                    (dilatation_size, dilatation_size))
                mask = cv.dilate(mask, element)

                # apply smoothing to the mask
                mask = cv.blur(mask, (blur_level, blur_level))

                # the background defaults to just the original frame
                background = frame

                if use_background_image:
                    # read in the image
                    img = cv.imread(background_image)

                    # get the frame's 2D dimensions (reversed below, since cv2 expects (width, height))
                    shape = frame.shape[:2]

                    # resize the image
                    background = cv.resize(img, (shape[1], shape[0]),
                                           interpolation=cv.INTER_NEAREST)

                if blur:
                    # blur the background
                    background = cv.blur(background, (blur_level, blur_level))

                frame = overlay_image(frame, background, mask)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
def main():
    # load the configuration data from config.json
    config = load_json(CONFIG_FILE)
    labels_to_mask = config.get(TARGETS)
    model_id = config.get(MODEL_ID)
    background_image = config.get(BACKGROUND_IMAGES) + config.get(IMAGE)
    blur = config.get(BLUR)
    blur_level = config.get(BLUR_LEVEL)
    use_background_image = config.get(USE_BACKGROUND_IMAGE)

    semantic_segmentation = edgeiq.SemanticSegmentation(model_id)
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Model:\n{}\n".format(semantic_segmentation.model_id))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # detection loop
            while True:
                # read in the video stream
                frame = video_stream.read()

                segmentation_results = semantic_segmentation.segment_image(
                    frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    segmentation_results.duration))

                label_map = np.array(semantic_segmentation.labels)[
                    segmentation_results.class_map]

                filtered_class_map = np.zeros(
                    segmentation_results.class_map.shape).astype(int)

                for label in labels_to_mask:
                    filtered_class_map += segmentation_results.class_map * (
                        label_map == label).astype(int)

                # boolean map of the pixels that belong to any target label
                detection_map = (filtered_class_map != 0)

                # the background defaults to just the original frame
                background = frame

                if use_background_image:
                    # read in the image
                    img = cv.imread(background_image)

                    # get the frame's 2D dimensions (reversed below, since cv2 expects (width, height))
                    shape = frame.shape[:2]

                    # resize the image
                    background = cv.resize(img, (shape[1], shape[0]),
                                           interpolation=cv.INTER_NEAREST)

                if blur:
                    # blur the background
                    background = cv.blur(background, (blur_level, blur_level))

                # copy the target pixels from the original frame onto the new background
                background[detection_map] = frame[detection_map].copy()
                streamer.send_data(background, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #8
def generateSegmentationModel():
    segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    segmentation.load(engine=edgeiq.Engine.DNN)
    return segmentation
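A usage sketch for this factory function (the image path is hypothetical; the API calls mirror the earlier examples):

import cv2
import edgeiq

segmentation = generateSegmentationModel()
image = cv2.imread("images/example.jpg")  # hypothetical path
results = segmentation.segment_image(image)
mask = segmentation.build_image_mask(results.class_map)
blended = edgeiq.blend_images(image, mask, alpha=0.5)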