Example #1
import edgeiq


def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
            "alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN)
    print('Outside of try')
    try:
        with edgeiq.FileVideoStream("driving_downtown.mp4", play_realtime=False) as video_stream, \
            edgeiq.VideoWriter(output_path="output.avi") as video_writer:
            print('Inside of try')

            if video_stream is None:
                print('no video stream!')
            else:
                print('video stream available of type {}'.format(type(video_stream)))

            if video_writer is None:
                print('no video writer!')
            else:
                print('video writer available of type {}'.format(type(video_writer)))

            more = getattr(video_stream, "more", None)
            if callable(more):
                print('video_stream has an attribute called more')
            else:
                print('video_stream has no MORE function!?')
            if video_stream.more():
                print('At least one video frame available before we begin')

            while video_stream.more():
                print('Inside of while')
                image = video_stream.read()
                if image is None:
                    print('no image')
                else:
                    print('image available of type {}'.format(type(image)))
                results = semantic_segmentation.segment_image(image)
                if results is None:
                    print('no results')
                else:
                    print('results available of type {}'.format(type(results)))
                mask = semantic_segmentation.build_image_mask(results.class_map)
                if mask is None:
                    print('no mask')
                else:
                    print('mask available of type {}'.format(type(mask)))
                blended = edgeiq.blend_images(image, mask, alpha=0.5)
                if blended is None:
                    print('no blended')
                else:
                    print('blended available of type {}'.format(type(blended)))
                # write the blended frame to the output video
                video_writer.write_frame(blended)

    finally:
        print("Program Ending")
Example #2
def start_detection(config,
                    did_start_callback,
                    enter_callback,
                    exit_callback,
                    did_end_object_callback,
                    should_log=True):
    """
    Enable features based on the config and pass callbacks down to the
    actual detection functions.
    """
    global ENTER_CALLBACK
    global EXIT_CALLBACK
    ENTER_CALLBACK = enter_callback
    EXIT_CALLBACK = exit_callback
    print('alwaysai.py: start_detection: enter_callback: {}'.format(
        ENTER_CALLBACK))

    # Configs
    od_config = alwaysai_configs.ObjectDetector(config)
    ct_config = alwaysai_configs.CentroidTracker(config)
    vs_config = alwaysai_configs.VideoStream(config)
    od = object_detector(od_config.model_id)
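    # Drop a tracked object after `deregister_frames` consecutive missed
    # frames; only match detections within `max_distance` pixels of a
    # tracked centroid.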
    t = edgeiq.CentroidTracker(deregister_frames=ct_config.deregister_frames,
                               max_distance=ct_config.max_distance)
    en_zones_config = config.get('entry_zones', [])
    ex_zones_config = config.get('exit_zones', [])
    entry_zones = zones_from_config(en_zones_config)
    exit_zones = zones_from_config(ex_zones_config)
    vs = None

    # print('alwaysai.py: start_detection: en_zones_config: {}'.format(en_zones_config))
    # print('alwaysai.py: start_detection: entry_zones: {}'.format(entry_zones))

    # Inits
    if vs_config.mode == 'camera':
        if should_log:
            print('alwaysai.py: start_detection: enabling camera w/ id: {}'.
                  format(vs_config.camera_id))
        vs = edgeiq.WebcamVideoStream(cam=vs_config.camera_id)
    elif vs_config.mode == 'file':
        if should_log:
            print('alwaysai.py: start_detection: reading from file')
        vs = edgeiq.FileVideoStream(vs_config.filename, play_realtime=True)
    enable_streamer = config.get('enable_streamer', False)
    streamer = alwaysai_configs.DummyStreamer()
    if enable_streamer:
        print('alwaysai.py: start_detection: ENABLING streamer')
        streamer = edgeiq.Streamer()

    # Start
    start_video_detection_with_streamer(vs, od_config, od, streamer, t,
                                        entry_zones, exit_zones,
                                        did_start_callback, did_detect,
                                        did_end_object_callback)
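
A call sketch for start_detection. The callback signatures are assumptions, and alwaysai_configs may read additional config keys not shown here:

def _log_event(*args):
    print('event: {}'.format(args))

start_detection(
    config={'entry_zones': [], 'exit_zones': [], 'enable_streamer': False},
    did_start_callback=_log_event,
    enter_callback=_log_event,
    exit_callback=_log_event,
    did_end_object_callback=_log_event)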
Example #3
import time

import edgeiq


def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
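    # Engine.DNN_CUDA runs inference on an NVIDIA GPU; on CPU-only machines,
    # edgeiq.Engine.DNN is the safe choice.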
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.FileVideoStream('toronto.mp4', play_realtime=True) as video_stream, \
                edgeiq.Streamer() as streamer:  # play_realtime simulates video feed from a camera
            # Allow the stream to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while video_stream.more():
                frame = video_stream.read()
                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                mask = semantic_segmentation.build_image_mask(
                    results.class_map)
                blended = edgeiq.blend_images(frame, mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
import edgeiq

# Stand-ins for names defined elsewhere in the original module, so the
# snippet runs on its own (the values are assumptions):
difficulty = "easy"  # selects data/inputs/<difficulty>.mp4
tracker = edgeiq.CentroidTracker(deregister_frames=20, max_distance=50)


def draw_tracked_boxes(image, tracked):
    # Stand-in helper: draw the tracked predictions onto the image.
    return edgeiq.markup_image(image, list(tracked.values()))


def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/yolo_v3")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    video_path = f"data/inputs/{difficulty}.mp4"
    stream_context = edgeiq.FileVideoStream(video_path,
                                            play_realtime=True)

    with stream_context as video_stream, edgeiq.Streamer() as streamer:
        while video_stream.more():

            image = video_stream.read()
            results = obj_detect.detect_objects(image, confidence_level=.5)
            specific_predictions = [
                r for r in results.predictions if r.label == 'person'
            ]

            res = tracker.update(specific_predictions)

            image = draw_tracked_boxes(image, res)
            # image = edgeiq.markup_image(image, specific_predictions)

            streamer.send_data(image)
Example #5
import edgeiq


def _video_file_stream(filepath):
    if filepath is None:
        raise ValueError(
            "alwaysai_helper.py: _video_file_stream: filepath not provided")
    return edgeiq.FileVideoStream(filepath)
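
A usage sketch for the helper above (the file path is hypothetical):

with _video_file_stream("video/sample.mp4") as video_stream:
    while video_stream.more():
        frame = video_stream.read()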
Example #6
import time

import edgeiq


def main():
    # The current frame index
    frame_idx = 0
    # The number of frames to skip before running detector
    detect_period = 30

    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/vehicle_license_mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

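    # CorrelationTracker keeps following started objects between detections,
    # so the detector itself only needs to run every `detect_period` frames.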
    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()
    streamer = None

    try:
        video_paths = edgeiq.list_files(base_path="./video/",
                                        valid_exts=".mp4")
        streamer = edgeiq.Streamer().setup()

        for video_path in video_paths:
            with edgeiq.FileVideoStream(video_path) as video_stream:

                # Allow the file stream to start buffering frames
                time.sleep(2.0)
                fps.start()

                # loop detection
                while video_stream.more():
                    frame = video_stream.read()
                    predictions = []

                    # if using new detections, update 'predictions'
                    if frame_idx % detect_period == 0:
                        results = obj_detect.detect_objects(
                            frame, confidence_level=.5)

                        # Generate text to display on streamer
                        text = ["Model: {}".format(obj_detect.model_id)]
                        text.append("Inference time: {:1.3f} s".format(
                            results.duration))
                        text.append("Objects:")

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # Set predictions to the new predictions
                        predictions = results.predictions

                        if not predictions:
                            text.append("no predictions")

                        # use 'number' to identify unique objects
                        for number, prediction in enumerate(predictions, 1):
                            text.append("{}_{}: {:2.2f}%".format(
                                prediction.label, number,
                                prediction.confidence * 100))
                            tracker.start(frame, prediction)

                    else:
                        # otherwise, set 'predictions' to the tracked predictions
                        if tracker.count:
                            predictions = tracker.update(frame)

                    # either way, use 'predictions' to mark up the image;
                    # 'text' keeps the most recent detection summary
                    frame = edgeiq.markup_image(frame,
                                                predictions,
                                                show_labels=True,
                                                show_confidences=False,
                                                colors=obj_detect.colors,
                                                line_thickness=6,
                                                font_size=2,
                                                font_thickness=6)

                    streamer.send_data(frame, text)
                    frame_idx += 1

                    fps.update()

                    if streamer.check_exit():
                        break

    finally:
        fps.stop()
        if streamer is not None:
            streamer.close()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
def start_file_detection_and_tracking(delivery_object,
                                      filter_for,
                                      model_name,
                                      filename,
                                      detection_confidence=.5,
                                      enable_streamer=True,
                                      streamer_show_labels=True,
                                      tracker_deregister_frames=20,
                                      tracker_max_distance=50,
                                      should_log=False):
    """Starts a detection loop"""
    obj_detect = object_detector(model_name)
    tracker = edgeiq.CentroidTracker(
        deregister_frames=tracker_deregister_frames,
        max_distance=tracker_max_distance)
    fps = edgeiq.FPS()

    try:
        # Enables video camera and streamer

        # TODO: add streamer disable feature here

        with edgeiq.FileVideoStream(
                filename) as video_stream, edgeiq.Streamer() as streamer:

            # Start tracking of frames per second
            fps.start()

            # loop detection
            while video_stream.more():
                frame = video_stream.read()
                text = []
                # Run detection
                # detect human faces
                results = obj_detect.detect_objects(
                    frame, confidence_level=detection_confidence)

                # TODO: Add filter option here

                # Markup is applied after tracking (below) so that the image
                # cutouts sent to the server come from the clean frame.

                # Generate text to display on streamer
                text.append("Model: {}".format(obj_detect.model_id))
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                predictions = []
                objects = tracker.update(results.predictions)
                # predictions = results.predictions
                for (object_id, prediction) in objects.items():
                    # print(vars(prediction))
                    text.append("{}: {}: {:2.2f}%".format(
                        object_id, prediction.label,
                        prediction.confidence * 100))
                    predictions.append(prediction)

                    if delivery_object.should_send_image(object_id):
                        # Extract image
                        face_image = edgeiq.cutout_image(frame, prediction.box)
                        # Send data to server
                        delivery_object.send_image(object_id, prediction.label,
                                                   face_image)
                    elif delivery_object.should_send_data(object_id):
                        delivery_object.send_data(object_id, prediction.label)

                frame = edgeiq.markup_image(frame, predictions,
                                            show_labels=streamer_show_labels)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        if should_log:
            print("[INFO] elapsed time: {:.2f}".format(
                fps.get_elapsed_seconds()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))
            print("Program Ending")