Example #1
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=4) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=[(255, 255, 255)])

            # Generate text to display on streamer
            text = ["<b>Model:</b> {}".format(obj_detect.model_id)]
            text.append("<b>Inference time:</b> {:1.3f} s".format(
                results.duration))
            text.append("<b>Objects:</b>")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))
            if image_path == 'images/example_08.jpg':
                text.append("<br><br><b><em>Hello, World!</em></b>")

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
Example #2
    def _run_detection(self):
        obj_detect = edgeiq.ObjectDetection(self._model_id)
        obj_detect.load(engine=self._engine)

        print("Loaded model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

        with edgeiq.WebcamVideoStream(cam=self.idx) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            self._fps.start()

            while True:
                frame = video_stream.read()

                if self._stop_event.is_set():
                    break

                results = obj_detect.detect_objects(frame, confidence_level=.5)

                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                output_results = {
                    "idx": self.idx,
                    "frame": frame,
                    "results": results,
                    "model_id": obj_detect.model_id
                }
                self._results_q.put(output_results)

                self._fps.update()
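
This method is written to run on a worker thread and push per-frame results into a queue. A minimal consumer sketch (the queue and stop-event names are taken from the snippet; everything else is assumed):

import queue

def drain_results(results_q, stop_event):
    # Pull detection results pushed by _run_detection until stopped
    while not stop_event.is_set():
        try:
            output = results_q.get(timeout=1.0)
        except queue.Empty:
            continue
        print("camera {}: {} objects".format(
            output["idx"], len(output["results"].predictions)))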
Example #3
def main():
    # arduino = serial.Serial('COM1', 115200, timeout=1)
    GPIO.setmode(GPIO.BCM)
    buzzer = 23
    GPIO.setup(buzzer, GPIO.OUT)
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, ["bottle"])
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)
                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")
                for prediction in predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                    GPIO.output(buzzer, GPIO.HIGH)
                    GPIO.input(buzzer)
                    time.sleep(0.5)
                    GPIO.output(buzzer, GPIO.LOW)
                    GPIO.input(buzzer)
                    time.sleep(0.5)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
    def __init__(self, host, port, handler):

        self.inferencer = edgeiq.ObjectDetection(
            'alwaysai/ssd_mobilenet_v1_coco_2018_01_28')
        self.inferencer.load(edgeiq.Engine.DNN)
        print('Model Loaded')

        print(f'Serving on {host}:{port}')
        super().__init__((host, port), handler)
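
The super().__init__((host, port), handler) call suggests a socketserver.TCPServer subclass. A hypothetical skeleton showing how such a server would be wired up (the DetectionServer and EchoHandler names are assumptions):

import socketserver

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # Placeholder; a real handler would run self.server.inferencer
        # on the received payload
        self.request.sendall(self.request.recv(1024))

# server = DetectionServer('0.0.0.0', 8000, EchoHandler)
# server.serve_forever()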
Example #5
File: app.py Project: Ctakayama/HARDHACK
def main():

    label_defs = {}

    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)


    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                        frame, labelToString(label_defs, results.predictions),
                        show_labels=False, show_confidences=False,
                        colors=obj_detect.colors, line_thickness=0)
                frame = addNotes(frame, results.predictions)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")
                text.append("fps:{:2.2f}".format(fps.compute_fps()))
                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #6
def main(camera, use_streamer, server_addr):
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #8
def main():
    # Set up object detection API
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    # Set up rpi ports
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(LEFT_PORT, GPIO.OUT)
    GPIO.setup(RIGHT_PORT, GPIO.OUT)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                # Maybe filter the result to bottles or bags for demo?
                results = obj_detect.detect_objects(frame, confidence_level=.8)

                image_Centering(results.predictions)

                # Debug information; text is initialized first so the
                # send_data call below never sees an undefined name
                text = []
                if debug_On:
                    frame = edgeiq.markup_image(
                            frame, results.predictions, colors=obj_detect.colors)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append(
                            "Inference time: {:1.3f} s".format(results.duration))
                    text.append("Objects:")

                    for prediction in results.predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                time.sleep(FRAME_A_RATE)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
Example #9
def object_detector(model):
    # print("alwaysai_helper.py: object_detector")
    if model is None:
        raise Exception(
            "alwaysai_helper.py: object_detector: model name parameter not found"
        )
    od = edgeiq.ObjectDetection(model)
    engine = edgeiq.Engine.DNN
    if is_accelerator_available():
        engine = edgeiq.Engine.DNN_OPENVINO
    od.load(engine)
    return od
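
is_accelerator_available() is defined elsewhere in this helper module. A hypothetical stub that keeps the function above runnable for testing:

def is_accelerator_available():
    # Hypothetical stub: the real helper presumably probes for an
    # attached accelerator such as an Intel NCS2. Returning False
    # keeps the detector on the default DNN engine.
    return False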
Example #10
    def load_model(self, model):
        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN, accelerator=edgeiq.Accelerator.CPU)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

        return obj_detect
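
A usage sketch, assuming the method sits on a loader class with no other required state (the ModelLoader name is hypothetical):

import cv2

loader = ModelLoader()
detector = loader.load_model("alwaysai/mobilenet_ssd")
frame = cv2.imread("images/example.jpg")
results = detector.detect_objects(frame, confidence_level=.5)
print(results.predictions)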
Example #11
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()
                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=facial_detector.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Faces:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #12
def main():
    """Run csi video stream and object detector."""
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    try:
        with enhanced_csi.JetsonVideoStream(
                cam=0,
                rotation=enhanced_csi.FrameRotation.ROTATE_180,
                camera_mode=enhanced_csi.JetsonCameraMode.IMX477_4032x3040_30_0,
                display_width=640,
                display_height=480) as video_stream, \
                edgeiq.Streamer() as streamer:
            time.sleep(2.0)
            video_stream.start_counting_fps()

            # loop detection
            while True:
                frame = enhanced_csi.read_camera(video_stream, True)
                results = obj_detect.detect_objects(frame, confidence_level=.4)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                video_stream.frames_displayed += 1

                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break
            video_stream.release_fps_stats()

    finally:
        print("Program Ending")
Example #13
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/ssd_mobilenet_v2_coco_2018_03_29")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))


    try:
        with edgeiq.RealSense() as video_stream, \
                edgeiq.Streamer() as streamer:

            print("starting RealSense camera")
            time.sleep(2.0)

            # loop detection
            while True:
                distances = []
                depth_image, color_image = video_stream.read()

                roi = video_stream.roi(depth_image, color_image, min=None, max=0.9)

                # frame = edgeiq.resize(color_image, width=416)
                results = obj_detect.detect_objects(roi, confidence_level=.6)
                roi = edgeiq.markup_image(
                        roi, results.predictions, colors=obj_detect.colors)
                for prediction in results.predictions:
                    distances.append(video_stream.compute_object_distance(
                        prediction.box, depth_image))


                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for i, prediction in enumerate(results.predictions):
                    text.append("{}: {:2.1f}% Distance = {:2.2f}m".format(
                        prediction.label, prediction.confidence * 100, distances[i]))

                streamer.send_data(roi, text)


                if streamer.check_exit():
                    break

    finally:
        print("Program Ending")
Example #14
def main():
    obj_det = edgeiq.ObjectDetection("tester2204/CE-Recog")
    if edgeiq.is_jetson():
        obj_det.load(engine=edgeiq.Engine.DNN_CUDA)
        print("Nvidia Jetson Detected\n")
    else:
        obj_det.load(engine=edgeiq.Engine.DNN)
        print("Device is not a Nvidia Jetson Board\n")
    print("Initializing Application...\n")
    print("Model:\n{}\n".format(obj_det.model_id))
    print("Engine:\n{}\n".format(obj_det.engine))
    print("Labels:\n{}\n".format(obj_det.labels))

    #imgURL = "https://specials-images.forbesimg.com/imageserve/5e88b867e2bb040006427704/0x0.jpg"
    #urllib.request.urlretrieve(imgURL, "this.jpg") #Change based on OS and User

    #image = "Images/this.jpg"

    image_lists = sorted(list(edgeiq.list_images("Images/")))

    with edgeiq.Streamer(queue_depth=len(image_lists),
                         inter_msg_time=7) as streamer:
        i = 0
        while i < 3:
            for image_list in image_lists:
                show_image = cv2.imread(image_list)
                image = show_image.copy()

                results = obj_det.detect_objects(image, confidence_level=.5)

                image = edgeiq.markup_image(image,
                                            results.predictions,
                                            colors=obj_det.colors)

                shown = ["Model: {}".format(obj_det.model_id)]
                shown.append("Inference time: {:1.3f} s".format(
                    results.duration))
                shown.append("Objects:")

                for prediction in results.predictions:
                    shown.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                streamer.send_data(image, shown)
            streamer.wait()
            i = i + 1

    #if streamer.check_exit():
    print("That's it folks!")
    print("Thanks for using Ben's Object Recognition Model & Software")
    print("Sponsored by: Darien's Face")
Example #15
def object_detector(model, should_log=True):
    """Return an object detector for a given model"""
    if model is None:
        raise Exception(
            "alwaysai.py: object_detector: model name parameter not found")
    od = edgeiq.ObjectDetection(model)
    e = engine()
    od.load(e)
    if should_log:
        print("alwaysai.py: object_detector: Engine: {}".format(od.engine))
        print("alwaysai.py: object_detector: Accelerator: {}\n".format(
            od.accelerator))
        print("alwaysai.py: object_detector: Model:\n{}\n".format(od.model_id))
    return od
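
engine() is another helper from the same module, not shown in this listing. A hypothetical stub mirroring the accelerator check from Example #9 (the accelerator_available parameter is an assumption to keep it self-contained):

import edgeiq

def engine(accelerator_available=False):
    # Pick OpenVINO when an accelerator is present, otherwise fall
    # back to the default DNN engine.
    if accelerator_available:
        return edgeiq.Engine.DNN_OPENVINO
    return edgeiq.Engine.DNN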
Example #16
    def detection_base(model, confidence, image_array):
        detector = edgeiq.ObjectDetection(
            model)  # model example: "alwaysai/res10_300x300_ssd_iter_140000"
        detector.load(engine=edgeiq.Engine.DNN)

        centroid_tracker = edgeiq.CentroidTracker(deregister_frames=100,
                                                  max_distance=50)
        results = detector.detect_objects(image_array,
                                          confidence_level=confidence)
        objects = centroid_tracker.update(results.predictions)

        predictions = []
        for (object_id, prediction) in objects.items():
            prediction.label = "{}: {}".format(prediction.label, object_id)
            predictions.append(prediction)

        image = edgeiq.markup_image(image_array, predictions)

        return image, results, None
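
Note that detection_base builds a fresh CentroidTracker (and detector) on every call, so object IDs cannot persist from frame to frame. A sketch of a variant that reuses both across calls (the module-level names are assumptions):

import edgeiq

_detector = edgeiq.ObjectDetection("alwaysai/res10_300x300_ssd_iter_140000")
_detector.load(engine=edgeiq.Engine.DNN)
_tracker = edgeiq.CentroidTracker(deregister_frames=100, max_distance=50)

def detection_tracked(confidence, image_array):
    # Same flow as detection_base, but IDs survive between frames
    results = _detector.detect_objects(image_array,
                                       confidence_level=confidence)
    objects = _tracker.update(results.predictions)
    predictions = []
    for (object_id, prediction) in objects.items():
        prediction.label = "{}: {}".format(prediction.label, object_id)
        predictions.append(prediction)
    return edgeiq.markup_image(image_array, predictions), results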
Example #17
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/yolo_v3")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    video_path = f"data/inputs/{difficulty}.mp4"
    stream_context = edgeiq.FileVideoStream(f"{video_path}",
                                            play_realtime=True)

    with stream_context as video_stream, edgeiq.Streamer() as streamer:
        while video_stream.more():

            image = video_stream.read()
            results = obj_detect.detect_objects(image, confidence_level=.5)
            specific_predictions = [
                r for r in results.predictions if r.label == 'person'
            ]

            res = tracker.update(specific_predictions)

            image = draw_tracked_boxes(image, res)
            # image = edgeiq.markup_image(image, people_predictions)

            streamer.send_data(image)
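
tracker and draw_tracked_boxes come from elsewhere in this project. A hypothetical draw_tracked_boxes consistent with the call above, using the box fields that edgeiq predictions expose:

import cv2

def draw_tracked_boxes(image, tracked):
    # Draw each tracked prediction's box and ID onto the frame
    for object_id, prediction in tracked.items():
        box = prediction.box
        cv2.rectangle(image, (box.start_x, box.start_y),
                      (box.end_x, box.end_y), (0, 255, 0), 2)
        cv2.putText(image, "person {}".format(object_id),
                    (box.start_x, box.start_y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return image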
Example #18
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
Example #19
def main():

    # The current frame index
    frame_idx = 0

    # The number of frames to skip before running detector
    detect_period = 50

    # if you would like to test an additional model, add one to the list below:
    models = [
        "alwaysai/ssd_mobilenet_v2_oidv4",
        "alwaysai/ssd_inception_v2_coco_2018_01_28"
    ]

    # include any labels that you wish to detect from any models (listed above in 'models') here in this list
    detected_contraband = [
        "Pen", "cell phone", "backpack", "book", "Book", "Ring binder",
        "Headphones", "Calculator", "Mobile phone", "Telephone", "Microphone",
        "Ipod", "Remote control"
    ]

    # load all the models (creates a new object detector for each model)
    detectors = []
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()
    contraband_summary = ContrabandSummary()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                predictions_to_markup = []
                text = [""]

                # only analyze every 'detect_period' frame (i.e. every 50th in original code)
                if frame_idx % detect_period == 0:

                    # gather data from the all the detectors
                    for i in range(0, len(detectors)):
                        results = detectors[i].detect_objects(
                            frame, confidence_level=.2)

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # append each prediction
                        predictions = results.predictions
                        for prediction in predictions:

                            if (prediction.label.strip()
                                    in detected_contraband):
                                contraband_summary.contraband_alert(
                                    prediction.label, frame)
                                predictions_to_markup.append(prediction)
                                tracker.start(frame, prediction)
                else:

                    # if there are objects being tracked, update the tracker with the new frame
                    if tracker.count:

                        # get the new predictions for the objects being tracked, used to markup the frame
                        predictions_to_markup = tracker.update(frame)

                # mark up the frame with the predictions for the contraband objects
                frame = edgeiq.markup_image(frame,
                                            predictions_to_markup,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # send the collection of contraband detection points (string and video frame) to the streamer
                text = contraband_summary.get_contraband_string()

                streamer.send_data(frame, text)
                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #20
def main():

    # if you would like to test an additional model, add one to the list below:
    models = ["alwaysai/mobilenet_ssd", "alwaysai/ssd_inception_v2_coco_2018_01_28"]

    # if you've added a model, add a new color in as a list of tuples in BGR format
    # to make visualization easier (e.g. [(B, G, R)]).
    colors = [[(66, 68, 179)], [(50, 227, 62)]]

    detectors = []

    # load all the models (creates a new object detector for each model)
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                text = [""]

                # gather data from the all the detectors
                for i in range(0, len(detectors)):
                    results = detectors[i].detect_objects(
                        frame, confidence_level=.5)
                    object_frame = edgeiq.markup_image(
                        frame, results.predictions, show_labels=False, colors=colors[i])

                    # for the first frame, overwrite the input feed
                    if i == 0:
                        display_frame = object_frame
                    else:

                        # otherwise, append the new marked-up frame to the previous one
                        display_frame = numpy.concatenate((object_frame, display_frame))

                    # append each prediction
                    for prediction in results.predictions:
                        text.append("Model {} detects {}: {:2.2f}%".format(
                            detectors[i].model_id, prediction.label,
                            prediction.confidence * 100))

                # send the image frame and the predictions for both
                # prediction models to the output stream
                streamer.send_data(display_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #21
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            r, g, b = cv2.split(image)
            fig = plt.figure()
            axis = fig.add_subplot(1, 1, 1, projection="3d")

            pixel_colors = image.reshape(
                (np.shape(image)[0] * np.shape(image)[1], 3))
            norm = colors.Normalize(vmin=-1., vmax=1.)
            norm.autoscale(pixel_colors)
            pixel_colors = norm(pixel_colors).tolist()

            axis.scatter(r.flatten(),
                         g.flatten(),
                         b.flatten(),
                         facecolors=pixel_colors,
                         marker=".")
            axis.set_xlabel("Red")
            axis.set_ylabel("Green")
            axis.set_zlabel("Blue")
            plt.show()

            #convert from rgb to hsv and pick out 2 shades
            hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            hsv_drot = (18, 24, 61)
            hsv_lrot = (13, 203, 55)

            #build the color mask
            mask = cv2.inRange(hsv_image, hsv_lrot, hsv_drot)
            res = cv2.bitwise_and(image, image, mask=mask)
            plt.subplot(1, 2, 1)
            plt.imshow(mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res)
            plt.show()

            #2nd layer mask, did not display
            hsv_olive = (34, 32, 120)
            hsv_dolive = (37, 240, 27)
            mask_ol = cv2.inRange(hsv_image, hsv_olive, hsv_dolive)
            res_w = cv2.bitwise_and(image, image, mask=mask_ol)
            plt.subplot(1, 2, 1)
            plt.imshow(mask_ol, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res_w)
            plt.show()

            #final mask
            final_mask = mask + mask_ol
            final_result = cv2.bitwise_and(image, image, mask=final_mask)
            plt.subplot(1, 2, 1)
            plt.imshow(final_mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(final_result)
            plt.show()

            #testing .shape and typecast image
            print("The type of this input is {}".format(type(image)))
            print("Shape: {}".format(image.shape))

            ##text.append(get_colors(get_image(image_path), 4, True))

            # Run detection before building the overlay text so that
            # results.duration below is defined
            results = obj_detect.detect_objects(image, confidence_level=.5)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            #need to convert from bgr to rgb
            swapped_colors = swap(obj_detect.colors)
            text.append("Colors printed!")
            # text.append(swapped_colors)

            print(swapped_colors)

            # print(obj_detect.colors)

            # converted = np.array([np.array(rgb) for rgb in swapped_colors]) // numpy arrays with lists (like numpy contained within itself, list of lists)

            # print(converted.shape)

            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)
            # print(rgb2hex(swapped_colors))

            # print(converted)

            # iterate through tuple list and convert
            # for x in obj_detect.colors:
            #     text.append(rgb2hex(swapped_colors))
            #     text.append(format(x))

            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)

        streamer.wait()

    print("Program Ending")
Example #22
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    new_label = 'Person {}'.format(object_id)
                    prediction.label = new_label
                    people.append(prediction)

                frame = edgeiq.markup_image(
                        frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exit in new_exits:
                    logs.append('Person {} exited'.format(exit))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                
                text.append('Current Occupancy:')
                text.append(str(currentPeople))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #23
def main():

    # Step 1b: first make a detector to detect facial objects
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    # Step 2a: then make a classifier to classify the age of the image
    classifier = edgeiq.Classification("alwaysai/agenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    # Step 2b: descriptions printed to console
    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                # Step 3a: track how many faces are detected in a frame
                count = 1

                # read in the video stream
                frame = video_stream.read()

                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)

                # Step 3b: altering the labels to show which face was detected
                for p in results.predictions:
                    p.label = "Face " + str(count)
                    count = count + 1

                # Step 3c: alter the original frame mark up to just show labels
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=True,
                                            show_confidences=False)

                # generate labels to display the face detections on the streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                # Step 3d:
                text.append("Faces:")

                # Step 4a: add a counter for the face detection label
                age_label = 1

                # append each predication to the text output
                for prediction in results.predictions:

                    # Step 4b: append labels for face detection & classification
                    text.append("Face {} ".format(age_label))

                    age_label = age_label + 1

                    ## to show confidence, use the following instead of above:
                    # text.append("Face {}: detected with {:2.2f}% confidence".format(
                    #     age_label, prediction.confidence * 100))

                    # Step 4c: cut out the face and use for the classification
                    face_image = edgeiq.cutout_image(frame, prediction.box)

                    # Step 4d: attempt to classify the image in terms of age
                    age_results = classifier.classify_image(face_image)

                    # Step 4e: if there are predictions for age classification,
                    # generate these labels for the output stream
                    if age_results.predictions:
                        text.append("is {}".format(
                            age_results.predictions[0].label, ))
                    else:
                        text.append("No age prediction")

                    ## to append classification confidence, use the following
                    ## instead of the above if/else:

                    # if age_results.predictions:
                    #     text.append("age: {}, confidence: {:.2f}\n".format(
                    #         age_results.predictions[0].label,
                    #         age_results.predictions[0].confidence))
                    # else:
                    #     text.append("No age prediction")

                # send the image frame and the predictions to the output stream
                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #24
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v2_coco_2018_03_29")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    seconds = 0
    minutes = 0
    hours = 0
    timer = (hours, minutes, seconds)

    df = pd.DataFrame([['alwaysAI_MAU', 0, 0, 0]],
                      index=[0],
                      columns=['Session', 'Hours', 'Minutes', 'Seconds'])

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                text.append("Timer: ")
                text.append(timer)

                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                    # no trailing space: the model's label is 'cell phone'
                    if prediction.label == 'cell phone':
                        seconds = seconds + 1
                        if seconds > 59:
                            seconds = 0
                            minutes = minutes + 1
                        if minutes > 59:
                            minutes = 0
                            hours = hours + 1
                        time.sleep(1)
                        timer = hours, minutes, seconds

                        df.at[0, 'Hours'] = hours
                        df.at[0, 'Minutes'] = minutes
                        df.at[0, 'Seconds'] = seconds
                        print(df)

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #25
def gen():
    #Load in our machine learning models!
    detector_config = {
        "engine": edgeiq.Engine.DNN_OPENVINO,
        "accelerator": edgeiq.Accelerator.MYRIAD
    }

    #Get the face detector:
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(**detector_config)
    describe_model(facial_detector, "Face")

    #Get the gender detector
    gender_detector = edgeiq.Classification("alwaysai/gendernet")
    gender_detector.load(**detector_config)
    describe_model(gender_detector, "Gender")

    #Get the age detector
    age_detector = edgeiq.Classification("alwaysai/agenet")
    age_detector.load(**detector_config)
    describe_model(age_detector, "Age")

    texts = ["No patient detected!"]

    with edgeiq.WebcamVideoStream(cam=0) as webcam:
        # loop detection
        while True:
            frame = webcam.read()

            #Flip the image upside down bc of how we orient the camera
            frame = np.flipud(frame)

            # detect human faces
            face_results = facial_detector.detect_objects(frame,
                                                          confidence_level=.5)

            if len(face_results.predictions) > 0:
                face = frame[face_results.predictions[0].box.
                             start_y:face_results.predictions[0].box.end_y,
                             face_results.predictions[0].box.
                             start_x:face_results.predictions[0].box.end_x]

                #Detect gender and age
                gender_results = gender_detector.classify_image(
                    face, confidence_level=.9)
                age_results = age_detector.classify_image(face)

                frame = blur_detections(frame, face_results.predictions)

                # Find the index of highest confidence
                if len(gender_results.predictions) > 0 and len(
                        age_results.predictions) > 0:
                    top_prediction1 = gender_results.predictions[0]
                    top_prediction2 = age_results.predictions[0]
                    texts = []
                    texts.append("Gender Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction1.label,
                        top_prediction1.confidence * 100))
                    texts.append("Age Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction2.label,
                        top_prediction2.confidence * 100))
            else:
                texts = ["No patient detected!"]

            #HACK: Add a panel to the right side of the image
            label_panel = np.full(
                (frame.shape[0], frame.shape[1] // 2, frame.shape[2]),
                255, dtype=frame.dtype)  # match frame dtype so imencode works
            org_coords = [(frame.shape[0] // 15, i * frame.shape[1] // 10)
                          for i in range(1, 5)]
            for i, text in enumerate(texts):
                label_panel = imwrite(label_panel,
                                      text,
                                      org_coords[i],
                                      thickness=1 + ((i % 2) == 0))

            frame = np.concatenate((frame, label_panel), axis=1)

            #Encode and deploy
            ret, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            #yield frame
            yield (b'\r\n--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
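
blur_detections and imwrite are project-local helpers. A hypothetical blur_detections consistent with the call above:

import cv2

def blur_detections(frame, predictions):
    # Gaussian-blur each detected face region in place for privacy
    for p in predictions:
        box = p.box
        roi = frame[box.start_y:box.end_y, box.start_x:box.end_x]
        frame[box.start_y:box.end_y,
              box.start_x:box.end_x] = cv2.GaussianBlur(roi, (51, 51), 0)
    return frame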
Example #26
def main():
    # Spin up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/" + OBJECT_DETECTION_MODEL)
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Prepare to track frames per second calculations
    fps = edgeiq.FPS()

    # Load any prior instance of the tracker, otherwise spin up a new one
    centroid_tracker = file_manager.load(
        CENTROID_TRACKER,
        edgeiq.CentroidTracker(deregister_frames=TRACKER_DEREGISTER_FRAMES,
                               max_distance=TRACKER_MAX_DISTANCE))
    # Load any prior instance of the metrics data, otherwise start a new one
    metrics = file_manager.load(METRICS_MANAGER,
                                metrics_manager.MetricsManager())

    try:
        if IP_CAMERA_FEED is not None:
            stream_details = edgeiq.IPVideoStream(IP_CAMERA_FEED)
        else:
            stream_details = edgeiq.WebcamVideoStream(cam=0)

        with stream_details as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(
                    frame, confidence_level=DETECT_CONFIDENCE_THRESHOLD)

                # Ignore detections of anything other than people
                people_only = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(people_only)

                # Store active predictions for just this loop
                predictions = []

                # Store the active object ids for just this loop
                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics.addTimeFor(object_id)
                    timeForId = metrics.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                m = metrics.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(m["count"]))
                text.append("Total time: {} sec".format(m["total"]))
                text.append("Average time: {0:.1f} sec".format(m["avg"]))
                text.append("Longest individual time: {} sec".format(m["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        # TODO: Update to save every few seconds in case a crash occurs
        file_manager.save(metrics, METRICS_MANAGER)
        file_manager.save(centroid_tracker, CENTROID_TRACKER)
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #27
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()
            counter = 1
            counterWord = 0
            WordList = [
                'Beautiful!', 'Lovely!', 'Amazing!', 'Charming!', 'Gorgeous!',
                'Heavenly!', 'Stunning!', 'Wow!!!', 'Perfect!', 'Flawless!',
                'OMG!'
            ]

            # loop detection
            while True:
                frame = webcam.read()
                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                predictions_edit = results.predictions
                #print(str(results.predictions[0]))
                # Rotate to the next compliment every 100 frames
                counter += 1
                if counter > 100:
                    counter = 1
                    counterWord = (counterWord + 1) % len(WordList)
                # Overwrite each face's label with the current compliment
                # and pin its confidence at 100%
                for prediction in results.predictions:
                    prediction.confidence = 1
                    prediction.label = WordList[counterWord]
                frame = edgeiq.markup_image(frame, results.predictions)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Stunning People:")

                for prediction in results.predictions:
                    text.append("{:2.2f}%".format(prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #28
import time

import edgeiq

def main():
    # The current frame index
    frame_idx = 0
    # The number of frames to skip before running detector
    detect_period = 30
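
    # Detection is the expensive step, so it runs only every detect_period
    # frames; the correlation tracker follows the last detections on the
    # frames in between, trading some accuracy for throughput.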

    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Correlation tracking runs per tracked object, so max_objects caps the
    # per-frame tracking cost
    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            while True:
                frame = video_stream.read()
                predictions = []
                if frame_idx % detect_period == 0:
                    results = obj_detect.detect_objects(frame,
                                                        confidence_level=.5)
                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append("Inference time: {:1.3f} s".format(
                        results.duration))
                    text.append("Objects:")

                    # Stop tracking old objects
                    if tracker.count:
                        tracker.stop_all()

                    predictions = results.predictions
                    for prediction in predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))
                        tracker.start(frame, prediction)
                else:
                    if tracker.count:
                        predictions = tracker.update(frame)

                frame = edgeiq.markup_image(frame,
                                            predictions,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=obj_detect.colors)
                streamer.send_data(frame, text)
                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        tracker.stop_all()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #29
import datetime
import time

import cv2
import edgeiq

# Assumption: OBJECTS is defined elsewhere in the original app; a plausible
# label list for alwaysai/mobilenet_ssd is used here as a placeholder
OBJECTS = ["person", "car"]

def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))
    print("Detecting:\n{}\n".format(OBJECTS))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, OBJECTS)
                frame = edgeiq.markup_image(frame,
                                            predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # Print date and time on frame
                current_time_date = str(datetime.datetime.now())
                (h, w) = frame.shape[:2]
                cv2.putText(frame, current_time_date, (10, h - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                # Count objects
                counter = {obj: 0 for obj in OBJECTS}

                for prediction in predictions:
                    # increment the counter of the detected object
                    counter[prediction.label] += 1

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Object counts:")

                for label, count in counter.items():
                    text.append("{}: {}".format(label, count))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #30
import time

import edgeiq

def main():
    # The current frame index
    frame_idx = 0
    # The number of frames to skip before running detector
    detect_period = 30

    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/vehicle_license_mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()

    try:
        video_paths = edgeiq.list_files(base_path="./video/",
                                        valid_exts=".mp4")
        streamer = edgeiq.Streamer().setup()

        for video_path in video_paths:
            with edgeiq.FileVideoStream(video_path) as video_stream:
                # Reset per-video state so the detector runs on the first
                # frame of each video instead of tracking stale objects
                frame_idx = 0
                tracker.stop_all()

                # Give the file stream's read buffer a moment to fill
                time.sleep(2.0)
                fps.start()

                # loop detection
                while video_stream.more():
                    frame = video_stream.read()
                    predictions = []

                    # if using new detections, update 'predictions'
                    if frame_idx % detect_period == 0:
                        results = obj_detect.detect_objects(
                            frame, confidence_level=.5)

                        # Generate text to display on streamer
                        text = ["Model: {}".format(obj_detect.model_id)]
                        text.append("Inference time: {:1.3f} s".format(
                            results.duration))
                        text.append("Objects:")

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # Set predictions to the new predictions
                        predictions = results.predictions

                        if not predictions:
                            text.append("no predictions")

                        # Number the detections within this frame; note this
                        # is not a persistent ID across frames
                        for number, prediction in enumerate(predictions, 1):
                            text.append("{}_{}: {:2.2f}%".format(
                                prediction.label, number,
                                prediction.confidence * 100))
                            tracker.start(frame, prediction)

                    else:
                        # otherwise, set 'predictions' to the tracked predictions
                        if tracker.count:
                            predictions = tracker.update(frame)

                    # either way, use 'predictions' to mark up the image and update text
                    frame = edgeiq.markup_image(frame,
                                                predictions,
                                                show_labels=True,
                                                show_confidences=False,
                                                colors=obj_detect.colors,
                                                line_thickness=6,
                                                font_size=2,
                                                font_thickness=6)

                    streamer.send_data(frame, text)
                    frame_idx += 1

                    fps.update()

                    if streamer.check_exit():
                        break

    finally:
        fps.stop()
        streamer.close()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")