def main():
    """Run csi video stream and object detector."""
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    try:
        with enhanced_csi.JetsonVideoStream(
                cam=0,
                rotation=enhanced_csi.FrameRotation.ROTATE_180,
                camera_mode=enhanced_csi.JetsonCameraMode.IMX477_4032x3040_30_0,
                display_width=640,
                display_height=480) as video_stream, \
                edgeiq.Streamer() as streamer:
            time.sleep(2.0)
            video_stream.start_counting_fps()

            # loop detection
            while True:
                frame = enhanced_csi.read_camera(video_stream, True)
                results = obj_detect.detect_objects(frame, confidence_level=.4)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                video_stream.frames_displayed += 1

                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break
            video_stream.release_fps_stats()

    finally:
        print("Program Ending")
Example #2
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()
                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=facial_detector.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Faces:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/ssd_mobilenet_v2_coco_2018_03_29")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))


    try:
        with edgeiq.RealSense() as video_stream, \
                edgeiq.Streamer() as streamer:

            print("starting RealSense camera")
            time.sleep(2.0)

            # loop detection
            while True:
                distances = []
                depth_image, color_image = video_stream.read()

                roi = video_stream.roi(depth_image, color_image, min=None, max=0.9)

                # frame = edgeiq.resize(color_image, width=416)
                results = obj_detect.detect_objects(roi, confidence_level=.6)
                roi = edgeiq.markup_image(
                        roi, results.predictions, colors=obj_detect.colors)
                for prediction in results.predictions:
                    distances.append(video_stream.compute_object_distance(
                        prediction.box, depth_image))


                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for i, prediction in enumerate(results.predictions):
                    text.append("{}: {:2.1f}% Distance = {:2.2f}m".format(
                        prediction.label, prediction.confidence * 100, distances[i]))

                streamer.send_data(roi, text)


                if streamer.check_exit():
                    break

    finally:
        print("Program Ending")
Example #4
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v2_coco_2018_03_29")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
def main():

    text = "Facial Landmarks with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)


    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_resized_frame)


                # Loop to markup resized_frame
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w, h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(rectangle)
                    cv2.rectangle(resized_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.putText(resized_frame, "Face #{}".format(i + 1), (x - 10, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                for facial_coordinate in facial_coordinates:
                    for (x,y) in facial_coordinate:
                        cv2.circle(resized_frame, (x, y), 1, (255, 0, 0), -1)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #6
def main():
    obj_det = edgeiq.ObjectDetection("tester2204/CE-Recog")
    if edgeiq.is_jetson():
        obj_det.load(engine=edgeiq.Engine.DNN_CUDA)
        print("Nvidia Jetson Detected\n")
    else:
        obj_det.load(engine=edgeiq.Engine.DNN)
        print("Device is not a Nvidia Jetson Board\n")
    print("Initializing Application...\n")
    print("Model:\n{}\n".format(obj_det.model_id))
    print("Engine:\n{}\n".format(obj_det.engine))
    print("Labels:\n{}\n".format(obj_det.labels))

    #imgURL = "https://specials-images.forbesimg.com/imageserve/5e88b867e2bb040006427704/0x0.jpg"
    #urllib.request.urlretrieve(imgURL, "this.jpg") #Change based on OS and User

    #image = "Images/this.jpg"

    image_lists = sorted(list(edgeiq.list_images("Images/")))

    with edgeiq.Streamer(queue_depth=len(image_lists),
                         inter_msg_time=7) as streamer:
        i = 0
        while i < 3:
            for image_list in image_lists:
                show_image = cv2.imread(image_list)
                image = show_image.copy()

                results = obj_det.detect_objects(image, confidence_level=.5)

                image = edgeiq.markup_image(image,
                                            results.predictions,
                                            colors=obj_det.colors)

                shown = ["Model: {}".format(obj_det.model_id)]
                shown.append("Inference time: {:1.3f} s".format(
                    results.duration))
                shown.append("Objects:")

                for prediction in results.predictions:
                    shown.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                streamer.send_data(image, shown)
            streamer.wait()
            i = i + 1

    #if streamer.check_exit():
    print("That's it folks!")
    print("Thanks for using Ben's Object Recognition Model & Software")
    print("Sponsored by: Darien's Face")
Example #7
def main():

    cameras = []
    # This is the list of camera indices to use. If you increase the length of this list,
    # you'll also need to update the `concatenate` step for displaying the frames.
    camera_idxs = [0, 1]
    for i in camera_idxs:
        cameras.append(
            CameraThread(i, edgeiq.Engine.DNN, "alwaysai/mobilenet_ssd"))

    for c in cameras:
        c.start()

    try:
        with edgeiq.Streamer() as streamer:
            while True:
                results = []
                for c in cameras:
                    results.append(c.get_results())

                # Generate text to display on streamer
                text = []
                for r in results:
                    if r is not None:
                        text.append("Camera {}:".format(r["idx"]))
                        text.append("Model: {}".format(r["model_id"]))
                        text.append("Inference time: {:1.3f} s".format(
                            r["results"].duration))
                        text.append("Objects:")

                        for prediction in r["results"].predictions:
                            text.append("{}: {:2.2f}%".format(
                                prediction.label, prediction.confidence * 100))

                # Join the incoming frames vertically into a single image to be shown on
                # the Streamer
                frame = np.concatenate(
                    (results[0]["frame"], results[1]["frame"]), axis=0)

                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

    finally:
        for c in cameras:
            c.stop()
            c.join()
        print("Program Ending")
Example #8
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                pdict = dict()
                myList = [None] * 15

                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration) +
                    "\nFPS: {:.2f}".format(fps.compute_fps()))
                for ind, pose in enumerate(results.poses):
                    pdict["Person {}".format(ind)] = pose.key_points
                    df = pd.DataFrame(data=pdict,
                                      index=pose.key_points,
                                      columns=pdict)
                    print(df)

                streamer.send_data(results.draw_poses(frame), text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.FileVideoStream('toronto.mp4', play_realtime=True) as video_stream, \
                edgeiq.Streamer() as streamer:  # play_realtime simulates video feed from a camera
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                mask = semantic_segmentation.build_image_mask(
                    results.class_map)
                blended = edgeiq.blend_images(frame, mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #10
def main():
    classifier = edgeiq.Classification("alwaysai/googlenet")
    classifier.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))
    print("Labels:\n{}\n".format(classifier.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=3) as streamer:
        black_img = cv2.imread('black.jpg')
        for image_path in image_paths:
            image_display = cv2.imread(image_path)
            image = image_display.copy()

            results = classifier.classify_image(image)

            # Generate text to display on streamer
            text = ["Model: {}".format(classifier.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            if results.predictions:
                image_text = "Label: {}, {:.2f}".format(
                        results.predictions[0].label,
                        results.predictions[0].confidence)
                cv2.putText(
                        image_display, image_text, (5, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                for idx, prediction in enumerate(results.predictions[:5]):
                    text.append("{}. label: {}, confidence: {:.5}".format(
                        idx + 1, prediction.label, prediction.confidence))
                    if prediction.label == "Not Safe For Work":
                        resized_black_image = edge_tools.resize(black_img, image.shape[1], image.shape[0], keep_scale=False)
                        image_display = edge_tools.blend_images(resized_black_image, image, 0.1)
                
            else:
                text.append("No classification for this image.")

            streamer.send_data(image_display, text)
        streamer.wait()

    print("Program Ending")
Example #11
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # HSV
                frame_hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
                frame_value = frame_hsv[:, :, 2]

                # bilateral filter - edge-preserving image smoothing method
                blurredBrightness = cv2.bilateralFilter(
                    frame_value, 9, 150, 150)

                # Canny edge detector
                thresh = 50
                edges = cv2.Canny(blurredBrightness,
                                  thresh,
                                  thresh * 2,
                                  L2gradient=True)

                # Generate text to display on streamer
                text = "Thermal Edge Detector"

                streamer.send_data(edges, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #12
def main():
    """Run object detector and centroid tracker."""
    tracker = edgeiq.CorrelationTracker(
            max_objects=5, enter_cb=face_enters, exit_cb=face_exits)

    fps = edgeiq.FPS()

    try:
        with edgeiq.oak.Oak('alwaysai/face_detection_0200_oak',
                        sensor=edgeiq.Sensor.res_1080,
                        video_mode=edgeiq.VideoMode.preview) as oak_camera, \
                        edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and tracking
            while True:
                frame = oak_camera.get_frame()
                results = oak_camera.get_model_result(confidence_level=.6)
                text = ["Faces Detected:"]
                if results:
                    fps.update()

                    objects = tracker.update(results.predictions, frame)

                    # Update the label to reflect the object ID
                    predictions = []
                    for (object_id, prediction) in objects.items():
                        prediction.label = "face {}".format(object_id)
                        text.append("{}".format(prediction.label))
                        predictions.append(prediction)
                    text.append(("approx. FPS: {:.2f}".
                                format(fps.compute_fps())))
                    frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("Program Ending")
Example #13
def main():
    """Run object detector."""
    fps = edgeiq.FPS()

    try:
        with edgeiq.Oak('alwaysai/ssd_v2_coco_oak') as oak_camera,\
                edgeiq.Streamer() as streamer:
            # Allow Oak camera to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = oak_camera.get_frame()
                results = oak_camera.get_model_result(confidence_level=.8)
                text = ["Oak Camera Detections:"]
                if results:
                    fps.update()

                    text.append("approx. FPS: {:.2f}".format(
                        fps.compute_fps()))
                    text.append("Objects:")
                    for prediction in results.predictions:
                        center = tuple(
                            int(round(val)) for val in prediction.box.center)
                        b, g, r = frame[center[1], center[0]]
                        cname = getColorName(r, g, b)
                        text.append("{}: {:2.2f}% color = {}".format(
                            prediction.label, prediction.confidence * 100,
                            cname))

                    # Mark up image for display
                    frame = edgeiq.markup_image(frame, results.predictions)

                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("Program Ending")
Example #14
def main():
    fps = edgeiq.FPS()

    try:
        streamer = edgeiq.Streamer()
        streamer.setup()
        video_stream = edgeiq.WebcamVideoStream(
            cam=0)  # replace with FileVideoStream if need be

        # Allow application to warm up
        video_stream.start()
        time.sleep(2.0)
        fps.start()
        text = [""]

        # initialize Vaccine Tracker
        vaccine_tracker = VaccineTracker()

        # loop detection
        while True:
            frame = video_stream.read()
            vaccine_tracker.update(frame)

            # draw the vaccination box in the frame
            frame = edgeiq.markup_image(frame, [
                edgeiq.ObjectDetectionPrediction(label="vaccination",
                                                 index=0,
                                                 box=vaccine_tracker.box,
                                                 confidence=100.00)
            ])
            streamer.send_data(frame, text)
            fps.update()

            if streamer.check_exit():
                break
    finally:
        fps.stop()
        streamer.close()
        video_stream.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #15
def pose_estimation():
    """
    Oak.get_model_result can return different results based on the purpose of the model running on the camera.

    This function shows how to work with pose estimation models.
    """
    fps = edgeiq.FPS()

    with edgeiq.Oak('alwaysai/human_pose_oak') as camera, \
            edgeiq.Streamer() as streamer:

        fps.start()
        while True:

            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            frame = camera.get_frame()

            result = camera.get_model_result()

            # Check for inferencing results. Oak.get_model_result is a non-blocking call and will return None when new data is not available.
            if result:
                frame = result.draw_poses(frame)

                text.append("Poses:")

                for ind, pose in enumerate(result.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    text.append("Key Points:")
                    for key_point in pose.key_points:
                        text.append(str(key_point))

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #16
def depth_stream():
    """
    This function shows how to retrieve the depth stream from the camera.
    """

    fps = edgeiq.FPS()
    with Oak('alwaysai/mobilenet_ssd_oak',
             capture_depth=True) as camera, edgeiq.Streamer() as streamer:

        fps.start()
        while True:
            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            depth = camera.get_depth()
            if depth is not None:
                streamer.send_data(depth, text)
                fps.update()

            if streamer.check_exit():
                break

        print('fps = {}'.format(fps.compute_fps()))
Example #17
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()
    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            prevModelChoice = None

            while True:
                if model_choice == "q":
                    print('Exiting program...')
                    if streamer is not None:
                        streamer.close()
                    fps.stop()
                    return
                model = models[model_choice]
                if model_choice != prevModelChoice and prevModelChoice is not None:
                    displayRuntimeStatistics(model)
                frame = video_stream.read()
                frame, text = runModel(model, frame, model_choice)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #18
def object_detection():
    """
    Oak.get_model_result can return different results based on the purpose of the model running on the camera.

    This function shows how to work with object detection models.
    """
    fps = edgeiq.FPS()

    with edgeiq.Oak('alwaysai/mobilenet_ssd_oak') as camera, \
            edgeiq.Streamer() as streamer:

        fps.start()
        while True:

            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            frame = camera.get_frame()

            result = camera.get_model_result(confidence_level=.75)

            # Check for inferencing results. Oak.get_model_result is a non-blocking call and will return None when new data is not available.
            if result:
                frame = edgeiq.markup_image(frame, result.predictions)

                text.append("Objects:")

                for prediction in result.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #19
def main():
    """Run CSI camera."""
    try:
        with edgeiq.JetsonVideoStream(
                cam=0,
                rotation=edgeiq.FrameRotation.ROTATE_180,
                camera_mode=edgeiq.JetsonCameraMode.IMX219_1920x1080_30_2,
                display_width=640,
                display_height=480) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            # loop detection
            while True:
                frame = video_stream.read()

                # Generate text to display on streamer
                text = ["Jetson CSI Camera"]
                streamer.send_data(frame, text)
                if streamer.check_exit():
                    break

    finally:
        print("Program Ending")
Example #20
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/yolo_v3")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    video_path = f"data/inputs/{difficulty}.mp4"
    stream_context = edgeiq.FileVideoStream(video_path, play_realtime=True)

    with stream_context as video_stream, edgeiq.Streamer() as streamer:
        while video_stream.more():

            image = video_stream.read()
            results = obj_detect.detect_objects(image, confidence_level=.5)
            specific_predictions = [
                r for r in results.predictions if r.label == 'person'
            ]

            res = tracker.update(specific_predictions)

            image = draw_tracked_boxes(image, res)
            # image = edgeiq.markup_image(image, people_predictions)

            streamer.send_data(image)
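
This snippet relies on module-level difficulty, tracker, and draw_tracked_boxes definitions that are not shown. A plausible set of definitions (assumptions, following the relabel-and-markup pattern used by the other tracking examples here) is:

# Hypothetical module-level setup assumed by the example above
import edgeiq

difficulty = "easy"  # selects data/inputs/easy.mp4
tracker = edgeiq.CentroidTracker(deregister_frames=30)


def draw_tracked_boxes(image, tracked_objects):
    """Label each tracked object with its ID and mark up the image."""
    predictions = []
    for object_id, prediction in tracked_objects.items():
        prediction.label = "person {}".format(object_id)
        predictions.append(prediction)
    return edgeiq.markup_image(image, predictions)
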
Example #21
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
Example #22
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()

    try:
        # initialize the streamer
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio, writer).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            
            # loop detection
            while True:
                frame = video_stream.read()
                text = [""]
                text.append(writer.text)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #23
def streamer_from(config):
    # print("alwaysai_helper.py: streamer_from")
    should_enable = config.get(ENABLE_STREAMER, True)
    if should_enable:
        return edgeiq.Streamer()
    return None
Example #24
def main():

    # if you would like to test an additional model, add one to the list below:
    models = ["alwaysai/mobilenet_ssd", "alwaysai/ssd_inception_v2_coco_2018_01_28"]

    # if you've added a model, add a new color in as a list of tuples in BGR format
    # to make visualization easier (e.g. [(B, G, R)]).
    colors = [[(66, 68, 179)], [(50, 227, 62)]]

    detectors = []

    # load all the models (creates a new object detector for each model)
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                text = [""]

                # gather data from the all the detectors
                for i in range(0, len(detectors)):
                    results = detectors[i].detect_objects(
                        frame, confidence_level=.5)
                    object_frame = edgeiq.markup_image(
                        frame, results.predictions, show_labels=False, colors=colors[i])

                    # for the first frame, overwrite the input feed
                    if i == 0:
                        display_frame = object_frame
                    else:

                        # otherwise, append the new marked-up frame to the previous one
                        display_frame = numpy.concatenate((object_frame, display_frame))

                    # append each prediction
                    for prediction in results.predictions:
                        text.append(
                                "Model {} detects {}: {:2.2f}%".format(detectors[i].model_id,
                                prediction.label, prediction.confidence * 100))

                # send the image frame and the predictions for both
                # prediction models to the output stream
                streamer.send_data(display_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #25
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(queue_depth=len(image_paths),
                         inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            r, g, b = cv2.split(image)
            fig = plt.figure()
            axis = fig.add_subplot(1, 1, 1, projection="3d")

            pixel_colors = image.reshape(
                (np.shape(image)[0] * np.shape(image)[1], 3))
            norm = colors.Normalize(vmin=-1., vmax=1.)
            norm.autoscale(pixel_colors)
            pixel_colors = norm(pixel_colors).tolist()

            axis.scatter(r.flatten(),
                         g.flatten(),
                         b.flatten(),
                         facecolors=pixel_colors,
                         marker=".")
            axis.set_xlabel("Red")
            axis.set_ylabel("Green")
            axis.set_zlabel("Blue")
            plt.show()

            #convert from rgb to hsv and pick out 2 shades
            hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            hsv_drot = (18, 24, 61)
            hsv_lrot = (13, 203, 55)

            #build the color mask
            mask = cv2.inRange(hsv_image, hsv_lrot, hsv_drot)
            res = cv2.bitwise_and(image, image, mask=mask)
            plt.subplot(1, 2, 1)
            plt.imshow(mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res)
            plt.show()

            #2nd layer mask, did not display
            hsv_olive = (34, 32, 120)
            hsv_dolive = (37, 240, 27)
            mask_ol = cv2.inRange(hsv_image, hsv_olive, hsv_dolive)
            res_w = cv2.bitwise_and(image, image, mask=mask_ol)
            plt.subplot(1, 2, 1)
            plt.imshow(mask_ol, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res_w)
            plt.show()

            #final mask
            final_mask = mask + mask_ol
            final_result = cv2.bitwise_and(image, image, mask=final_mask)
            plt.subplot(1, 2, 1)
            plt.imshow(final_mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(final_result)
            plt.show()

            #testing .shape and typecast image
            print("The type of this input is {}".format(type(image)))
            print("Shape: {}".format(image.shape))

            #piee
            ##text.append(get_colors(get_image(image_path), 4, True))

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]

            #need to convert from bgr to rgb
            swapped_colors = swap(obj_detect.colors)
            text.append("Colors printed!")
            # text.append(swapped_colors)

            print(swapped_colors)

            # print(obj_detect.colors)

            # converted = np.array([np.array(rgb) for rgb in swapped_colors]) // numpy arrays with lists (like numpy contained within itself, list of lists)

            # print(converted.shape)

            results = obj_detect.detect_objects(image, confidence_level=.5)

            image = edgeiq.markup_image(image,
                                        results.predictions,
                                        colors=obj_detect.colors)
            # print(rgb2hex(swapped_colors))

            # print(converted)

            # iterate through tuple list and convert
            # for x in obj_detect.colors:
            #     text.append(rgb2hex(swapped_colors))
            #     text.append(format(x))

            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(prediction.label,
                                                  prediction.confidence * 100))

            streamer.send_data(image, text)

        streamer.wait()

    print("Program Ending")
Example #26
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    new_label = 'Person {}'.format(object_id)
                    prediction.label = new_label
                    people.append(prediction)

                frame = edgeiq.markup_image(
                        frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exit in new_exits:
                    logs.append('Person {} exited'.format(exit))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                
                text.append('Current Occupancy:')
                text.append(str(currentPeople))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #27
def main():
    fps = edgeiq.FPS()

    # Change parameter to alwaysai/human_pose_eyecloud to run the human pose model.
    with edgeiq.EyeCloud('alwaysai/mobilenet_ssd_eyecloud') as camera, \
            edgeiq.Streamer() as streamer:
                         

        fps.start()

        centroid_tracker = edgeiq.CentroidTracker(
            deregister_frames=20, max_distance=100)

        number = 0

        while True:

            text = ["FPS:{}".format(fps.compute_fps())]

            frame = camera.get_frame()

            #print('image sequence = {}'.format(frame.sequence_index))

            result = camera.get_model_result(confidence_level=0.9)

        

            # Check for inferencing results.
            if result:
                #print('model sequence = {}'.format(result.sequence_index))

                text.append("Model: {}".format(camera.model_id))

                if camera.model_purpose == 'PoseEstimation':
                    frame = result.draw_poses(frame)

                    text.append("Inference time: {:1.3f} s".format(result.duration))

                    for ind, pose in enumerate(result.poses):
                        text.append("Person {}".format(ind))
                        text.append('-' * 10)
                        text.append("Key Points:")
                        for key_point in pose.key_points:
                            text.append(str(key_point))

                elif camera.model_purpose == 'ObjectDetection':
                    # Drop detections whose boxes overlap an already-kept
                    # box too much (IOU > 0.3), so one person is not
                    # counted twice
                    predictions = []
                    for prediction in result.predictions:
                        overlaps = any(
                            prediction.box.compute_overlap(kept.box) > 0.3
                            for kept in predictions)
                        if not overlaps:
                            predictions.append(prediction)

                    # Keep only detections inside the effective FOV region
                    predictions = [
                        p for p in predictions
                        if 500 <= p.box.center[0] <= 1300]

                    objects = centroid_tracker.update(predictions)

                    text.append("Inference time: {:1.3f} s".format(
                        result.duration))
                    text.append("Objects:")

                    tracked_predictions = []
                    for (object_id, prediction) in objects.items():
                        prediction.label = 'person {}'.format(object_id)

                        # object IDs start at 0, so the highest ID + 1 is
                        # the number of people that have passed so far
                        number = max(number, object_id + 1)

                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))
                        tracked_predictions.append(prediction)

                    frame = edgeiq.markup_image(frame, tracked_predictions)

                    text.append("people pass: {}".format(number))





                elif camera.model_purpose == 'Classification':
                    if len(result.predictions) > 0:
                        top_prediction = result.predictions[0]
                        text = "Classification: {}, {:.2f}%".format(
                            top_prediction.label,
                            top_prediction.confidence * 100)
                    else:
                        text = None

                    cv2.putText(frame, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
                                0.4, (0, 0, 255), 2)
            
            cv2.rectangle(frame,(500,0),(1300,1080),(255,0,0), 2)

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #28
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO,
                        accelerator=edgeiq.Accelerator.MYRIAD)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    y_letter = cv2.imread('letter_y.png')
    m_letter = cv2.imread('m_letter.jpg')
    c_letter = cv2.imread('c_letter.jpeg')
    a_letter = cv2.imread('a_letter.jpg')

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = [""]
                for ind, pose in enumerate(results.poses):
                    right_wrist_y = pose.key_points[4][1]
                    right_wrist_x = pose.key_points[4][0]
                    right_elbow_y = pose.key_points[3][1]
                    right_elbow_x = pose.key_points[3][0]
                    left_wrist_y = pose.key_points[7][1]
                    left_wrist_x = pose.key_points[7][0]
                    left_elbow_y = pose.key_points[6][1]
                    left_elbow_x = pose.key_points[6][0]
                    nose_y = pose.key_points[0][1]
                    nose_x = pose.key_points[0][0]
                    neck_y = pose.key_points[1][1]
                    if nose_y != -1 and neck_y != -1:
                        neck_distance = neck_y - nose_y
                    else:
                        neck_distance = 0
                    if right_wrist_y != -1 and left_wrist_y != -1 and nose_y != -1 and left_elbow_y != -1 and right_elbow_y != -1 and neck_distance > 0:
                        if right_wrist_y < nose_y and left_wrist_y < nose_y and right_wrist_x > right_elbow_x and left_wrist_x < left_elbow_x:
                            if right_wrist_y < (
                                    nose_y -
                                    neck_distance / 3.0) and left_wrist_y < (
                                        nose_y - neck_distance / 3.0):
                                print("----------A!-------------")
                                overlay = edgeiq.resize(
                                    a_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                            elif (nose_y - neck_distance) < right_wrist_y and (
                                    nose_y - neck_distance) < left_wrist_y:
                                print("----------M!-------------")
                                overlay = edgeiq.resize(
                                    m_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                    if right_wrist_y != -1 and left_wrist_y != -1 and nose_y != -1 and right_elbow_x and left_elbow_x and right_wrist_x and left_wrist_x:
                        if right_wrist_y < nose_y and left_wrist_y < nose_y and right_wrist_x < right_elbow_x and left_wrist_x > left_elbow_x:
                            print("----------Y!-------------")
                            overlay = edgeiq.resize(y_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue
                    if left_wrist_x != -1 and nose_x != -1 and left_wrist_y != -1 and nose_y != -1 and right_wrist_y != -1 and nose_x != -1:
                        if right_wrist_x > nose_x and right_wrist_y < nose_y and left_wrist_x > nose_x:
                            print("----------C!-------------")
                            overlay = edgeiq.resize(c_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #29
def main():

    # The current frame index
    frame_idx = 0

    # The number of frames to skip before running detector
    detect_period = 50

    # if you would like to test an additional model, add one to the list below:
    models = [
        "alwaysai/ssd_mobilenet_v2_oidv4",
        "alwaysai/ssd_inception_v2_coco_2018_01_28"
    ]

    # include any labels that you wish to detect from any models (listed above in 'models') here in this list
    detected_contraband = [
        "Pen", "cell phone", "backpack", "book", "Book", "Ring binder",
        "Headphones", "Calculator", "Mobile phone", "Telephone", "Microphone",
        "Ipod", "Remote control"
    ]

    # load all the models (creates a new object detector for each model)
    detectors = []
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()
    contraband_summary = ContrabandSummary()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                predictions_to_markup = []
                text = [""]

                # only analyze every 'detect_period' frame (i.e. every 50th in original code)
                if frame_idx % detect_period == 0:

                    # gather data from the all the detectors
                    for i in range(0, len(detectors)):
                        results = detectors[i].detect_objects(
                            frame, confidence_level=.2)

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # append each prediction
                        predictions = results.predictions
                        for prediction in predictions:

                            if (prediction.label.strip()
                                    in detected_contraband):
                                contraband_summary.contraband_alert(
                                    prediction.label, frame)
                                predictions_to_markup.append(prediction)
                                tracker.start(frame, prediction)
                else:

                    # if there are objects being tracked, update the tracker with the new frame
                    if tracker.count:

                        # get the new predictions for the objects being tracked, used to markup the frame
                        predictions_to_markup = tracker.update(frame)

                # mark up the frame with the predictions for the contraband objects
                frame = edgeiq.markup_image(frame,
                                            predictions_to_markup,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # send the collection of contraband detection points (string and video frame) to the streamer
                text = contraband_summary.get_contraband_string()

                streamer.send_data(frame, text)
                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #30
def main():
    # Spin up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/" + OBJECT_DETECTION_MODEL)
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Prepare to track frames per second calculations
    fps = edgeiq.FPS()

    # Load any prior instance of the tracker, otherwise spin up a new one
    centroid_tracker = file_manager.load(
        CENTROID_TRACKER,
        edgeiq.CentroidTracker(deregister_frames=TRACKER_DEREGISTER_FRAMES,
                               max_distance=TRACKER_MAX_DISTANCE))
    # Load any prior instance of the metrics data, otherwise start a new one
    metrics = file_manager.load(METRICS_MANAGER,
                                metrics_manager.MetricsManager())

    try:
        if IP_CAMERA_FEED is not None:
            stream_details = edgeiq.IPVideoStream(IP_CAMERA_FEED)
        else:
            stream_details = edgeiq.WebcamVideoStream(cam=0)

        with stream_details as video_stream, \
        edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(
                    frame, confidence_level=DETECT_CONFIDENCE_THRESHOLD)

                # Ignore detections of anything other than people
                filter = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(filter)

                # Store active predictions for just this loop
                predictions = []

                if len(objects) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics.addTimeFor(object_id)
                    timeForId = metrics.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                m = metrics.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(m["count"]))
                text.append("Total time: {} sec".format(m["total"]))
                text.append("Average time: {0:.1f} sec".format(m["avg"]))
                text.append("Longest individual time: {} sec".format(m["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        # TODO: Update to save every few seconds in case a crash occurs
        file_manager.save(metrics, METRICS_MANAGER)
        file_manager.save(centroid_tracker, CENTROID_TRACKER)
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")