Example #1
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(
            engine=edgeiq.Engine.DNN_OPENVINO,
            accelerator=edgeiq.Accelerator.MYRIAD)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()
    server_comm = ServerComm()
    server_comm.setup()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream:
            time.sleep(2.0)
            fps.start()
            while True:
                frame = video_stream.read()
                server_comm.send_frame(frame)
                fps.update()

    finally:
        fps.stop()
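
ServerComm is defined outside this example; only its setup() and send_frame() calls are visible above. A minimal hypothetical stand-in, consistent with those two calls (the JPEG-over-the-wire transport is an assumption, not the original implementation):

# Hypothetical stand-in for the ServerComm used in Example #1
import base64

import cv2


class ServerComm:
    def setup(self):
        # e.g. open a socket or HTTP session to the server here
        self._connected = True

    def send_frame(self, frame):
        # JPEG-encode the frame so it can be shipped over the wire
        ok, jpeg = cv2.imencode('.jpg', frame)
        if ok:
            payload = base64.b64encode(jpeg.tobytes())
            # transmit payload to the server here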
Example #2
def main(cam=0, video_file=None, debug=False):
    if video_file is not None:
        video_stream_class = edgeiq.FileVideoStream
        kwargs = {'path': video_file, 'play_realtime': True}
    else:
        video_stream_class = edgeiq.WebcamVideoStream
        kwargs = {'cam': cam}

        # WebcamVideoStream has no more() method; patch one in so the loop
        # below can call video_stream.more() for both stream types
        edgeiq.WebcamVideoStream.more = lambda x: True

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

    if edgeiq.is_jetson():
        pose_estimator.load(engine=edgeiq.Engine.TENSOR_RT)
    else:
        pose_estimator.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    jj = JumpingJacksTracker()

    try:
        with video_stream_class(**kwargs) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while video_stream.more():
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                if len(results.poses) > 0:
                    jj.update(results.poses[0])
                else:
                    text.append('No poses found')
                text.append("Jumping jacks: {}".format(jj.count))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        if debug is True:
            jj.save_history()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            check_posture = CheckPosture()

            # loop detection
            while True:
                frame = video_stream.read()
                frame, text = check_posture.update(frame)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
def main():

    dm = DetectionManager()

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video, \
                edgeiq.Streamer() as streamer:
            time.sleep(2.0)
            fps.start()

            while True:
                image = video.read()
                (image, text) = dm.update(image)
                streamer.send_data(image, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
def main():
    # load the configuration data from config.json
    config = load_json(CONFIG_FILE)
    scale = config.get(SCALE)

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

    pose_estimator.load(engine=edgeiq.Engine.DNN,
                        accelerator=edgeiq.Accelerator.CPU)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            posture = CheckPosture(scale)

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                for ind, pose in enumerate(results.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    text.append("Key Points:")

                    # update the instance key_points to check the posture
                    posture.set_key_points(pose.key_points)

                    correct_posture = posture.correct_posture()
                    if not correct_posture:
                        text.append(posture.build_message())

                        # make a sound to alert the user to improper posture
                        print("\a")

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #6
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # Generate text to display on streamer
                text = "FLiR Lepton"

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
def main():
    fps = edgeiq.FPS()
    detector = edgeiq.AprilTagDetector(edgeiq.AprilTagFamily.TAG_16h5)
    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            # loop detection
            while True:
                text = []
                frame = video_stream.read()
                detections = detector.detect(frame)
                for detection in detections:
                    frame = detection.markup_image(frame, tag_id=True)
                    text.append("Tag ID: {}".format(detection.tag_id))

                text.append("FPS: {:.2f}".format(fps.compute_fps()))
                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

                fps.update()

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #8
def main():
    # arduino = serial.Serial('COM1', 115200, timeout=1)
    GPIO.setmode(GPIO.BCM)
    buzzer = 23
    GPIO.setup(buzzer, GPIO.OUT)
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                # keep only "bottle" detections to drive the buzzer below
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, ["bottle"])
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)
                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")
                for prediction in predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                    # pulse the buzzer for each detected bottle
                    GPIO.output(buzzer, GPIO.HIGH)
                    GPIO.input(buzzer)  # read back the pin state (unused)
                    time.sleep(0.5)
                    GPIO.output(buzzer, GPIO.LOW)
                    GPIO.input(buzzer)
                    time.sleep(0.5)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
        "alwaysai/fcn_resnet18_pascal_voc_512x320")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA,
                               accelerator=edgeiq.Accelerator.NVIDIA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    class_list = ['bottle']

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=320)
                results = semantic_segmentation.segment_image(frame)

                object_map = semantic_segmentation.build_object_map(
                    results.class_map, class_list)

                object_mask = semantic_segmentation.build_image_mask(
                    object_map)

                # object_mask[np.where((object_mask==[0,0,0]).all(axis=2))] = [255,255,255]

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                blended = edgeiq.blend_images(frame, object_mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #10
def main():

    label_defs = {}

    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)


    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                        frame, labelToString(label_defs, results.predictions),
                        show_labels=False, show_confidences=False,
                        colors=obj_detect.colors, line_thickness=0)
                frame = addNotes(frame, results.predictions)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")
                text.append("fps:{:2.2f}".format(fps.compute_fps()))
                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #11
def main():

    text = "Facial Overlays with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)


    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_resized_frame)

                left_eye = 0
                right_eye = 0

                for facial_coordinate in facial_coordinates:
                    for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
                        if name == 'left_eye':
                            left_eye = facial_coordinate[i:j]
                        # Uncomment if you want the patch on the right eye as well.
                        # elif name == 'right_eye':
                        #     right_eye = facial_coordinate[i:j]
                    leftEyeSize, leftEyeCenter = eye_size(left_eye)
                    # Uncomment if you want the patch on the right eye as well.
                    # rightEyeSize, rightEyeCenter = eye_size(right_eye)
                    place_mustache(resized_frame, facial_coordinate)
                    # Uncomment if you want to place spectacles on the face.
                    # place_glasses(resized_frame, facial_coordinate)
                    place_eye_patch(resized_frame, leftEyeCenter, leftEyeSize)
                    # place_eye(resized_frame, rightEyeCenter, rightEyeSize)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #12
def main(camera, use_streamer, server_addr):
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #13
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #14
def main():
    # Set up object detection API
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    # Set up rpi ports
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(LEFT_PORT, GPIO.OUT)
    GPIO.setup(RIGHT_PORT, GPIO.OUT)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.8) # Maybe filter the result to bottles or bags for demo?
                
                image_Centering(results.predictions)

                # text must exist even when debug output is disabled
                text = []

                # Debug information
                if debug_On:
                    frame = edgeiq.markup_image(
                            frame, results.predictions, colors=obj_detect.colors)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append(
                            "Inference time: {:1.3f} s".format(results.duration))
                    text.append("Objects:")

                    for prediction in results.predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                time.sleep(FRAME_A_RATE)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
Example #15
def main():

    text = "Facial Landmarks with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(
                    frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_resized_frame)

                # Loop to markup resized_frame
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w,
                     h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(rectangle)
                    cv2.rectangle(resized_frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                    cv2.putText(resized_frame, "Face #{}".format(i + 1),
                                (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

                for facial_coordinate in facial_coordinates:
                    for (x, y) in facial_coordinate:
                        cv2.circle(resized_frame, (x, y), 1, (255, 0, 0), -1)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #16
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()
                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=facial_detector.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Faces:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #17
    # Fragment: the __init__ of a threaded per-camera worker class; the
    # enclosing class definition is not included in this example
    def __init__(self, camera_idx, engine, model_id, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.idx = camera_idx
        self._engine = engine
        self._model_id = model_id

        self._results_q = CircularQueue(2)
        self._stop_event = threading.Event()

        self._prev_results = None

        self._fps = edgeiq.FPS()
Example #18
def get_components(config):
    # print("alwaysai_helper.py: get_components")
    fps = edgeiq.FPS()
    fps.start()
    obj_detector = object_detector_from(config)
    streamer = streamer_from(config)
    tracker = tracker_from(config)
    video_stream = video_stream_from(config)
    return {
        FPS: fps,
        OBJECT_DETECTOR: obj_detector,
        STREAMER: streamer,
        TRACKER: tracker,
        VIDEO_STREAM: video_stream
    }
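
A usage sketch for get_components; it assumes load_json and the FPS, OBJECT_DETECTOR, STREAMER, TRACKER, and VIDEO_STREAM key constants come from the same helper module as get_components:

# Hypothetical caller for the component factory above
config = load_json('config.json')
components = get_components(config)

fps = components[FPS]
obj_detector = components[OBJECT_DETECTOR]
streamer = components[STREAMER]
tracker = components[TRACKER]
video_stream = components[VIDEO_STREAM]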
Example #19
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                pdict = dict()

                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration) +
                    "\nFPS: {:.2f}".format(fps.compute_fps()))
                for ind, pose in enumerate(results.poses):
                    pdict["Person {}".format(ind)] = pose.key_points
                    df = pd.DataFrame(data=pdict,
                                      index=pose.key_points,
                                      columns=pdict)
                    print(df)

                streamer.send_data(results.draw_poses(frame), text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #20
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation("alwaysai/enet")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    try:
        # play_realtime simulates a live video feed from a camera
        with edgeiq.FileVideoStream('toronto.mp4', play_realtime=True) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow the stream to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = semantic_segmentation.segment_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                mask = semantic_segmentation.build_image_mask(
                    results.class_map)
                blended = edgeiq.blend_images(frame, mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #21
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # convert to HSV and keep the value (brightness) channel;
                # OpenCV frames are BGR, so use COLOR_BGR2HSV
                frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                frame_value = frame_hsv[:, :, 2]

                # bilateral filter - edge-preserving image smoothing method
                blurredBrightness = cv2.bilateralFilter(
                    frame_value, 9, 150, 150)

                # Canny edge detector
                thresh = 50
                edges = cv2.Canny(blurredBrightness,
                                  thresh,
                                  thresh * 2,
                                  L2gradient=True)

                # Generate text to display on streamer
                text = "Thermal Edge Detector"

                streamer.send_data(edges, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #22
def main():
    """Run object detector and centroid tracker."""
    tracker = edgeiq.CorrelationTracker(
            max_objects=5, enter_cb=face_enters, exit_cb=face_exits)

    fps = edgeiq.FPS()

    try:
        with edgeiq.oak.Oak('alwaysai/face_detection_0200_oak',
                        sensor=edgeiq.Sensor.res_1080,
                        video_mode=edgeiq.VideoMode.preview) as oak_camera, \
                        edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and tracking
            while True:
                frame = oak_camera.get_frame()
                results = oak_camera.get_model_result(confidence_level=.6)
                # text must exist even when no new result arrives this frame
                text = ["Faces Detected:"]
                if results:
                    fps.update()

                    objects = tracker.update(results.predictions, frame)

                    # Update the label to reflect the object ID
                    predictions = []
                    for (object_id, prediction) in objects.items():
                        prediction.label = "face {}".format(object_id)
                        text.append("{}".format(prediction.label))
                        predictions.append(prediction)
                    text.append(("approx. FPS: {:.2f}".
                                format(fps.compute_fps())))
                    frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("Program Ending")
Example #23
def main():
    fps = edgeiq.FPS()

    try:
        streamer = edgeiq.Streamer()
        streamer.setup()
        video_stream = edgeiq.WebcamVideoStream(
            cam=0)  # replace with FileVideoStream if need be

        # Allow application to warm up
        video_stream.start()
        time.sleep(2.0)
        fps.start()
        text = [""]

        # initialize Vaccine Tracker
        vaccine_tracker = VaccineTracker()

        # loop detection
        while True:
            frame = video_stream.read()
            vaccine_tracker.update(frame)

            # draw the vaccination box in the frame
            frame = edgeiq.markup_image(frame, [
                edgeiq.ObjectDetectionPrediction(label="vaccination",
                                                 index=0,
                                                 box=vaccine_tracker.box,
                                                 confidence=100.00)
            ])
            streamer.send_data(frame, text)
            fps.update()

            if streamer.check_exit():
                break
    finally:
        fps.stop()
        streamer.close()
        video_stream.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #24
def main():
    """Run object detector."""
    fps = edgeiq.FPS()

    try:
        with edgeiq.Oak('alwaysai/ssd_v2_coco_oak') as oak_camera,\
                edgeiq.Streamer() as streamer:
            # Allow Oak camera to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = oak_camera.get_frame()
                results = oak_camera.get_model_result(confidence_level=.8)
                # text must exist even when no new result arrives this frame
                text = ["Oak Camera Detections:"]
                if results:
                    fps.update()
                    text.append("approx. FPS: {:.2f}".format(
                        fps.compute_fps()))
                    text.append("Objects:")
                    for prediction in results.predictions:
                        center = tuple(
                            int(round(val)) for val in prediction.box.center)
                        b, g, r = frame[center[1], center[0]]
                        cname = getColorName(r, g, b)
                        text.append("{}: {:2.2f}% color = {}".format(
                            prediction.label, prediction.confidence * 100,
                            cname))

                    # Mark up image for display
                    frame = edgeiq.markup_image(frame, results.predictions)

                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("Program Ending")
Example #25
def pose_estimation():
    """
    Oak.get_model_result can return different results based on the purpose of the model running on the camera.

    This function shows how to work with pose estimation models.
    """
    fps = edgeiq.FPS()

    with edgeiq.Oak('alwaysai/human_pose_oak') as camera, \
            edgeiq.Streamer() as streamer:

        fps.start()
        while True:

            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            frame = camera.get_frame()

            result = camera.get_model_result()

            # Check for inferencing results. Oak.get_model_result is a
            # non-blocking call and will return None when new data is not
            # available.
            if result:
                frame = result.draw_poses(frame)

                text.append("Poses:")

                for ind, pose in enumerate(result.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    text.append("Key Points:")
                    for key_point in pose.key_points:
                        text.append(str(key_point))

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #26
def depth_stream():
    """
    This function shows how to retrieve the depth stream from the camera.
    """

    fps = edgeiq.FPS()
    with Oak('alwaysai/mobilenet_ssd_oak',
             capture_depth=True) as camera, edgeiq.Streamer() as streamer:

        fps.start()
        while True:
            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            depth = camera.get_depth()
            if depth is not None:
                streamer.send_data(depth, text)
                fps.update()

            if streamer.check_exit():
                break

        print('fps = {}'.format(fps.compute_fps()))
Example #27
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()
    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            prevModelChoice = None

            while True:
                if model_choice == "q":
                    print('Exiting program...')
                    if streamer is not None:
                        streamer.close()
                    fps.stop()
                    return
                model = models[model_choice]
                if model_choice != prevModelChoice and prevModelChoice is not None:
                    displayRuntimeStatistics(model)
                prevModelChoice = model_choice
                frame = video_stream.read()
                frame, text = runModel(model, frame, model_choice)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #28
def object_detection():
    """
    Oak.get_model_result can return different results based on the purpose of the model running on the camera.

    This function shows how to work with object detection models.
    """
    fps = edgeiq.FPS()

    with edgeiq.Oak('alwaysai/mobilenet_ssd_oak') as camera, \
            edgeiq.Streamer() as streamer:

        fps.start()
        while True:

            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]

            frame = camera.get_frame()

            result = camera.get_model_result(confidence_level=.75)

            # Check for inferencing results. Oak.get_model_result is a
            # non-blocking call and will return None when new data is not
            # available.
            if result:
                frame = edgeiq.markup_image(frame, result.predictions)

                text.append("Objects:")

                for prediction in result.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #29
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()

    try:
        # initialize the streamer (None first, so the finally block is
        # safe even if setup fails)
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio, writer).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            
            # loop detection
            while True:
                frame = video_stream.read()
                text = [""]
                text.append(writer.text)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #30
def fps_monitor():
    return edgeiq.FPS()
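
fps_monitor() simply hands back an edgeiq.FPS instance. The lifecycle used throughout these examples is start(), update() once per processed frame, then stop() before reading the metrics; a minimal sketch:

fps = fps_monitor()
fps.start()
for _ in range(100):
    # ... process one frame here ...
    fps.update()
fps.stop()
print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
print("approx. FPS: {:.2f}".format(fps.compute_fps()))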