Example #1
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # Generate text to display on streamer
                text = "FLiR Lepton"

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #2
def main():

    dm = DetectionManager()

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video, \
                edgeiq.Streamer() as streamer:
            time.sleep(2.0)
            fps.start()

            while True:
                image = video.read()
                (image, text) = dm.update(image)
                streamer.send_data(image, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
    def _run_detection(self):
        obj_detect = edgeiq.ObjectDetection(self._model_id)
        obj_detect.load(engine=self._engine)

        print("Loaded model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

        with edgeiq.WebcamVideoStream(cam=self.idx) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            self._fps.start()

            while True:
                frame = video_stream.read()

                if self._stop_event.is_set():
                    break

                results = obj_detect.detect_objects(frame, confidence_level=.5)

                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                output_results = {
                    "idx": self.idx,
                    "frame": frame,
                    "results": results,
                    "model_id": obj_detect.model_id
                }
                self._results_q.put(output_results)

                self._fps.update()
Example #4
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            check_posture = CheckPosture()

            # loop detection
            while True:
                frame = video_stream.read()
                frame, text = check_posture.update(frame)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
def write_data(data):
    writer.write = True
    writer.text = 'Data Collection Started!'
    time.sleep(0.5)
    print('start signal received')
    file_name = file_set_up("video")

    with edgeiq.WebcamVideoStream(cam=0) as video_stream, edgeiq.VideoWriter(
            file_name, fps=SAMPLE_RATE) as video_writer:
        
        if SAMPLE_RATE > video_stream.fps:
            raise RuntimeError(
                "Sampling rate {} cannot be greater than the camera's FPS {}".
                format(SAMPLE_RATE, video_stream.fps))

        time.sleep(2.0)

        print('Data Collection Started!')
        while True:
            t_start = time.time()
            frame = video_stream.read()
            video_writer.write_frame(frame)
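            # Sleep off the remainder of the sampling interval so frames are
            # written at approximately SAMPLE_RATE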
            t_end = time.time() - t_start
            t_wait = (1 / SAMPLE_RATE) - t_end
            if t_wait > 0:
                time.sleep(t_wait)

            if not writer.write:
                writer.text = 'Data Collection Ended'
                time.sleep(0.5)
                print('Data Collection Ended')
                break
Example #6
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(
            engine=edgeiq.Engine.DNN_OPENVINO,
            accelerator=edgeiq.Accelerator.MYRIAD)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()
    server_comm = ServerComm()
    server_comm.setup()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream:
            time.sleep(2.0)
            fps.start()
            while True:
                frame = video_stream.read()
                server_comm.send_frame(frame)
                fps.update()

    finally:
        fps.stop()
Example #7
def main():
    fps = edgeiq.FPS()
    detector = edgeiq.AprilTagDetector(edgeiq.AprilTagFamily.TAG_16h5)
    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            # loop detection
            while True:
                text = []
                frame = video_stream.read()
                detections = detector.detect(frame)
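                # Draw each detected AprilTag and record its ID for the streamer text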
                for detection in detections:
                    frame = detection.markup_image(frame, tag_id=True)
                    text.append(detection.tag_id)

                text.append(fps.compute_fps())
                streamer.send_data(frame, text)

                if streamer.check_exit():
                    break

                fps.update()

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #8
def main():
    # load the configuration data from config.json
    config = load_json(CONFIG_FILE)
    scale = config.get(SCALE)

    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")

    pose_estimator.load(engine=edgeiq.Engine.DNN,
                        accelerator=edgeiq.Accelerator.CPU)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            posture = CheckPosture(scale)

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                for ind, pose in enumerate(results.poses):
                    text.append("Person {}".format(ind))
                    text.append('-' * 10)
                    text.append("Key Points:")

                    # update the instance key_points to check the posture
                    posture.set_key_points(pose.key_points)

                    correct_posture = posture.correct_posture()
                    if not correct_posture:
                        text.append(posture.build_message())

                        # make a sound to alert the user to improper posture
                        print("\a")

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
def _video_camera_stream(camera_id):
    if camera_id is None:
        # Default cam index
        camera_id = 0
    video_stream = edgeiq.WebcamVideoStream(cam=camera_id)
    time.sleep(2.0)
    return video_stream
Example #10
def main():
    # arduino = serial.Serial('COM1', 115200, timeout=1)
    GPIO.setmode(GPIO.BCM)
    buzzer = 23
    GPIO.setup(buzzer, GPIO.OUT)
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, ["bottle"])
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)
                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")
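                # Pulse the buzzer once for each detected bottle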
                for prediction in predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                    GPIO.output(buzzer, GPIO.HIGH)
                    GPIO.input(buzzer)
                    time.sleep(0.5)
                    GPIO.output(buzzer, GPIO.LOW)
                    GPIO.input(buzzer)
                    time.sleep(0.5)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #11
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
        "alwaysai/fcn_resnet18_pascal_voc_512x320")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA,
                               accelerator=edgeiq.Accelerator.NVIDIA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    class_list = ['bottle']

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=320)
                results = semantic_segmentation.segment_image(frame)

                object_map = semantic_segmentation.build_object_map(
                    results.class_map, class_list)

                object_mask = semantic_segmentation.build_image_mask(
                    object_map)

                # object_mask[np.where((object_mask==[0,0,0]).all(axis=2))] = [255,255,255]

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                blended = edgeiq.blend_images(frame, object_mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #12
def main():

    label_defs = {}

    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)


    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:

                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                        frame, labelToString(label_defs, results.predictions),
                        show_labels=False, show_confidences=False,
                        colors=obj_detect.colors, line_thickness=0)
                frame = addNotes(frame, results.predictions)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")
                text.append("fps:{:2.2f}".format(fps.compute_fps()))
                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #13
def main():

    text = "Facial Overlays with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)


    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(gray_resized_frame)

                left_eye = 0
                right_eye = 0

                for facial_coordinate in facial_coordinates:
                    for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
                        if name == 'left_eye':
                            left_eye = facial_coordinate[i:j]
                        # Uncomment if you want the patch on the right eye as well.
                        # elif name == 'right_eye':
                        #     right_eye = facial_coordinate[i:j]
                    leftEyeSize, leftEyeCenter = eye_size(left_eye)
                    # Uncomment if you want the patch on the right eye as well.
                    # rightEyeSize, rightEyeCenter = eye_size(right_eye)
                    place_mustache(resized_frame, facial_coordinate)
                    # Uncomment if you want to place spectacles on the face.
                    # place_glasses(resized_frame, facial_coordinate)
                    place_eye_patch(resized_frame, leftEyeCenter, leftEyeSize)
                    # place_eye(resized_frame, rightEyeCenter, rightEyeSize)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #14
def main(camera, use_streamer, server_addr):
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #15
def main():
    # Set up object detection API
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    # Set up rpi ports
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(LEFT_PORT, GPIO.OUT)
    GPIO.setup(RIGHT_PORT, GPIO.OUT)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.8) # Maybe filter the result to bottles or bags for demo?
                
                image_Centering(results.predictions)

                # Default streamer text when debug output is disabled
                text = [""]

                # Debug information
                if debug_On:
                    frame = edgeiq.markup_image(
                            frame, results.predictions, colors=obj_detect.colors)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append(
                            "Inference time: {:1.3f} s".format(results.duration))
                    text.append("Objects:")

                    for prediction in results.predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                time.sleep(FRAME_A_RATE)

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
Example #16
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #17
def start_detection(config,
                    did_start_callback,
                    enter_callback,
                    exit_callback,
                    did_end_object_callback,
                    should_log=True):
    """
    Enable features dependent on config and pass callback down to actual detection functions
    """
    global ENTER_CALLBACK
    global EXIT_CALLBACK
    ENTER_CALLBACK = enter_callback
    EXIT_CALLBACK = exit_callback
    print('alwaysai.py: start_detection: enter_callback: {}'.format(
        ENTER_CALLBACK))

    # Configs
    od_config = alwaysai_configs.ObjectDetector(config)
    ct_config = alwaysai_configs.CentroidTracker(config)
    vs_config = alwaysai_configs.VideoStream(config)
    od = object_detector(od_config.model_id)
    t = edgeiq.CentroidTracker(deregister_frames=ct_config.deregister_frames,
                               max_distance=ct_config.max_distance)
    en_zones_config = config.get('entry_zones', [])
    ex_zones_config = config.get('exit_zones', [])
    entry_zones = zones_from_config(en_zones_config)
    exit_zones = zones_from_config(ex_zones_config)
    vs = None

    # print('alwaysai.py: start_detection: en_zones_config: {}'.format(en_zones_config))
    # print('alwaysai.py: start_detection: entry_zones: {}'.format(entry_zones))

    # Inits
    if vs_config.mode == 'camera':
        if should_log:
            print('alwaysai.py: start_detection: enabling camera w/ id: {}'.
                  format(vs_config.camera_id))
        vs = edgeiq.WebcamVideoStream(cam=vs_config.camera_id)
    if vs_config.mode == 'file':
        if should_log:
            print('alwaysai.py: start_detection: reading from file')
        vs = edgeiq.FileVideoStream(vs_config.filename, play_realtime=True)
    enable_streamer = config.get('enable_streamer', False)
    streamer = alwaysai_configs.DummyStreamer()
    if enable_streamer:
        print('alwaysai.py: start_detection: ENABLING streamer')
        streamer = edgeiq.Streamer()

    # Start
    start_video_detection_with_streamer(vs, od_config, od, streamer, t,
                                        entry_zones, exit_zones,
                                        did_start_callback, did_detect,
                                        did_end_object_callback)
Example #18
def main():

    text = "Facial Landmarks with Dlib"

    fps = edgeiq.FPS()

    shape_predictor = "shape_predictor_68_face_landmarks.dat"
    dlib_flm = facial_landmarks.Dlib_FLM(shape_predictor)

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                resized_frame, gray_resized_frame = dlib_flm.image_preprocessor(
                    frame)
                facial_coordinates, rectangles = dlib_flm.detect_faces_shapes(
                    gray_resized_frame)

                # Loop to markup resized_frame
                for (i, rectangle) in enumerate(rectangles):
                    (x, y, w,
                     h) = dlib_flm.dlib_rectangle_to_cv_bondingbox(rectangle)
                    cv2.rectangle(resized_frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                    cv2.putText(resized_frame, "Face #{}".format(i + 1),
                                (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

                for facial_coordinate in facial_coordinates:
                    for (x, y) in facial_coordinate:
                        cv2.circle(resized_frame, (x, y), 1, (255, 0, 0), -1)

                streamer.send_data(resized_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #19
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()
                # detect human faces
                results = facial_detector.detect_objects(frame,
                                                         confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=facial_detector.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Faces:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #20
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                pdict = dict()
                myList = [None] * 15

                # Generate text to display on streamer
                text = ["Model: {}".format(pose_estimator.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration) +
                    "\nFPS: {:.2f}".format(fps.compute_fps()))
                for ind, pose in enumerate(results.poses):
                    pdict["Person {}".format(ind)] = pose.key_points
                    df = pd.DataFrame(data=pdict,
                                      index=pose.key_points,
                                      columns=pdict)
                    print(df)

                streamer.send_data(results.draw_poses(frame), text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #21
def main():

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                # HSV
                frame_hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
                frame_value = frame_hsv[:, :, 2]

                # bilateral filter - edge-preserving image smoothing method
                blurredBrightness = cv2.bilateralFilter(
                    frame_value, 9, 150, 150)

                # Canny edge detector
                thresh = 50
                edges = cv2.Canny(blurredBrightness,
                                  thresh,
                                  thresh * 2,
                                  L2gradient=True)

                # Generate text to display on streamer
                text = "Thermal Edge Detector"

                streamer.send_data(edges, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #22
def take_snapshot(data):
    """Takes a single snapshot and saves it.

    """
    print('snapshot signal received')
    file_name = file_set_up("image")
    with edgeiq.WebcamVideoStream(cam=0) as video_stream, edgeiq.VideoWriter(
            file_name, fps=SAMPLE_RATE) as video_writer:

        time.sleep(2.0)
        writer.text = 'Taking Snapshot'
        time.sleep(1.0)
        print('Taking Snapshot')
        frame = video_stream.read()
        video_writer.write_frame(frame)
        writer.text = 'Snapshot Saved'
        time.sleep(0.5)
        print('Snapshot Saved')
Example #23
def main():
    fps = edgeiq.FPS()

    try:
        streamer = edgeiq.Streamer()
        streamer.setup()
        video_stream = edgeiq.WebcamVideoStream(
            cam=0)  # replace with FileVideoStream if need be

        # Allow application to warm up
        video_stream.start()
        time.sleep(2.0)
        fps.start()
        text = [""]

        # initialize Vaccine Trakcer
        vaccine_tracker = VaccineTracker()

        # loop detection
        while True:
            frame = video_stream.read()
            vaccine_tracker.update(frame)

            # draw the vaccination box in the frame
            frame = edgeiq.markup_image(frame, [
                edgeiq.ObjectDetectionPrediction(label="vaccination",
                                                 index=0,
                                                 box=vaccine_tracker.box,
                                                 confidence=100.00)
            ])
            streamer.send_data(frame, text)
            fps.update()

            if streamer.check_exit():
                break
    finally:
        fps.stop()
        streamer.close()
        video_stream.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #24
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()
    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            prevModelChoice = None

            while True:
                if model_choice == "q":
                    print('Exiting program...')
                    if streamer is not None:
                        streamer.close()
                    fps.stop()
                    return
                model = models[model_choice]
                if model_choice != prevModelChoice and prevModelChoice is not None:
                    displayRuntimeStatistics(model)
                prevModelChoice = model_choice
                frame = video_stream.read()
                frame, text = runModel(model, frame, model_choice)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #25
def main(camera, use_streamer, server_addr, stream_fps):
    fps = edgeiq.FPS()

    try:
        # initialize the streamer
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr, stream_fps, sio, writer).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            
            # loop detection
            while True:
                frame = video_stream.read()
                text = [""]
                text.append(writer.text)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #26
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    new_label = 'Person {}'.format(object_id)
                    prediction.label = new_label
                    people.append(prediction)

                frame = edgeiq.markup_image(
                        frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exit in new_exits:
                    logs.append('Person {} exited'.format(exit))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                
                text.append('Current Occupancy:')
                text.append(str(currentPeople))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #27
def gen():
    #Load in our machine learning models!
    detector_config = {
        "engine": edgeiq.Engine.DNN_OPENVINO,
        "accelerator": edgeiq.Accelerator.MYRIAD
    }

    #Get the face detector:
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(**detector_config)
    describe_model(facial_detector, "Face")

    #Get the gender detector
    gender_detector = edgeiq.Classification("alwaysai/gendernet")
    gender_detector.load(**detector_config)
    describe_model(gender_detector, "Gender")

    #Get the age detector
    age_detector = edgeiq.Classification("alwaysai/agenet")
    age_detector.load(**detector_config)
    describe_model(age_detector, "Age")

    texts = ["No patient detected!"]

    with edgeiq.WebcamVideoStream(cam=0) as webcam:
        # loop detection
        while True:
            frame = webcam.read()

            #Flip the image upside down bc of how we orient the camera
            frame = np.flipud(frame)

            # detect human faces
            face_results = facial_detector.detect_objects(frame,
                                                          confidence_level=.5)

            if len(face_results.predictions) > 0:
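                # Crop the first detected face from the frame using its bounding box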
                face = frame[face_results.predictions[0].box.
                             start_y:face_results.predictions[0].box.end_y,
                             face_results.predictions[0].box.
                             start_x:face_results.predictions[0].box.end_x]

                #Detect gender and age
                gender_results = gender_detector.classify_image(
                    face, confidence_level=.9)
                age_results = age_detector.classify_image(face)

                frame = blur_detections(frame, face_results.predictions)

                # Find the index of highest confidence
                if len(gender_results.predictions) > 0 and len(
                        age_results.predictions) > 0:
                    top_prediction1 = gender_results.predictions[0]
                    top_prediction2 = age_results.predictions[0]
                    texts = []
                    texts.append("Gender Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction1.label,
                        top_prediction1.confidence * 100))
                    texts.append("Age Classification:")
                    texts.append("{}, {:.1f}%".format(
                        top_prediction2.label,
                        top_prediction2.confidence * 100))
            else:
                texts = ["No patient detected!"]

            #HACK: Add a panel to the right side of the image
            label_panel = np.zeros(
                (frame.shape[0], frame.shape[1] // 2, frame.shape[2])) + 255
            org_coords = [(frame.shape[0] // 15, i * frame.shape[1] // 10)
                          for i in range(1, 5)]
            for i, text in enumerate(texts):
                label_panel = imwrite(label_panel,
                                      text,
                                      org_coords[i],
                                      thickness=1 + ((i % 2) == 0))

            frame = np.concatenate((frame, label_panel), axis=1)

            #Encode and deploy
            ret, jpeg = cv2.imencode('.jpg', frame)
            frame = jpeg.tobytes()
            #yield frame
            yield (b'\r\n--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #28
def main():

    # if you would like to test an additional model, add one to the list below:
    models = ["alwaysai/mobilenet_ssd", "alwaysai/ssd_inception_v2_coco_2018_01_28"]

    # if you've added a model, add a new color in as a list of tuples in BGR format
    # to make visualization easier (e.g. [(B, G, R)]).
    colors = [[(66, 68, 179)], [(50, 227, 62)]]

    detectors = []

    # load all the models (creates a new object detector for each model)
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()

                text = [""]

                # gather data from the all the detectors
                for i in range(0, len(detectors)):
                    results = detectors[i].detect_objects(
                        frame, confidence_level=.5)
                    object_frame = edgeiq.markup_image(
                        frame, results.predictions, show_labels=False, colors=colors[i])

                    # for the first frame, overwrite the input feed
                    if i == 0:
                        display_frame = object_frame
                    else:

                        # otherwise, append the new marked-up frame to the previous one
                        display_frame = numpy.concatenate((object_frame, display_frame))

                    # append each prediction
                    for prediction in results.predictions:
                        text.append(
                                "Model {} detects {}: {:2.2f}%".format(detectors[i].model_id,
                                prediction.label, prediction.confidence * 100))

                # send the image frame and the predictions for both
                # prediction models to the output stream
                streamer.send_data(display_frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #29
def main():

    # The current frame index
    frame_idx = 0

    # The number of frames to skip before running detector
    detect_period = 50

    # if you would like to test an additional model, add one to the list below:
    models = [
        "alwaysai/ssd_mobilenet_v2_oidv4",
        "alwaysai/ssd_inception_v2_coco_2018_01_28"
    ]

    # include any labels that you wish to detect from any models (listed above in 'models') here in this list
    detected_contraband = [
        "Pen", "cell phone", "backpack", "book", "Book", "Ring binder",
        "Headphones", "Calculator", "Mobile phone", "Telephone", "Microphone",
        "Ipod", "Remote control"
    ]

    # load all the models (creates a new object detector for each model)
    detectors = []
    for model in models:

        # start up a first object detection model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()
    contraband_summary = ContrabandSummary()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:

            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                predictions_to_markup = []
                text = [""]

                # only analyze every 'detect_period' frame (i.e. every 50th in original code)
                if frame_idx % detect_period == 0:

                    # gather data from the all the detectors
                    for i in range(0, len(detectors)):
                        results = detectors[i].detect_objects(
                            frame, confidence_level=.2)

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # append each prediction
                        predictions = results.predictions
                        for prediction in predictions:

                            if (prediction.label.strip()
                                    in detected_contraband):
                                contraband_summary.contraband_alert(
                                    prediction.label, frame)
                                predictions_to_markup.append(prediction)
                                tracker.start(frame, prediction)
                else:

                    # if there are objects being tracked, update the tracker with the new frame
                    if tracker.count:

                        # get the new predictions for the objects being tracked, used to markup the frame
                        predictions_to_markup = tracker.update(frame)

                # mark up the frame with the predictions for the contraband objects
                frame = edgeiq.markup_image(frame,
                                            predictions_to_markup,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # send the collection of contraband detection points (string and video frame) to the streamer
                text = contraband_summary.get_contraband_string()

                streamer.send_data(frame, text)
                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #30
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO,
                        accelerator=edgeiq.Accelerator.MYRIAD)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    y_letter = cv2.imread('letter_y.png')
    m_letter = cv2.imread('m_letter.jpg')
    c_letter = cv2.imread('c_letter.jpeg')
    a_letter = cv2.imread('a_letter.jpg')

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = [""]
                for ind, pose in enumerate(results.poses):
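                    # Pull (x, y) coordinates for selected key points;
                    # a value of -1 means the key point was not detected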
                    right_wrist_y = pose.key_points[4][1]
                    right_wrist_x = pose.key_points[4][0]
                    right_elbow_y = pose.key_points[3][1]
                    right_elbow_x = pose.key_points[3][0]
                    left_wrist_y = pose.key_points[7][1]
                    left_wrist_x = pose.key_points[7][0]
                    left_elbow_y = pose.key_points[6][1]
                    left_elbow_x = pose.key_points[6][0]
                    nose_y = pose.key_points[0][1]
                    nose_x = pose.key_points[0][0]
                    neck_y = pose.key_points[1][1]
                    if nose_y != -1 and neck_y != -1:
                        neck_distance = neck_y - nose_y
                    else:
                        neck_distance = 0
                    if right_wrist_y != -1 and left_wrist_y != -1 and nose_y != -1 and left_elbow_y != -1 and right_elbow_y != -1 and neck_distance > 0:
                        if right_wrist_y < nose_y and left_wrist_y < nose_y and right_wrist_x > right_elbow_x and left_wrist_x < left_elbow_x:
                            if right_wrist_y < (
                                    nose_y -
                                    neck_distance / 3.0) and left_wrist_y < (
                                        nose_y - neck_distance / 3.0):
                                print("----------A!-------------")
                                overlay = edgeiq.resize(
                                    a_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                            elif (nose_y - neck_distance) < right_wrist_y and (
                                    nose_y - neck_distance) < left_wrist_y:
                                print("----------M!-------------")
                                overlay = edgeiq.resize(
                                    m_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                    if right_wrist_y != -1 and left_wrist_y != -1 and nose_y != -1 and right_elbow_x != -1 and left_elbow_x != -1 and right_wrist_x != -1 and left_wrist_x != -1:
                        if right_wrist_y < nose_y and left_wrist_y < nose_y and right_wrist_x < right_elbow_x and left_wrist_x > left_elbow_x:
                            print("----------Y!-------------")
                            overlay = edgeiq.resize(y_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue
                    if left_wrist_x != -1 and nose_x != -1 and left_wrist_y != -1 and nose_y != -1 and right_wrist_y != -1 and nose_x != -1:
                        if right_wrist_x > nose_x and right_wrist_y < nose_y and left_wrist_x > nose_x:
                            print("----------C!-------------")
                            overlay = edgeiq.resize(c_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")