Example #1
def perform_object_detection(frame):
    """Perform object detction on an image, update
    the table data, and returns a string.

    Args:
        frame (numpy array): The frame from the camera stream.

    Returns:
        bytes: The JPEG-encoded bytes of the marked-up image
    """
    frame = edgeiq.resize(frame, width=800, height=300)
    results = obj_detect.detect_objects(frame, confidence_level=.5)
    frame = edgeiq.markup_image(frame,
                                results.predictions,
                                colors=obj_detect.colors)
    frame = cv2.imencode('.jpg', frame)[1].tobytes()

    # update data for table
    objects = {
        'timestamp': str(round((time.time() - START_TIME), 0)),
        'labels': ", ".join([p.label for p in results.predictions])
    }

    global data
    if data is None:
        data = pd.DataFrame({k: [v] for k, v in objects.items()})
    else:
        # DataFrame.append() was removed in pandas 2.0; use pd.concat()
        data = pd.concat(
            [data, pd.DataFrame({k: [v] for k, v in objects.items()})],
            ignore_index=True)

    data = data.drop_duplicates()

    return frame
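
The function above relies on module-level names (obj_detect, START_TIME, data) that the snippet does not show. A minimal sketch of the assumed surrounding setup; the model id here is a hypothetical stand-in:

import time

import cv2
import edgeiq
import pandas as pd

# hypothetical detector; any edgeiq object detection model id would do
obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
obj_detect.load(engine=edgeiq.Engine.DNN)

START_TIME = time.time()  # reference point for the table timestamps
data = None  # DataFrame built up by perform_object_detection()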
Example #2
def main():
    semantic_segmentation = edgeiq.SemanticSegmentation(
        "alwaysai/fcn_resnet18_pascal_voc_512x320")
    semantic_segmentation.load(engine=edgeiq.Engine.DNN_CUDA,
                               accelerator=edgeiq.Accelerator.NVIDIA)

    print("Loaded model:\n{}\n".format(semantic_segmentation.model_id))
    print("Engine: {}".format(semantic_segmentation.engine))
    print("Accelerator: {}\n".format(semantic_segmentation.accelerator))
    print("Labels:\n{}\n".format(semantic_segmentation.labels))

    fps = edgeiq.FPS()

    class_list = ['bottle']

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=320)
                results = semantic_segmentation.segment_image(frame)

                object_map = semantic_segmentation.build_object_map(
                    results.class_map, class_list)

                object_mask = semantic_segmentation.build_image_mask(
                    object_map)

                # Optional: turn the black (unlabeled) background white
                # instead (requires numpy imported as np):
                # object_mask[np.where((object_mask == [0, 0, 0]).all(axis=2))] = [255, 255, 255]

                # Generate text to display on streamer
                text = ["Model: {}".format(semantic_segmentation.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Legend:")
                text.append(semantic_segmentation.build_legend())

                blended = edgeiq.blend_images(frame, object_mask, alpha=0.5)

                streamer.send_data(blended, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #3
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    # deregister_frames: drop a tracked object after it has been missing
    # for this many consecutive frames; max_distance: maximum centroid
    # distance (in pixels) for matching a detection to a tracked object
    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
    def send_frame(self, frame):
        frame = edgeiq.resize(
                frame, width=self._max_image_width,
                height=self._max_image_height, keep_scale=True)

        # Encode frame as jpeg
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        # Base64-encode the JPEG and decode the result to a
        # utf-8 string
        frame = base64.b64encode(frame).decode('utf-8')
        frame = "data:image/jpeg;base64,{}".format(frame)
        self._sio.emit('cv-cmd', {'cmd': 'update_frame', 'data': frame})
Example #5
    def send_data(self, frame, text):
        cur_t = time.time()
        if cur_t - self._last_update_t > self._wait_t:
            self._last_update_t = cur_t
            frame = edgeiq.resize(
                    frame, width=640, height=480, keep_scale=True)
            sio.emit(
                    'cv2server',
                    {
                        'image': self._convert_image_to_jpeg(frame),
                        'text': '<br />'.join(text)
                    })
Example #6
    def send_data(self, frame, text):
        """Sends image and text to the flask server.

        Args:
            frame (numpy array): the image
            text (string): the text
        """
        cur_t = time.time()
        if cur_t - self._last_update_t > self._wait_t:
            self._last_update_t = cur_t
            frame = edgeiq.resize(
                    frame, width=640, height=480, keep_scale=True)
            socketio.emit(
                    'server2web',
                    {
                        'image': self._convert_image_to_jpeg(frame),
                        'text': '<br />'.join(text)
                    })
            socketio.sleep(0.0001)
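
Both send_data variants above call a _convert_image_to_jpeg helper that these examples never define. A plausible sketch, assuming it mirrors the inline encoding used by send_frame in Example #4:

    def _convert_image_to_jpeg(self, frame):
        # Encode frame as jpeg
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        # Base64-encode the JPEG and decode the result to a utf-8 string
        frame = base64.b64encode(frame).decode('utf-8')
        return "data:image/jpeg;base64,{}".format(frame)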
Example #7
def main():
    pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
    pose_estimator.load(engine=edgeiq.Engine.DNN_OPENVINO,
                        accelerator=edgeiq.Accelerator.MYRIAD)

    print("Loaded model:\n{}\n".format(pose_estimator.model_id))
    print("Engine: {}".format(pose_estimator.engine))
    print("Accelerator: {}\n".format(pose_estimator.accelerator))

    fps = edgeiq.FPS()

    y_letter = cv2.imread('letter_y.png')
    m_letter = cv2.imread('m_letter.jpg')
    c_letter = cv2.imread('c_letter.jpeg')
    a_letter = cv2.imread('a_letter.jpg')

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = pose_estimator.estimate(frame)
                # Generate text to display on streamer
                text = [""]
                for ind, pose in enumerate(results.poses):
                    right_wrist_y = pose.key_points[4][1]
                    right_wrist_x = pose.key_points[4][0]
                    right_elbow_y = pose.key_points[3][1]
                    right_elbow_x = pose.key_points[3][0]
                    left_wrist_y = pose.key_points[7][1]
                    left_wrist_x = pose.key_points[7][0]
                    left_elbow_y = pose.key_points[6][1]
                    left_elbow_x = pose.key_points[6][0]
                    nose_y = pose.key_points[0][1]
                    nose_x = pose.key_points[0][0]
                    # key_points[i] is an (x, y) pair, so index [1] is y
                    neck_y = pose.key_points[1][1]
                    if nose_y != -1 and neck_y != -1:
                        neck_distance = neck_y - nose_y
                    else:
                        neck_distance = 0
                    if (right_wrist_y != -1 and left_wrist_y != -1
                            and nose_y != -1 and left_elbow_y != -1
                            and right_elbow_y != -1 and neck_distance > 0):
                        if (right_wrist_y < nose_y and left_wrist_y < nose_y
                                and right_wrist_x > right_elbow_x
                                and left_wrist_x < left_elbow_x):
                            if right_wrist_y < (
                                    nose_y -
                                    neck_distance / 3.0) and left_wrist_y < (
                                        nose_y - neck_distance / 3.0):
                                print("----------A!-------------")
                                overlay = edgeiq.resize(
                                    a_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                            elif (nose_y - neck_distance) < right_wrist_y and (
                                    nose_y - neck_distance) < left_wrist_y:
                                print("----------M!-------------")
                                overlay = edgeiq.resize(
                                    m_letter, frame.shape[1], frame.shape[0],
                                    False)
                                cv2.addWeighted(frame, 0.4, overlay, 0.6, 0,
                                                frame)
                                continue
                    # undetected key points are reported as -1, so test
                    # each coordinate explicitly (-1 is truthy on its own)
                    if (right_wrist_y != -1 and left_wrist_y != -1
                            and nose_y != -1 and right_elbow_x != -1
                            and left_elbow_x != -1 and right_wrist_x != -1
                            and left_wrist_x != -1):
                        if (right_wrist_y < nose_y and left_wrist_y < nose_y
                                and right_wrist_x < right_elbow_x
                                and left_wrist_x > left_elbow_x):
                            print("----------Y!-------------")
                            overlay = edgeiq.resize(y_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue
                    # ensure every key point used in the comparison below
                    # was detected
                    if (right_wrist_x != -1 and right_wrist_y != -1
                            and left_wrist_x != -1 and nose_x != -1
                            and nose_y != -1):
                        if (right_wrist_x > nose_x and right_wrist_y < nose_y
                                and left_wrist_x > nose_x):
                            print("----------C!-------------")
                            overlay = edgeiq.resize(c_letter, frame.shape[1],
                                                    frame.shape[0], False)
                            cv2.addWeighted(frame, 0.4, overlay, 0.6, 0, frame)
                            continue

                streamer.send_data(results.draw_poses(frame), text)

                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #8
def main():
    classifier = edgeiq.Classification("alwaysai/googlenet")
    classifier.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(classifier.engine))
    print("Accelerator: {}\n".format(classifier.accelerator))
    print("Model:\n{}\n".format(classifier.model_id))
    print("Labels:\n{}\n".format(classifier.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=224)
                results = classifier.classify_image(frame)

                # Generate text to display on streamer
                text = ["Model: {}".format(classifier.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                if results.predictions:
                    print(results.predictions[0].label)
                    if results.predictions[0].label == 'water bottle':
                        # label the frame
                        image_text = "Label: {}, {:.2f}".format(
                            results.predictions[0].label,
                            results.predictions[0].confidence)
                        cv2.putText(frame, image_text, (5, 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
                                    2)

                        for idx, prediction in enumerate(
                                results.predictions[:5]):
                            text.append(
                                "{}. label: {}, confidence: {:.5}".format(
                                    idx + 1, prediction.label,
                                    prediction.confidence))
                    else:
                        text.append("No water bottles detected.")

                else:
                    text.append("No water bottles detected.")

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
    def image_preprocessor(self, image):
        image = edgeiq.resize(image, width=500)
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return image, gray_image
Example #10
def main():
    # set up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)

            tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                             max_distance=50)

            fps.start()

            objects = {}
            objectsCopy = {}
            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=600)
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                text.append("Item Count:")
                objectsCopy = objects.copy()
                objects = tracker.update(results.predictions)

                if len(objects) < len(objectsCopy):
                    # remove the objects still present; whatever remains
                    # disappeared since the previous frame
                    for key in objects:
                        objectsCopy.pop(key, None)
                    for key in objectsCopy:
                        text.append(
                            "%s has been stolen!" % objectsCopy[key].label)

                #if len(objects) < count:
                #    print('something left the frame')

                #count = len(objects)

                #predictions = []
                #for (object_id, prediction) in objects.items():
                #    new_label = 'Object {}'.format(object_id)
                #    prediction.label = new_label
                #    text.append(new_label)
                #    predictions.append(prediction)

                #for prediction in results.predictions:
                #   text.append("{}: {:2.2f}%".format(
                #      prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #11
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/yolo_v3_tiny")
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA_FP16)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))
    print("Detecting:\n{}\n".format(OBJECT))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=416)
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, OBJECT)
                frame = edgeiq.markup_image(frame,
                                            predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # Print date and time on frame
                current_time_date = str(datetime.datetime.now())
                (h, w) = frame.shape[:2]
                cv2.putText(frame, current_time_date, (10, h - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                # Count OBJECT
                counter = {obj: 0 for obj in OBJECT}

                for prediction in predictions:
                    # increment the counter of the detected object
                    counter[prediction.label] += 1

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Object counts:")

                for label, count in counter.items():
                    text.append("{}: {}".format(label, count))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")