Example #1
    def __init__(self):
        # client configuration
        self.id = "waiting_room"
        self.box = edgeiq.BoundingBox(0, 0, 1920, 1080)
        self.server_event_url = "http://localhost:5001/" # configure as needed
        self.chairs = { # configure as needed
            # 'chair1': 1
        }
        self.capacity = 4 # configure as needed

        # detection models
        self.detector = self.load_model("alwaysai/yolov3")
        self.mask_detector = self.load_model("<username>/<model_name>") # train and use your model here!

        # labels of interest (used to filter predictions)
        self.interest_items = {
            "person": InterestItem(16, 42, "person", ["person"])
        }

        # tracker to associate object ids with predictions
        self.centroid_tracker = edgeiq.CentroidTracker(deregister_frames=4, max_distance=130)

        self.covid_event_log = {}
        self.event_log = {}
        self.send_setup()

Example #2
    def __init__(self):
        self.id = "vaccination_area"
        self._send_events = False
        self.server_event_url = "http://localhost:5001/"  # configure as needed
        self._start_time = time.time()

        # detection model
        self.detector = self.load_model("alwaysai/yolov3")

        self.centroid_tracker = edgeiq.CentroidTracker(
            deregister_frames=4, max_distance=130)  # configure as needed

        # vaccination box
        self.box = edgeiq.BoundingBox(1269, 187, 1920,
                                      1080)  # configure as needed

        # for overall vaccination logic, configure as needed
        self.current_ids = []
        self.timestamp = None
        self.total_vaccinations = 0
        self.vaccination_time = 30
        self.scheduled_vaccinations = 20
        self.doses_per_vial = 10
        self.last_apt = datetime.datetime.today().replace(hour=16, minute=45)
        self.send_event(0)
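
Both constructors above finish by posting state to server_event_url via send_setup() / send_event(), which are not shown in these snippets. A minimal sketch of what such helpers might look like, assuming a JSON endpoint and the requests library; the payload fields are guesses, not part of the original:

import requests

class EventSender:
    # Hypothetical mixin supplying send_setup()/send_event(); payload
    # shape and endpoint semantics are assumptions.
    def send_setup(self):
        self._post({"id": self.id, "event": "setup"})

    def send_event(self, count):
        self._post({"id": self.id, "event": "update", "count": count})

    def _post(self, payload):
        try:
            requests.post(self.server_event_url, json=payload, timeout=2)
        except requests.RequestException as err:
            print("event post failed: {}".format(err))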
Example #3
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #4
def start_detection(config,
                    did_start_callback,
                    enter_callback,
                    exit_callback,
                    did_end_object_callback,
                    should_log=True):
    """
    Enable features dependent on config and pass callback down to actual detection functions
    """
    global ENTER_CALLBACK
    global EXIT_CALLBACK
    ENTER_CALLBACK = enter_callback
    EXIT_CALLBACK = exit_callback
    print('alwaysai.py: start_detection: enter_callback: {}'.format(
        ENTER_CALLBACK))

    # Configs
    od_config = alwaysai_configs.ObjectDetector(config)
    ct_config = alwaysai_configs.CentroidTracker(config)
    vs_config = alwaysai_configs.VideoStream(config)
    od = object_detector(od_config.model_id)
    t = edgeiq.CentroidTracker(deregister_frames=ct_config.deregister_frames,
                               max_distance=ct_config.max_distance)
    en_zones_config = config.get('entry_zones', [])
    ex_zones_config = config.get('exit_zones', [])
    entry_zones = zones_from_config(en_zones_config)
    exit_zones = zones_from_config(ex_zones_config)
    vs = None

    # print('alwaysai.py: start_detection: en_zones_config: {}'.format(en_zones_config))
    # print('alwaysai.py: start_detection: entry_zones: {}'.format(entry_zones))

    # Inits
    if vs_config.mode == 'camera':
        if should_log:
            print('alwaysai.py: start_detection: enabling camera w/ id: {}'.
                  format(vs_config.camera_id))
        vs = edgeiq.WebcamVideoStream(cam=vs_config.camera_id)
    if vs_config.mode == 'file':
        if should_log:
            print('alwaysai.py: start_detection: reading from file')
        vs = edgeiq.FileVideoStream(vs_config.filename, play_realtime=True)
    enable_streamer = config.get('enable_streamer', False)
    streamer = alwaysai_configs.DummyStreamer()
    if enable_streamer:
        print('alwaysai.py: start_detection: ENABLING streamer')
        streamer = edgeiq.Streamer()

    # Start
    start_video_detection_with_streamer(vs, od_config, od, streamer, t,
                                        entry_zones, exit_zones,
                                        did_start_callback, did_detect,
                                        did_end_object_callback)
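
Example #4 relies on a zones_from_config helper that is not shown. A plausible sketch, assuming each configured zone is a rectangle given as [start_x, start_y, end_x, end_y]; the entry format is purely an assumption:

def zones_from_config(zone_entries):
    # Hypothetical: treats each entry as [start_x, start_y, end_x, end_y]
    return [edgeiq.BoundingBox(*entry) for entry in zone_entries]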
Example #5
    def detection_base(model, confidence, image_array):
        detector = edgeiq.ObjectDetection(
            model)  # model example: "alwaysai/res10_300x300_ssd_iter_140000"
        detector.load(engine=edgeiq.Engine.DNN)

        centroid_tracker = edgeiq.CentroidTracker(deregister_frames=100,
                                                  max_distance=50)
        results = detector.detect_objects(image_array,
                                          confidence_level=confidence)
        objects = centroid_tracker.update(results.predictions)

        predictions = []
        for (object_id, prediction) in objects.items():
            prediction.label = "{}: {}".format(prediction.label, object_id)
            predictions.append(prediction)

        image = edgeiq.markup_image(image_array, predictions)

        return image, results, None
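
A possible way to call detection_base, assuming it is accessible as a plain function; the image path below is a placeholder:

import cv2

image = cv2.imread("test_frame.jpg")  # hypothetical input image path
marked, results, _ = detection_base(
    "alwaysai/res10_300x300_ssd_iter_140000", 0.5, image)
cv2.imwrite("test_frame_marked.jpg", marked)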
Example #6
def _centroid_tracker_from(config):
    # print("alwaysai_helper.py: _centroid_tracker")
    frames = config.get(CENTROID_FRAMES, 20)
    distance = config.get(CENTROID_MAX, 50)
    return edgeiq.CentroidTracker(deregister_frames=frames,
                                  max_distance=distance)
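
CENTROID_FRAMES and CENTROID_MAX are module constants not shown here; they are presumably config key names. A hypothetical usage with made-up key values:

CENTROID_FRAMES = 'centroid_deregister_frames'  # assumed key name
CENTROID_MAX = 'centroid_max_distance'          # assumed key name

tracker = _centroid_tracker_from({
    CENTROID_FRAMES: 30,
    CENTROID_MAX: 75,
})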
Example #7
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    new_label = 'Person {}'.format(object_id)
                    prediction.label = new_label
                    people.append(prediction)

                frame = edgeiq.markup_image(
                        frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exit_id in new_exits:
                    logs.append('Person {} exited'.format(exit_id))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                text.append('Current Occupancy:')
                text.append(str(currentPeople))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #8
def main():
    fps = edgeiq.FPS()

    # Change parameter to alwaysai/human_pose_eyecloud to run the human pose model.
    with edgeiq.EyeCloud('alwaysai/mobilenet_ssd_eyecloud') as camera, \
            edgeiq.Streamer() as streamer:

        fps.start()

        centroid_tracker = edgeiq.CentroidTracker(
            deregister_frames=20, max_distance=100)

        # Highest object id seen so far; doubles as a "people passed" counter
        number = 0

        while True:
            text = ["FPS: {}".format(fps.compute_fps())]

            frame = camera.get_frame()
            # print('image sequence = {}'.format(frame.sequence_index))

            result = camera.get_model_result(confidence_level=0.9)

            # Check for inference results.
            if result:
                #print('model sequence = {}'.format(result.sequence_index))

                text.append("Model: {}".format(camera.model_id))

                if camera.model_purpose == 'PoseEstimation':
                    frame = result.draw_poses(frame)

                    text.append("Inference time: {:1.3f} s".format(result.duration))

                    for ind, pose in enumerate(result.poses):
                        text.append("Person {}".format(ind))
                        text.append('-' * 10)
                        text.append("Key Points:")
                        for key_point in pose.key_points:
                            text.append(str(key_point))

                elif camera.model_purpose == 'ObjectDetection':
                    # Drop duplicate detections whose IoU overlap is too
                    # large. The original popped from the list while
                    # iterating, which skips items; rebuild the list instead.
                    if len(result.predictions) > 1:
                        deduped = []
                        for prediction in result.predictions:
                            if all(prediction.box.compute_overlap(kept.box) <= 0.3
                                   for kept in deduped):
                                deduped.append(prediction)
                        result.predictions = deduped

                    # Keep only detections inside the effective FOV region
                    result.predictions = [
                        p for p in result.predictions
                        if 500 <= p.box.center[0] <= 1300
                    ]

                    objects = centroid_tracker.update(result.predictions)

                    text.append("Inference time: {:1.3f} s".format(result.duration))
                    text.append("Objects:")

                    
               
                    for (object_id, prediction) in objects.items():
                        new_label = 'person {}'.format(object_id)
                        
                        if(object_id+1>number):
                            number=object_id+1

                        prediction.label = new_label
                        text.append("{}: {:2.2f}%".format(prediction.label, prediction.confidence * 100))
                        result.predictions.append(prediction)          
                    
                    
                    frame = edgeiq.markup_image(frame, result.predictions)

                    text.append("people pass: {}".format(number))





                elif camera.model_purpose == 'Classification':
                    if len(result.predictions) > 0:
                        top_prediction = result.predictions[0]
                        text = "Classification: {}, {:.2f}%".format(
                            top_prediction.label,
                            top_prediction.confidence * 100)
                    else:
                        text = ""

                    cv2.putText(frame, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
                                0.4, (0, 0, 255), 2)

            # Draw the effective FOV detection region
            cv2.rectangle(frame, (500, 0), (1300, 1080), (255, 0, 0), 2)

            streamer.send_data(frame, text)

            if streamer.check_exit():
                break

            fps.update()

        print('fps = {}'.format(fps.compute_fps()))
Example #9
def main():
    # Spin up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/" + OBJECT_DETECTION_MODEL)
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Prepare to track frames per second calculations
    fps = edgeiq.FPS()

    # Load any prior instance of the tracker, otherwise spin up a new one
    centroid_tracker = file_manager.load(
        CENTROID_TRACKER,
        edgeiq.CentroidTracker(deregister_frames=TRACKER_DEREGISTER_FRAMES,
                               max_distance=TRACKER_MAX_DISTANCE))
    # Load any prior instance of the metrics data, otherwise start a new one
    metrics = file_manager.load(METRICS_MANAGER,
                                metrics_manager.MetricsManager())

    try:
        if IP_CAMERA_FEED is not None:
            stream_details = edgeiq.IPVideoStream(IP_CAMERA_FEED)
        else:
            stream_details = edgeiq.WebcamVideoStream(cam=0)

        with stream_details as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(
                    frame, confidence_level=DETECT_CONFIDENCE_THRESHOLD)

                # Ignore detections of anything other than people
                people = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(people)

                # Store active predictions for just this loop
                predictions = []

                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics.addTimeFor(object_id)
                    timeForId = metrics.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                m = metrics.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(m["count"]))
                text.append("Total time: {} sec".format(m["total"]))
                text.append("Average time: {0:.1f} sec".format(m["avg"]))
                text.append("Longest individual time: {} sec".format(m["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        # TODO: Update to save every few seconds in case a crash occurs
        file_manager.save(metrics, METRICS_MANAGER)
        file_manager.save(centroid_tracker, CENTROID_TRACKER)
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #10
def main():
    #detects the object
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)

            tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                             max_distance=50)

            fps.start()

            objects = {}
            objectsCopy = {}
            # loop detection
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=600)
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                text.append("Item Count:")
                objectsCopy = objects.copy()
                objects = tracker.update(results.predictions)

                if len(objects) < len(objectsCopy):
                    # Remove ids still tracked; anything left has disappeared
                    for key in objects:
                        objectsCopy.pop(key, None)
                    for key in objectsCopy:
                        text.append("%s has been stolen!" %
                                    objectsCopy[key].label)

                #if len(objects) < count:
                #    print('something left the frame')

                #count = len(objects)

                #predictions = []
                #for (object_id, prediction) in objects.items():
                #    new_label = 'Object {}'.format(object_id)
                #    prediction.label = new_label
                #    text.append(new_label)
                #    predictions.append(prediction)

                #for prediction in results.predictions:
                #   text.append("{}: {:2.2f}%".format(
                #      prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #11
def main():

    cam_type = os.environ.get('OPENNCC_CAM')
    print(cam_type)
    model_name = None
    if cam_type is None:
        model_name = "abhatikar/package_detector"
        package_detector = edgeiq.ObjectDetection(model_name)
        cam_type = os.environ.get('NCS2_CAM')
        if cam_type is None:
            cam_type = "webcam"
            package_detector.load(engine=edgeiq.Engine.DNN)
        else:
            cam_type = "ncs2"
            package_detector.load(engine=edgeiq.Engine.DNN_OPENVINO)
    else:
        cam_type = "openncc"
        model_name = "abhatikar/package_detector_ncc"

    # add a centroid tracker to see if a new package arrives
    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=10,
                                              max_distance=50)
    if cam_type != "openncc":
        # Descriptions printed to console
        print("Engine: {}".format(package_detector.engine))
        print("Accelerator: {}\n".format(package_detector.accelerator))
        print("Model:\n{}\n".format(package_detector.model_id))
        print("Labels:\n{}\n".format(package_detector.labels))

    fps = edgeiq.FPS()

    # Variables to limit inference
    counter = 0
    DETECT_RATE = 10

    # Object to monitor the system
    pm = PackageMonitor()

    if cam_type is "openncc":
        video_stream = edgeiq.EyeCloud(model_name).start()
    else:
        video_stream = edgeiq.WebcamVideoStream(cam=0).start()
    try:
        with edgeiq.Streamer() as streamer:

            # Allow the camera to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection
            while True:
                counter += 1

                # Run this loop whenever there's a package detected or every DETECT_RATE frames
                if pm.package_is_detected() or counter % DETECT_RATE == 0:

                    # Read in the video stream
                    if cam_type is "openncc":
                        frame = video_stream.get_frame()
                        # Check for packages in the new frame
                        package_results = video_stream.get_model_result(
                            confidence_level=.90)
                    else:
                        frame = video_stream.read()
                        # Check for packages in the new frame
                        package_results = package_detector.detect_objects(
                            frame, confidence_level=.90)

                    if package_results is not None:
                        # update the package predictions
                        objects = centroid_tracker.update(
                            package_results.predictions)
                        pm.set_packages(objects)

                        # Generate labels to display the package detections on the streamer
                        text = ["Model: {}".format(model_name)]
                        text.append("Inference time: {:1.3f} s".format(
                            package_results.duration))
                        predictions = []

                        # update labels for each identified package to print to the screen
                        for (object_id, prediction) in objects.items():
                            new_label = 'Package {}'.format(object_id)
                            prediction.label = new_label
                            text.append(new_label)
                            predictions.append(prediction)

                        # Alter the original frame mark up to show tracking labels
                        frame = edgeiq.markup_image(frame,
                                                    predictions,
                                                    show_labels=True,
                                                    show_confidences=False,
                                                    line_thickness=3,
                                                    font_size=1,
                                                    font_thickness=3)

                        # Do some action based on state
                        text.append(pm.action())

                        # Send the image frame and the predictions to the output stream
                        streamer.send_data(frame, text)

                        fps.update()

                        if streamer.check_exit():
                            video_stream.stop()
                            break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #12
    def start(self):
        # This is code from the person-counter app
        # TODO rework this to use a callback, observer-pattern, or generator
        obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
        obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Model:\n{}\n".format(obj_detect.model_id))

        centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                                  max_distance=50)
        fps = edgeiq.FPS()

        # Object to store time info for detected people
        allPeople = {}

        try:

            with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                    edgeiq.Streamer() as streamer:
                # Allow Webcam to warm up
                time.sleep(2.0)
                fps.start()

                # Loop detection and centroid tracker
                while True:
                    frame = video_stream.read()
                    results = obj_detect.detect_objects(frame,
                                                        confidence_level=.5)

                    # Ignore detections of anything other than people
                    people = edgeiq.filter_predictions_by_label(
                        results.predictions, ['person'])

                    # Adding info for streamer display
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append("Inference time: {:1.3f} s".format(
                        results.duration))
                    text.append("People currently detected:")

                    objects = centroid_tracker.update(people)

                    # Store active predictions for just this loop
                    predictions = []
                    # Store the active object ids for just this loop
                    active_ids = []

                    if len(objects.items()) == 0:
                        # No people detected
                        text.append("-- NONE")

                    for (object_id, prediction) in objects.items():
                        seenTime = traffic_manager.timeSeenFor(
                            object_id, allPeople)
                        # Correct id displayed for start of array at index 0
                        actualPersonNumber = object_id + 1

                        # Display general data on person seen
                        new_label = "-- Person {i} | {t} sec".format(
                            i=actualPersonNumber, t=seenTime)
                        active_ids.append(object_id)
                        prediction.label = new_label
                        text.append(new_label)
                        predictions.append(prediction)

                    # Update output streamer
                    frame = edgeiq.markup_image(frame, predictions)
                    streamer.send_data(frame, text)
                    fps.update()

                    if streamer.check_exit():
                        break

        finally:
            fps.stop()
            print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
            print("approx. FPS: {:.2f}".format(fps.compute_fps()))
            print("Program Ending")
Example #13
def start_file_detection_and_tracking(delivery_object,
                                      filter_for,
                                      model_name,
                                      filename,
                                      detection_confidence=.5,
                                      enable_streamer=True,
                                      streamer_show_labels=True,
                                      tracker_deregister_frames=20,
                                      tracker_max_distance=50,
                                      should_log=False):
    """Starts a detection loop"""
    obj_detect = object_detector(model_name)
    tracker = edgeiq.CentroidTracker(
        deregister_frames=tracker_deregister_frames,
        max_distance=tracker_max_distance)
    fps = edgeiq.FPS()

    try:
        # Enables video camera and streamer

        # TODO: add streamer disable feature here

        with edgeiq.FileVideoStream(
                filename) as video_stream, edgeiq.Streamer() as streamer:

            # Start tracking of frames per second
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                text = []
                # Run detection
                # detect human faces
                results = obj_detect.detect_objects(
                    frame, confidence_level=detection_confidence)

                # TODO: Add filter option here

                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_labels=streamer_show_labels)

                # Generate text to display on streamer
                text.append("Model: {}".format(obj_detect.model_id))
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))

                predictions = []
                objects = tracker.update(results.predictions)
                # predictions = results.predictions
                for (object_id, prediction) in objects.items():
                    # print(vars(prediction))
                    text.append("{}: {}: {:2.2f}%".format(
                        object_id, prediction.label,
                        prediction.confidence * 100))
                    predictions.append(prediction)

                    if delivery_object.should_send_image(object_id):
                        # Extract image
                        face_image = edgeiq.cutout_image(frame, prediction.box)
                        # Send data to server
                        delivery_object.send_image(object_id, prediction.label,
                                                   face_image)
                    elif delivery_object.should_send_data(object_id):
                        delivery_object.send_data(object_id, prediction.label)

                    # if delivery.should_send_data(object_id):
                    #     delivery.send_data(object_id, prediction.label)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()
                if streamer.check_exit():
                    break

    finally:
        # stop fps counter and display information
        fps.stop()
        if should_log:
            print("[INFO] elapsed time: {:.2f}".format(
                fps.get_elapsed_seconds()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))
            print("Program Ending")
Example #14
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)
    fps = edgeiq.FPS()

    try:

        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics_manager.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.8)

                # Ignore detections of anything other than people
                people = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(people)

                # Store active predictions for just this loop
                predictions = []

                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics_manager.addTimeFor(object_id)
                    timeForId = metrics_manager.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                metrics = metrics_manager.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(metrics["count"]))
                text.append("Total time: {} sec".format(metrics["total"]))
                text.append("Average time: {0:.1f} sec".format(metrics["avg"]))
                text.append("Longest individual time: {} sec".format(
                    metrics["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")