Example #1
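# Bottle detector: runs MobileNet SSD on a webcam stream and pulses a buzzer on a
# GPIO pin whenever a bottle is detected. Assumed imports, not shown in this
# snippet: time, edgeiq, and (presumably) RPi.GPIO imported as GPIO.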
def main():
    #arduino = serial.Serial('COM1', 115200, timeout=1)
    GPIO.setmode(GPIO.BCM)
    buzzer = 23
    GPIO.setup(buzzer, GPIO.OUT)
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, ["bottle"])
                frame = edgeiq.markup_image(frame,
                                            results.predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)
                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")
                for prediction in predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                    GPIO.output(buzzer, GPIO.HIGH)
                    GPIO.input(buzzer)
                    time.sleep(0.5)
                    GPIO.output(buzzer, GPIO.LOW)
                    GPIO.input(buzzer)
                    time.sleep(0.5)

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #2
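# Helper that runs object detection, keeps only the configured labels, and feeds
# the result to a tracker. Assumes edgeiq is imported and that OBJ_DETCTION_CONFIDENCE
# and FILTER_LABELS are config-key constants defined elsewhere in the project.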
def filtered_predictions_from(config, obj_detect, tracker, frame):
    # print("alwaysai_helper.py: _filtered_predictions")
    confidence = config.get(OBJ_DETCTION_CONFIDENCE, .5)
    results = obj_detect.detect_objects(frame, confidence_level=confidence)
    # 'filter' shadows the Python builtin of the same name, which is what triggers
    # the lint warning, so use a different name
    filtered = edgeiq.filter_predictions_by_label(results.predictions,
                                                  config.get(FILTER_LABELS, []))
    filtered_results = tracker.update(filtered)
    return filtered_results
Example #3
    def update(self, image):
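        # Vaccination counter: tracks people overlapping the vaccination areas and
        # counts one vaccination when a pair of IDs stays together past the timeout.
        # Assumes the class provides self.detector, self.centroid_tracker,
        # self.current_ids, self.check_overlap(), self.has_expired(), and
        # self.send_event(), none of which are shown in this snippet.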
        # if someone is in the chair -- we're waiting for a vaccine
        results = self.detector.detect_objects(image, confidence_level=0.6)
        people_pred = edgeiq.filter_predictions_by_label(
            results.predictions, ["person"])
        if len(people_pred) > 0:

            # now check how many people are in the vaccination areas
            predictions = self.centroid_tracker.update(people_pred)
            keys = self.check_overlap(predictions)

            if len(keys) == 2 and len(self.current_ids) == 0:
                # start tracking
                self.current_ids = keys
                self.timestamp = time.time()

            elif len(keys) <= 2:
                if self.has_expired():
                    self.total_vaccinations += 1
                    self.timestamp = None
                    self.current_ids = []
                    self.send_event(1)
Example #4
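# Occupancy counter: tracks people entering and exiting the frame with a centroid
# tracker and keeps a running occupancy count. Assumed imports: time and edgeiq;
# save_snapshot() is a helper defined elsewhere in the project.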
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    new_label = 'Person {}'.format(object_id)
                    prediction.label = new_label
                    people.append(prediction)

                frame = edgeiq.markup_image(
                        frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exit_id in new_exits:
                    logs.append('Person {} exited'.format(exit_id))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                
                text.append('Current Occupancy: {}'.format(currentPeople))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #5
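# Dwell-time tracker: detects people from an IP camera or webcam feed and records
# per-person time metrics that survive restarts. Assumes the constants
# (OBJECT_DETECTION_MODEL, IP_CAMERA_FEED, DETECT_CONFIDENCE_THRESHOLD, tracker
# settings, CENTROID_TRACKER, METRICS_MANAGER) and the file_manager and
# metrics_manager modules are defined elsewhere in the project.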
def main():
    # Spin up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/" + OBJECT_DETECTION_MODEL)
    obj_detect.load(engine=edgeiq.Engine.DNN_CUDA,
                    accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Prepare to track frames per second calculations
    fps = edgeiq.FPS()

    # Load any prior instance of the tracker, otherwise spin up a new one
    centroid_tracker = file_manager.load(
        CENTROID_TRACKER,
        edgeiq.CentroidTracker(deregister_frames=TRACKER_DEREGISTER_FRAMES,
                               max_distance=TRACKER_MAX_DISTANCE))
    # Load any prior instance of the metrics data, otherwise start a new one
    metrics = file_manager.load(METRICS_MANAGER,
                                metrics_manager.MetricsManager())

    try:
        if IP_CAMERA_FEED is not None:
            stream_details = edgeiq.IPVideoStream(IP_CAMERA_FEED)
        else:
            stream_details = edgeiq.WebcamVideoStream(cam=0)

        with stream_details as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(
                    frame, confidence_level=DETECT_CONFIDENCE_THRESHOLD)

                # Ignore detections of anything other than people
                filter = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(filter)

                # Store active predictions for just this loop
                predictions = []

                # Store the active object ids for just this loop
                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics.addTimeFor(object_id)
                    timeForId = metrics.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                m = metrics.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(m["count"]))
                text.append("Total time: {} sec".format(m["total"]))
                text.append("Average time: {0:.1f} sec".format(m["avg"]))
                text.append("Longest individual time: {} sec".format(m["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        # TODO: Update to save every few seconds in case a crash occurs
        file_manager.save(metrics, METRICS_MANAGER)
        file_manager.save(centroid_tracker, CENTROID_TRACKER)
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #6
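# Object counter: detects a configurable list of labels, stamps the frame with the
# current date and time, and reports per-label counts. Assumed imports: time,
# datetime, cv2, and edgeiq; OBJECTS is a list of labels defined elsewhere.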
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))
    print("Detecting:\n{}\n".format(OBJECTS))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, OBJECTS)
                frame = edgeiq.markup_image(frame,
                                            predictions,
                                            show_confidences=False,
                                            colors=obj_detect.colors)

                # Print date and time on frame
                current_time_date = str(datetime.datetime.now())
                (h, w) = frame.shape[:2]
                cv2.putText(frame, current_time_date, (10, h - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                # Count objects
                counter = {obj: 0 for obj in OBJECTS}

                for prediction in predictions:
                    # increment the counter of the detected object
                    counter[prediction.label] += 1

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Object counts:")

                for label, count in counter.items():
                    text.append("{}: {}".format(label, count))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #7
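    # COVID compliance monitor (see the docstring below). Assumes the class provides
    # self.detector, self.centroid_tracker, self.interest_items, check_overlap(),
    # get_distances(), get_mask_results(), map_mask_predictions(), and
    # check_for_events(); edgeiq is assumed to be imported at module level.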
    def update(self, image):
        """Performs mask detection and distance calculation, marks up the image
        with appropriate colored boxes, checks for new events and sends alerts, and
        returns a text update and new image for the calling function to use. 

        Args:
            image (numpy array): The image to inference on

        Returns:
            (image, text): Returns the marked up image and text of the application status
        """
        self.covid_event_log = {}
        goodlist, badlist, mask_pred, no_mask_pred = [], [], [], []
        text = []
        self.covid_event_log['people_not_distanced'] = []
        self.covid_event_log['people_distanced'] = []
        self.covid_event_log['no_masks'] = 0
        self.covid_event_log['masks'] = 0
        self.covid_event_log['uncertain_masks'] = 0

        # get predictions as regular
        results = self.detector.detect_objects(image, confidence_level=0.99)
        
        # filter by labels of interest (i.e. 'person')
        people_pred = edgeiq.filter_predictions_by_label(results.predictions, list(self.interest_items.keys()))
        if len(people_pred) > 0:
        
            # send them to the centroid_tracker tracker
            # now we have results in format: {object_id: ObjectDetectionPrediction}
            tracked_people_pred = self.centroid_tracker.update(people_pred)

            # get area update
            keys = self.check_overlap(tracked_people_pred)

            new_predictions = {}
            for object_id, prediction in tracked_people_pred.items():
                if object_id in keys:
                    new_predictions[object_id] = prediction
        
            # map tracked objects to distance detection
            good_dist, bad_dist = self.get_distances(new_predictions)

            goodlist.extend(list(good_dist.values()))
            badlist.extend(list(bad_dist.values()))

            self.covid_event_log['people_not_distanced'] = list(bad_dist.keys())
            self.covid_event_log['people_distanced'] = list(good_dist.keys())
            text.append("{} people not distanced\n".format(len(bad_dist)))

        # map tracked objects to mask_detection
        mask_predictions = self.get_mask_results(people_pred, image)
        #print("mask_predictions {}".format(mask_predictions))

        if len(mask_predictions) > 0: 
            # get people predictions, to detect uncertain masks
            no_mask_pred, mask_pred, uncertain = self.map_mask_predictions(mask_predictions)

            self.covid_event_log['no_masks'] = len(no_mask_pred)
            self.covid_event_log['masks'] = len(mask_pred)
            self.covid_event_log['uncertain_masks'] = len(uncertain)
            no_mask_pred.extend(uncertain)

            text.append("{} people not wearing masks".format(len(no_mask_pred)))
            
        goodlist.extend(mask_pred)
        badlist.extend(no_mask_pred)

        image = edgeiq.markup_image(
            image, badlist, show_labels=True, line_thickness=2, font_size=2,
            font_thickness=3, show_confidences=False, colors=[(0, 0, 255)])

        image = edgeiq.markup_image(
            image, goodlist, show_labels=True, line_thickness=2, font_size=2,
            font_thickness=3, show_confidences=False, colors=[(12, 105, 7)])

        # send any relevant results to the server
        self.check_for_events()
        
        return image, text
Example #8
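# Parking-space estimator: detects cars, sorts them left to right, and estimates
# how many car-sized gaps lie between neighbouring bounding boxes. Assumed imports:
# math, time, and edgeiq.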
def main():
    obj_detect = edgeiq.ObjectDetection(
            "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                filtered_predictions = edgeiq.filter_predictions_by_label(results.predictions, ['car'])
                
                # put only the car predictions in their own list
                car_boxes = []
                for i, prediction in enumerate(filtered_predictions):
                    car_boxes.append((i, prediction))
                # print(car_boxes)

                # find the average width of the detected cars
                avg_width = 0
                for prediction in filtered_predictions:
                    # print('label = {}, box = {}, width = {}'.format(prediction.label, prediction.box, prediction.box.width))
                    avg_width += prediction.box.width

                if len(filtered_predictions) != 0:
                    avg_width = avg_width / len(filtered_predictions)
                    print(avg_width)

                    # sort the cars left to right by the x coordinate of each box
                    filtered_predictions.sort(key=lambda p: p.box.start_x)

                    # for prediction in filtered_predictions:
                    #     print('labelsorted = {}, boxsorted = {}, widthsorted = {}'.format(prediction.label, prediction.box, prediction.box.width))

                # distance calculation code: count how many car widths (plus a small
                # buffer) fit in the gap between neighbouring cars
                spaces = 0
                for i, prediction in enumerate(filtered_predictions):
                    if i < (len(filtered_predictions) - 1):
                        d = abs(filtered_predictions[i + 1].box.start_x - filtered_predictions[i].box.end_x)
                        c = avg_width / 5
                        n = math.floor(d / (avg_width + c))
                        spaces = spaces + n
                        # print('distance = {}, buffer = {}, number of spaces = {}, total spaces = {}'.format(d, c, n, spaces))
                        print('available parking spaces = {}'.format(spaces))

                frame = edgeiq.markup_image(
                        frame, filtered_predictions, colors=obj_detect.colors)
                
                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")
                # text.append("Distance: ".format(distance))
                

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)

                fps.update()

                if streamer.check_exit():
                    break
                
                
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))

        print("Program Ending")
Example #9
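# Largest-person reporter: finds the person with the largest bounding box and
# optionally posts its position and area to a server. Assumes ENABLE_SEND,
# ENABLE_STREAMER, and SERVER_URL globals, an engine() helper that picks the
# inference engine, and a post module providing data(), all defined elsewhere.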
def main():
    global ENABLE_SEND
    global ENABLE_STREAMER
    
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine())
    # obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()
            width = 0
            height = 0

            # loop detection
            while True:
                frame = video_stream.read()
                if width == 0:
                    height, width, _ = frame.shape
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                        frame, results.predictions, colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                predictions = edgeiq.filter_predictions_by_label(
                                    results.predictions, ["person"])

                largest_area = 0
                largest_prediction = None
                for prediction in predictions:
                    if prediction.label == "person":
                        if prediction.box.area > largest_area:
                            largest_area = prediction.box.area
                            largest_prediction = prediction
                        text.append("{}: {:2.2f}%: center:{} area:{}".format(
                            prediction.label, prediction.confidence * 100, prediction.box.center, prediction.box.area))
                
                text.append("Frame width:{} height:{}".format(width, height))

                # Send data to server
                if ENABLE_SEND:
                    if largest_prediction is not None:
                        a = largest_prediction.box.area
                        print(f'app.py: area: {a}')
                        payload = {"X": largest_prediction.box.center[0], 
                                    "Y":largest_prediction.box.center[1], 
                                    "W": width,
                                    "H": height,
                                    "A": str(a)}
                        post.data(SERVER_URL, payload)

                if ENABLE_STREAMER:
                    streamer.send_data(frame, text)
                    fps.update()
                    if streamer.check_exit():
                        break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
Example #10
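# Entry/exit-zone detection loop: tracks objects across frames, reports new and
# dropped track IDs through callbacks, and optionally draws the zones on a debug
# streamer. Assumes copy and edgeiq are imported and that entry_predictions_from()
# and exit_predictions_from() are helpers defined elsewhere in the module.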
def start_video_detection_with_streamer(video_stream, od_config,
                                        object_detector, streamer,
                                        centroid_tracker, entry_zones,
                                        exit_zones, did_start_callback,
                                        detection_callback,
                                        did_end_object_callback):
    """
    Start video detection
    """
    labels = od_config.target_labels
    displayed_frame_size = False

    try:
        video_stream.start()
        streamer_enabled = isinstance(streamer, edgeiq.Streamer)
        if streamer_enabled:
            entry_predictions = entry_predictions_from(entry_zones)
            exit_predictions = exit_predictions_from(exit_zones)
            streamer.setup()
        did_start_callback()

        prior_track = {}

        while True:
            frame = video_stream.read()

            # Print out the frame size
            if not displayed_frame_size:
                height, width, channels = frame.shape
                print(
                    'alwaysai.py: start_video_detection_with_streamer: frame w x h: {} x {}'
                    .format(width, height))
                displayed_frame_size = True

            # Run the object detector
            results = object_detector.detect_objects(
                frame, confidence_level=od_config.confidence)
            predictions = results.predictions

            # Filter by target labels if arg passed in
            if labels:
                predictions = edgeiq.filter_predictions_by_label(
                    predictions, labels)

            # Update the tracker so we can id each instance of an object
            current_track = centroid_tracker.update(predictions)

            if len(predictions) > 0:
                # print('alwaysai.py: start_video_detection_with_streamer: objects detected: {}'.format(current_track.items()))
                detection_callback(current_track.items(), entry_zones,
                                   exit_zones)

            # Find diff in object ids to see if we've stopped tracking anything
            current_keys = current_track.keys()
            prior_keys = prior_track.keys()
            diff_keys = prior_keys - current_keys
            if len(diff_keys) != 0:
                tracked_predictions = prior_track.items()
                # print('alwaysai.py: start_video_detection_with_streamer: diff_keys: {}. prior_tracks: {}'.format(diff_keys, tracked_predictions))
                did_end_object_callback(list(diff_keys))

            prior_track = copy.deepcopy(current_track)

            # Update image and info for debug streamer, if enabled
            if streamer_enabled:
                marked_predictions = []
                for (object_id, prediction) in current_track.items():
                    prediction.label = "Person {}".format(object_id)
                    marked_predictions.append(prediction)
                frame = edgeiq.markup_image(frame,
                                            marked_predictions,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=object_detector.colors)
                frame = edgeiq.markup_image(frame,
                                            entry_predictions,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=[(0, 255, 0)])
                frame = edgeiq.markup_image(frame,
                                            exit_predictions,
                                            show_labels=True,
                                            show_confidences=False,
                                            colors=[(0, 0, 255)])
                frame = edgeiq.transparent_overlay_boxes(frame,
                                                         entry_predictions,
                                                         alpha=0.2,
                                                         colors=[(0, 200, 0)])
                frame = edgeiq.transparent_overlay_boxes(frame,
                                                         exit_predictions,
                                                         alpha=0.2,
                                                         colors=[(0, 0, 200)])
                text = []
                text.append("Model: {}".format(object_detector.model_id))
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                streamer.send_data(frame, text)

            # Check exit conditions
            # File video streams need to check for additional frames before stopping
            more = getattr(video_stream, "more", None)
            if callable(more) and not video_stream.more():
                print(
                    'alwaysai.py: start_video_detection_with_streamer: file video stream ended'
                )
                break
            if streamer.check_exit():
                break
    finally:
        video_stream.stop()
        streamer.close()
Example #11
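    # Person counter with per-person seen time. Assumes time and edgeiq are imported
    # and that traffic_manager (providing timeSeenFor()) is available at module level.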
    def start(self):
        # This is code from the person-counter app
        # TODO rework this to use a callback, observer-pattern, or generator
        obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
        obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Model:\n{}\n".format(obj_detect.model_id))

        centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                                  max_distance=50)
        fps = edgeiq.FPS()

        # Object to store time info for detected people
        allPeople = {}

        try:

            with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                    edgeiq.Streamer() as streamer:
                # Allow Webcam to warm up
                time.sleep(2.0)
                fps.start()

                # Loop detection and centroid tracker
                while True:
                    frame = video_stream.read()
                    results = obj_detect.detect_objects(frame,
                                                        confidence_level=.5)

                    # Ignore detections of anything other than people
                    filter = edgeiq.filter_predictions_by_label(
                        results.predictions, ['person'])

                    # Adding info for streamer display
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append("Inference time: {:1.3f} s".format(
                        results.duration))
                    text.append("People currently detected:")

                    objects = centroid_tracker.update(filter)

                    # Store active predictions for just this loop
                    predictions = []
                    # Store the active object ids for just this loop
                    active_ids = []

                    if len(objects.items()) == 0:
                        # No people detected
                        text.append("-- NONE")

                    for (object_id, prediction) in objects.items():
                        seenTime = traffic_manager.timeSeenFor(
                            object_id, allPeople)
                        # Correct id displayed for start of array at index 0
                        actualPersonNumber = object_id + 1

                        # Display general data on person seen
                        new_label = "-- Person {i} | {t} sec".format(
                            i=actualPersonNumber, t=seenTime)
                        active_ids.append(object_id)
                        prediction.label = new_label
                        text.append(new_label)
                        predictions.append(prediction)

                    # Update output streamer
                    frame = edgeiq.markup_image(frame, predictions)
                    streamer.send_data(frame, text)
                    fps.update()

                    if streamer.check_exit():
                        break

        finally:
            fps.stop()
            print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
            print("approx. FPS: {:.2f}".format(fps.compute_fps()))
            print("Program Ending")
Example #12
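# Person dwell-time metrics loop: same pattern as the tracker examples above, with
# per-ID timing handled by a metrics_manager module assumed to be imported elsewhere.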
def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(deregister_frames=20,
                                              max_distance=50)
    fps = edgeiq.FPS()

    try:

        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics_manager.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.8)

                # Ignore detections of anything other than people
                filter = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(filter)

                # Store active predictions for just this loop
                predictions = []
                # Store the active object ids for just this loop

                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics_manager.addTimeFor(object_id)
                    timeForId = metrics_manager.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(i=idAdjusted,
                                                              t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                metrics = metrics_manager.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(metrics["count"]))
                text.append("Total time: {} sec".format(metrics["total"]))
                text.append("Average time: {0:.1f} sec".format(metrics["avg"]))
                text.append("Longest individual time: {} sec".format(
                    metrics["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break

    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")