def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=4) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(
                image, results.predictions, colors=[(255, 255, 255)])

            # Generate text to display on streamer
            text = ["<b>Model:</b> {}".format(obj_detect.model_id)]
            text.append("<b>Inference time:</b> {:1.3f} s".format(
                results.duration))
            text.append("<b>Objects:</b>")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(
                    prediction.label, prediction.confidence * 100))

            if image_path == 'images/example_08.jpg':
                text.append("<br><br><b><em>Hello, World!</em></b>")

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
def perform_object_detection(frame):
    """Perform object detection on an image, update the table data,
    and return the encoded frame.

    Args:
        frame (numpy array): The frame from the camera stream.

    Returns:
        bytes: The JPEG-encoded representation of the marked-up frame.
    """
    frame = edgeiq.resize(frame, width=800, height=300)
    results = obj_detect.detect_objects(frame, confidence_level=.5)
    frame = edgeiq.markup_image(
        frame, results.predictions, colors=obj_detect.colors)
    frame = cv2.imencode('.jpg', frame)[1].tobytes()

    # update data for table
    objects = {
        'timestamp': str(round((time.time() - START_TIME), 0)),
        'labels': ", ".join([p.label for p in results.predictions])
    }

    global data
    if data is None:
        data = pd.DataFrame({k: [v] for k, v in objects.items()})
    else:
        # DataFrame.append is deprecated in recent pandas; concatenate instead
        data = pd.concat(
            [data, pd.DataFrame({k: [v] for k, v in objects.items()})])
    data = data.drop_duplicates()
    return frame
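# perform_object_detection relies on module-level state defined elsewhere in
# its app. A minimal sketch of that setup, assuming the names obj_detect,
# data, and START_TIME from the function body (the model id here is an
# assumption, not confirmed by the original source):
import time

import cv2
import edgeiq
import pandas as pd

obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")  # assumed model
obj_detect.load(engine=edgeiq.Engine.DNN)
data = None  # detection table, created on the first processed frame
START_TIME = time.time()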
def _run_detection(self):
    obj_detect = edgeiq.ObjectDetection(self._model_id)
    obj_detect.load(engine=self._engine)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    with edgeiq.WebcamVideoStream(cam=self.idx) as video_stream:
        # Allow Webcam to warm up
        time.sleep(2.0)
        self._fps.start()

        while True:
            frame = video_stream.read()
            if self._stop_event.is_set():
                break
            results = obj_detect.detect_objects(frame, confidence_level=.5)
            frame = edgeiq.markup_image(
                frame, results.predictions, colors=obj_detect.colors)
            output_results = {
                "idx": self.idx,
                "frame": frame,
                "results": results,
                "model_id": obj_detect.model_id
            }
            self._results_q.put(output_results)
            self._fps.update()
def main():
    # arduino = serial.Serial('COM1', 115200, timeout=1)
    GPIO.setmode(GPIO.BCM)
    buzzer = 23
    GPIO.setup(buzzer, GPIO.OUT)

    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN_OPENVINO)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                predictions = edgeiq.filter_predictions_by_label(
                    results.predictions, ["bottle"])
                frame = edgeiq.markup_image(
                    frame, results.predictions,
                    show_confidences=False, colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))
                    # Sound the buzzer for each detected bottle
                    GPIO.output(buzzer, GPIO.HIGH)
                    GPIO.input(buzzer)
                    time.sleep(0.5)
                    GPIO.output(buzzer, GPIO.LOW)
                    GPIO.input(buzzer)
                    time.sleep(0.5)

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    label_defs = {}
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                    frame, labelToString(label_defs, results.predictions),
                    show_labels=False, show_confidences=False,
                    colors=obj_detect.colors, line_thickness=0)
                frame = addNotes(frame, results.predictions)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")
                text.append("fps:{:2.2f}".format(fps.compute_fps()))

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
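# labelToString and addNotes are helpers defined elsewhere in this app. A
# minimal sketch of plausible implementations, assuming labelToString remaps
# prediction labels through the label_defs dict and addNotes draws each label
# near its bounding box (both behaviors are inferred from the call sites,
# not confirmed):
import cv2


def labelToString(label_defs, predictions):
    for prediction in predictions:
        # Fall back to the raw label when no mapping is defined
        prediction.label = label_defs.get(prediction.label, prediction.label)
    return predictions


def addNotes(frame, predictions):
    for prediction in predictions:
        x, y = (int(round(v)) for v in prediction.box.center)
        cv2.putText(frame, prediction.label, (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    return frame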
def main(camera, use_streamer, server_addr):
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        streamer = None
        if use_streamer:
            streamer = edgeiq.Streamer().setup()
        else:
            streamer = CVClient(server_addr).setup()

        with edgeiq.WebcamVideoStream(cam=camera) as video_stream:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                    frame, results.predictions, colors=obj_detect.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        if streamer is not None:
            streamer.close()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))

    centroid_tracker = edgeiq.CentroidTracker(
        deregister_frames=20, max_distance=50)
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection and centroid tracker
            while True:
                frame = video_stream.read()
                frame = edgeiq.resize(frame, width=400)
                results = obj_detect.detect_objects(frame, confidence_level=.5)

                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Objects:")

                objects = centroid_tracker.update(results.predictions)

                # Update the label to reflect the object ID
                predictions = []
                for (object_id, prediction) in objects.items():
                    new_label = 'face {}'.format(object_id)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    # Set up object detection API
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    # Set up rpi ports
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(LEFT_PORT, GPIO.OUT)
    GPIO.setup(RIGHT_PORT, GPIO.OUT)

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.8)
                # Maybe filter the result to bottles or bags for demo?
                image_Centering(results.predictions)

                # Debug information
                if debug_On:
                    frame = edgeiq.markup_image(
                        frame, results.predictions, colors=obj_detect.colors)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append(
                        "Inference time: {:1.3f} s".format(results.duration))
                    text.append("Objects:")

                    for prediction in results.predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))

                    streamer.send_data(frame, text)

                fps.update()
                time.sleep(FRAME_A_RATE)

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
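# image_Centering and the module-level constants used above are defined
# elsewhere in this app. A minimal sketch of what they might look like,
# assuming image_Centering pulses the left or right GPIO port to steer
# toward the first detected object (pin numbers, frame width, tolerance,
# and the steering logic itself are all hypothetical):
import RPi.GPIO as GPIO

LEFT_PORT = 11         # assumed BOARD pin numbers
RIGHT_PORT = 13
debug_On = True
FRAME_A_RATE = 0.1     # assumed delay between frames, in seconds
FRAME_WIDTH = 640      # assumed camera resolution
CENTER_TOLERANCE = 50  # pixels of dead zone around the frame center


def image_Centering(predictions):
    if not predictions:
        return
    offset = predictions[0].box.center[0] - FRAME_WIDTH / 2
    if offset < -CENTER_TOLERANCE:
        # Object is left of center: drive the left port
        GPIO.output(LEFT_PORT, GPIO.HIGH)
        GPIO.output(RIGHT_PORT, GPIO.LOW)
    elif offset > CENTER_TOLERANCE:
        # Object is right of center: drive the right port
        GPIO.output(RIGHT_PORT, GPIO.HIGH)
        GPIO.output(LEFT_PORT, GPIO.LOW)
    else:
        # Object is centered: stop both
        GPIO.output(LEFT_PORT, GPIO.LOW)
        GPIO.output(RIGHT_PORT, GPIO.LOW)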
def main():
    facial_detector = edgeiq.ObjectDetection(
        "alwaysai/res10_300x300_ssd_iter_140000")
    facial_detector.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(facial_detector.engine))
    print("Accelerator: {}\n".format(facial_detector.accelerator))
    print("Model:\n{}\n".format(facial_detector.model_id))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as webcam, \
                edgeiq.Streamer() as streamer:
            # Allow webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = webcam.read()

                # detect human faces
                results = facial_detector.detect_objects(
                    frame, confidence_level=.5)
                frame = edgeiq.markup_image(
                    frame, results.predictions, colors=facial_detector.colors)

                # Generate text to display on streamer
                text = ["Model: {}".format(facial_detector.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("Faces:")

                for prediction in results.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        # stop fps counter and display information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main(): """Run csi video stream and object detector.""" obj_detect = edgeiq.ObjectDetection( "alwaysai/ssd_mobilenet_v1_coco_2018_01_28") obj_detect.load(engine=edgeiq.Engine.DNN_CUDA) print("Loaded model:\n{}\n".format(obj_detect.model_id)) print("Engine: {}".format(obj_detect.engine)) print("Accelerator: {}\n".format(obj_detect.accelerator)) print("Labels:\n{}\n".format(obj_detect.labels)) try: with enhanced_csi.JetsonVideoStream(cam=0, rotation=enhanced_csi. FrameRotation.ROTATE_180, camera_mode=enhanced_csi. JetsonCameraMode. IMX477_4032x3040_30_0, display_width=640, display_height=480) as video_stream,\ edgeiq.Streamer() as streamer: time.sleep(2.0) video_stream.start_counting_fps() # loop detection while True: frame = enhanced_csi.read_camera(video_stream, True) results = obj_detect.detect_objects(frame, confidence_level=.4) frame = edgeiq.markup_image(frame, results.predictions, colors=obj_detect.colors) # Generate text to display on streamer text = ["Model: {}".format(obj_detect.model_id)] text.append("Inference time: {:1.3f} s".format( results.duration)) text.append("Objects:") for prediction in results.predictions: text.append("{}: {:2.2f}%".format( prediction.label, prediction.confidence * 100)) video_stream.frames_displayed += 1 streamer.send_data(frame, text) if streamer.check_exit(): break video_stream.release_fps_stats() finally: print("Program Ending")
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v2_coco_2018_03_29")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    try:
        with edgeiq.RealSense() as video_stream, \
                edgeiq.Streamer() as streamer:
            print("starting RealSense camera")
            time.sleep(2.0)

            # loop detection
            while True:
                distances = []
                depth_image, color_image = video_stream.read()
                roi = video_stream.roi(
                    depth_image, color_image, min=None, max=0.9)
                # frame = edgeiq.resize(color_image, width=416)
                results = obj_detect.detect_objects(roi, confidence_level=.6)
                roi = edgeiq.markup_image(
                    roi, results.predictions, colors=obj_detect.colors)

                for prediction in results.predictions:
                    distances.append(video_stream.compute_object_distance(
                        prediction.box, depth_image))

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for i, prediction in enumerate(results.predictions):
                    text.append("{}: {:2.1f}% Distance = {:2.2f}m".format(
                        prediction.label, prediction.confidence * 100,
                        distances[i]))

                streamer.send_data(roi, text)

                if streamer.check_exit():
                    break
    finally:
        print("Program Ending")
def main(): obj_det = edgeiq.ObjectDetection("tester2204/CE-Recog") if edgeiq.is_jetson(): obj_det.load(engine=edgeiq.Engine.DNN_CUDA) print("Nvidia Jetson Detected\n") else: obj_det.load(engine=edgeiq.Engine.DNN) print("Device is not a Nvidia Jetson Board\n") print("Initializing Application...\n") print("Model:\n{}\n".format(obj_det.model_id)) print("Engine:\n{}\n".format(obj_det.engine)) print("Labels:\n{}\n".format(obj_det.labels)) #imgURL = "https://specials-images.forbesimg.com/imageserve/5e88b867e2bb040006427704/0x0.jpg" #urllib.request.urlretrieve(imgURL, "this.jpg") #Change based on OS and User #image = "Images/this.jpg" image_lists = sorted(list(edgeiq.list_images("Images/"))) with edgeiq.Streamer(queue_depth=len(image_lists), inter_msg_time=7) as streamer: i = 0 while i < 3: for image_list in image_lists: show_image = cv2.imread(image_list) image = show_image.copy() results = obj_det.detect_objects(image, confidence_level=.5) image = edgeiq.markup_image(image, results.predictions, colors=obj_det.colors) shown = ["Model: {}".format(obj_det.model_id)] shown.append("Inference time: {:1.3f} s".format( results.duration)) shown.append("Objects:") for prediction in results.predictions: shown.append("{}: {:2.2f}%".format( prediction.label, prediction.confidence * 100)) streamer.send_data(image, shown) streamer.wait() i = i + 1 #if streamer.check_exit(): print("That's it folks!") print("Thanks for using Ben's Object Recognition Model & Software") print("Sponsored by: Darien's Face")
def _runDetectionModel(model, frame):
    results = model.detect_objects(frame, confidence_level=.5)
    frame = edgeiq.markup_image(
        frame, results.predictions, colors=model.colors)

    # Generate text to display on streamer
    text = ["Model: {}".format(model.model_id)]
    text.append("Inference time: {:1.3f} s".format(results.duration))
    text.append("Objects:")

    for prediction in results.predictions:
        text.append("{}: {:2.2f}%".format(
            prediction.label, prediction.confidence * 100))

    return frame, text
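# A minimal sketch of how _runDetectionModel might be driven from a webcam
# loop, following the WebcamVideoStream/Streamer pattern used by the other
# apps in this collection (the model id and camera index are assumptions):
import time

import edgeiq


def main():
    obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
            edgeiq.Streamer() as streamer:
        # Allow Webcam to warm up
        time.sleep(2.0)
        while True:
            frame, text = _runDetectionModel(obj_detect, video_stream.read())
            streamer.send_data(frame, text)
            if streamer.check_exit():
                break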
def end_tracking_loop(components, predictions, text):
    # print("alwaysai_helper.py: end_tracking_loop")
    fps = components[FPS]
    streamer = components[STREAMER]
    video_stream = components[VIDEO_STREAM]
    frame = components[CURRENT_FRAME]

    if fps is None:
        raise Exception(
            "alwaysai_helper.py: end_tracking_loop: fps missing from components")
    if video_stream is None:
        raise Exception(
            "alwaysai_helper.py: end_tracking_loop: video_stream missing from components")

    frame = edgeiq.markup_image(frame, predictions)
    if streamer is not None:
        streamer.send_data(frame, text)
    fps.update()
def main(): """Run object detector and centroid tracker.""" tracker = edgeiq.CorrelationTracker( max_objects=5, enter_cb=face_enters, exit_cb=face_exits) fps = edgeiq.FPS() try: with edgeiq.oak.Oak('alwaysai/face_detection_0200_oak', sensor=edgeiq.Sensor.res_1080, video_mode=edgeiq.VideoMode.preview) as oak_camera, \ edgeiq.Streamer() as streamer: # Allow Webcam to warm up time.sleep(2.0) fps.start() # loop detection and tracking while True: frame = oak_camera.get_frame() results = oak_camera.get_model_result(confidence_level=.6) if results: fps.update() text = ["Faces Detected:"] objects = tracker.update(results.predictions, frame) # Update the label to reflect the object ID predictions = [] for (object_id, prediction) in objects.items(): prediction.label = "face {}".format(object_id) text.append("{}".format(prediction.label)) predictions.append(prediction) text.append(("approx. FPS: {:.2f}". format(fps.compute_fps()))) frame = edgeiq.markup_image(frame, predictions) streamer.send_data(frame, text) if streamer.check_exit(): break finally: fps.stop() print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds())) print("Program Ending")
def main():
    fps = edgeiq.FPS()

    try:
        streamer = edgeiq.Streamer()
        streamer.setup()
        video_stream = edgeiq.WebcamVideoStream(
            cam=0)  # replace with FileVideoStream if need be

        # Allow application to warm up
        video_stream.start()
        time.sleep(2.0)
        fps.start()
        text = [""]

        # initialize Vaccine Tracker
        vaccine_tracker = VaccineTracker()

        # loop detection
        while True:
            frame = video_stream.read()
            vaccine_tracker.update(frame)

            # draw the vaccination box in the frame
            frame = edgeiq.markup_image(frame, [
                edgeiq.ObjectDetectionPrediction(
                    label="vaccination", index=0,
                    box=vaccine_tracker.box, confidence=100.00)
            ])
            streamer.send_data(frame, text)
            fps.update()

            if streamer.check_exit():
                break
    finally:
        fps.stop()
        streamer.close()
        video_stream.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main(): """Run object detector.""" fps = edgeiq.FPS() try: with edgeiq.Oak('alwaysai/ssd_v2_coco_oak') as oak_camera,\ edgeiq.Streamer() as streamer: # Allow Oak camera to warm up time.sleep(2.0) fps.start() # loop detection while True: frame = oak_camera.get_frame() results = oak_camera.get_model_result(confidence_level=.8) if results: fps.update() text = ["Oak Camera Detections:"] text.append("approx. FPS: {:.2f}".format( fps.compute_fps())) text.append("Objects:") for prediction in results.predictions: center = tuple( int(round(val)) for val in prediction.box.center) b, g, r = frame[center[1], center[0]] cname = getColorName(r, g, b) text.append("{}: {:2.2f}% color = {}".format( prediction.label, prediction.confidence * 100, cname)) # Mark up image for display frame = edgeiq.markup_image(frame, results.predictions) streamer.send_data(frame, text) if streamer.check_exit(): break finally: fps.stop() print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds())) print("Program Ending")
def detection_base(model, confidence, image_array):
    detector = edgeiq.ObjectDetection(
        model)  # model example: "alwaysai/res10_300x300_ssd_iter_140000"
    detector.load(engine=edgeiq.Engine.DNN)
    centroid_tracker = edgeiq.CentroidTracker(
        deregister_frames=100, max_distance=50)
    results = detector.detect_objects(image_array, confidence_level=confidence)
    objects = centroid_tracker.update(results.predictions)

    predictions = []
    for (object_id, prediction) in objects.items():
        prediction.label = "{}: {}".format(prediction.label, object_id)
        predictions.append(prediction)

    image = edgeiq.markup_image(image_array, predictions)
    return image, results, None
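# A usage sketch for detection_base (the image path is an assumption). Note
# the design trade-off: detection_base builds a fresh ObjectDetection and
# CentroidTracker on every call, so the model load cost is paid per frame
# and tracker IDs cannot persist across calls; for video, the other apps in
# this collection create the detector and tracker once, outside the loop.
import cv2

image = cv2.imread("images/example.jpg")
marked_up, results, _ = detection_base(
    "alwaysai/res10_300x300_ssd_iter_140000", 0.5, image)
cv2.imwrite("output.jpg", marked_up)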
def handle(self):
    '''
    Method to be called by the socketserver.BaseServer.
    '''
    data = self.read()
    image = self.unpack_img(data)
    results = self.server.inferencer.detect_objects(
        image, confidence_level=.5)
    final = edgeiq.markup_image(
        image, results.predictions, colors=self.server.inferencer.colors)
    image = self.pack_img(final)
    if image is not None:
        self.send(image)
def object_detection():
    """
    Oak.get_model_result can return different results based on the purpose
    of the model running on the camera. This function shows how to work
    with object detection models.
    """
    fps = edgeiq.FPS()

    with edgeiq.Oak('alwaysai/mobilenet_ssd_oak') as camera, \
            edgeiq.Streamer() as streamer:
        fps.start()
        while True:
            text = ['FPS: {:2.2f}'.format(fps.compute_fps())]
            frame = camera.get_frame()
            result = camera.get_model_result(confidence_level=.75)

            # Check for inferencing results. Oak.get_model_result is a
            # non-blocking call and will return None when new data is not
            # available.
            if result:
                frame = edgeiq.markup_image(frame, result.predictions)
                text.append("Objects:")
                for prediction in result.predictions:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

            streamer.send_data(frame, text)
            if streamer.check_exit():
                break
            fps.update()

    print('fps = {}'.format(fps.compute_fps()))
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)

            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(
                image, results.predictions, colors=obj_detect.colors)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))
            text.append("Objects:")

            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(
                    prediction.label, prediction.confidence * 100))

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
def main():
    # if you would like to test an additional model, add one to the list below:
    models = ["alwaysai/mobilenet_ssd",
              "alwaysai/ssd_inception_v2_coco_2018_01_28"]

    # if you've added a model, add a new color as a list of tuples in BGR
    # format to make visualization easier (e.g. [(B, G, R)]).
    colors = [[(66, 68, 179)], [(50, 227, 62)]]

    detectors = []

    # load all the models (creates a new object detector for each model)
    for model in models:
        # instantiate an object detector for this model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                text = [""]

                # gather data from all the detectors
                for i in range(0, len(detectors)):
                    results = detectors[i].detect_objects(
                        frame, confidence_level=.5)
                    object_frame = edgeiq.markup_image(
                        frame, results.predictions,
                        show_labels=False, colors=colors[i])

                    # for the first frame, overwrite the input feed
                    if i == 0:
                        display_frame = object_frame
                    else:
                        # otherwise, stack the new marked-up frame on top of
                        # the previous one
                        display_frame = numpy.concatenate(
                            (object_frame, display_frame))

                    # append each prediction
                    for prediction in results.predictions:
                        text.append(
                            "Model {} detects {}: {:2.2f}%".format(
                                detectors[i].model_id, prediction.label,
                                prediction.confidence * 100))

                # send the image frame and the predictions for both
                # prediction models to the output stream
                streamer.send_data(display_frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    image_paths = sorted(list(edgeiq.list_images("images/")))
    print("Images:\n{}\n".format(image_paths))

    with edgeiq.Streamer(
            queue_depth=len(image_paths), inter_msg_time=3) as streamer:
        for image_path in image_paths:
            # Load image from disk
            image = cv2.imread(image_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            r, g, b = cv2.split(image)

            # Plot the pixel distribution in RGB space
            fig = plt.figure()
            axis = fig.add_subplot(1, 1, 1, projection="3d")
            pixel_colors = image.reshape(
                (np.shape(image)[0] * np.shape(image)[1], 3))
            norm = colors.Normalize(vmin=-1., vmax=1.)
            norm.autoscale(pixel_colors)
            pixel_colors = norm(pixel_colors).tolist()
            axis.scatter(r.flatten(), g.flatten(), b.flatten(),
                         facecolors=pixel_colors, marker=".")
            axis.set_xlabel("Red")
            axis.set_ylabel("Green")
            axis.set_zlabel("Blue")
            plt.show()

            # convert from rgb to hsv and pick out 2 shades
            hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
            hsv_drot = (18, 24, 61)
            hsv_lrot = (13, 203, 55)

            # build the color mask
            mask = cv2.inRange(hsv_image, hsv_lrot, hsv_drot)
            res = cv2.bitwise_and(image, image, mask=mask)
            plt.subplot(1, 2, 1)
            plt.imshow(mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res)
            plt.show()

            # 2nd layer mask, did not display
            hsv_olive = (34, 32, 120)
            hsv_dolive = (37, 240, 27)
            mask_ol = cv2.inRange(hsv_image, hsv_olive, hsv_dolive)
            res_w = cv2.bitwise_and(image, image, mask=mask_ol)
            plt.subplot(1, 2, 1)
            plt.imshow(mask_ol, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(res_w)
            plt.show()

            # final mask
            final_mask = mask + mask_ol
            final_result = cv2.bitwise_and(image, image, mask=final_mask)
            plt.subplot(1, 2, 1)
            plt.imshow(final_mask, cmap="gray")
            plt.subplot(1, 2, 2)
            plt.imshow(final_result)
            plt.show()

            # testing .shape and typecast image
            print("The type of this input is {}".format(type(image)))
            print("Shape: {}".format(image.shape))

            # text.append(get_colors(get_image(image_path), 4, True))

            # Detect objects before building the streamer text so that
            # results is defined when its duration is reported
            results = obj_detect.detect_objects(image, confidence_level=.5)
            image = edgeiq.markup_image(
                image, results.predictions, colors=obj_detect.colors)

            # Generate text to display on streamer
            text = ["Model: {}".format(obj_detect.model_id)]
            text.append("Inference time: {:1.3f} s".format(results.duration))

            # need to convert from bgr to rgb
            swapped_colors = swap(obj_detect.colors)
            text.append("Colors printed!")
            # text.append(swapped_colors)
            print(swapped_colors)
            # print(obj_detect.colors)
            # converted = np.array([np.array(rgb) for rgb in swapped_colors])
            # numpy arrays with lists (like numpy contained within itself,
            # list of lists)
            # print(converted.shape)
            # print(rgb2hex(swapped_colors))
            # print(converted)
            # iterate through tuple list and convert
            # for x in obj_detect.colors:
            #     text.append(rgb2hex(swapped_colors))
            #     text.append(format(x))

            text.append("Objects:")
            for prediction in results.predictions:
                text.append("{}: {:2.2f}%".format(
                    prediction.label, prediction.confidence * 100))

            streamer.send_data(image, text)
        streamer.wait()

    print("Program Ending")
def main():
    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/mobilenet_ssd")
    obj_detect.load(engine=edgeiq.Engine.DNN)
    tracker = edgeiq.CentroidTracker(deregister_frames=30)

    print("Loaded model:\n{}\n".format(obj_detect.model_id))
    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Labels:\n{}\n".format(obj_detect.labels))

    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            prev_tracked_people = {}
            logs = []
            currentPeople = 0

            # loop detection
            while True:
                frame = video_stream.read()
                results = obj_detect.detect_objects(frame, confidence_level=.5)
                people = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])
                tracked_people = tracker.update(people)

                people = []
                for (object_id, prediction) in tracked_people.items():
                    prediction.label = 'Person {}'.format(object_id)
                    people.append(prediction)

                frame = edgeiq.markup_image(
                    frame, people, colors=obj_detect.colors)

                new_entries = set(tracked_people) - set(prev_tracked_people)
                for entry in new_entries:
                    save_snapshot(frame, entry)
                    logs.append('Person {} entered'.format(entry))
                    currentPeople += 1

                new_exits = set(prev_tracked_people) - set(tracked_people)
                for exited_id in new_exits:
                    logs.append('Person {} exited'.format(exited_id))
                    currentPeople -= 1

                prev_tracked_people = dict(tracked_people)

                # Generate text to display on streamer
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append(
                    "Inference time: {:1.3f} s".format(results.duration))
                text.append("Objects:")

                for prediction in people:
                    text.append("{}: {:2.2f}%".format(
                        prediction.label, prediction.confidence * 100))

                text.append('Current Occupancy:')
                # append the count as a single entry; += with a string would
                # add each character separately
                text.append(str(currentPeople))

                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
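# save_snapshot is defined elsewhere in this app. A minimal sketch of a
# plausible implementation, writing the frame to disk keyed by the person's
# ID (the "snapshots/" directory and filename scheme are assumptions):
import os
import time

import cv2


def save_snapshot(frame, object_id):
    os.makedirs("snapshots", exist_ok=True)
    filename = os.path.join(
        "snapshots", "person_{}_{}.jpg".format(object_id, int(time.time())))
    cv2.imwrite(filename, frame)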
def main():
    # The current frame index
    frame_idx = 0
    # The number of frames to skip before running detector
    detect_period = 50

    # if you would like to test an additional model, add one to the list below:
    models = [
        "alwaysai/ssd_mobilenet_v2_oidv4",
        "alwaysai/ssd_inception_v2_coco_2018_01_28"
    ]

    # include any labels that you wish to detect from any models (listed
    # above in 'models') here in this list
    detected_contraband = [
        "Pen", "cell phone", "backpack", "book", "Book", "Ring binder",
        "Headphones", "Calculator", "Mobile phone", "Telephone",
        "Microphone", "Ipod", "Remote control"
    ]

    # load all the models (creates a new object detector for each model)
    detectors = []
    for model in models:
        # instantiate an object detector for this model
        obj_detect = edgeiq.ObjectDetection(model)
        obj_detect.load(engine=edgeiq.Engine.DNN)

        # track the generated object detection items by storing them in detectors
        detectors.append(obj_detect)

        # print the details of each model to the console
        print("Model:\n{}\n".format(obj_detect.model_id))
        print("Engine: {}".format(obj_detect.engine))
        print("Accelerator: {}\n".format(obj_detect.accelerator))
        print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()
    contraband_summary = ContrabandSummary()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # loop detection
            while True:
                frame = video_stream.read()
                predictions_to_markup = []
                text = [""]

                # only analyze every 'detect_period' frame (i.e. every 50th
                # in original code)
                if frame_idx % detect_period == 0:
                    # gather data from all the detectors
                    for i in range(0, len(detectors)):
                        results = detectors[i].detect_objects(
                            frame, confidence_level=.2)

                        # Stop tracking old objects
                        if tracker.count:
                            tracker.stop_all()

                        # append each prediction
                        predictions = results.predictions
                        for prediction in predictions:
                            if prediction.label.strip() in detected_contraband:
                                contraband_summary.contraband_alert(
                                    prediction.label, frame)
                                predictions_to_markup.append(prediction)
                                tracker.start(frame, prediction)
                else:
                    # if there are objects being tracked, update the tracker
                    # with the new frame
                    if tracker.count:
                        # get the new predictions for the objects being
                        # tracked, used to markup the frame
                        predictions_to_markup = tracker.update(frame)

                # mark up the frame with the predictions for the contraband
                # objects
                frame = edgeiq.markup_image(
                    frame, predictions_to_markup, show_labels=True,
                    show_confidences=False, colors=obj_detect.colors)

                # send the collection of contraband detection points (string
                # and video frame) to the streamer
                text = contraband_summary.get_contraband_string()
                streamer.send_data(frame, text)

                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    # Spin up the object detector
    obj_detect = edgeiq.ObjectDetection("alwaysai/" + OBJECT_DETECTION_MODEL)
    obj_detect.load(
        engine=edgeiq.Engine.DNN_CUDA, accelerator=edgeiq.Accelerator.NVIDIA)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    # Prepare to track frames per second calculations
    fps = edgeiq.FPS()

    # Load any prior instance of the tracker, otherwise spin up a new one
    centroid_tracker = file_manager.load(
        CENTROID_TRACKER,
        edgeiq.CentroidTracker(
            deregister_frames=TRACKER_DEREGISTER_FRAMES,
            max_distance=TRACKER_MAX_DISTANCE))

    # Load any prior instance of the metrics data, otherwise start a new one
    metrics = file_manager.load(
        METRICS_MANAGER, metrics_manager.MetricsManager())

    try:
        if IP_CAMERA_FEED is not None:
            stream_details = edgeiq.IPVideoStream(IP_CAMERA_FEED)
        else:
            stream_details = edgeiq.WebcamVideoStream(cam=0)

        with stream_details as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Loop detection and centroid tracker
            while True:
                metrics.newLoop()
                frame = video_stream.read()
                results = obj_detect.detect_objects(
                    frame, confidence_level=DETECT_CONFIDENCE_THRESHOLD)

                # Ignore detections of anything other than people
                filtered = edgeiq.filter_predictions_by_label(
                    results.predictions, ['person'])

                # Adding info for streamer display
                text = ["Model: {}".format(obj_detect.model_id)]
                text.append("Inference time: {:1.3f} s".format(
                    results.duration))
                text.append("People currently detected:")

                objects = centroid_tracker.update(filtered)

                # Store active predictions for just this loop
                predictions = []

                # Store the active object ids for just this loop
                if len(objects.items()) == 0:
                    # No people detected
                    text.append("-- NONE")

                for (object_id, prediction) in objects.items():
                    metrics.addTimeFor(object_id)
                    timeForId = metrics.timeForId(object_id)
                    # Correcting for fact that index 0 is first object in
                    # an array
                    idAdjusted = object_id + 1
                    # Display text with bounding box in video
                    new_label = "Person {i} | {t} sec".format(
                        i=idAdjusted, t=timeForId)
                    prediction.label = new_label
                    text.append(new_label)
                    predictions.append(prediction)

                # Add metrics to text going to streamer
                m = metrics.currentMetrics()
                text.append("")  # Spacing
                text.append("Total people seen: {}".format(m["count"]))
                text.append("Total time: {} sec".format(m["total"]))
                text.append("Average time: {0:.1f} sec".format(m["avg"]))
                text.append("Longest individual time: {} sec".format(
                    m["max"]))

                # Update output streamer
                frame = edgeiq.markup_image(frame, predictions)
                streamer.send_data(frame, text)
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        fps.stop()
        # TODO: Update to save every few seconds in case a crash occurs
        file_manager.save(metrics, METRICS_MANAGER)
        file_manager.save(centroid_tracker, CENTROID_TRACKER)
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def main():
    # The current frame index
    frame_idx = 0
    # The number of frames to skip before running detector
    detect_period = 30

    obj_detect = edgeiq.ObjectDetection(
        "alwaysai/ssd_mobilenet_v1_coco_2018_01_28")
    obj_detect.load(engine=edgeiq.Engine.DNN)

    print("Engine: {}".format(obj_detect.engine))
    print("Accelerator: {}\n".format(obj_detect.accelerator))
    print("Model:\n{}\n".format(obj_detect.model_id))
    print("Labels:\n{}\n".format(obj_detect.labels))

    tracker = edgeiq.CorrelationTracker(max_objects=5)
    fps = edgeiq.FPS()

    try:
        with edgeiq.WebcamVideoStream(cam=0) as video_stream, \
                edgeiq.Streamer() as streamer:
            # Allow Webcam to warm up
            time.sleep(2.0)
            fps.start()

            # Initialize streamer text so it is defined on non-detection frames
            text = [""]

            while True:
                frame = video_stream.read()
                predictions = []

                if frame_idx % detect_period == 0:
                    results = obj_detect.detect_objects(
                        frame, confidence_level=.5)

                    # Generate text to display on streamer
                    text = ["Model: {}".format(obj_detect.model_id)]
                    text.append("Inference time: {:1.3f} s".format(
                        results.duration))
                    text.append("Objects:")

                    # Stop tracking old objects
                    if tracker.count:
                        tracker.stop_all()

                    predictions = results.predictions
                    for prediction in predictions:
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))
                        tracker.start(frame, prediction)
                else:
                    if tracker.count:
                        predictions = tracker.update(frame)

                frame = edgeiq.markup_image(
                    frame, predictions, show_labels=True,
                    show_confidences=False, colors=obj_detect.colors)

                streamer.send_data(frame, text)
                frame_idx += 1
                fps.update()

                if streamer.check_exit():
                    break
    finally:
        tracker.stop_all()
        fps.stop()
        print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
        print("approx. FPS: {:.2f}".format(fps.compute_fps()))
        print("Program Ending")
def updateStream(frame, streamer, fps, predictions, text):
    frame = edgeiq.markup_image(frame, predictions)
    streamer.send_data(frame, text)
    fps.update()
def main(): obj_detect = edgeiq.ObjectDetection("alwaysai/mobilenet_ssd") obj_detect.load(engine=edgeiq.Engine.DNN) print("Engine: {}".format(obj_detect.engine)) print("Accelerator: {}\n".format(obj_detect.accelerator)) print("Model:\n{}\n".format(obj_detect.model_id)) print("Labels:\n{}\n".format(obj_detect.labels)) print("Detecting:\n{}\n".format(OBJECTS)) fps = edgeiq.FPS() try: with edgeiq.WebcamVideoStream(cam=0) as video_stream, \ edgeiq.Streamer() as streamer: # Allow Webcam to warm up time.sleep(2.0) fps.start() # loop detection while True: frame = video_stream.read() results = obj_detect.detect_objects(frame, confidence_level=.5) predictions = edgeiq.filter_predictions_by_label( results.predictions, OBJECTS) frame = edgeiq.markup_image(frame, predictions, show_confidences=False, colors=obj_detect.colors) # Print date and time on frame current_time_date = str(datetime.datetime.now()) (h, w) = frame.shape[:2] cv2.putText(frame, current_time_date, (10, h - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # Count objects counter = {obj: 0 for obj in OBJECTS} for prediction in predictions: # increment the counter of the detected object counter[prediction.label] += 1 # Generate text to display on streamer text = ["Model: {}".format(obj_detect.model_id)] text.append("Inference time: {:1.3f} s".format( results.duration)) text.append("Object counts:") for label, count in counter.items(): text.append("{}: {}".format(label, count)) streamer.send_data(frame, text) fps.update() if streamer.check_exit(): break finally: fps.stop() print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds())) print("approx. FPS: {:.2f}".format(fps.compute_fps())) print("Program Ending")
def main():
    fps = edgeiq.FPS()

    # Change parameter to alwaysai/human_pose_eyecloud to run the human
    # pose model.
    with edgeiq.EyeCloud('alwaysai/mobilenet_ssd_eyecloud') as camera, \
            edgeiq.Streamer() as streamer:
        fps.start()
        centroid_tracker = edgeiq.CentroidTracker(
            deregister_frames=20, max_distance=100)
        number = 0

        while True:
            text = ["FPS:{}".format(fps.compute_fps())]
            frame = camera.get_frame()
            # print('image sequence = {}'.format(frame.sequence_index))
            result = camera.get_model_result(confidence_level=0.9)

            # Check for inferencing results.
            if result:
                # print('model sequence = {}'.format(result.sequence_index))
                text.append("Model: {}".format(camera.model_id))

                if camera.model_purpose == 'PoseEstimation':
                    frame = result.draw_poses(frame)
                    text.append("Inference time: {:1.3f} s".format(
                        result.duration))
                    for ind, pose in enumerate(result.poses):
                        text.append("Person {}".format(ind))
                        text.append('-' * 10)
                        text.append("Key Points:")
                        for key_point in pose.key_points:
                            text.append(str(key_point))

                elif camera.model_purpose == 'ObjectDetection':
                    # Remove detection boxes whose overlap (IoU) is too
                    # large, keeping the earlier of each overlapping pair
                    if len(result.predictions) > 1:
                        kept = []
                        for prediction in result.predictions:
                            overlaps = any(
                                prediction.box.compute_overlap(k.box) > 0.3
                                for k in kept)
                            if not overlaps:
                                kept.append(prediction)
                        result.predictions[:] = kept

                    # Keep only detections inside the valid FOV region
                    result.predictions[:] = [
                        p for p in result.predictions
                        if 500 <= p.box.center[0] <= 1300
                    ]

                    objects = centroid_tracker.update(result.predictions)
                    text.append("Inference time: {:1.3f} s".format(
                        result.duration))
                    text.append("Objects:")

                    for (object_id, prediction) in objects.items():
                        new_label = 'person {}'.format(object_id)
                        if object_id + 1 > number:
                            number = object_id + 1
                        prediction.label = new_label
                        text.append("{}: {:2.2f}%".format(
                            prediction.label, prediction.confidence * 100))
                        result.predictions.append(prediction)

                    frame = edgeiq.markup_image(frame, result.predictions)
                    text.append("people pass: {}".format(number))

                elif camera.model_purpose == 'Classification':
                    if len(result.predictions) > 0:
                        top_prediction = result.predictions[0]
                        text = "Classification: {}, {:.2f}%".format(
                            top_prediction.label,
                            top_prediction.confidence * 100)
                    else:
                        text = None

            # cv2.putText requires a string; skip when text is a list or None
            if isinstance(text, str):
                cv2.putText(frame, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
                            0.4, (0, 0, 255), 2)
            # Draw the valid FOV region
            cv2.rectangle(frame, (500, 0), (1300, 1080), (255, 0, 0), 2)
            streamer.send_data(frame, text)

            if streamer.check_exit():
                break
            fps.update()

    print('fps = {}'.format(fps.compute_fps()))