def run(self): """ Runs the module. """ global capture # Allows time for socket server to start self.Helpers.logger.info( "Realsense D415 Streamer waiting 2 seconds for CamRead socket server." ) time.sleep(2) self.Helpers.logger.info("Realsense D415 Streamer continuing.") # Starts the socket module self.Socket = Socket("Realsense D415 Streamer") # Subscribes to the socket server capture = self.Socket.subscribe( self.Helpers.confs["EMAR"]["ip"], self.Helpers.confs["Realsense"]["socket"]["port"]) try: # Starts web server server = ThreadedHTTPServer( (self.Helpers.confs["EMAR"]["ip"], self.Helpers.confs["Realsense"]["server"]["port"]), CamHandler) self.Helpers.logger.info( "Realsense D415 Streamer server started on http://" + self.Helpers.confs["EMAR"]["ip"] + ":" + str(self.Helpers.confs["Realsense"]["server"]["port"])) server.serve_forever() except KeyboardInterrupt: # Closes socket server capture.close() exit()
def __init__(self): """ Initializes the class. """ super(RealsenseRead, self).__init__() self.Helpers = Helpers("Realsense D415 Reader") self.colorizer = rs.colorizer() # OpenCV fonts self.font = cv2.FONT_HERSHEY_SIMPLEX self.black = (0, 0, 0) self.green = (0, 255, 0) self.white = (255, 255, 255) # Starts the Realsense module self.Realsense = Realsense() # Connects to the Realsense camera self.profile = self.Realsense.connect() # Starts the socket module self.Socket = Socket("Realsense D415 Reader") # Sets up the object detection model self.Model = Model() self.Helpers.logger.info( "Realsense D415 Reader Class initialization complete.")
def run(self): """ Runs the module. """ fps = "" framecount = 0 time1 = 0 time2 = 0 self.font = cv2.FONT_HERSHEY_SIMPLEX self.color = (0, 0, 0) # Starts the TASS module self.TASS = TASS() # Connects to the camera self.TASS.connect() # Starts the socket module self.Socket = Socket("CamRead") # Starts the socket server soc = self.Socket.connect(self.Helpers.confs["EMAR"]["ip"], self.Helpers.confs["TASS"]["socket"]["port"]) while True: try: t1 = time.perf_counter() # Reads the current frame _, frame = self.TASS.lcv.read() width = frame.shape[1] # Writes header to frame cv2.putText(frame, "EMAR Mini Live Stream", (30, 50), self.font, 0.7, self.color, 2, cv2.LINE_AA) # Writes date to frame cv2.putText(frame, str(datetime.now()), (30, 80), self.font, 0.5, self.color, 2, cv2.LINE_AA) cv2.putText(frame, fps, (width - 170, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.color, 1, cv2.LINE_AA) # Streams the frame to the socket server encoded, buffer = cv2.imencode('.jpg', frame) soc.send(base64.b64encode(buffer)) # FPS calculation framecount += 1 if framecount >= 15: fps = "Stream: {:.1f} FPS".format(time1 / 15) framecount = 0 time1 = 0 time2 = 0 t2 = time.perf_counter() elapsedTime = t2 - t1 time1 += 1 / elapsedTime time2 += elapsedTime except KeyboardInterrupt: self.TASS.lcv.release() break
def __init__(self): """ Initializes the class. """ self.Helpers = Helpers("Camera") # Initiates the iotJumpWay connection class self.iot = iot() self.iot.connect() self.Sockets = Socket() self.Helpers.logger.info("Camera Class initialization complete.")
class RealsenseStream(Thread):
    """ Realsense D415 Streamer Class

    Realsense D415 streamer functions for EMAR Mini Emergency Assistance Robot.
    """

    def __init__(self):
        """ Initializes the class. """

        super(RealsenseStream, self).__init__()

        self.Helpers = Helpers("Realsense D415 Streamer")

        self.Helpers.logger.info(
            "Realsense D415 Streamer Helper Class initialization complete.")

    def run(self):
        """ Runs the module. """

        global capture

        # Allows time for the socket server to start
        self.Helpers.logger.info(
            "Realsense D415 Streamer waiting 2 seconds for CamRead socket server.")
        time.sleep(2)
        self.Helpers.logger.info("Realsense D415 Streamer continuing.")

        # Starts the socket module
        self.Socket = Socket("Realsense D415 Streamer")
        # Subscribes to the socket server
        capture = self.Socket.subscribe(
            self.Helpers.confs["EMAR"]["ip"],
            self.Helpers.confs["Realsense"]["socket"]["port"])

        try:
            # Starts the web server
            server = ThreadedHTTPServer(
                (self.Helpers.confs["EMAR"]["ip"],
                 self.Helpers.confs["Realsense"]["server"]["port"]), CamHandler)
            self.Helpers.logger.info(
                "Realsense D415 Streamer server started on http://"
                + self.Helpers.confs["EMAR"]["ip"] + ":"
                + str(self.Helpers.confs["Realsense"]["server"]["port"]))
            server.serve_forever()
        except KeyboardInterrupt:
            # Closes the socket subscription
            capture.close()
            exit()
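# Neither ThreadedHTTPServer nor CamHandler is defined in this section. A
# minimal sketch of the pattern the streamer classes rely on, assuming the
# subscribed socket exposes a recv() call returning the base64-encoded frame;
# the recv() call and the handler body are assumptions, not the project's code:

import base64
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """ Handles each HTTP request in its own thread. """

class CamHandler(BaseHTTPRequestHandler):
    """ Serves the subscribed frames as an MJPEG stream. """

    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-type",
                         "multipart/x-mixed-replace; boundary=--jpgboundary")
        self.end_headers()
        while True:
            # capture is the global subscription set up in run()
            frame = base64.b64decode(capture.recv())
            self.wfile.write(b"--jpgboundary\r\n")
            self.send_header("Content-type", "image/jpeg")
            self.send_header("Content-length", str(len(frame)))
            self.end_headers()
            self.wfile.write(frame)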
class CamStream(Thread):
    """ CamStream Class

    The CamStream Class streams the processed frame to a local server.
    """

    def __init__(self):
        """ Initializes the class. """

        super(CamStream, self).__init__()

        self.Helpers = Helpers("CamStream")

        self.Helpers.logger.info("CamStream Helper Class initialization complete.")

    def run(self):
        """ Runs the module. """

        global capture

        # Allows time for the socket server to start
        self.Helpers.logger.info("CamStream waiting 2 seconds for CamRead socket server.")
        time.sleep(2)
        self.Helpers.logger.info("CamStream continuing.")

        # Starts the socket module
        self.Socket = Socket("CamStream")
        # Subscribes to the socket server
        capture = self.Socket.subscribe(
            self.Helpers.confs["tass"]["socket"]["ip"],
            self.Helpers.confs["tass"]["socket"]["port"])

        try:
            # Starts the web server
            server = ThreadedHTTPServer(
                (self.Helpers.confs["tass"]["ip"],
                 self.Helpers.confs["tass"]["port"]), CamHandler)
            self.Helpers.logger.info(
                "Stream server started on http://"
                + self.Helpers.confs["tass"]["ip"] + ":"
                + str(self.Helpers.confs["tass"]["port"]))
            server.serve_forever()
        except KeyboardInterrupt:
            # Closes the socket subscription
            capture.close()
            exit()
def run(self): """ Runs the module. """ self.font = cv2.FONT_HERSHEY_SIMPLEX self.color = (0, 0, 0) # Starts the TASS module self.TASS = TASS() # Connects to the camera self.TASS.connect() # Encodes known user database self.TASS.preprocess() self.publishes = [None] * (len(self.TASS.encoded) + 1) # Starts the socket module self.Socket = Socket("CamRead") # Starts the socket server soc = self.Socket.connect(self.Helpers.confs["tass"]["socket"]["ip"], self.Helpers.confs["tass"]["socket"]["port"]) while True: try: # Reads the current frame _, frame = self.TASS.lcv.read() # Processes the frame raw, frame, gray = self.TASS.processim(frame) # Gets faces and coordinates faces, coords = self.TASS.faces(frame) # Writes header to frame cv2.putText(frame, "Office Camera 1", (10, 50), self.font, 0.7, self.color, 2, cv2.LINE_AA) # Writes date to frame cv2.putText(frame, str(datetime.now()), (10, 80), self.font, 0.5, self.color, 2, cv2.LINE_AA) if len(coords): i = 0 # Loops through coordinates for face in coords: # Gets facial landmarks coordinates coordsi = face_utils.shape_to_np(face) # Looks for matches/intruders person, msg = self.TASS.match(raw, [coords]) # If iotJumpWay publish for user is in past if (self.publishes[person] is None or (self.publishes[person] + (1 * 60)) < time.time()): # Update publish time for user self.publishes[person] = time.time() # Send iotJumpWay notification self.iotJumpWay.appDeviceChannelPub( "Sensors", self.Helpers.confs["tass"]["zid"], self.Helpers.confs["tass"]["did"], { "WarningOrigin": self.Helpers.confs["tass"]["sid"], "WarningType": "TASS", "WarningValue": person, "WarningMessage": msg }) # Draws facial landmarks for (x, y) in coordsi: cv2.circle(frame, (x, y), 2, (0, 255, 0), -1) # Adds user name to frame if person == 0: string = "Unknown" else: string = "User #" + str(person) cv2.putText(frame, string, (x + 75, y), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA) i += 1 # Streams the modified frame to the socket server encoded, buffer = cv2.imencode('.jpg', frame) soc.send(base64.b64encode(buffer)) except KeyboardInterrupt: self.TASS.lcv.release() break
class RealsenseRead(Thread):
    """ Realsense D415 Reader Class

    Realsense D415 reader functions for EMAR Mini Emergency Assistance Robot.
    """

    def __init__(self):
        """ Initializes the class. """

        super(RealsenseRead, self).__init__()

        self.Helpers = Helpers("Realsense D415 Reader")
        self.colorizer = rs.colorizer()

        # OpenCV fonts and colors
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.black = (0, 0, 0)
        self.green = (0, 255, 0)
        self.white = (255, 255, 255)

        # Starts the Realsense module
        self.Realsense = Realsense()
        # Connects to the Realsense camera
        self.profile = self.Realsense.connect()

        # Starts the socket module
        self.Socket = Socket("Realsense D415 Reader")

        # Sets up the object detection model
        self.Model = Model()

        self.Helpers.logger.info(
            "Realsense D415 Reader Class initialization complete.")

    def run(self):
        """ Runs the module. """

        time1 = 0
        time2 = 0
        fc = 0
        dc = 0
        fps = ""
        dfps = ""

        # Starts the socket server
        soc = self.Socket.connect(
            self.Helpers.confs["EMAR"]["ip"],
            self.Helpers.confs["Realsense"]["socket"]["port"])

        try:
            while True:
                t1 = time.perf_counter()

                # Waits for the depth and color frames
                frames = self.Realsense.pipeline.wait_for_frames()

                # Gets the depth and color frames
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()

                # Gets the infrared frames
                #ir1_frame = frames.get_infrared_frame(1)
                #ir2_frame = frames.get_infrared_frame(2)

                if not depth_frame or not color_frame:
                #if not ir1_frame or not ir2_frame:
                    self.Helpers.logger.info(
                        "Realsense D415 streams not ready, continuing.")
                    continue

                # Converts the images to numpy arrays
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())
                #ir1_image = np.asanyarray(self.colorizer.colorize(ir1_frame).get_data())
                #ir2_image = np.asanyarray(self.colorizer.colorize(ir2_frame).get_data())
                colorized_depth = np.asanyarray(
                    self.colorizer.colorize(depth_frame).get_data())

                # Sets the blob and gets the detections from the forward pass
                self.Model.setBlob(color_image)
                detections = self.Model.forwardPass()

                # Gets the width and height of the frame
                width, height = self.Model.getDims(color_image)

                # Writes the header to the color frame
                cv2.putText(color_image, "EMAR Mini Color Stream", (30, 50),
                            self.font, 0.7, self.black, 2, cv2.LINE_AA)
                # Writes the date to the color frame
                cv2.putText(color_image, str(datetime.now()), (30, 80),
                            self.font, 0.5, self.black, 2, cv2.LINE_AA)

                # Writes the header to the depth frame
                cv2.putText(colorized_depth, "EMAR Mini Depth Stream", (30, 50),
                            self.font, 0.7, self.white, 2, cv2.LINE_AA)
                # Writes the date to the depth frame
                cv2.putText(colorized_depth, str(datetime.now()), (30, 80),
                            self.font, 0.5, self.white, 2, cv2.LINE_AA)

                for i in range(detections.shape[2]):

                    # Detection FPS
                    if i == 0:
                        dc += 1

                    # Gets and checks the confidence of the detection
                    confidence = detections[0, 0, i, 2]
                    if confidence <= self.Helpers.confs["MobileNetSSD"]["threshold"]:
                        continue

                    # Gets the class of the detection
                    class_id = int(detections[0, 0, i, 1])

                    # Gets the bounding box of the detection
                    box_blx = int(detections[0, 0, i, 3] * width)
                    box_bly = int(detections[0, 0, i, 4] * height)
                    box_trx = int(detections[0, 0, i, 5] * width)
                    box_try = int(detections[0, 0, i, 6] * height)

                    # Gets the distance in meters at the center of the
                    # bounding box from the depth frame
                    meters = depth_frame.as_depth_frame().get_distance(
                        box_blx + int((box_trx - box_blx) / 2),
                        box_bly + int((box_try - box_bly) / 2))

                    # Prepares the label text
                    if meters != 0.00:
                        label_text = (
                            self.Helpers.confs["MobileNetSSD"]["classes"][int(class_id)]
                            + " (" + str(confidence) + "%)"
                            + " {:.2f}".format(meters) + "m")
                    else:
                        label_text = (
                            self.Helpers.confs["MobileNetSSD"]["classes"][int(class_id)]
                            + " (" + str(confidence) + "%)")

                    # Draws the bounding box
                    cv2.rectangle(color_image, (box_blx, box_bly),
                                  (box_trx, box_try), self.green, 1)

                    # Positions and writes the label
                    label_size = cv2.getTextSize(label_text, self.font, 0.5, 1)[0]
                    label_left = box_blx
                    label_top = box_bly - label_size[1]
                    if label_top < 1:
                        label_top = 1
                    label_bottom = label_top + label_size[1]
                    cv2.putText(color_image, label_text,
                                (label_left, label_bottom), self.font, 0.5,
                                self.green, 1)

                # Writes the FPS and Detection FPS
                cv2.putText(color_image, fps, (width - 170, 50), self.font,
                            0.5, self.black, 1, cv2.LINE_AA)
                cv2.putText(color_image, dfps, (width - 170, 65), self.font,
                            0.5, self.black, 1, cv2.LINE_AA)

                # Combines the color_image and colorized_depth frames
                frame = np.hstack((color_image, colorized_depth))
                # Combines the ir1_image and ir2_image frames
                #frame = np.hstack((ir1_image, ir2_image))

                # Streams the modified frame to the socket server
                encoded, buffer = cv2.imencode('.jpg', frame)
                soc.send(base64.b64encode(buffer))

                # FPS calculation
                fc += 1
                if fc >= 15:
                    fps = "Stream: {:.1f} FPS".format(time1 / 15)
                    dfps = "Detection: {:.1f} FPS".format(dc / time2)
                    fc = 0
                    dc = 0
                    time1 = 0
                    time2 = 0

                t2 = time.perf_counter()
                elapsedTime = t2 - t1
                time1 += 1 / elapsedTime
                time2 += elapsedTime
        finally:
            # Stops streaming
            self.Realsense.pipeline.stop()
class RealsenseRead(Thread):
    """ Realsense D415 Reader Class

    Realsense D415 reader functions for EMAR Mini Emergency Assistance Robot.
    """

    def __init__(self):
        """ Initializes the class. """

        super(RealsenseRead, self).__init__()

        self.Helpers = Helpers("Realsense D415 Reader")
        self.colorizer = rs.colorizer()

        # OpenCV fonts and colors
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.black = (0, 0, 0)
        self.green = (0, 255, 0)

        # Starts the Realsense module
        self.Realsense = Realsense()
        # Connects to the Realsense camera
        self.profile = self.Realsense.connect()

        # Starts the socket module
        self.Socket = Socket("Realsense D415 Reader")

        # Sets up the object detection model
        self.Model = Model()

        self.Helpers.logger.info(
            "Realsense D415 Reader Class initialization complete.")

    def run(self):
        """ Runs the module. """

        # Accumulators for the FPS calculation, kept separate from the
        # perf_counter samples t1/t2 so they are not overwritten each loop
        time1 = 0
        time2 = 0
        fc = 0
        dc = 0
        fps = ""
        dfps = ""

        # Starts the socket server
        soc = self.Socket.connect(
            self.Helpers.confs["EMAR"]["ip"],
            self.Helpers.confs["Realsense"]["socket"]["port"])

        try:
            while True:
                t1 = time.perf_counter()

                # Waits for a coherent pair of frames: depth and color
                frames = self.Realsense.pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                color_frame = frames.get_color_frame()
                #ir1_frame = frames.get_infrared_frame(1)
                #ir2_frame = frames.get_infrared_frame(2)

                if not depth_frame or not color_frame:
                #if not ir1_frame or not ir2_frame:
                    self.Helpers.logger.info(
                        "Realsense D415 streams not ready, continuing.")
                    continue

                # Converts the images to numpy arrays
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())
                #ir1_image = np.asanyarray(self.colorizer.colorize(ir1_frame).get_data())
                #ir2_image = np.asanyarray(self.colorizer.colorize(ir2_frame).get_data())
                colorized_depth = np.asanyarray(
                    self.colorizer.colorize(depth_frame).get_data())

                # Gets the frame dimensions and runs the forward pass
                width, height = self.Model.getDims(color_image)
                self.Model.setBlob(color_image)
                detections = self.Model.forwardPass()

                # Writes the header to the color frame
                cv2.putText(color_image, "EMAR Mini Color Stream", (30, 50),
                            self.font, 0.7, self.black, 2, cv2.LINE_AA)
                # Writes the date to the color frame
                cv2.putText(color_image, str(datetime.now()), (30, 80),
                            self.font, 0.5, self.black, 2, cv2.LINE_AA)

                # Writes the header to the depth frame
                cv2.putText(colorized_depth, "EMAR Mini Depth Stream", (30, 50),
                            self.font, 0.7, self.black, 2, cv2.LINE_AA)
                # Writes the date to the depth frame
                cv2.putText(colorized_depth, str(datetime.now()), (30, 80),
                            self.font, 0.5, self.black, 2, cv2.LINE_AA)

                for i in range(100):

                    # Each detection occupies 7 consecutive floats in the flat
                    # output: [image_id, class_id, confidence, x1, y1, x2, y2]
                    overlay = detections[i * 7:i * 7 + 7]

                    # Detection FPS
                    if i == 0:
                        dc += 1

                    class_id = overlay[1]
                    percentage = int(overlay[2] * 100)
                    if percentage <= self.Helpers.confs["MobileNetSSD"]["threshold"]:
                        continue

                    # Calculates the box coordinates
                    box_left = int(overlay[3] * width)
                    box_top = int(overlay[4] * height)
                    box_right = int(overlay[5] * width)
                    box_bottom = int(overlay[6] * height)

                    # Gets the distance in meters at the center of the
                    # bounding box from the depth frame
                    meters = depth_frame.as_depth_frame().get_distance(
                        box_left + int((box_right - box_left) / 2),
                        box_top + int((box_bottom - box_top) / 2))

                    # Prepares the label text
                    if meters != 0.00:
                        label_text = (
                            self.Helpers.confs["MobileNetSSD"]["classes"][int(class_id)]
                            + " (" + str(percentage) + "%)"
                            + " {:.2f}".format(meters) + "m")
                    else:
                        label_text = (
                            self.Helpers.confs["MobileNetSSD"]["classes"][int(class_id)]
                            + " (" + str(percentage) + "%)")

                    # Draws the bounding box
                    cv2.rectangle(color_image, (box_left, box_top),
                                  (box_right, box_bottom), self.green, 1)

                    # Positions and writes the label
                    label_size = cv2.getTextSize(label_text, self.font, 0.5, 1)[0]
                    label_left = box_left
                    label_top = box_top - label_size[1]
                    if label_top < 1:
                        label_top = 1
                    label_bottom = label_top + label_size[1]
                    cv2.putText(color_image, label_text,
                                (label_left, label_bottom), self.font, 0.5,
                                self.green, 1)

                # Writes the FPS and Detection FPS
                cv2.putText(color_image, fps, (width - 170, 20), self.font,
                            0.5, self.black, 1, cv2.LINE_AA)
                cv2.putText(color_image, dfps, (width - 170, 35), self.font,
                            0.5, self.black, 1, cv2.LINE_AA)

                # Combines the color_image and colorized_depth frames
                frame = np.hstack((color_image, colorized_depth))
                # Combines the ir1_image and ir2_image frames
                #frame = np.hstack((ir1_image, ir2_image))

                # Streams the modified frame to the socket server
                encoded, buffer = cv2.imencode('.jpg', frame)
                soc.send(base64.b64encode(buffer))

                # FPS calculation
                fc += 1
                if fc >= 15:
                    fps = "Stream: {:.1f} FPS".format(time1 / 15)
                    dfps = "Detection: {:.1f} FPS".format(dc / time2)
                    fc = 0
                    dc = 0
                    time1 = 0
                    time2 = 0

                t2 = time.perf_counter()
                elapsedTime = t2 - t1
                time1 += 1 / elapsedTime
                time2 += elapsedTime
        finally:
            # Stops streaming
            self.Realsense.pipeline.stop()
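# The two readers above parse different detection layouts: the first walks an
# OpenCV-DNN-style (1, 1, N, 7) array, the second a flat buffer of N * 7
# floats; both encode each detection as
# [image_id, class_id, confidence, x1, y1, x2, y2]. A hypothetical helper
# showing that the two layouts are equivalent once reshaped:

import numpy as np

def iter_detections(detections):
    """ Yields (class_id, confidence, x1, y1, x2, y2) from either layout. """
    for det in np.asarray(detections).reshape(-1, 7):
        yield int(det[1]), float(det[2]), det[3], det[4], det[5], det[6]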