def start_cameras(config_file):
    """Bootstrap and run the whole camera-manager lifecycle, blocking until shutdown.

    Reads the CamAi configuration from *config_file*, creates per-camera queues,
    detection objects and camera workers, launches the notification worker and the
    object-detector thread(s)/process(es), then sits in a signal-driven wait loop.
    On SIGQUIT/SIGTERM/Ctrl-C it performs an ordered teardown: quit messages on the
    queues, then joins cameras, detectors and the notifier.

    :param config_file: Path to the CamAi configuration file. ``None`` or a
        non-existent path causes an early return after printing a message.
    :returns: ``None`` in all cases (early-exit paths and normal shutdown alike).

    NOTE(review): several module-level names are used here (``os``, ``logger``,
    ``threading``, ``Process``, ``time``, ``DEBUG``, ``objgraph``, ``CamAiMessage``,
    ``CamAi``, ``install_sighandler``, ``Quit_Wait_Time`` and the
    ``signal_received_*`` globals) — they must be provided by this module's
    top-level imports/definitions, which are outside this view.
    """
    name = "Camera Manager: "
    camera_names = []
    # Parallel per-camera queue lists, all indexed by camera_handle.
    camera_detect_queues = []
    camera_response_queues = []
    camera_oob_queues = []
    camera_notification_queues = []  # Overkill to have one per cam, rethink
    aicameras = {}
    children = []

    # --- Config validation: bail out early on missing/nonexistent file -------
    if config_file is None:
        print("Please specify a configuration file write to")
        return
    if os.path.isfile(config_file) is False:
        print("Configuration file: {} doesn't exist, exiting.".format(
            config_file))
        return

    from .CamAiNotification import CamAiNotification

    # NOTE(review): the local is named ``self`` although this is a free
    # function, not a method — it is just the CamAi manager instance.
    self = CamAi(config_file=config_file)
    if self.config is None:
        logger.error("Initialization of CamAi configuration failed")
        logger.error("Incorrect or missing configuration file specified?")
        return

    manager_options = self.config.get_manager_options()

    # Bump our priority just above others, but reader threads should get
    # most of the scheduling.
    from .CamAiUtils import increase_my_priority
    increase_my_priority(5)

    # --- Per-camera setup: queues, optional detection object, camera worker --
    detector_needed = False
    camera_index = 0
    cameraconfigs = self.config.get_cameras()
    for cameraconfig in cameraconfigs:
        # These should go into the loop as well, but URL is mandatory
        cameraname = cameraconfig['name']
        camera_names.append(cameraname)
        camera_handle = camera_index
        # Need to abstract this list and not rely on indexes, have a cammgr
        # and camera class instead of this mess.
        camera_index += 1
        camera_queues = self.create_camera_queues(cameraconfig)

        from .CamAiConfig import CamAiCameraMode
        if cameraconfig['mode'] != CamAiCameraMode.record_only.name:
            # At least one camera wants detection -> launch detectors later.
            detector_needed = True
            # TODO: not all queues are initialized till the end of this loop.
            # AiCameras need detection config for image-resize pipelining;
            # need a cleaner way to do this and get rid of the catch-22
            # dependency (CamAiDetection is handed the queue *lists*, which
            # are still being appended to by later iterations).
            from .CamAiDetection import CamAiDetection
            detection = CamAiDetection(
                name=cameraname,
                detect_queues=camera_detect_queues,
                response_queues=camera_response_queues,
                pipeline_image_resize=manager_options['pipelineresize'],
                singledetectorqueue=manager_options['singledetectorqueue'],
                multiprocessing=manager_options['multiprocessing_detector'])
        else:
            logger.warning(
                f"{name}: {cameraname} is in record only mode, no detection object needed"
            )
            detection = None

        from .CamAiCamera import CamAiCamera
        aicamera = CamAiCamera.from_dict(camera_handle=camera_handle,
                                         cameraconfig=cameraconfig,
                                         managerconfig=manager_options,
                                         camera_queues=camera_queues,
                                         detection=detection)
        # TODO: Manager should just use aicamera objects instead of
        # discrete arrays for each queue type.
        camera_detect_queues.append(camera_queues['detect_queue'])
        camera_response_queues.append(camera_queues['response_queue'])
        camera_oob_queues.append(camera_queues['oob_queue'])
        camera_notification_queues.append(camera_queues['notification_queue'])
        aicameras[camera_handle] = aicamera
        # TODO: Redundant with aicameras, need to refactor.
        children.append(aicamera)

        logger.warning("{}: Start camera".format(cameraname))
        aicamera.start()

    # --- Notification worker --------------------------------------------------
    # Start the notification process/thread; face detection needs another
    # tensorflow instance, so a process is required for now.
    # Doing this before starting detectors should ensure GPU memory gets
    # allocated to facedetection before the detectors' greedy allocation for
    # maskrcnn takes up everything available.
    # TODO: need to evaluate how to consolidate this into detector.
    # TODO: Looks like the intermittent notifier process hanging at start
    # is tied to a combination of multithreading/processing/logging in the
    # same process at the same time. Reducing logging around notification
    # process creation 'could' reduce the occurrence.
    notification = CamAiNotification(self.config, camera_notification_queues)
    #notification = CamAiNotification.CamAiNotification(self.config, camera_notification_queues)
    notification.start()

    # --- Detector workers -----------------------------------------------------
    if detector_needed is True:
        # This stuff should move to CamAiDetection class's start method.
        detectors = []
        # In multiprocessing mode only a single detector process is launched;
        # in threading mode the configured count is honored.
        if manager_options['multiprocessing_detector'] is False:
            num_detectors = manager_options['numdetectors']
        else:
            num_detectors = 1
        # for num in range(Number_Of_Detectors):
        # TODO: We are using threads directly instead of detector classes.
        # Need to finish this last bit of conversion to classes.
        from .CamAiDetection import object_detector_server
        # NOTE(review): ``detection`` here is whatever the *last* loop
        # iteration assigned (possibly None if the last camera was
        # record-only) — confirm this is the intended detection object for
        # the shared detector server.
        for num in range(num_detectors):
            if (manager_options['multiprocessing_detector'] is False):
                detector = threading.Thread(
                    target=object_detector_server,
                    # detector =
                    # ProfiledThread(target=object_detector_server,
                    args=(detection, camera_names),
                    name=("detector" + "_" + str(num)))
                # Cooperative run flag polled by the server loop — presumably;
                # cleared at shutdown below. TODO confirm against
                # object_detector_server.
                detector.do_detect = True
                detectors.append(detector)
                logger.warning("{}: Starting detector thread number {}".format(
                    name, num))
                detector.start()
            else:
                detector = Process(
                    target=object_detector_server,
                    # detector =
                    # ProfiledProcess(target=object_detector_server,
                    args=(detection, camera_names),
                    name=("detector" + "_" + str(num)))
                # NOTE(review): unlike the thread branch, ``do_detect`` is not
                # set here — and attributes set on a Process object after
                # start() would not be visible inside the child anyway.
                detectors.append(detector)
                logger.warning(
                    "{}: Starting detector process number {}".format(
                        name, num))
                detector.start()
    else:
        logger.warning(
            f"{name}: All cameras in record only mode, will not launch detector instances"
        )

    # --- Main wait loop: poll signal flags set by the installed handlers ------
    logger.debug("{}: Installing Signal Handlers".format(name))
    install_sighandler()

    waitcount = 0
    global signal_received_SIGHUP
    global signal_received_SIGQUIT
    global signal_received_SIGTERM
    signal_received_SIGHUP = False
    signal_received_SIGQUIT = False
    signal_received_SIGTERM = False
    while True:
        try:
            time.sleep(5)
            if DEBUG is True:
                # Debug memory leaks: dump type counts every 5 minutes
                # (120 iterations x 5 s sleep = 600 s).
                waitcount += 1
                if waitcount % 120 == 0:
                    objgraph.show_most_common_types(limit=20)
            # Handle signals here
            if signal_received_SIGHUP is True:
                # TODO: Should reread config files, tell writer to rotate
                # logs etc.
                logger.warning(
                    "{}: TODO: SIGHUP is not yet handled".format(name))
                signal_received_SIGHUP = False
                pass
            if signal_received_SIGQUIT is True:
                logger.warning("{}: SIGQUIT received, exiting".format(name))
                break
            if signal_received_SIGTERM is True:
                logger.warning("{}: SIGTERM received, exiting".format(name))
                break
        except KeyboardInterrupt:
            # Ctrl-C exits the wait loop and proceeds to the same ordered
            # teardown as SIGQUIT/SIGTERM.
            break

    # --- Ordered teardown of all child threads/processes ----------------------
    quitmessage = CamAiMessage.CamAiQuitMsg()

    # Let Observers (camera workers) know via their out-of-band queues.
    for index, aicamera in enumerate(children):
        logger.warning("{}: sending quit message on oob queue for {}".format(
            name, aicamera.name))
        camera_oob_queues[index].put(quitmessage, False)
        # Tell threads/processes to not wait for any more queue items.
        aicamera.stop()
        #camera_response_queues[index].put(quitmessage, False)
        #aicamera.join(10)

    # Let Detectors know via the detect queues.
    for camera_detect_queue in camera_detect_queues:
        logger.warning("{}: Sending Quit to detect queues".format(name))
        camera_detect_queue.put(quitmessage)
        # Detectors should in turn let observers know via response queue if
        # they are in the middle of processing — or should they even do this?

    # Wait on observers; stop() is called a second time here before join —
    # presumably idempotent. TODO confirm against CamAiCamera.stop.
    for index, aicamera in enumerate(children):
        logger.warning("{}: Waiting on Camera {} to return".format(
            name, aicamera.name))
        aicamera.stop()
        aicamera.join(10)

    if detector_needed is True:
        for detector in detectors:
            # Clear the cooperative run flag (effective for threads only;
            # see the NOTE at Process creation above), then join with a
            # 10 s timeout.
            detector.do_detect = False
            # TODO: get rid of this when fully moved to detection class usage
            #detector.stop()
            detector.join(10)
    else:
        logger.warning(
            f"{name}: All cameras in record only mode, no detector instances to stop"
        )

    # Notifier: observers should already have asked corresponding notifiers
    # to quit via their respective notification_queues.
    notification.stop()
    notification.join(10)

    if DEBUG is True:
        objgraph.show_most_common_types(limit=30)

    os.sync()
    #cv.destroyAllWindows()
    # Grace period before returning, with a once-per-second countdown log.
    logger.warning("{}: Bye, cleaning up in {} seconds".format(
        name, Quit_Wait_Time))
    for t in range(Quit_Wait_Time):
        logger.warning("{}: {} seconds left".format(name, (Quit_Wait_Time - t)))
        time.sleep(1)
    os.sync()  # Flush whatever buffers we can till the end