async def wait_for_queue(self, max_wait: float = 60):
    """Wait until the job queues drain, polling once per second.

    Blocks (cooperatively) until both the open and the active job
    queues are empty, a shutdown is forced via ``self._force_shutdown``,
    or ``max_wait`` seconds have elapsed — whichever comes first.

    Args:
        max_wait (float): maximum number of seconds to wait for the
            queues to empty before giving up (default 60, matching the
            previous hard-coded limit).

    Side effects:
        - If the queues did not drain (timeout or forced shutdown),
          all remaining jobs are cancelled via
          ``EnodoJobManager.clear_jobs()``.
        - If the queues drained normally, ``ServerState.work_queue`` is
          set to ``False`` to close the work-queue task.
    """
    def _queues_empty() -> bool:
        # Both the open and the active queue must be fully drained.
        return (EnodoJobManager.get_open_jobs_count() == 0 and
                EnodoJobManager.get_active_jobs_count() == 0)

    logging.info("...Start waiting for queue to be empty")
    start_ts = time()
    current_ts = start_ts
    queues_empty = _queues_empty()
    # Poll once per second; stop early on forced shutdown.
    while (current_ts - start_ts) < max_wait and not queues_empty and \
            not self._force_shutdown:
        current_ts = time()
        queues_empty = _queues_empty()
        await asyncio.sleep(1)

    if not queues_empty:
        if self._force_shutdown:
            logging.info("...Queue is not empty, but shutdown is forced, "
                         "canceling jobs")
        else:
            logging.info("...Queue is not empty, but hit max time limit, "
                         "canceling jobs")
        # Either way the remaining jobs cannot be completed — cancel them.
        await EnodoJobManager.clear_jobs()
    else:
        logging.info("...Queue is empty, closing task to work queue")
        ServerState.work_queue = False
async def resp_get_enodo_stats(cls):
    """Build the Enodo statistics response.

    Gathers counters from the series, job, client and event managers
    and returns them wrapped in the standard ``{'data': ...}`` envelope.
    """
    stats = {
        "no_series": SeriesManager.get_series_count(),
        "no_ignored_series":
            await SeriesManager.get_ignored_series_count(),
        "no_open_jobs": EnodoJobManager.get_open_jobs_count(),
        "no_active_jobs": EnodoJobManager.get_active_jobs_count(),
        "no_failed_jobs": EnodoJobManager.get_failed_jobs_count(),
        "no_listeners": ClientManager.get_listener_count(),
        "no_workers": ClientManager.get_worker_count(),
        "no_busy_workers": ClientManager.get_busy_worker_count(),
        "no_output_streams": len(EnodoEventManager.outputs),
    }
    return {'data': stats}
async def _check_for_jobs(self, series, series_name):
    """Create or schedule jobs for a series when appropriate.

    Skips the series entirely while it has failed jobs. Runs the base
    analysis first if it has never run; once the base analysis is done,
    creates jobs that are due and schedules any job configs that have
    no schedule yet.

    Args:
        series (Series): Series instance
        series_name (string): name of series
    """
    # A series with failed jobs is left alone until those are resolved.
    if len(EnodoJobManager.get_failed_jobs_for_series(series_name)) > 0:
        return

    # Kick off the base analysis if it has never been run.
    if series.base_analysis_status() == JOB_STATUS_NONE:
        base_job = series.base_analysis_job
        # The job's module must be known to a connected client/worker.
        if ClientManager.get_module(base_job.module) is None:
            async with SeriesManager.get_series(series_name) as s:
                s.state.set_job_check_status(base_job.config_name,
                                             "Unknown module")
            return
        await EnodoJobManager.create_job(base_job.config_name, series_name)

    # Everything below requires a completed base analysis.
    if series.base_analysis_status() != JOB_STATUS_DONE:
        return

    # Walk the configured jobs: create the ones that are due, and
    # schedule the ones that have never been scheduled.
    schedules = series.state.get_all_job_schedules()
    for cfg_name in series.config.job_config:
        if cfg_name in schedules:
            if series.is_job_due(cfg_name):
                await EnodoJobManager.create_job(cfg_name, series_name)
        else:
            # Job has not been scheduled yet, let's add it
            async with SeriesManager.get_series(series_name) as s:
                s.schedule_job(cfg_name, initial=True)
async def start_up(self):
    """Prepare all connections, managers and background tasks.

    Runs the full server bring-up sequence in order: signal handlers,
    storage + server state, internal security token, backend socket
    server, REST/websocket handlers, resource managers, the internal
    managers (series/clients/jobs/events), the scheduled background
    tasks, and finally opens the backend socket and flips the readiness
    flag. The statement order matters — later steps depend on the state
    set up by earlier ones.
    """
    self.loop = asyncio.get_running_loop()
    # Trigger a graceful stop_server() on SIGINT/SIGTERM.
    for signame in ('SIGINT', 'SIGTERM'):
        self.loop.add_signal_handler(
            getattr(signal, signame),
            lambda: asyncio.ensure_future(self.stop_server()))

    # Setup server state object; storage backend is chosen by config.
    if Config.storage_type == "thingsdb":
        storage = ThingsDBStorage()
    else:
        storage = DiskStorage(Config.base_dir)
    await storage.startup()
    await ServerState.async_setup(sio=self.sio, storage=storage)

    # Setup internal security token for authenticating
    # backend socket connections
    logging.info('Setting up internal communications token...')
    Config.setup_internal_security_token()

    # Setup backend socket connection; the dict maps incoming message
    # types to their handler callables.
    self.backend_socket = SocketServer(
        Config.socket_server_host, Config.socket_server_port,
        Config.internal_security_token,
        {LISTENER_NEW_SERIES_POINTS: receive_new_series_points,
         WORKER_JOB_RESULT: EnodoJobManager.receive_job_result,
         WORKER_UPDATE_BUSY: receive_worker_status_update,
         WORKER_REFUSED: received_worker_refused,
         WORKER_JOB_CANCELLED: EnodoJobManager.receive_worker_cancelled_job})

    # Setup REST API handlers
    ApiHandlers.prepare()

    # Setup websocket handlers and routes (only when socket.io is used)
    if self.sio is not None:
        SocketIoHandler.prepare(self.sio)
        SocketIoRouter(self.sio)

    # Resource managers persist/load the server's stored entities.
    ServerState.series_rm = ResourceManager('series', Series)
    await ServerState.series_rm.load()
    ServerState.job_config_template_rm = ResourceManager(
        'job_config_templates', SeriesJobConfigTemplate)
    await ServerState.job_config_template_rm.load()
    ServerState.series_config_template_rm = ResourceManager(
        'series_config_templates', SeriesConfigTemplate, cache_only=True)
    await ServerState.series_config_template_rm.load()

    # Setup internal managers for handling and managing series,
    # clients, jobs, events and modules
    await SeriesManager.prepare(
        SocketIoHandler.internal_updates_series_subscribers)
    await ClientManager.setup(SeriesManager)
    EnodoJobManager.setup(
        SocketIoHandler.internal_updates_queue_subscribers)
    await EnodoJobManager.load_from_disk()
    await EnodoEventManager.async_setup()
    await ClientManager.load_from_disk()

    # Spawn the recurring background tasks on the shared scheduler.
    scheduler = ServerState.scheduler
    self._watch_series_task = await scheduler.spawn(self.watch_series())
    self._check_jobs_task = await scheduler.spawn(
        EnodoJobManager.check_for_jobs())
    self._connection_management_task = await scheduler.spawn(
        self._manage_connections())
    self._watch_tasks_task = await scheduler.spawn(self.watch_tasks())
    self._cleanup_resource_managers = await scheduler.spawn(
        self.clean_resource_manager())

    # Open backend socket connection, then mark the server ready.
    await self.backend_socket.create()
    ServerState.readiness = True
def resp_get_failed_jobs(cls):
    """Return all failed jobs, serialized to dicts, in a response envelope."""
    failed = EnodoJobManager.get_failed_jobs()
    return {'data': list(map(EnodoJob.to_dict, failed))}
def resp_get_active_jobs(cls):
    """Return the currently active jobs in a response envelope."""
    active_jobs = EnodoJobManager.get_active_jobs()
    return {'data': active_jobs}
def resp_get_open_jobs(cls):
    """Return the open job queue in a response envelope."""
    open_queue = EnodoJobManager.get_open_queue()
    return {'data': open_queue}