def schedule_job(self, job_config_name: str, initial=False):
    """Advance the stored schedule for one job on this series.

    Looks up the job's config and current schedule, computes the next
    due value (timestamp- or datapoint-count-based), persists it, and
    re-indexes this series' schedules.

    Args:
        job_config_name (str): name of the job config to (re)schedule
        initial (bool): when True, seed the schedule at "now" instead of
            advancing it past the current threshold

    Returns:
        False when no config exists for the job; None otherwise.
    """
    cfg = self.config.get_config_for_job(job_config_name)
    if cfg is None:
        return False

    schedule = self.state.get_job_schedule(job_config_name)
    if schedule is None:
        # No schedule persisted yet; start from zero with the type
        # taken from the job config.
        schedule = {"value": 0, "type": cfg.job_schedule_type}

    next_due = None
    if schedule["type"] == "TS":
        # Timestamp-based: due once the stored timestamp has passed.
        now = int(time.time())
        if initial:
            next_due = now
        elif schedule["value"] <= now:
            next_due = now + cfg.job_schedule
    elif schedule["type"] == "N":
        # Count-based: due once enough datapoints have been collected.
        if initial:
            next_due = self.state.datapoint_count
        elif schedule["value"] <= self.state.datapoint_count:
            next_due = self.state.datapoint_count + cfg.job_schedule

    if next_due is not None:
        schedule['value'] = next_due
        self.state.set_job_schedule(job_config_name, schedule)
    ServerState.index_series_schedules(self)
async def prepare(cls, update_cb=None):
    """Initialise the manager: load persisted series and index schedules.

    Args:
        update_cb: optional callback invoked on series updates
    """
    cls._update_cb = update_cb
    cls._labels_last_update = None
    cls._srm = ResourceManager("series", Series)
    await cls._srm.load()
    # `itter` is the ResourceManager's async iterator (name per project API).
    async for stored_series in cls._srm.itter():
        ServerState.index_series_schedules(stored_series)
async def create_resource(self, resource: dict):
    """Async-generator body of a resource-creation context manager.

    Builds the resource, hands it to the caller (the `async with` body)
    BEFORE persisting, so the caller may mutate it, then stores it and
    registers its rid.
    """
    new_resource = self._resource_class(**resource)
    yield new_resource
    # Persist only after the caller is done mutating the resource.
    await new_resource.store()
    self._resources[new_resource.rid] = None
    if self._resource_type == "series":
        ServerState.index_series_schedules(new_resource)
async def add_static_rule_hits_to_series(cls, series_name, job_config_name,
                                         points):
    """Replace the stored static-rule hit points for a series/job pair.

    Only acts when the series is known to the resource manager; the
    existing output series is dropped first so the insert fully
    replaces previous hits rather than appending to them.

    Args:
        series_name: name of the monitored series
        job_config_name: job config the hits belong to
        points: datapoints to store in the output database
    """
    if cls._srm.rid_exists(series_name):
        # Drop takes a quoted series name; insert takes the bare name
        # (SiriDB query vs insert syntax).
        await drop_series(
            ServerState.get_siridb_output_conn(),
            f'"enodo_{series_name}_static_rules_{job_config_name}"')
        await insert_points(
            ServerState.get_siridb_output_conn(),
            f'enodo_{series_name}_static_rules_{job_config_name}',
            points)
async def stop_server(self):
    """Stop all parts of the server for a clean shutdown

    Order matters here: clients are disconnected and socketio torn
    down first, then the work queue is drained, storage closed, and
    finally all remaining asyncio tasks are cancelled before the loop
    is stopped. A second call while shutdown is in progress escalates
    to a forced shutdown instead of running twice.
    """
    if self._shutdown_trigger:
        # Shutdown already running; escalate to forced shutdown.
        self._force_shutdown = True
        return
    self._shutdown_trigger = True
    logging.info('Stopping Hub...')
    # Stop advertising readiness before tearing anything down.
    ServerState.readiness = False
    if self.sio is not None:
        # Collect sids of clients in the default namespace's catch-all
        # room, if any, and disconnect them individually.
        clients = []
        if '/' in self.sio.manager.rooms and \
                None in self.sio.manager.rooms['/']:
            clients = [sid for sid in self.sio.manager.rooms['/'][None]]
        for sid in clients:
            await self.sio.disconnect(sid)
        # Detach the manager from the server before closing rooms.
        rooms = self.sio.manager.rooms
        self.sio.manager.set_server(None)
        for room in rooms:
            print("closing room: ", room)
            await self.sio.close_room(room)
        # Give in-flight disconnect handling a moment to settle.
        await asyncio.sleep(1)
        del self.sio
        self.sio = None
    ServerState.running = False
    # Let queued work drain before cleanup.
    await self.wait_for_queue()
    logging.info('...Doing clean up')
    await self.clean_up()
    if ServerState.storage is not None:
        await ServerState.storage.close()
    ServerState.stop()
    logging.info('...Stopping all running tasks')
    logging.info('...Going down in 1')
    await asyncio.sleep(1)
    # Cancel every task except the one running this coroutine.
    tasks = [
        task for task in asyncio.all_tasks()
        if task is not asyncio.current_task()
    ]
    for task in tasks:
        try:
            task.cancel()
            await asyncio.wait([task])
        except asyncio.CancelledError as _:
            pass
    # Reap all cancelled tasks, swallowing their exceptions.
    await asyncio.gather(*tasks, return_exceptions=True)
    self.loop.stop()
    print('Bye!')
async def add_anomalies_to_series(cls, series_name, job_config_name,
                                  points):
    """Record detected anomalies for a series: fire an event and store points.

    Raises an ENODO_EVENT_ANOMALY_DETECTED event, drops any previously
    stored anomaly output series for this series/job pair, and inserts
    the new points (if any), so the stored data is fully replaced.

    Args:
        series_name: name of the monitored series
        job_config_name: job config that produced the anomalies
        points: anomaly datapoints to store in the output database
    """
    series = await cls._srm.get_resource(series_name)
    if series is not None:
        # NOTE: the original message used a backslash line-continuation
        # INSIDE the f-string, which embedded the next line's leading
        # indentation (a run of spaces) into the event text. Implicit
        # string concatenation keeps the message single-spaced.
        event = EnodoEvent(
            'Anomaly detected!',
            f'{len(points)} anomalies detected for series {series_name} '
            f'via job {job_config_name}',
            ENODO_EVENT_ANOMALY_DETECTED, series=series)
        await EnodoEventManager.handle_event(event)
        # Drop first so the insert replaces, not appends to, old results.
        await drop_series(
            ServerState.get_siridb_output_conn(),
            f'"enodo_{series_name}_anomalies_{job_config_name}"')
        if points:
            await insert_points(
                ServerState.get_siridb_output_conn(),
                f'enodo_{series_name}_anomalies_{job_config_name}',
                points)
async def get_enodo_readiness(cls, request):
    """Readiness probe for this hub instance.

    Args:
        request (Request): aiohttp request (unused)

    Returns:
        web.Response: 200 "OK" when ready, 503 "SERVICE UNAVAILABLE"
        otherwise.
    """
    if ServerState.get_readiness():
        return web.Response(body="OK\r\n", status=200)
    return web.Response(body="SERVICE UNAVAILABLE\r\n", status=503)
async def add_label(cls, description, name, series_config):
    """Register a new label (backed by a SiriDB group) and notify listeners.

    No-op when a label with this name already exists.
    """
    if name in cls._labels:
        return
    # TODO: Change auto type == "group" to a input value
    # when tags are added
    selector = await query_group_expression_by_name(
        ServerState.get_siridb_data_conn(), name)
    cls._labels[name] = {
        "description": description,
        "name": name,
        "series_config": series_config,
        "type": "group",
        "selector": selector,
    }
    await cls._update_listeners()
async def resp_get_series_anomalies(cls, series_name):
    """Get series anomalies results

    Args:
        series_name (string): name of series

    Returns:
        dict: dict with data, or a 404 json response when the series
        is unknown
    """
    series = await SeriesManager.get_series_read_only(series_name)
    if series is None:
        return web.json_response(data={'data': ''}, status=404)
    anomalies = await query_series_anomalies(
        ServerState.get_siridb_output_conn(), series_name)
    return {'data': anomalies}
async def _add_series(cls, series: dict):
    """Register a new series with the manager.

    Returns:
        False when the series is already registered, None when it does
        not exist in SiriDB, True when it was added successfully.
    """
    name = series.get('name')
    if cls._srm.rid_exists(name):
        return False
    collected = await query_series_datapoint_count(
        ServerState.get_siridb_data_conn(), name)
    if collected is None:
        # A None count means SiriDB has no such series.
        return None
    async with cls._srm.create_resource(series) as new_series:
        new_series.state.datapoint_count = collected
        # Fire-and-forget notifications; scheduled before the resource
        # is persisted on context-manager exit.
        asyncio.ensure_future(
            cls.series_changed(SUBSCRIPTION_CHANGE_TYPE_ADD, name))
        asyncio.ensure_future(
            cls.update_listeners(await cls.get_listener_series_info()))
        return True
async def get_series_anomalies(cls, series_name):
    """Fetch stored anomaly datapoints for a series from the output DB.

    Returns:
        The datapoints for the anomalies series, or None when the
        query yields nothing (or the key is absent in the result).
    """
    key = f'anomalies_{series_name}'
    result = await query_series_data(
        ServerState.get_siridb_output_conn(), key)
    if result is None:
        return None
    return result.get(key)
async def cleanup_series(cls, series_name):
    """Remove all traces of a series: its schedule index entry and every
    enodo output series derived from it in SiriDB.
    """
    # pop() with a default removes the entry when present, no-op otherwise.
    ServerState.job_schedule_index.pop(series_name, None)
    # Drop every output series whose name starts with enodo_<series>.
    await drop_series(
        ServerState.get_siridb_output_conn(),
        f"/enodo_{re.escape(series_name)}.*?.*?$/")