def init_db_interface(self, daemon_mode, run_mode):
    ''' Setup the underlying data store connection

    @param daemon_mode: when True, wait (up to 180 seconds) for the database
        to accept connections, since during IPL startup order is not guaranteed
    @param run_mode: run mode used to select the active plugin configuration
    @raise TealError: if the database cannot be reached within the timeout
    '''
    cf_reg = registry.get_service(SERVICE_CONFIGURATION)
    # load_plugins(singleton=True) yields (class, name, section) tuples;
    # instantiate the single configured data store with its config options
    for data_store in self.load_plugins(CONFIG_DB_INTERFACE, run_mode, singleton=True):
        registry.register_service(SERVICE_DB_INTERFACE,
                                  data_store[0](dict(cf_reg.items(data_store[2]))))
    if daemon_mode:
        # Make sure the DB is up and running before we continue, since this might be
        # being invoked during IPL and order of startup is not guaranteed
        timeout = 180
        db_exception = None
        dbi = registry.get_service(SERVICE_DB_INTERFACE)
        while timeout > 0:
            try:
                cnxn = dbi.get_connection()
                cnxn.close()
                break
            # BUGFIX (portability): 'except Exception, e' is Python-2-only
            # syntax; the 'as' form works on Python 2.6+ and Python 3
            except Exception as e:
                db_exception = e
                time.sleep(3)
                timeout -= 3
        if timeout <= 0:
            # Include the last connection failure for diagnosis
            raise TealError("Cannot connect to database: {0}".format(db_exception))
def init_cfg_service(self, config_file):
    """ Initialize the configuration service

    @param config_file: a configuration file, a directory containing *.conf
        files, or None to use the default location under TEAL_CONF_DIR
    @return: an audit string describing how the configuration was resolved
    @raise ConfigurationError: if the specification yields no config files
    """
    conf_str = ''
    # Go get the configuration files from the default location
    if config_file is None:
        config_file = os.path.join(registry.get_service(TEAL_CONF_DIR),'teal')
        conf_str += 'None -> '
    # Need to create the list of files to pass to the configuration service
    # so determine if this is a file or directory to recover the proper set
    if os.path.isfile(config_file):
        conf_files = [config_file]
        conf_str += 'File -> {0}'.format(repr(config_file))
    elif os.path.isdir(config_file):
        # Find all the configuration files in the specified directory
        conf_qry = os.path.join(config_file,'*.conf')
        conf_files = glob.glob(conf_qry)
        # BUGFIX: append to conf_str (was '=', which discarded the
        # 'None -> ' marker when the default directory was used)
        conf_str += 'Dir -> {0}'.format(repr(config_file))
    else:
        conf_files = []
    if not conf_files:
        raise ConfigurationError('Configuration file/directory specification of \'{0}\' resulted in no configuration files'.format(config_file))
    registry.register_service(SERVICE_CONFIGURATION, Configuration(conf_files))
    return conf_str
def init_temp_log_service(self, msg_level, extra_log_id):
    """ Initialize the temporary logging service to record logs until know where to log to
        This is done by using a Memory handler temporarily
    """
    # Create and register the logger; records are buffered in memory until
    # the real destination handler is attached later
    logging.setLoggerClass(TealLogger)
    logger = logging.getLogger('tealLogger')
    hdlr = logging.handlers.MemoryHandler(100, logging.NOTSET, target=None)
    # Build the optional per-instance prefix: at most the first four
    # characters of extra_log_id, stripped, followed by ':'
    prefix = ''
    if len(extra_log_id) != 0:
        prefix = extra_log_id[:4].strip() + ':'
    log_format = "%(asctime)-15s [%(process)d:%(thread)d] {0}%(module)s - %(levelname)s: %(message)s".format(prefix)
    hdlr.setFormatter(logging.Formatter(log_format))
    logger.addHandler(hdlr)
    # Map the symbolic message levels onto the logging module's levels
    levels = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warning': logging.WARNING,
              'error': logging.ERROR,
              'critical': logging.CRITICAL}
    # Unknown level strings fall back to NOTSET (log everything)
    logger.setLevel(levels.get(msg_level, logging.NOTSET))
    registry.register_service(SERVICE_LOGGER, logger)
    registry.register_service(SERVICE_MSG_LEVEL, msg_level)
def init_run_mode(self, run_mode):
    ''' Validate the run mode and save it in the registry '''
    # Only the two supported modes are accepted
    if run_mode not in (RUN_MODE_REALTIME, RUN_MODE_HISTORIC):
        raise ConfigurationError('Unrecognized run mode specified: {0}'.format(run_mode))
    # Save in registry
    registry.register_service(SERVICE_RUN_MODE, run_mode)
def init_non_configurable_environment(self):
    ''' Set the environment variables that cannot be changed as part of the configuration files '''
    # Currently only the location of the configuration file can't be set;
    # the environment variable wins, otherwise fall back to /etc
    default_conf_dir = os.path.join(os.sep, 'etc')
    abs_conf_dir = os.path.abspath(os.environ.get(TEAL_CONF_DIR, default_conf_dir))
    # Verify the directory exists and is usable before publishing it
    self.test_dir(abs_conf_dir)
    registry.register_service(TEAL_CONF_DIR, abs_conf_dir)
def create_temp_logger(self, msg_level):
    ''' Create a temporary logger if one isn't available '''
    # Guard clause: a logger is already registered, nothing to do
    if get_logger() is not None:
        return
    # Create and register the logger
    logging.setLoggerClass(TealLogger)
    logger = logging.getLogger('tealLogger')
    if msg_level is None:
        # basically throw away logs
        hdlr = logging.handlers.MemoryHandler(100, logging.NOTSET, target=None)
    else:
        # send logs to stdout
        hdlr = logging.StreamHandler(sys.stdout)
    fmt = "%(asctime)-15s [%(process)d:%(thread)d] UT:%(module)s - %(levelname)s: %(message)s"
    hdlr.setFormatter(logging.Formatter(fmt))
    logger.addHandler(hdlr)
    # Translate the symbolic level to a logging level; unknown -> NOTSET
    level_map = {'debug': logging.DEBUG,
                 'info': logging.INFO,
                 'warning': logging.WARNING,
                 'error': logging.ERROR,
                 'critical': logging.CRITICAL}
    logger.setLevel(level_map.get(msg_level, logging.NOTSET))
    register_service(SERVICE_LOGGER, logger)
    return
def get_generator(self, config_dict):
    ''' Return the appropriate SQL generator based on the configuration information
        retrieved from the bg.properties

    Resolves the properties file via a priority chain, registers the RAS
    filter and config service, points the db_interface module's table-name
    globals at the configured schema, and returns a DB2 SQL generator.

    @param config_dict: caller options; only 'bgproperties' is read here
    @return: SQLGeneratorDB2 configured with DSN (and user/password when given)
    '''
    # 1) see if person calling has it specified
    properties_file = config_dict.get('bgproperties', None)
    # 2) go to env variable PROPERTIES_FILE
    if not properties_file:
        properties_file = os.environ.get('PROPERTIES_FILE',None)
    # ... or the alternate env variable spelling
    if not properties_file:
        properties_file = os.environ.get('BG_PROPERTIES_FILE',None)
    # 3) then look in /bgsys/local/etc
    if not properties_file:
        properties_file = BG_PROPERTIES_FILE
    cfg = ConfigParser.ConfigParser()
    # BgqPropertiesFile presumably adapts the properties format for
    # ConfigParser.readfp -- NOTE(review): confirm against its definition
    cfg.readfp(BgqPropertiesFile(properties_file))
    # get ras filter info; fall back to the shipped default filter file
    try:
        rasFilter = cfg.get('ras','filter')
    except ConfigParser.NoOptionError:
        rasFilter = '/bgsys/drivers/ppcfloor/ras/etc/ras_environment_filter.xml'
    registry.get_logger().debug('RAS Environment filter file: ' + rasFilter)
    registry.register_service('BGQ_RAS_FILTER', rasFilter)
    config_service = ConfigService()
    registry.register_service('BGQ_CONFIG_SERVICE', config_service)
    # get database info
    db = cfg.get('database','name')
    # Schema prefix is optional; empty string means unqualified table names
    try:
        usr_schema = cfg.get('database','schema_name') + '.'
    except ConfigParser.NoOptionError:
        usr_schema = ''
    # Credentials are optional: missing user/password means a
    # password-less (local) connection
    pwless = False
    try:
        usr = cfg.get('database','user')
        pw = cfg.get('database','password')
    except ConfigParser.NoOptionError:
        registry.get_logger().debug('Database user and/or password is not specified.')
        pwless = True
    # Set the table names (module-level globals consumed elsewhere)
    db_interface.TABLE_EVENT_LOG = usr_schema + 'x_tealeventlog'
    db_interface.TABLE_EVENT_LOG_EXT = usr_schema + 'x_tealeventlogext'
    db_interface.TABLE_BG_EVENT_LOG = usr_schema + 'tbgqeventlog'
    db_interface.TABLE_CHECKPOINT = usr_schema + 'x_tealcheckpoint'
    db_interface.TABLE_ALERT_LOG = usr_schema + 'x_tealalertlog'
    db_interface.TABLE_ALERT2ALERT = usr_schema + 'x_tealalert2alert'
    db_interface.TABLE_ALERT2EVENT = usr_schema + 'x_tealalert2event'
    db_interface.TABLE_TEMPLATE = usr_schema + 'x_{0}'
    if pwless:
        return SQLGeneratorDB2({'dsn':db})
    else:
        return SQLGeneratorDB2({'dsn':db, 'uid':usr, 'pwd':pw})
def init_location_service(self, run_mode):
    ''' Load the Location Service based on the XML configuration in the configuration file '''
    cfg_reg = registry.get_service(SERVICE_CONFIGURATION)
    sections = cfg_reg.get_active_sections(CONFIG_LOCATION, run_mode,
                                           name_required=False, singleton=True)
    # Each entry is a (section, name) tuple; only the section is needed here
    for section, _unused_name in sections:
        location_file = cfg_reg.get(section, 'config')
        # The location file path is relative to the TEAL data directory
        data_dir = registry.get_service(TEAL_DATA_DIR)
        teal_loc_file_path = os.path.join(data_dir, location_file)
        registry.register_service(SERVICE_LOCATION, LocationService(teal_loc_file_path))
def init_checkpoint_service(self, commit_checkpoints, restart):
    ''' Initialize the checkpoint service '''
    if commit_checkpoints is None:
        # Default: persist checkpoints only when running in realtime mode
        use_db = registry.get_service(SERVICE_RUN_MODE) == 'realtime'
    else:
        # Caller explicitly chose whether to commit checkpoints
        use_db = commit_checkpoints
    registry.register_service(SERVICE_CHECKPOINT_MGR,
                              CheckpointMgr(use_db=use_db, restart_mode=restart))
def testGetViaEvent(self):
    '''Test getting via an event'''
    # Install fresh event metadata from the test fixture file
    metadata = Metadata(META_TYPE_EVENT, ['metadata_test/event_metadata_05.xml'])
    unregister_service(SERVICE_EVENT_METADATA)
    register_service(SERVICE_EVENT_METADATA, metadata)
    event_id = 'idvalue1'
    event_comp = 'TST'
    event = teal.Event.fromDict({EVENT_ATTR_REC_ID: 1,
                                 EVENT_ATTR_EVENT_ID: event_id,
                                 EVENT_ATTR_SRC_COMP: 'TST',
                                 EVENT_ATTR_TIME_OCCURRED: datetime.now()})
    # Metadata looked up through the event must match the fixture contents
    meta_dict = event.get_metadata()
    self.assertEqual(meta_dict[META_EVENT_ID], event_id)
    self.assertEqual(meta_dict[META_EVENT_COMP], event_comp)
    self.assertEqual(meta_dict[META_EVENT_MSG], 'This is test message 1')
    return
def init_monitor(self, run_mode):
    ''' Setup the event monitor.
        There must only be one monitor configured and must be called after the
        processing pipeline is set up so that
           1) There is an queue to connect up to
           2) Events will be processed and not dropped
    '''
    cf_reg = registry.get_service(SERVICE_CONFIGURATION)
    # singleton=True: at most one active monitor plugin is returned
    for monitor in self.load_plugins(CONFIG_EVENT_MONITORS, run_mode, singleton=True):
        monitor_class, _name, section = monitor[0], monitor[1], monitor[2]
        monitor_config = dict(cf_reg.items(section))
        registry.register_service(SERVICE_EVENT_MONITOR, monitor_class(monitor_config))
    # A monitor is mandatory -- fail startup if none was registered
    if registry.get_service(SERVICE_EVENT_MONITOR) is None:
        raise TealError('No monitor configured - must have one monitor configured and enabled')
def init_metadata_service(self, run_mode):
    ''' Load the event and alert Metadata services from the package entries
        in the configuration file

    Registers empty Metadata services first, then adds the metadata files
    listed by each active [package] section's 'event_metadata' and
    'alert_metadata' options (comma-separated file lists).
    '''
    event_metadata = Metadata(META_TYPE_EVENT, [])
    registry.register_service(SERVICE_EVENT_METADATA, event_metadata)
    alert_metadata = Metadata(META_TYPE_ALERT, [])
    registry.register_service(SERVICE_ALERT_METADATA, alert_metadata)
    cfg_reg = registry.get_service(SERVICE_CONFIGURATION)
    # Get the package entries
    for (section, name) in cfg_reg.get_active_sections(CONFIG_PACKAGE, run_mode, name_required=True, singleton=False):
        get_logger().debug('Loading metadata from config package entry %s' % name)
        for option in cfg_reg.options(section):
            if option == 'alert_metadata':
                alert_files = cfg_reg.get(section,'alert_metadata')
                if alert_files is not None:
                    # Option value is a comma-separated list of files
                    alert_metadata.add_files(alert_files.split(','))
            elif option == 'event_metadata':
                event_files = cfg_reg.get(section, 'event_metadata')
                if event_files is not None:
                    event_metadata.add_files(event_files.split(','))
            else:
                # Only those two options right now; others silently ignored
                pass
def init_actual_log_service(self, log_file): """ Initialize the actual logging service and roll in the entries in the temporary log """ # Get the current logger (which has the temporary handler) logger = registry.get_service(SERVICE_LOGGER) # Create the actual handler # If log file is not specified, set to default path/file if log_file is None: log_dir = registry.get_service(TEAL_LOG_DIR) log_file = os.path.join(log_dir,TEAL_LOG_FILE) # TODO: python bug 4749 in RotatingFileHandler #actual_hdlr = RotatingFileHandler(log_file, maxBytes=1*1024*1024, backupCount=5) actual_hdlr = logging.FileHandler(log_file) elif log_file == 'stderr': actual_hdlr = logging.StreamHandler(sys.stderr) elif log_file == 'stdout': actual_hdlr = logging.StreamHandler(sys.stdout) else: # Allow the user to symbolically specify the TEAL_LOG_DIR in their file name template_file = string.Template(log_file) log_dir = registry.get_service(TEAL_LOG_DIR) full_filename = template_file.substitute({TEAL_LOG_DIR:log_dir}) actual_hdlr = logging.FileHandler(full_filename) # Set formatter from the formatter already being used actual_hdlr.setFormatter(logger.handler.formatter) # Get logs out of temporary handler in to actual handler logger.handler.setTarget(actual_hdlr) logger.handler.flush() # Now replace the temporary handler logger.addHandler(actual_hdlr) registry.register_service(SERVICE_LOG_FILE, log_file)
def init_persistence_services(self, commit_alerts):
    ''' Initialize the services to persist and work with Events and Alerts '''
    # The alert manager is the single persistence service registered here
    registry.register_service(SERVICE_ALERT_MGR, AlertMgr(commit_alerts))
def init_configurable_environment(self, run_mode):
    ''' Initialize the configurable environment (including directory paths
        other than config) used within TEAL

    Order of priority:
        * Environment variable
        * Configuration File
        * Default value
    Directory paths are set into special registry entries. Other environment
    settings will be pushed out to the process environment.

    @param run_mode: run mode used to select the active environment stanza
    @raise ConfigurationError: on a disallowed option or an invalid value
    '''
    # set default values
    teal_root_dir = os.path.join(os.sep,'opt','teal')
    teal_data_dir = None
    teal_log_dir = os.path.join(os.sep,'var','log','teal')
    self.event_not_analyzed_log_method = get_logger().warning
    tmp_event_q_not_analyzed_log_level = 'warning'
    tmp_shutdown_mode = SHUTDOWN_MODE_DEFERRED
    # Get configuration environment stanza options and process
    cf_reg = registry.get_service(SERVICE_CONFIGURATION)
    entries = cf_reg.get_active_sections(CONFIG_ENVIRONMENT, run_mode, name_required=False, singleton=True)
    if len(entries) != 0:
        section = entries[0][0]
        # iterate through the options
        for opt_name, opt_value in cf_reg.items(section):
            if opt_name == TEAL_ROOT_DIR:
                teal_root_dir = opt_value
            elif opt_name == TEAL_DATA_DIR:
                teal_data_dir = opt_value
            elif opt_name == TEAL_LOG_DIR:
                teal_log_dir = opt_value
            elif opt_name == TEAL_CONF_DIR:
                # Config dir is fixed before config is read -- disallowed here
                raise ConfigurationError('Option \'{0}\' is not allowed in the \'{1}\' stanza'.format(TEAL_CONF_DIR, CONFIG_ENVIRONMENT))
            elif opt_name == TEAL_EVENT_Q_NOT_ANALYZED_LOG_LEVEL:
                tmp_event_q_not_analyzed_log_level = opt_value
            elif opt_name == TEAL_SHUTDOWN_MODE:
                tmp_shutdown_mode = opt_value
            else:
                # see if already set; environment variables win over config
                if os.environ.get(opt_name, None) is None:
                    # Set it in the python environment
                    os.environ[opt_name] = opt_value
    # Now see if the dirs are overridden in the environment
    root_dir = os.environ.get(TEAL_ROOT_DIR, teal_root_dir)
    abs_root_dir = os.path.abspath(root_dir)
    self.test_dir(abs_root_dir)
    # Default data dir is relative to the root dir
    if teal_data_dir is None:
        teal_data_dir = os.path.join(root_dir,'data')
    data_dir = os.environ.get(TEAL_DATA_DIR, teal_data_dir)
    abs_data_dir = os.path.abspath(data_dir)
    self.test_dir(abs_data_dir)
    log_dir = os.environ.get(TEAL_LOG_DIR, teal_log_dir)
    abs_log_dir = os.path.abspath(log_dir)
    # BUGFIX: S_IWUSR must be qualified with the stat module -- the original
    # bare name would raise NameError (sibling constant was stat-qualified)
    self.test_dir(abs_log_dir, stat.S_IRUSR|stat.S_IWUSR)
    # Log them in the registry for usage throughout the framework
    registry.register_service(TEAL_ROOT_DIR, abs_root_dir)
    os.environ[TEAL_ROOT_DIR] = abs_root_dir
    registry.register_service(TEAL_DATA_DIR, abs_data_dir)
    os.environ[TEAL_DATA_DIR] = abs_data_dir
    registry.register_service(TEAL_LOG_DIR, abs_log_dir)
    os.environ[TEAL_LOG_DIR] = abs_log_dir
    # See if the Event Q not analyzed log level overridden
    tmp_event_q_not_analyzed_log_level = os.environ.get(TEAL_EVENT_Q_NOT_ANALYZED_LOG_LEVEL, tmp_event_q_not_analyzed_log_level)
    if tmp_event_q_not_analyzed_log_level == 'debug':
        self.event_not_analyzed_log_method = get_logger().debug
    elif tmp_event_q_not_analyzed_log_level == 'info':
        self.event_not_analyzed_log_method = get_logger().info
    elif tmp_event_q_not_analyzed_log_level == 'warning':
        self.event_not_analyzed_log_method = get_logger().warning
    elif tmp_event_q_not_analyzed_log_level == 'error':
        self.event_not_analyzed_log_method = get_logger().error
    elif tmp_event_q_not_analyzed_log_level == 'critical':
        self.event_not_analyzed_log_method = get_logger().critical
    else:
        raise ConfigurationError('A value of \'{0}\' is not supported for option \'{1}\' in the \'{2}\'stanza'.format(tmp_event_q_not_analyzed_log_level, TEAL_EVENT_Q_NOT_ANALYZED_LOG_LEVEL, CONFIG_ENVIRONMENT))
    # Shutdown mode processing: historic runs always defer shutdown
    if run_mode == RUN_MODE_HISTORIC:
        tmp_shutdown_mode = SHUTDOWN_MODE_DEFERRED
    else:
        tmp_shutdown_mode = os.environ.get(TEAL_SHUTDOWN_MODE, tmp_shutdown_mode)
    if tmp_shutdown_mode != SHUTDOWN_MODE_DEFERRED and tmp_shutdown_mode != SHUTDOWN_MODE_IMMEDIATE:
        raise ConfigurationError('A value of \'{0}\' is not supported for environment variable \'{1}\''.format(tmp_shutdown_mode, TEAL_SHUTDOWN_MODE))
    # Replace the placeholder shutdown mode registered earlier during startup
    registry.unregister_service(SERVICE_SHUTDOWN_MODE)
    registry.register_service(SERVICE_SHUTDOWN_MODE, tmp_shutdown_mode)
def testRegisterDuplicateService(self):
    ''' Test a service cannot be overridden'''
    registry.register_service(SERVICE_REGISTRY_TEST, self)
    # Re-registering the same key must raise; checked against both the
    # generic and the specific exception type
    for expected_exc in (ValueError, DuplicateKeyError):
        self.assertRaises(expected_exc, registry.register_service,
                          SERVICE_REGISTRY_TEST, self)
    registry.unregister_service(SERVICE_REGISTRY_TEST)
def testGetService(self):
    ''' Test get_service successful'''
    # Precondition: registry starts without the test service
    self.assertTrue(SERVICE_REGISTRY_TEST not in registry.registry)
    registry.register_service(SERVICE_REGISTRY_TEST, self)
    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual
    self.assertEqual(registry.get_service(SERVICE_REGISTRY_TEST), self)
    registry.unregister_service(SERVICE_REGISTRY_TEST)
def testUnregisterService(self):
    ''' Test unregister_service successful'''
    reg = registry.registry
    # Precondition: registry starts without the test service
    self.assertTrue(SERVICE_REGISTRY_TEST not in reg)
    registry.register_service(SERVICE_REGISTRY_TEST, self)
    registry.unregister_service(SERVICE_REGISTRY_TEST)
    # The service must be gone again after unregistering
    self.assertTrue(SERVICE_REGISTRY_TEST not in reg)
def testRegisterService(self):
    ''' Test register_service successful'''
    # Precondition: registry starts without the test service
    self.assertTrue(SERVICE_REGISTRY_TEST not in registry.registry)
    registry.register_service(SERVICE_REGISTRY_TEST, self)
    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual
    self.assertEqual(registry.registry[SERVICE_REGISTRY_TEST], self)
    # Clean up via the raw dict (deliberately bypassing unregister_service)
    del registry.registry[SERVICE_REGISTRY_TEST]
def init_processing_pipe(self, run_mode):
    '''Setup the pipe to process the events through the analyzers, Filters
    and Listeners

    Builds: event_q -> event analyzers -> alert_analyzer_q ->
    alert analyzers -> alert_delivery_q -> AlertDelivery (filters+listeners).

    @param run_mode: run mode used to select active plugin sections
    @return: list of short strings (EA:/AA:/F:/L: prefixed) describing the
        loaded pipeline, for startup logging
    '''
    pipe_str_list = []
    #Setup the queues
    event_q = ListenableQueue('Event input Q', self.event_not_analyzed_callback)
    registry.register_service(SERVICE_EVENT_Q, event_q)
    alert_analyzer_q = ListenableQueue('Alert Analyzer input Q', self.alert_not_analyzed_callback)
    registry.register_service(SERVICE_ALERT_ANALYZER_Q, alert_analyzer_q)
    # NOTE(review): delivery queue reuses event_not_analyzed_callback rather
    # than an alert-specific one -- confirm this is intentional
    alert_delivery_q = ListenableQueue('Alert Delivery Q', self.event_not_analyzed_callback)
    registry.register_service(SERVICE_ALERT_DELIVERY_Q, alert_delivery_q)
    # Get configuration
    cf_reg = registry.get_service(SERVICE_CONFIGURATION)
    # Setup the event analyzers
    # For each configured event analyzer
    #    create using inQ as event_q and outQ as alert_analyzer_q
    count = 0
    analyzer_names = []    # All (event and alert)
    for (analyzer, analyzer_name, section) in self.load_plugins(CONFIG_EVENT_ANALYZERS, run_mode):
        count += 1
        pipe_str_list.append('EA:{0}'.format(analyzer_name))
        get_logger().debug('adding event analyzer: {0}'.format(analyzer_name))
        analyzer_names.append(analyzer_name)
        # count gives each analyzer a unique ordinal within its kind
        analyzer(analyzer_name, event_q, alert_analyzer_q, dict(cf_reg.items(section)), count)
    # For each configured alert analyzer
    #    create using inQ as event_q, alert_analyzer_q and outQ as alert_filter_q
    count = 0
    for (analyzer, analyzer_name, section) in self.load_plugins(CONFIG_ALERT_ANALYZERS, run_mode):
        count += 1
        pipe_str_list.append('AA:{0}'.format(analyzer_name))
        get_logger().debug('adding alert analyzer: {0}'.format(analyzer_name))
        analyzer_names.append(analyzer_name)
        analyzer(analyzer_name, event_q, alert_analyzer_q, alert_delivery_q, dict(cf_reg.items(section)), count)
    # Configure Filters and Listeners
    alert_delivery = AlertDelivery(alert_delivery_q)
    registry.register_service(SERVICE_ALERT_DELIVERY, alert_delivery)
    # Add Filters
    for (aFilter, filter_name, section) in self.load_plugins(CONFIG_ALERT_FILTERS, run_mode):
        pipe_str_list.append('F:{0}'.format(filter_name))
        get_logger().debug('adding filter: {0}'.format(filter_name))
        alert_delivery.add_filter(aFilter(filter_name, dict(cf_reg.items(section))))
    # Add Listeners
    for (listener, listener_name, section) in self.load_plugins(CONFIG_ALERT_LISTENERS, run_mode):
        pipe_str_list.append('L:{0}'.format(listener_name))
        get_logger().debug('adding listener: {0}'.format(listener_name))
        alert_delivery.add_listener(listener(listener_name, dict(cf_reg.items(section))))
    # Resolve and validate delivery against the full set of analyzer names
    get_logger().debug('Resolve and validate alert delivery')
    alert_delivery.resolve_and_validate(analyzer_names)
    get_logger().debug('Completed loading of pipeline')
    return pipe_str_list
def __init__(self, configFile, logFile=None, msgLevel='info', restart=None,
             run_mode=TEAL_RUN_MODE_REALTIME, commit_alerts=True, data_only=False,
             historic_qry=None, daemon_mode=False, extra_log_id='',
             commit_checkpoints=None, use_time_occurred=False):
    """ Construct the ELA framework

    Startup sequence is order-dependent: registry -> temp logging ->
    run mode -> environment -> config -> real logging -> services ->
    pipeline -> monitor. The monitor starts last because it may begin
    delivering events immediately.

    @param configFile: the TEAL configuration file. This is mandatory
    @param logFile: the full pathname of the logging file. If no logging file
        is specified, logging will be to stdout
    @param msgLevel: the lowest message level that will be logged. The default
        is informational messages and above
    @param restart: determines how teal will start processing events in
        realtime mode
    @param run_mode: how the monitor will be configured - realtime or historic
    @param commit_alerts: Certain run modes may want to not commit alerts if
        they are created because the user is debugging rules or trying to
        determine relationships
    @param data_only: do not initialize the processing pipeline
        Only the data that is configured for TEAL should be set up and used
    @param historic_qry: The query to use to get the events to do historic
        analysis on
    @param daemon_mode: run in daemon mode
    @param extra_log_id: additional string to add to all log entries for this
        instance of TEAL
    @param commit_checkpoints: control whether checkpoints should be
        committed or not
    @param use_time_occurred: Use time occurred instead of time logged for
        analysis
    """
    os.umask(0o002)   # Set the umask for files created so group users can access too
    log_str_list = []
    self.data_only = data_only    # Needed for shutdown
    # Initialize the registry. This will be used by subsequent initialization
    self.init_reg_service()
    # Register a shutdown service for users
    registry.register_service(SERVICE_SHUTDOWN_MODE, SHUTDOWN_MODE_DEFERRED)
    registry.register_service(SERVICE_SHUTDOWN, Shutdown(self.shutdown))
    # Setup logging with temporary handler
    # Determine if prefix should be used and if so, which one
    # ('C' = historic with commit, 'H' = historic without commit)
    if run_mode == TEAL_RUN_MODE_HISTORIC:
        if commit_alerts == True:
            extra_log_id = 'C' + extra_log_id
        else:
            extra_log_id = 'H' + extra_log_id
    self.init_temp_log_service(msgLevel, extra_log_id)
    get_logger().info("******* TEAL({0}) Startup initiated on {1}".format(id(self), datetime.now()))
    log_str_list.append('\tMessage level: {0}'.format(repr(registry.get_service(SERVICE_MSG_LEVEL))))
    get_logger().info(log_str_list[-1])
    try:
        # Initialize the mode
        self.init_run_mode(run_mode)
        log_str_list.append('\tRun mode: {0}'.format(registry.get_service(SERVICE_RUN_MODE)))
        get_logger().info(log_str_list[-1])
        # Initialize the TEAL environment that can't be changed in the configuration file
        self.init_non_configurable_environment()
        # Read in the configuration files and prepare it for use
        config_str = self.init_cfg_service(configFile)
        log_str_list.append('\tConfiguration: {0}'.format(config_str))
        get_logger().info(log_str_list[-1])
        # Initialize the rest of the TEAL environment which can be changed in the configuration file
        self.init_configurable_environment(run_mode)
        # Setup logging to the actual log
        self.init_actual_log_service(logFile)
        log_str_list.append('\tLog file: {0}'.format(repr(registry.get_service(SERVICE_LOG_FILE))))
        get_logger().info(log_str_list[-1])
        if historic_qry is not None and len(historic_qry) > 0:
            log_str_list.append('\t Query: {0}'.format(historic_qry))
            get_logger().info(log_str_list[-1])
        if use_time_occurred == True:
            registry.register_service(SERVICE_TIME_MODE, 'time_occurred')
            log_str_list.append('\t Time mode = time occurred')
            get_logger().info(log_str_list[-1])
        else:
            registry.register_service(SERVICE_TIME_MODE, 'time_logged')
        # Create the location code service
        self.init_location_service(run_mode)
        # Load the metadata
        self.init_metadata_service(run_mode)
        # Initialize the DB interface so persistence and monitor can use
        self.init_db_interface(daemon_mode, run_mode)
        # Initialize the persistence services
        self.init_persistence_services(commit_alerts)
        # Validate the historic query string
        # NOTE(review): qry_info is presumably a module-level definition --
        # confirm where it is declared
        registry.register_service(SERVICE_HISTORIC_QUERY, command.validate_qry_str(qry_info, historic_qry))
        if (not data_only):
            # Initialize Checkpointing service
            self.init_checkpoint_service(commit_checkpoints, restart)
            # Build the TEAL event/alert processing pipeline
            pipe_str_list = self.init_processing_pipe(run_mode)
            if len(pipe_str_list) > 0:
                log_str_list.append('\tPipeline plug-ins: {0}'.format(', '.join(pipe_str_list)))
                get_logger().info(log_str_list[-1])
            else:
                log_str_list.append('\tPipeline plug-ins: --None--')
                get_logger().info(log_str_list[-1])
        # Record startup information
        # Note before monitor because monitor may start processing immediately
        if ((daemon_mode == True) and not data_only) or (commit_alerts == True and run_mode == TEAL_RUN_MODE_HISTORIC):
            # Create TEAL started alert
            create_teal_alert(TEAL_ALERT_ID_TEAL_STARTED, 'TEAL started', '; '.join(log_str_list), recommendation='None')
        if (not data_only):
            # Start the monitor.
            self.init_monitor(run_mode)
        get_logger().info('TEAL startup complete')
    except:
        # Log the full traceback, then propagate so the caller sees the failure
        get_logger().exception('TEAL startup failed')
        raise