def run():
    """
    Main loop. Run this TA forever.

    Startup sequence:
      1. Wait briefly for the KV store to become ready.
      2. Build the Kafka config; exit early when there are no tasks and
         this node is not the dispatcher.
      3. Create the loader, wire signal handlers, and register periodic
         timers (conf-file monitor every 10s, orphan check every 1s).
      4. Wait a grace period of up to 15 seconds, honoring stop signals.
      5. On dispatcher nodes, also register the periodic topic check.
    """
    # Sleep 5 seconds here for KV store ready
    time.sleep(5)

    kc.setup_signal_handler(None, None)
    kconfig = kc.create_kafka_config()
    task_configs = kconfig.get_task_configs()
    if not task_configs and not kconfig.is_dispatcher():
        # Nothing to collect and nothing to dispatch on this node.
        return

    loader = kcdl.KafkaDataLoaderManager(task_configs)
    # Re-register signal handlers now that the loader exists so a signal
    # can shut it down cleanly.
    kc.setup_signal_handler(loader, kconfig)

    # Reboot-on-change: poll the monitored conf files every 10 seconds.
    monitor = fm.FileMonitor(_handle_file_changes(loader), _get_conf_files())
    loader.add_timer(monitor.check_changes, time.time(), 10)

    # Stop when the parent process disappears; checked every second.
    orphan_checker = opm.OrphanProcessChecker(loader.stop)
    loader.add_timer(orphan_checker.check_orphan, time.time(), 1)

    # Grace period before starting, interruptible by a stop signal.
    for _ in range(15):
        if loader.received_stop_signal():
            return
        time.sleep(1)

    # Prefer the correctly spelled env var, but keep honoring the
    # historical misspelling ("internval") for backward compatibility
    # with existing deployments. Default: hourly (3600 seconds).
    topic_interval = int(os.environ.get(
        "kafka_topic_check_interval",
        os.environ.get("kafka_topic_check_internval", 3600)))
    if kconfig.is_dispatcher():
        topic_handler = _handle_topic_changes(loader, kconfig)
        loader.add_timer(topic_handler, time.time(), topic_interval)

    loader.start()
def create_conf_monitor(callback):
    """Build a FileMonitor over the CloudWatch TA's config files.

    @callback: invoked by the monitor when any watched file changes.
    @return: fm.FileMonitor watching app, task, passwords and conf files.
    """
    watched_files = (
        AWSCloudWatchConf.app_file,
        AWSCloudWatchConf.task_file_w_path,
        AWSCloudWatchConf.passwords_file_w_path,
        AWSCloudWatchConf.conf_file_w_path,
    )
    return fm.FileMonitor(callback, watched_files)
def run(collector_cls, settings, checkpoint_cls=None, config_cls=None,
        log_suffix=None):
    """
    Main loop. Run this TA forever.

    Builds the TA config from `settings`, creates one data collector per
    task, wires signal/file/orphan handling, then hands the jobs to the
    data loader. Exits early when there are no tasks or when running on
    a search-head-cluster member that is not the captain.
    """
    # This is for stdout flush
    utils.disable_stdout_buffer()

    # Pre-warm strptime to avoid the threading issue in
    # http://bugs.python.org/issue7980
    time.strptime("2016-01-01", "%Y-%m-%d")

    ta_config = tc.create_ta_config(settings, config_cls or tc.TaConfig,
                                    log_suffix)
    stulog.set_log_level(ta_config.get_log_level())

    tasks = ta_config.get_task_configs()
    if not tasks:
        stulog.logger.debug("No task and exiting...")
        return

    meta_config = ta_config.get_meta_config()
    if ta_config.is_shc_but_not_captain():
        # In SHC env, only captain is able to collect data
        stulog.logger.debug("This search header is not captain, will exit.")
        return

    loader = dl.create_data_loader(meta_config)
    checkpoint_manager = checkpoint_cls or cpmgr.TACheckPointMgr
    jobs = []
    for task in tasks:
        jobs.append(tdc.create_data_collector(
            loader,
            ta_config,
            meta_config,
            task,
            collector_cls,
            checkpoint_cls=checkpoint_manager,
        ))

    # handle signal
    _setup_signal_handler(loader, settings["basic"]["title"])

    # monitor files to reboot
    if settings["basic"].get("monitor_file"):
        monitor = fm.FileMonitor(
            _handle_file_changes(loader),
            _get_conf_files(settings["basic"]["monitor_file"]),
        )
        loader.add_timer(monitor.check_changes, time.time(), 10)

    # add orphan process handling, which will check each 1 second
    orphan_checker = opm.OrphanProcessChecker(loader.tear_down)
    loader.add_timer(orphan_checker.check_orphan, time.time(), 1)

    loader.run(jobs)
def create_conf_monitor(callback, files):
    """Return a FileMonitor watching the resolved conf-file paths.

    @callback: invoked by the monitor when a watched file changes.
    @files: conf file names, resolved to full paths via get_conf_files.
    """
    resolved_paths = get_conf_files(files)
    return fm.FileMonitor(callback, resolved_paths)
def __init__(self, server_uri, session_key, load_ucc_server_input_callback,
             monitor_files, ucc_server_input_filters, division_schema,
             dispatch_schema, get_forwarders_snapshot_callback,
             update_forwarders_snapshot_callback,
             get_dispatch_snapshot_callback,
             update_dispatch_snapshot_callback, ucc_server_id,
             get_log_level_callback=None):
    """
    Init ucc server.

    @server_uri: local ucc server uri.
    @session_key: local ucc server session key.
    @load_ucc_server_input_callback: load ucc server input callback.
    @monitor_files: files to monitor.
    @ucc_server_input_filters: filters for ucc server input filtering.
    @division_schema: schema settings for input settings division.
    @dispatch_schema: schema settings for input settings dispatch.
    @get_forwarders_snapshot_callback: get forwarders snapshot callback.
    @update_forwarders_snapshot_callback: update forwarders snapshot callback.
    @get_dispatch_snapshot_callback: get dispatch snapshot callback.
    @update_dispatch_snapshot_callback: update dispatch snapshot callback.
    @ucc_server_id: ucc server id.
    @get_log_level_callback: get log level callback (optional).
    @raise UCCServerException: if any required argument is missing/falsy.
    """
    # Validate with explicit raises rather than `assert`: asserts are
    # stripped under `python -O`, and the original
    # `assert cond, UCCServerException(...)` raised AssertionError
    # instead of the intended exception type.
    required = (
        ("server_uri", server_uri),
        ("session_key", session_key),
        ("load_ucc_server_input_callback", load_ucc_server_input_callback),
        ("monitor_files", monitor_files),
        ("ucc_server_input_filters", ucc_server_input_filters),
        ("division_schema", division_schema),
        ("dispatch_schema", dispatch_schema),
        ("get_forwarders_snapshot_callback",
         get_forwarders_snapshot_callback),
        ("update_forwarders_snapshot_callback",
         update_forwarders_snapshot_callback),
        ("get_dispatch_snapshot_callback", get_dispatch_snapshot_callback),
        ("update_dispatch_snapshot_callback",
         update_dispatch_snapshot_callback),
        ("ucc_server_id", ucc_server_id),
    )
    for name, value in required:
        if not value:
            raise UCCServerException("{} is None.".format(name))

    self._load_ucc_server_input_callback = load_ucc_server_input_callback
    # Callback is registered later; for now only the file list is known.
    self._file_monitor = fm.FileMonitor(None, monitor_files)
    self._filter_manager = FilterManager(ucc_server_input_filters)
    self._discovery_engine = DiscoveryEngine(division_schema, ucc_server_id)
    self._dispatch_engine = DispatchEngine(
        server_uri, session_key, dispatch_schema,
        get_forwarders_snapshot_callback,
        update_forwarders_snapshot_callback,
        get_dispatch_snapshot_callback,
        update_dispatch_snapshot_callback, ucc_server_id)
    self._get_log_level = get_log_level_callback
    self._log_level = self.DEFAULT_LOG_LEVEL
    self._init = True
    self._stopped = False
def create_conf_monitor(callback):
    """Build a FileMonitor over the S3 TA's config files.

    @callback: invoked by the monitor when any watched file changes.
    @return: fm.FileMonitor watching app, task, passwords and log files.
    """
    watched_files = (
        AWSS3Conf.app_file,
        AWSS3Conf.task_file_w_path,
        AWSS3Conf.passwords_file_w_path,
        AWSS3Conf.log_info_w_path,
    )
    return fm.FileMonitor(callback, watched_files)
def run(collector_cls, settings, checkpoint_cls=None, config_cls=None,
        log_suffix=None, single_instance=True, cc_json_file=None):
    """
    Main loop. Run this TA forever.

    Sets up the data loader, signal/file/orphan handling, builds the TA
    config, then creates one data collector per task and runs them.
    Exits early when there are no tasks, when running on a search-head
    cluster member, or when the checkpoint directory path is too long.
    """
    ta_short_name = settings["meta"]["name"].lower()

    # This is for stdout flush
    utils.disable_stdout_buffer()
    # Pre-warm strptime (http://bugs.python.org/issue7980)
    time.strptime('2016-01-01', '%Y-%m-%d')

    loader = dl.create_data_loader()

    # handle signal
    _setup_signal_handler(loader, ta_short_name)

    # monitor files to reboot -- best-effort; failure must not stop the TA
    try:
        monitor = fm.FileMonitor(_handle_file_changes(loader),
                                 _get_conf_files(settings))
        loader.add_timer(monitor.check_changes, time.time(), 10)
    except Exception:
        stulog.logger.exception("Fail to add files for monitoring")

    # add orphan process handling, which will check each 1 second
    orphan_checker = opm.OrphanProcessChecker(loader.tear_down)
    loader.add_timer(orphan_checker.check_orphan, time.time(), 1)

    ta_config = tc.create_ta_config(settings, config_cls or tc.TaConfig,
                                    log_suffix,
                                    single_instance=single_instance)
    tasks = ta_config.get_task_configs()
    if not tasks:
        stulog.logger.debug("No task and exiting...")
        return

    meta_config = ta_config.get_meta_config()
    meta_config["cc_json_file"] = cc_json_file

    if ta_config.is_shc_member():
        # Don't support SHC env
        stulog.logger.error(
            "This host is in search head cluster environment , "
            "will exit.")
        return

    # In this case, use file for checkpoint
    if _is_checkpoint_dir_length_exceed_limit(
            ta_config, meta_config["checkpoint_dir"]):
        # NOTE(review): brace-style "{}" placeholders passed as logger
        # args -- assumes stulog formats with str.format; confirm, since
        # stdlib logging expects %-style placeholders.
        stulog.logger.error(
            "The length of the checkpoint directory path: '{}' "
            "is too long. The max length we support is {}",
            meta_config["checkpoint_dir"], __CHECKPOINT_DIR_MAX_LEN__)
        return

    checkpoint_manager = checkpoint_cls or cpmgr.TACheckPointMgr
    jobs = [
        tdc.create_data_collector(loader, ta_config, meta_config, task,
                                  collector_cls,
                                  checkpoint_cls=checkpoint_manager)
        for task in tasks
    ]
    loader.run(jobs)