def main(): """ Main entry point """ logging.setup_root_logger(app_name='splunk_ta_aws', modular_name='inspector') tacommon.main(print_scheme, run)
def main(): """ Main entry point """ # description is single-instance, output to one log file logging.setup_root_logger(app_name=tac.splunk_ta_aws, modular_name=adcon.mod_name) tacommon.main(print_scheme, run)
def main(): """ Main entry point """ # cloudwatch is single instance, output to one log file logging.setup_root_logger(app_name=tac.splunk_ta_aws, modular_name=acc.mod_name) tacommon.main(print_scheme, run)
def main(): """ Main entry point """ httplib2shim.patch() logging.setup_root_logger('splunk_ta_google-cloudplatform', 'google_cloud_monitoring') tacommon.main(print_scheme, run)
def _load_data(tear_down_q, task_configs, log_ctx):
    try:
        if log_ctx:
            # rebuild the root logger in this worker with the passed-in context
            logging.RootHandler.teardown()
            logging.setup_root_logger(**log_ctx)
        do_load_data(tear_down_q, task_configs)
    except Exception:
        logger.error("Failed to load data, error=%s", traceback.format_exc())

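# Illustrative sketch, not from the add-on source: _load_data above tears down
# and re-creates the root logger when given a log_ctx, which suggests it is run
# in a separate worker process. The hypothetical spawner below shows that usage;
# the function name and the log_ctx keys (mirroring the setup_root_logger
# keyword arguments seen in the other snippets) are assumptions.
import multiprocessing


def _spawn_loader_sketch(task_configs):
    tear_down_q = multiprocessing.Queue()
    log_ctx = {
        'app_name': 'splunk_ta_aws',   # assumed, mirrors the snippets above
        'modular_name': 'aws_s3',      # assumed value for illustration
    }
    worker = multiprocessing.Process(
        target=_load_data, args=(tear_down_q, task_configs, log_ctx))
    worker.start()
    return worker, tear_down_q
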
def _get_tasks(self):
    if not self.stanza_configs:
        return None

    conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                              self.metas[tac.session_key])

    logging_settings = conf_mgr.get_stanza(self.log_info,
                                           asc.log_stanza,
                                           do_reload=False)

    # set the log level read from conf for our logger
    set_log_level(logging_settings[asc.log_level])

    # entry point for this stanza task, setup root logger here
    # Generic S3 can be configured to be single-instance or multiple-instance
    # through env variable
    stanza_name = ''
    try:
        if len(self.stanza_configs) == 1:
            # only one stanza exists
            stanza_name = self.stanza_configs[0].get('name', '')
    except Exception:
        logger.exception('Failed to get stanza name!')

    stanza_name = extract_datainput_name(stanza_name)
    logging.setup_root_logger(app_name=tac.splunk_ta_aws,
                              modular_name=asc.mod_name,
                              stanza_name=stanza_name)

    proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
    tasks, creds = [], {}
    for stanza in self.stanza_configs:
        task = {}
        task.update(stanza)
        task.update(self.metas)
        task.update(proxy_info)
        task[tac.log_level] = logging_settings[asc.log_level]
        task[tac.interval] = tacommon.get_interval(task, 3600)
        task[tac.polling_interval] = task[tac.interval]
        task[asc.max_retries] = int(task.get(asc.max_retries, 3))
        task[asc.prefix] = task.get(asc.key_name)
        task[asc.last_modified] = self._get_last_modified_time(
            task[asc.initial_scan_datetime])
        task[asc.terminal_scan_datetime] = self._convert_terminal_scan_datetime(
            task.get(asc.terminal_scan_datetime))
        input_name = scutil.extract_datainput_name(task[tac.name])
        task[asc.data_input] = input_name
        task[tac.sourcetype] = task.get(tac.sourcetype, "aws:s3")
        task[asc.bucket_name] = str(task[asc.bucket_name])
        if not task.get(asc.whitelist):
            task[asc.whitelist] = s3common.sourcetype_to_keyname_regex.get(
                task[tac.sourcetype])
        tasks.append(task)
    logger.info("Done with configuration read from conf.")

    s3ckpt.handle_ckpts(tasks)
    return tasks

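# Illustrative sketch, not the add-on's implementation: extract_datainput_name()
# as used in _get_tasks above is expected to strip the modular-input scheme from
# a stanza name so that log files and per-input state are keyed by the bare
# input name. The helper below is a hypothetical stand-in showing that behavior.
def _extract_datainput_name_sketch(stanza_name):
    # "aws_s3://my_bucket_input" -> "my_bucket_input"
    # names without a "://" scheme are returned unchanged
    return stanza_name.split('://', 1)[-1]
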
def main():
    util.remove_http_proxy_env_vars()
    logger.setLevel(logging.INFO)
    logging.setup_root_logger(app_name=splunk_ta_aws,
                              modular_name='sns_alert_modular')
    AwsSnsModularAlert(logger, parse()).run()