def getBaseAppName():
    """Return the name of the base app that this script belongs to.

    The name is derived from this module's location on disk via
    ``util.get_appname_from_path``.

    Raises:
        Exception: if no app name can be derived from ``__file__``.
    """
    name = util.get_appname_from_path(__file__)
    if name is None:
        raise Exception('Cannot get app name from file: %s' % __file__)
    return name
def __init__(self, namespace=None, default_level=logging.INFO):
    """Initialize the logger registry.

    :param namespace: optional namespace prefix for logger names; when
        omitted it is derived from this module's app directory and
        lower-cased.
    :param default_level: logging level applied to new loggers.
    """
    self._loggers = {}
    self._default_level = default_level
    if namespace is None:
        namespace = cutil.get_appname_from_path(op.abspath(__file__))
    # Normalize only when a namespace was actually resolved.
    self._namespace = namespace.lower() if namespace else namespace
def __init__(self, conf_file, splunkd_uri, session_key, appname=None):
    """Bind a conf file to its configuration and credential managers.

    :param conf_file: conf file name or path; normalized via
        ``conf.conf_file2name``.
    :param splunkd_uri: splunkd management URI.
    :param session_key: splunkd session key.
    :param appname: owning app; defaults to the app this module lives in.
    """
    resolved_app = appname
    if resolved_app is None:
        resolved_app = utils.get_appname_from_path(op.abspath(__file__))
    self._conf_file = conf.conf_file2name(conf_file)
    self._conf_mgr = conf.ConfManager(
        splunkd_uri, session_key, app_name=resolved_app
    )
    # Credentials are stored app-scoped under the "nobody" owner, with the
    # app name doubling as the realm.
    self._cred_mgr = cred.CredentialManager(
        splunkd_uri,
        session_key,
        app=resolved_app,
        owner="nobody",
        realm=resolved_app,
    )
    self._keys = None
def __init__(self, conf_file, splunkd_uri, session_key, appname=None):
    """Set up conf and credential access for one conf file.

    :param conf_file: conf file name or path; normalized via
        ``conf.conf_file2name``.
    :param splunkd_uri: splunkd management URI.
    :param session_key: splunkd session key.
    :param appname: owning app; defaults to the app this module lives in.
    """
    if appname is None:
        appname = utils.get_appname_from_path(op.abspath(__file__))
    self._conf_file = conf.conf_file2name(conf_file)
    self._conf_mgr = conf.ConfManager(
        splunkd_uri,
        session_key,
        app_name=appname,
    )
    # App-scoped credential store: owner "nobody", realm == app name.
    cred_kwargs = dict(app=appname, owner="nobody", realm=appname)
    self._cred_mgr = cred.CredentialManager(
        splunkd_uri, session_key, **cred_kwargs
    )
    self._keys = None
def __init__(self, namespace=None, default_level=logging.INFO):
    """Initialize the (deprecated) logger registry.

    Emits a ``DeprecationWarning`` pointing callers at the upstream
    issue tracking the removal, then sets up the namespace the same
    way as the non-deprecated variant.

    :param namespace: optional namespace prefix; derived from this
        module's app directory and lower-cased when omitted.
    :param default_level: logging level applied to new loggers.
    """
    warnings.warn(
        "This class is deprecated. "
        "Please see https://github.com/splunk/addonfactory-ta-library-python/issues/38",
        DeprecationWarning,
        stacklevel=2,
    )
    self._loggers = {}
    self._default_level = default_level
    if namespace is None:
        namespace = cutil.get_appname_from_path(op.abspath(__file__))
    self._namespace = namespace.lower() if namespace else namespace
import logging from inspect import ismethod from os import path as op from splunk import ResourceNotFound, RESTException, admin, entity, rest from splunktalib.common import util as sc_util from splunktalib.rest import splunkd_request import splunktaucclib.common.log as stulog from splunktaucclib.rest_handler.cred_mgmt import CredMgmt from splunktaucclib.rest_handler.error_ctl import RestHandlerError as RH_Err from splunktaucclib.rest_handler.util import makeConfItem __all__ = ["user_caps", "BaseRestHandler", "BaseModel", "ResourceHandler"] APP_NAME = sc_util.get_appname_from_path(op.abspath(__file__)) def get_entities(endpoint, session_key, user, app, get_args): url = rest.makeSplunkdUri() + "servicesNS/" + user + "/" + app + "/" + endpoint try: response, content = rest.simpleRequest( url, sessionKey=session_key, method="GET", getargs=get_args, raiseAllErrors=True, ) res = json.loads(content) if "entry" in res: return {entry["name"]: entry["content"] for entry in res["entry"]}
class TaConfig(object):
    """Build per-task configurations by "dividing" conf stanzas.

    Loads all conf contents through ``th.ConfigSchemaHandler``, then splits
    each enabled stanza into one task config per combination of its
    multi-valued fields (see ``_divide_task_config``).

    NOTE: Python 2 code (``iteritems``/``xrange``/``basestring`` and
    integer ``/`` division are relied upon below).
    """

    # Hostname and app name are resolved once at class-definition time.
    _current_hostname = socket.gethostname()
    _appname = util.get_appname_from_path(op.abspath(__file__))

    def __init__(self, meta_config, client_schema):
        """Load and divide task configs immediately on construction.

        :param meta_config: dict carrying at least ``c.server_uri`` and
            ``c.session_key``.
        :param client_schema: schema dict consumed by
            ``th.ConfigSchemaHandler`` and ``_get_log_level``.
        """
        self._meta_config = meta_config
        self._task_configs = []
        self._client_schema = client_schema
        self._server_info = sc.ServerInfo(meta_config[c.server_uri],
                                          meta_config[c.session_key])
        self._all_conf_contents = {}
        # NOTE(review): ``_get_division_settings`` is assigned here but never
        # read; ``_load_task_configs`` sets ``_divide_settings`` instead —
        # looks like a leftover/typo, confirm before removing.
        self._get_division_settings = {}
        self._load_task_configs()
        self._log_level = self._get_log_level()

    def is_shc_but_not_captain(self):
        # True on a search-head-cluster member that is not the captain.
        return self._server_info.is_shc_member() and \
            not self._server_info.is_captain()

    def get_meta_config(self):
        return self._meta_config

    def get_task_configs(self):
        return self._task_configs

    def get_all_conf_contents(self):
        return self._all_conf_contents

    def get_divide_settings(self):
        # Populated by _load_task_configs().
        return self._divide_settings

    def get_log_level(self):
        return self._log_level

    def _load_task_configs(self):
        """Fetch conf contents + division settings, then generate tasks."""
        config_handler = th.ConfigSchemaHandler(self._meta_config,
                                                self._client_schema)
        self._all_conf_contents = config_handler.get_all_conf_contents()
        self._divide_settings = config_handler.get_division_settings()
        assert self._divide_settings, "division is empty"
        self._generate_task_configs(self._all_conf_contents,
                                    self._divide_settings)

    def _generate_task_configs(self, all_conf_contents, divide_settings):
        """Divide every endpoint's stanzas and fill in per-task defaults."""
        all_task_configs = list()
        for division_endpoint, divide_setting in divide_settings.iteritems():
            task_configs = self._get_task_configs(all_conf_contents,
                                                  division_endpoint,
                                                  divide_setting)
            all_task_configs = all_task_configs + task_configs
        for task_config in all_task_configs:
            task_config[c.use_kv_store] = task_config.get(
                c.use_kv_store, False)
            task_config[c.appname] = TaConfig._appname
            task_config[c.index] = task_config.get(c.index, "default")
            # On SHC members checkpoints must go to KV store.
            if self._server_info.is_shc_member():
                task_config[c.use_kv_store] = True
            stulog.logger.debug("Task info: %s", task_config)
        # Subclass hook runs before the interval check below.
        self.process_task_configs(all_task_configs)
        # interval: mandatory, coerced to int.
        for task_config in all_task_configs:
            assert task_config.get(c.interval), "task config has no interval " \
                                                "field"
            task_config[c.interval] = int(task_config[c.interval])
        self._task_configs = all_task_configs
        stulog.logger.info("Totally generated {} task configs".format(
            len(self._task_configs)))

    # Override this method if some transforms or validations needs to be done
    # before task_configs is exposed
    def process_task_configs(self, task_configs):
        pass

    def _get_log_level(self):
        """Resolve log level from schema path ``a > b > ...``; default INFO.

        ``config_meta.logging_setting`` in the client schema names a
        '>'-separated path into the loaded conf contents.
        """
        if not self._client_schema["basic"].get("config_meta"):
            return "INFO"
        if not self._client_schema["basic"]["config_meta"].get(
                "logging_setting"):
            return "INFO"
        paths = self._client_schema["basic"]["config_meta"][
            "logging_setting"].split(">")
        global_setting = self.get_all_conf_contents()[paths[0].strip()]
        if not global_setting:
            return "INFO"
        # Walk the path segment by segment down the conf-contents dict.
        log_level = self.get_all_conf_contents()
        for i in xrange(len(paths)):
            log_level = log_level[paths[i].strip()]
        if not log_level:
            return "INFO"
        else:
            return log_level

    def _get_task_configs(self, all_conf_contents, division_endpoint,
                          divide_setting):
        """Divide every enabled stanza of one endpoint into task configs."""
        task_configs = list()
        orig_task_configs = all_conf_contents.get(division_endpoint)
        for orig_task_config_stanza, orig_task_config_contents in \
                orig_task_configs.iteritems():
            # Skip stanzas explicitly disabled in the conf.
            if util.is_true(orig_task_config_contents.get(c.disabled, False)):
                stulog.logger.debug("Stanza %s is disabled",
                                    orig_task_config_contents)
                continue
            orig_task_config_contents[c.divide_endpoint] = division_endpoint
            divide_tasks = self._divide_task_config(orig_task_config_stanza,
                                                    orig_task_config_contents,
                                                    divide_setting,
                                                    all_conf_contents)
            task_configs = task_configs + divide_tasks
        return task_configs

    def _divide_task_config(self, task_config_stanza, task_config_contents,
                            divide_setting, all_conf_contents):
        """Expand one stanza into the cross product of its MULTI fields.

        Each field starts as a 1-element list; fields matched by a MULTI
        divide rule are split on the rule's separator. ``multi`` ends up as
        the product of all split lengths, i.e. the number of tasks produced.
        """
        task_config = dict()
        task_config[c.stanza_name] = [
            self._get_stanza_name(task_config_stanza)
        ]
        multi = 1
        for key, value in task_config_contents.iteritems():
            task_config[key] = [value]
            for divide_rule in divide_setting:
                if divide_rule.metric() == key:
                    if divide_rule.type() == \
                            th.ConfigSchemaHandler.TYPE_MULTI:
                        task_config[key] = value.split(
                            divide_rule.separator())
                        multi = multi * len(task_config[key])
        # Scale every value list up to length ``multi`` by repetition;
        # every second multi-valued list is additionally sorted so that
        # index i across all lists enumerates the full cross product.
        # NOTE(review): Python 2 integer division is assumed here
        # (``multi / len(value)``).
        scale_task_config = {}
        times = 0
        for key, value in task_config.iteritems():
            count = multi / len(value)
            scale_task_config[key] = value * count
            if len(value) == 1:
                continue
            times += 1
            if times % 2 == 0:
                scale_task_config[key].sort()
        return self._build_task_configs(scale_task_config, all_conf_contents,
                                        divide_setting, multi)

    def _build_task_configs(self, raw_task_config, all_conf_contents,
                            divide_setting, length):
        """Slice the scaled value lists into ``length`` flat task configs."""
        task_configs = list()
        # split task configs
        for i in xrange(length):
            task_config = dict()
            # handle endpoint config
            for raw_key, raw_value in raw_task_config.iteritems():
                value = raw_value[i]
                task_config[raw_key] = value
            # handle divide settings
            task_config[c.divide_key] = list()
            for divide_rule in divide_setting:
                task_config[c.divide_key].append(divide_rule.metric())
            task_config[c.divide_key].sort()
            task_configs.append(task_config)
        return task_configs

    def _get_stanza_name(self, input_item):
        """Return the stanza name, stripping any ``scheme://`` prefix.

        ``input_item`` may be a plain string or a dict carrying ``c.name``.
        """
        if isinstance(input_item, basestring):
            in_name = input_item
        else:
            in_name = input_item[c.name]
        pos = in_name.find("://")
        if pos > 0:
            in_name = in_name[pos + 3:]
        return in_name
def get_appname_from_path(absolute_path):
    """Return the Splunk app name derived from *absolute_path*.

    Thin compatibility wrapper that delegates to
    ``scu.get_appname_from_path``.
    """
    return scu.get_appname_from_path(absolute_path)
class ConfigSchemaHandler(object):
    """Load conf contents and derive per-endpoint division settings.

    Wraps ``sc.Config`` for loading, and turns the client schema's
    ``division`` section into ``DivisionRule`` objects.

    Fix over the original: schema validation used ``assert`` statements
    (one wrapped in ``try/except``), which are stripped under
    ``python -O`` and would silently disable validation. Validation is
    now explicit and always raises ``Exception("Invalid division schema")``
    on a malformed schema.
    """

    _app_name = util.get_appname_from_path(op.abspath(__file__))

    # Division schema keys.
    TYPE = "type"
    TYPE_SINGLE = "single"
    TYPE_MULTI = "multi"
    REFER = "refer"
    SEPARATOR = "separator"

    def __init__(self, meta_configs, client_schema):
        """Load all conf contents and compute division settings.

        :param meta_configs: dict carrying ``c.server_uri`` and
            ``c.session_key``.
        :param client_schema: schema dict; ``c.config`` is passed to
            ``sc.Config`` and ``c.division`` drives the division rules.
        """
        # NOTE(review): user="******" looks like a redacted placeholder
        # (possibly "nobody") — confirm against the original source.
        self._config = sc.Config(splunkd_uri=meta_configs[c.server_uri],
                                 session_key=meta_configs[c.session_key],
                                 schema=json.dumps(client_schema[c.config]),
                                 user="******",
                                 app=ConfigSchemaHandler._app_name)
        self._client_schema = client_schema
        self._all_conf_contents = {}
        self._load_conf_contents()
        self._division_settings = self._divide_settings()

    def get_endpoints(self):
        return self._config.get_endpoints()

    def get_all_conf_contents(self):
        return self._all_conf_contents

    def get_single_conf_contents(self, endpoint):
        return self._all_conf_contents.get(endpoint)

    def get_division_settings(self):
        return self._division_settings

    def _divide_settings(self):
        """Build {endpoint: [DivisionRule, ...]} from the division schema."""
        division_schema = self._client_schema[c.division]
        division_settings = dict()
        for division_endpoint, division_contents in \
                division_schema.iteritems():
            division_settings[division_endpoint] = self._process_division(
                division_endpoint, division_contents)
        return division_settings

    def _load_conf_contents(self):
        self._all_conf_contents = self._config.load()

    def _process_division(self, division_endpoint, division_contents):
        """Validate one endpoint's division schema and build its rules.

        :raises Exception: ``"Invalid division schema"`` when the contents
            are not a dict, a value lacks a valid ``type``, or a MULTI
            value lacks a ``separator``.
        """
        if not isinstance(division_contents, dict):
            raise Exception("Invalid division schema")
        division_metrics = []
        for division_key, division_value in division_contents.iteritems():
            if not self._is_valid_division_value(division_value):
                raise Exception("Invalid division schema")
            division_metrics.append(DivisionRule(
                division_endpoint,
                division_key,
                division_value[self.TYPE],
                division_value.get(self.SEPARATOR),
                division_value.get(self.REFER)))
        return division_metrics

    def _is_valid_division_value(self, division_value):
        """One division entry must carry a known type; MULTI needs a separator."""
        if self.TYPE not in division_value:
            return False
        if division_value[self.TYPE] not in (self.TYPE_SINGLE,
                                             self.TYPE_MULTI):
            return False
        if division_value[self.TYPE] == self.TYPE_MULTI:
            return self.SEPARATOR in division_value
        return True
class TaConfig(object):
    """Build per-input task configurations for a modular input.

    Loads inputs / configs / global settings via ``th.get_all_conf_contents``
    (cached after the first call), configures logging, and materializes one
    task config per (selected) input stanza.
    """

    # Hostname and app name are resolved once at class-definition time.
    _current_hostname = socket.gethostname()
    _appname = util.get_appname_from_path(op.abspath(__file__))

    def __init__(self, meta_config, client_schema, log_suffix=None,
                 stanza_name=None, input_type=None, single_instance=True):
        """Configure logging and load task configs on construction.

        :param meta_config: dict carrying at least ``c.server_uri`` and
            ``c.session_key``.
        :param client_schema: client schema dict passed through to
            ``th.get_all_conf_contents``.
        :param log_suffix: together with ``stanza_name``, selects a
            per-stanza logger name (see ``set_logging``).
        :param stanza_name: stanza this instance serves (multi-instance mode).
        :param input_type: modular-input type used to filter inputs.
        :param single_instance: True when one process serves all stanzas.
        """
        self._meta_config = meta_config
        self._stanza_name = stanza_name
        self._input_type = input_type
        self._log_suffix = log_suffix
        self._single_instance = single_instance
        self._task_configs = []
        self._client_schema = client_schema
        self._server_info = sc.ServerInfo(meta_config[c.server_uri],
                                          meta_config[c.session_key])
        self._all_conf_contents = {}
        # NOTE(review): ``_get_division_settings`` is assigned but never read
        # in this class — looks like a leftover, confirm before removing.
        self._get_division_settings = {}
        # Logging must be reset before task loading so load-time messages
        # land in the per-stanza log file.
        self.set_logging()
        self._load_task_configs()

    def is_shc_member(self):
        return self._server_info.is_shc_member()

    def is_search_head(self):
        return self._server_info.is_search_head()

    def is_single_instance(self):
        return self._single_instance

    def get_meta_config(self):
        return self._meta_config

    def get_task_configs(self):
        return self._task_configs

    def get_all_conf_contents(self):
        """Return (inputs, configs, global_settings), loading once and caching."""
        if self._all_conf_contents:
            return self._all_conf_contents.get(c.inputs), \
                   self._all_conf_contents.get(c.all_configs), \
                   self._all_conf_contents.get(c.global_settings)
        inputs, configs, global_settings = th.get_all_conf_contents(
            self._meta_config[c.server_uri],
            self._meta_config[c.session_key],
            self._client_schema,
            self._input_type)
        self._all_conf_contents[c.inputs] = inputs
        self._all_conf_contents[c.all_configs] = configs
        self._all_conf_contents[c.global_settings] = global_settings
        return inputs, configs, global_settings

    def set_logging(self):
        """Point logging at a per-stanza file and apply the configured level.

        The level is read from the "logging" entry of the global settings;
        falls back to INFO.
        """
        # The default logger name is "cloud_connect_engine"
        if self._stanza_name and self._log_suffix:
            logger_name = self._log_suffix + "_" + th.format_name_for_file(
                self._stanza_name)
            stulog.reset_logger(logger_name)
        inputs, configs, global_settings = self.get_all_conf_contents()
        log_level = "INFO"
        for item in global_settings.get("settings"):
            if item.get(c.name) == "logging" and item.get("loglevel"):
                log_level = item["loglevel"]
                break
        stulog.set_log_level(log_level)
        stulog.logger.info("Set log_level={}".format(log_level))
        stulog.logger.info("Start {} task".format(self._stanza_name))

    def get_input_type(self):
        return self._input_type

    def _get_checkpoint_storage_type(self, config):
        """Resolve the effective checkpoint storage type for one task.

        Users may configure only 'auto' or 'file'; anything else is coerced
        to 'auto'. On a search head, 'auto' is promoted to KV store because
        file checkpoints don't survive there.
        """
        cs_type = config.get(c.checkpoint_storage_type)
        stulog.logger.debug("Checkpoint storage type=%s", cs_type)
        cs_type = cs_type.strip() if cs_type else c.checkpoint_auto
        # Allow user configure 'auto' and 'file' only.
        if cs_type not in (c.checkpoint_auto, c.checkpoint_file):
            stulog.logger.warning(
                "Checkpoint storage type='%s' is invalid, change it to '%s'",
                cs_type, c.checkpoint_auto
            )
            cs_type = c.checkpoint_auto
        if cs_type == c.checkpoint_auto and self.is_search_head():
            stulog.logger.info(
                "Checkpoint storage type is '%s' and instance is "
                "search head, set checkpoint storage type to '%s'.",
                c.checkpoint_auto, c.checkpoint_kv_storage
            )
            cs_type = c.checkpoint_kv_storage
        return cs_type

    def _load_task_configs(self):
        """Materialize one task config per selected input stanza.

        Raises:
            ValueError: when a task's interval is not a positive integer.
        """
        inputs, configs, global_settings = self.get_all_conf_contents()
        if self._input_type:
            inputs = inputs.get(self._input_type)
        # In multi-instance mode only this process's stanza is kept.
        if not self._single_instance:
            inputs = [input for input in inputs
                      if input[c.name] == self._stanza_name]
        all_task_configs = []
        for input in inputs:
            task_config = {}
            task_config.update(input)
            task_config[c.configs] = configs
            task_config[c.settings] = \
                {item[c.name]: item for item in global_settings["settings"]}
            # Single-instance mode reads the interval from
            # "collection_interval" instead of the stanza interval.
            if self.is_single_instance():
                collection_interval = "collection_interval"
                task_config[c.interval] = task_config.get(collection_interval)
            task_config[c.interval] = int(task_config[c.interval])
            if task_config[c.interval] <= 0:
                raise ValueError(
                    "The interval value {} is invalid."
                    " It should be a positive integer".format(
                        task_config[c.interval]))
            task_config[c.checkpoint_storage_type] = \
                self._get_checkpoint_storage_type(task_config)
            task_config[c.appname] = TaConfig._appname
            task_config[c.mod_input_name] = self._input_type
            task_config[c.stanza_name] = task_config[c.name]
            all_task_configs.append(task_config)
        self._task_configs = all_task_configs

    # Override this method if some transforms or validations needs to be done
    # before task_configs is exposed
    def process_task_configs(self, task_configs):
        pass