def _create_state_store(self, meta_config, storage_type, app_name):
    stulog.logger.debug('Got checkpoint storage type=%s', storage_type)
    if storage_type == c.checkpoint_kv_storage:
        collection_name = self._get_collection_name()
        stulog.logger.debug(
            'Creating KV state store, collection name=%s', collection_name)
        return ss.get_state_store(
            meta_config,
            appname=app_name,
            collection_name=collection_name,
            use_kv_store=True)

    use_cache_file = self._use_cache_file()
    max_cache_seconds = (
        self._get_max_cache_seconds() if use_cache_file else None)
    stulog.logger.debug(
        'Creating file state store, use_cache_file=%s, max_cache_seconds=%s',
        use_cache_file, max_cache_seconds)
    return ss.get_state_store(
        meta_config, app_name, use_cached_store=use_cache_file)

def create_state_store(meta_configs, appname, server_info):
    if _use_kv_store(server_info):
        store = ss.get_state_store(
            meta_configs, appname, appname.lower(), use_kv_store=True)
        logger.info("Use KVStore to do ckpt")
    else:
        store = ss.get_state_store(meta_configs, appname)
    return store

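# Hedged sketch (not from the source): _use_kv_store is referenced above but
# not defined here. A common rule in Splunk TAs is to require KV store
# checkpointing on search head cluster members, where file checkpoints are
# not replicated across nodes. The is_shc_member() call is an assumption
# about the server_info object's interface.
def _use_kv_store(server_info):
    try:
        return server_info.is_shc_member()
    except Exception:
        # Default to file-based checkpointing if detection fails.
        return False
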
def __init__(self, config, stream):
    self._key = None
    self._ckpt = None
    if scutil.is_true(config.get(tac.use_kv_store)):
        self._store = ss.get_state_store(
            config,
            config[tac.app_name],
            collection_name=aclc.cloudwatch_logs_log_ns,
            use_kv_store=True)
    else:
        self._store = ss.get_state_store(config, config[tac.app_name])
    self._pop_ckpt(config, stream)

def create_state_store(config):
    store = ss.get_state_store(
        config,
        config[tac.app_name],
        collection_name="aws_s3_" + config[asc.data_input],
        use_kv_store=config.get(tac.use_kv_store))
    return store

def __init__(self, task_config):
    """
    :task_config: dict object
    {
        "kafka_topic": [[<topic_info>], ...]
        # <topic_info>: [topic_name, partition, offset, index] or
        # [topic_name, offset, index]
    }
    """
    self._idx_tbl = self._build_index_lookup_tbl(task_config)
    logger.debug("index table=%s", self._idx_tbl)
    self._task_config = task_config

    # Fall back to 1 MB when the setting is missing or not an integer.
    # int(None) raises TypeError rather than ValueError, so catch both.
    max_bytes = self._task_config.get(c.fetch_message_max_bytes)
    try:
        max_bytes = int(max_bytes)
    except (TypeError, ValueError):
        max_bytes = 1024 * 1024
    self._task_config[c.fetch_message_max_bytes] = max_bytes

    self._running = False
    self._stopped = False
    self._store = ss.get_state_store(
        task_config, task_config[c.appname],
        use_kv_store=task_config.get(c.use_kv_store, False))
    self._brokers = b64encode(self._task_config[c.kafka_brokers])

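# Hedged sketch (not from the source): one plausible _build_index_lookup_tbl
# for the task_config documented above, where each <topic_info> is either
# [topic_name, partition, offset, index] or [topic_name, offset, index].
# The key shapes are assumptions for illustration only.
def _build_index_lookup_tbl(self, task_config):
    idx_tbl = {}
    for topic_info in task_config.get("kafka_topic", []):
        if len(topic_info) == 4:
            topic_name, partition, _offset, index = topic_info
            idx_tbl[(topic_name, partition)] = index
        else:
            topic_name, _offset, index = topic_info
            idx_tbl[topic_name] = index
    return idx_tbl
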
def __init__(self, config):
    self._config = config
    self._state_store = ss.get_state_store(
        config,
        config[tac.app_name],
        collection_name="aws_config_rule",
        use_kv_store=config.get(tac.use_kv_store))

def __init__(self, meta_config, task_config):
    self._task_config = task_config
    self._store = ss.get_state_store(
        meta_config,
        task_config[c.appname],
        use_kv_store=self._use_kv_store())
    if isinstance(self._store, ss.CachedFileStateStore):
        stulog.logger.info("State store type is CachedFileStateStore")

def _delete_data_inputs(self, clusters, global_settings,
                        stanzas_to_be_deleted):
    """
    :return: False if something goes wrong, True if everything is good
    """
    has_exception = False
    global_settings = global_settings[c.global_settings]
    store = ss.get_state_store(
        self._meta_configs, self._appname,
        use_kv_store=global_settings.get(c.use_kv_store, False))
    conf_mgr = cm.ConfManager(self._meta_configs[c.server_uri],
                              self._meta_configs[c.session_key],
                              app_name=self._appname)
    kafka_clients = {}
    for stanza in stanzas_to_be_deleted:
        try:
            self._delete_ckpts(stanza, clusters, kafka_clients, store)
            conf_mgr.set_appname(stanza[c.appname])
            conf_mgr.delete_data_input(c.kafka_mod, stanza[c.name])
        except req.ConfNotExistsException:
            pass
        except Exception:
            has_exception = True
            logger.error("Failed to remove data input=%s, error=%s",
                         stanza[c.name], traceback.format_exc())
            continue

        if not has_exception:
            conf_mgr.set_appname(self._appname)
            conf_mgr.delete_stanza(c.myta_cred_conf, stanza[c.kafka_cluster])
    return not has_exception

def __init__(self, config):
    self._config = config
    self._ckpt = ss.get_state_store(
        config,
        c.splunk_app_iwork,
        collection_name="iwork",
        use_kv_store=True)
    self._lock = threading.Lock()
    self._cached = self._ckpt.get_all_states()
    if self._cached is None:
        self._cached = {}

def _handle_exit(signum, frame):
    if kconfig is not None:
        logger.info("Remove file lock")
        store = ss.get_state_store(kconfig._meta_configs, kconfig._appname)
        store.delete_state(FileLock.lock_key)
    logger.info("%s TA is going to exit...", c.ta_name)
    if data_loader is not None:
        data_loader.stop()

def __init__(self, config):
    self._config = config
    key = "{stanza_name}|{metric_name}".format(
        stanza_name=config[ggc.name],
        metric_name=config[gmc.google_metric])
    self._key = base64.b64encode(key)
    self._store = sss.get_state_store(
        config,
        config[ggc.appname],
        collection_name=self._key,
        use_kv_store=config.get(ggc.use_kv_store))
    self._state = self._get_state()

def __init__(self, config):
    self._config = config
    key = "{stanza_name}|{metric_name}".format(
        stanza_name=config[ggc.name],
        metric_name=config[gmc.google_metrics])
    self._key = base64.b64encode(key.encode('utf-8'))
    self._store = sss.get_state_store(
        config,
        config[ggc.appname],
        collection_name=ggc.google_cloud_monitor,
        use_kv_store=config.get(ggc.use_kv_store))
    self._state = self._get_state()

def __init__(self, config, client, account_id):
    self._cli = client
    self._state_store = state_store.get_state_store(
        config,
        config[tac.app_name],
        collection_name="aws_inspector",
        use_kv_store=config.get(tac.use_kv_store))
    self._config = config
    self._finding_arns = []
    self._last_check_at = 0

    region = config[tac.region]
    data_input = config[tac.datainput]
    self._source = '{}:{}:inspector:finding'.format(account_id, region)
    self._source_type = self._config.get(tac.sourcetype, 'aws::inspector')
    self._state_key = base64.b64encode(
        "findings_{}_{}".format(data_input, region))

def _cleanup_checkpoints(tasks, config):
    store = state_store.get_state_store(
        config,
        config[tac.app_name],
        collection_name="aws_inspector",
        use_kv_store=config.get(tac.use_kv_store))
    previous_ckpts = None
    internals = store.get_state("internals")
    if internals:
        previous_ckpts = internals.get('checkpoints')
    else:
        internals = dict()

    # Checkpoint keys that belong to the currently configured tasks.
    valid_ckpts = set(
        [make_assessment_runs_ckpt_key(task) for task in tasks] +
        [make_findings_ckpt_key(task) for task in tasks])

    # Drop checkpoints whose tasks no longer exist.
    if previous_ckpts:
        previous_ckpts = set(previous_ckpts)
        for ckpt in previous_ckpts:
            if ckpt not in valid_ckpts:
                store.delete_state(ckpt)

    internals['checkpoints'] = list(valid_ckpts)
    store.update_state('internals', internals)

def __init__(self, meta_configs, appname):
    self._locked = False
    random.seed(time.time())
    time.sleep(random.uniform(1, 3))
    self._store = ss.get_state_store(meta_configs, appname)
    while 1:
        state = self._store.get_state(self.lock_key)
        # Take the lock if nobody holds it or the holder is stale (>300s).
        if not state or time.time() - state["time"] > 300:
            state = {
                "pid": multiprocessing.current_process().ident,
                "time": time.time(),
            }
            # grab it
            self._store.update_state(self.lock_key, state)
            self._locked = True
            logger.info("Grabbed ckpt.lock")
            break
        else:
            logger.debug("Ckpt lock is held by another mod process")
            time.sleep(1)

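# Hedged companion sketch (not from the source): releasing the lock acquired
# above by deleting its checkpoint entry, mirroring what _handle_exit does
# with store.delete_state(FileLock.lock_key). The method name and the
# pid-ownership check are assumptions for illustration.
def release(self):
    if not self._locked:
        return
    state = self._store.get_state(self.lock_key)
    if state and state["pid"] == multiprocessing.current_process().ident:
        self._store.delete_state(self.lock_key)
        self._locked = False
        logger.info("Released ckpt.lock")
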
def insert_all_emps(config, charts):
    store = ss.get_state_store(config, c.splunk_app_iwork)
    state = store.get_state("batch_insert_done")
    if state:
        logger.info("batch insert already done")
        return

    ckpt = EmployeeDetailLookup(config)
    charts = [{"_key": chart["name"], "value": chart} for chart in charts]
    # Insert in batches of 1000 records.
    start, end, n = 0, 1000, len(charts)
    while 1:
        if end > n:
            end = n
        ckpt.update_in_batch(charts[start:end])
        if end == n:
            break
        start = end
        end += 1000

    store.update_state(
        "batch_insert_done", {"done": True, "timestamp": time.time()})
    logger.info("batch insert done")

def clean_up_ckpt_for_deleted_data_input(tasks):
    if not tasks:
        return

    now_ckpts = {}
    for task in tasks:
        if task[tac.datainput] not in now_ckpts:
            now_ckpts[task[tac.datainput]] = []
        now_ckpts[task[tac.datainput]].append(get_ckpt_key(task))

    store = ss.get_state_store(
        tasks[0],
        tasks[0][tac.app_name],
        collection_name="aws_kinesis",
        use_kv_store=tasks[0][tac.use_kv_store])
    previous_ckpts = store.get_state("data_input_ckpts")
    if previous_ckpts:
        for datainput, ckpt_keys in previous_ckpts.iteritems():
            if datainput not in now_ckpts:
                logger.info(
                    "Detected datainput=%s has been deleted, removing "
                    "its ckpts", datainput)
                for key in ckpt_keys:
                    store.delete_state(key)

    store.update_state("data_input_ckpts", now_ckpts)

def __init__(self, config):
    self._config = config
    self._state_store = ss.get_state_store(
        config,
        config[tac.app_name],
        collection_name="aws_kinesis",
        use_kv_store=config.get(tac.use_kv_store))
    self._key = get_ckpt_key(config)

def __init__(self, config):
    self._config = config
    self._ckpt = ss.get_state_store(config, c.splunk_app_iwork)

def __init__(self, meta_config, task_config):
    self._task_config = task_config
    self._store = ss.get_state_store(
        meta_config,
        task_config[c.appname],
        use_kv_store=self._use_kv_store())