def __init__(self, config):
    """Initialize the S3 data collector.

    :param config: dict with keys:
        bucket_name, host, prefix, after, key_character_set, secret_key,
        checkpoint_dir, server_uri, session_key, use_kv_store, is_secure,
        proxy_hostname, proxy_port, proxy_username, proxy_password,
        data_loader
    """
    self._config = config
    # guards shared state across collector threads
    self._lock = threading.Lock()
    # coerce bucket name to str — presumably it can arrive as a
    # non-str type (unicode/int) from the conf layer; confirm upstream
    self._config[asc.bucket_name] = str(self._config[asc.bucket_name])
    self._stopped = False
    self._credentials_service = tacommon.create_credentials_service(
        self._config[tac.server_uri], self._config[tac.session_key])
    # Set proxy before getting credential by boto3
    # NOTE(review): this runs after create_credentials_service; OK only if
    # that call defers actual credential loading — confirm
    tacommon.set_proxy_env(self._config)
def _gather_results(metric_name):
    """List CloudWatch metrics for one metric name and push the
    (metrics, filtered) pair onto the shared queue ``q``.

    Captures ``config``, ``q``, ``dimension_regex_filters`` and ``logger``
    from the enclosing scope. Failures are logged, never raised.
    """
    tacommon.set_proxy_env(config)
    try:
        creds = tacommon.load_credentials_from_cache(
            config[tac.server_uri],
            config[tac.session_key],
            config[tac.aws_account],
            config[tac.aws_iam_role])
        cloudwatch = boto3.client(
            "cloudwatch",
            region_name=config[tac.region],
            aws_access_key_id=creds.aws_access_key_id,
            aws_secret_access_key=creds.aws_secret_access_key,
            aws_session_token=creds.aws_session_token)
        metrics, filtered = list_metrics_by_metric_name(
            cloudwatch, config[acc.metric_namespace], metric_name,
            dimension_regex_filters)
        # dimension validation can be switched off via the environment
        if scutil.is_true(os.environ.get("cloudwatch_filter", "true")):
            metrics = filter_invalid_dimensions(
                config[acc.metric_namespace], metrics, config)
        q.put((metrics, filtered))
    except Exception:
        logger.exception(
            "Failed to list metric.",
            datainput=config[tac.datainput],
            metric_name=metric_name,
            namespace=config[acc.metric_namespace],
            region=config[tac.region])
def get_tasks(self):
    """Build the list of config-rule collection tasks.

    Loads tasks and settings from conf files, sets logging level and
    proxy, then keeps only tasks whose credentials validate; invalid
    tasks are logged and skipped.

    :returns: tasks post-processed by ``tacommon.handle_hec``.
    """
    conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                              self.metas[tac.session_key])
    tasks = self._get_config_rule_tasks(conf_mgr)
    settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                             do_reload=False)
    proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
    # set proxy here for validating credentials
    tacommon.set_proxy_env(proxy_info)
    set_log_level(settings[tac.log_stanza][tac.log_level])
    valid_tasks = []
    for task in tasks:
        try:
            # validate credentials
            tacommon.get_service_client(task, tac.config)
            task[tac.log_level] = settings[tac.log_stanza][tac.log_level]
            task.update(settings[tac.global_settings])
            task.update(proxy_info)
            valid_tasks.append(task)
        except Exception:
            # was `as e` but the exception object was never used;
            # logger.exception captures the traceback anyway
            input_name = scutil.extract_datainput_name(task[tac.name])
            logger.exception(
                'Failed to load credentials, ignore this input.',
                datainput=input_name)
    return tacommon.handle_hec(valid_tasks, "aws_config_rule")
def handleList(self, confInfo):
    """List SQS queue URLs, attaching the queue name as each entry's label."""
    # Set proxy for boto3
    tacommon.set_proxy_env(pc.get_proxy_info(self.getSessionKey()))
    for url in self._list_queues():
        # the last path segment of a queue URL is the queue name
        queue_name = url.rsplit('/', 1)[-1]
        confInfo[url].append('label', queue_name)
def _get_region_host(self, session_key, payload):
    """Resolve the AWS region and S3 endpoint host for a bucket.

    :param session_key: Splunk session key.
    :param payload: dict with aws_account, aws_iam_role, bucket_name.
    :returns: (region, host_name) tuple.
    """
    config = pc.get_proxy_info(session_key)
    tacommon.set_proxy_env(config)
    credentials_service = tacommon.create_credentials_service(
        get_splunkd_uri(), session_key)
    credentials = credentials_service.load(
        payload[tac.aws_account],
        payload[tac.aws_iam_role],
    )
    config[tac.key_id] = credentials.aws_access_key_id
    config[tac.secret_key] = credentials.aws_secret_access_key
    config['aws_session_token'] = credentials.aws_session_token
    config[asc.bucket_name] = payload[asc.bucket_name]
    config[asc.host_name] = tac.CATEGORY_HOST_NAME_MAP[
        credentials.category]
    if config[asc.host_name] == asc.default_host:
        # default endpoint: look the bucket's region up, then resolve
        # the region-specific hostname from the bundled endpoints file
        region = get_region_for_bucketname(config)
        with open(ENDPOINTS_PATH, 'r') as endpoints_file:
            endpoints = json.load(endpoints_file)
        host_name = EndpointResolver(endpoints).construct_endpoint(
            's3', region).get('hostname', asc.default_host)
    else:
        # non-default endpoint: parse the region out of the hostname,
        # e.g. s3.us-west-2.amazonaws.com or s3-us-west-2.amazonaws.com
        # (fixed: the final dot was unescaped and matched any character)
        pattern = r's3[.-]([\w-]+)\.amazonaws\.com'
        m = re.search(pattern, config[asc.host_name])
        region = m.group(1) if m else 'us-east-1'
        host_name = config[asc.host_name]
    return (region, host_name)
def handleList(self, confInfo):
    """List SQS queue names, keyed and valued by the queue name."""
    # Set proxy for boto3
    proxy_settings = pc.get_proxy_info(self.getSessionKey())
    tacommon.set_proxy_env(proxy_settings)
    for url in self._list_queues():
        # queue name is the final path segment of the queue URL
        name = url.split('/')[-1]
        confInfo[name].append('sqs_queue', name)
def validate_keys(session_key, **account_info):
    """Check an AWS key pair by calling STS get-caller-identity.

    :returns: False only when AWS rejects the token as invalid
        (InvalidClientTokenId); True otherwise, including on other
        API errors.
    """
    # Set proxy
    set_proxy_env(pc.get_proxy_info(session_key))
    try:
        _get_caller_identity(account_info)
    except ClientError as err:
        if err.response['Error']['Code'] == 'InvalidClientTokenId':
            return False
        # any other error code falls through and is treated as valid
    return True
def handleList(self, conf_info):
    """Validate required caller args, then list AWS Config rules."""
    logger.info("start listing config rules")
    for param in self.valid_params:
        if self.callerArgs and self.callerArgs.get(param):
            continue
        logger.error('Missing "%s"', param)
        raise Exception('Missing "{}"'.format(param))
    # Set proxy for boto3
    tacommon.set_proxy_env(pc.get_proxy_info(self.getSessionKey()))
    self._list_rules(conf_info)
    logger.info("end of listing config rules")
def _prepare(self, splunkd_uri, session_key, aws_account, region,
             topic_name):
    """Refresh the cached SNS client and topic ARN when the target changes.

    :param splunkd_uri: splunkd URI for credential lookup.
    :param session_key: Splunk session key.
    :param aws_account: AWS account stanza name.
    :param region: AWS region.
    :param topic_name: SNS topic name to resolve.
    """
    # Set proxy
    proxy = pc.get_proxy_info(session_key)
    tacommon.set_proxy_env(proxy)
    # compute the change flag before reassigning self._aws_account —
    # the old code re-tested the account after assignment, which made
    # that second test dead and duplicated the topic-ARN refresh logic
    account_changed = self._aws_account != aws_account
    if account_changed:
        self._aws_account = aws_account
        self._client = get_aws_sns_client(splunkd_uri, session_key,
                                          aws_account, region)
    # re-resolve the topic ARN when either the account or topic changed
    if account_changed or self._topic_name != topic_name:
        self._topic_name = topic_name
        self._topic_arn = get_aws_sns_topic_arn(self._client, topic_name)
def __init__(self, config):
    """Initialize the CloudWatch metric collector.

    :param config: dict like {
        "polling_interval": 60,
        "sourcetype": yyy,
        "index": zzz,
        "region": xxx,
        "key_id": aws key id,
        "secret_key": aws secret key,
        "period": 60,
        "metric_namespace": namespace,
        "statistics": statistics,
        "metric_configs": [
            {
                "Dimensions": [{"Value": "i-8b9eaa2f",
                                "Name": "InstanceId"}],
                "MetricName": metric_name,
            },
        ],
    }
    """
    # export proxy env vars before any boto3 client is created
    tacommon.set_proxy_env(config)
    self._config = config
    self._stopped = False
    self._lock = threading.Lock()
    self._ckpt = ackpt.CloudWatchCheckpointer(config)
    # events are tagged source="<region>:<namespace>"
    self._source = "{}:{}".format(config[tac.region],
                                  config[acc.metric_namespace])
    # NOTE(review): "/" is true (float) division on Python 3 but floor
    # division for ints on Python 2 — confirm which count is intended
    self._max_api_saver_count = \
        self._config[acc.max_api_saver_time] / self._config[acc.period]
    self._client = CloudWatchClient(config)
    # fields attached to every emitted record
    self._supplemental_data = {
        acc.period: config[acc.period],
        tac.account_id: self._client.get_account_id(),
    }
    self._instances_data_list = []
    # EC2 instance attributes collected as metadata
    self._metadata_name_list = [
        'ImageId', 'InstanceId', 'InstanceType', 'PrivateIpAddress',
        'PublicIpAddress', 'PrivateDnsName', 'PublicDnsName',
        'Architecture'
    ]
def handleList(self, confInfo):
    """Report VisibilityTimeout and RedrivePolicy for one SQS queue.

    Responds with HTTP 400 when the AWS API call fails.
    """
    # Set proxy for boto3
    tacommon.set_proxy_env(pc.get_proxy_info(self.getSessionKey()))
    args = self.callerArgs.data
    try:
        attrs = query_queue_attributes(
            self.getSessionKey(),
            args['aws_account'][0],
            args.get('aws_iam_role', [None])[0],
            args['aws_region'][0],
            args['sqs_queue_url'][0],
        )
    except ClientError as exc:
        RestHandlerError.ctl(400, msgx=exc)
        return
    confInfo['Attributes']['VisibilityTimeout'] = attrs.visibility_timeout
    confInfo['Attributes']['RedrivePolicy'] = attrs.redrive_policy
def handleList(self, confInfo):
    """List the S3 buckets visible to the selected AWS account.

    Emits an empty bucket_name entry and returns when no account is
    selected. Raises RestError(400) when credential loading fails.
    """
    aws_account = None
    aws_account_category = tac.RegionCategory.COMMERCIAL
    if self.callerArgs['aws_account'] is not None:
        aws_account = self.callerArgs['aws_account'][0]
    aws_iam_role = None
    if self.callerArgs.get('aws_iam_role') is not None:
        aws_iam_role = self.callerArgs['aws_iam_role'][0]
    if not aws_account:
        confInfo['bucket_name'].append('bucket_name', [])
        return
    # Set proxy for boto3
    proxy = pc.get_proxy_info(self.getSessionKey())
    tacommon.set_proxy_env(proxy)
    cred_service = tacommon.create_credentials_service(
        get_splunkd_uri(), self.getSessionKey())
    try:
        cred = cred_service.load(aws_account, aws_iam_role)
        aws_account_category = cred.category
    except ClientError as err:
        # str(err) rather than err.message: BaseException.message was
        # removed in Python 3 and str(err) works on both 2 and 3
        raise RestError(400, str(err) + '. Please make sure the AWS Account and Assume Role are correct.')
    host_name = tac.CATEGORY_HOST_NAME_MAP[aws_account_category]
    connection = connect_s3(
        cred.aws_access_key_id,
        cred.aws_secret_access_key,
        self.getSessionKey(),
        host_name,
        security_token=cred.aws_session_token,
    )
    # bucket listing can hang on bad endpoints; cap it at 25 seconds
    rs = timed(25, all_buckets, [], (connection,))
    for bucket in [r.name for r in rs]:
        confInfo[bucket].append('bucket_name', bucket)
        confInfo[bucket].append('host_name', host_name)
def _prepare(self):
    """Resolve the configured AWS account and build the SQS client.

    Raises when the datainput references an account that is not in the
    loaded configuration.
    """
    account_name = self.input_item['aws_account']
    if account_name not in self.configs[Configs.ACCOUNTS]:
        raise Exception('AWS account not found for datainput')
    # Set Logging
    set_log_level(
        self.configs[Configs.SETTINGS_LOGGING]['logging']['level'])
    logger.debug('Running Started', datainput=self.input_name)
    # Set Proxy
    tacommon.set_proxy_env(self.configs[Configs.SETTINGS_PROXY])
    self.aws_account = self.configs[Configs.ACCOUNTS][account_name]
    self.sqs_client = get_sqs_client(
        self.input_item['aws_region'],
        self.aws_account.get('key_id'),
        self.aws_account.get('secret_key'),
        self.aws_account.get('token'))
def get_account_id(account, session_key):
    """Return the AWS account id for *account*, or None.

    EC2 IAM-role accounts already carry their account id; for others the
    id is parsed out of the STS caller-identity ARN.

    :param account: dict; may contain 'iam' and 'account_id'.
    :param session_key: Splunk session key used to read proxy settings.
    :returns: account id string, or None when the ARN does not match.
    """
    # we can directly get account_id in EC2 Role
    if account.get('iam') and account.get('account_id'):
        return account.get('account_id')
    # Set proxy
    proxy = pc.get_proxy_info(session_key)
    set_proxy_env(proxy)
    # ARN format: arn:aws[-partition]:iam::<account-id>:...
    arn = _get_caller_identity(account)['Arn']
    # re.search replaces the old findall + len()==1 + unused
    # partition_name unpack; the ^ anchor allows at most one match
    match = re.search(r"^arn:aws(-\S+)?:iam::(\d+):", arn)
    if match:
        return match.group(2)
    return None
def handleList(self, conf_info):
    """List Kinesis stream names for the selected account and region."""
    logger.info("start listing kinesis streams")
    for param in self.valid_params:
        if self.callerArgs and self.callerArgs.get(param):
            continue
        logger.error('Missing "%s"', param)
        raise Exception('Missing "{}"'.format(param))
    aws_account = ""
    if self.callerArgs[tac.account] is not None:
        aws_account = self.callerArgs[tac.account][0]
    aws_iam_role = None
    if self.callerArgs.get(tac.aws_iam_role) is not None:
        aws_iam_role = self.callerArgs[tac.aws_iam_role][0]
    # Set proxy for boto3
    proxy = pc.get_proxy_info(self.getSessionKey())
    tacommon.set_proxy_env(proxy)
    cred_service = tacommon.create_credentials_service(
        get_splunkd_uri(), self.getSessionKey())
    cred = cred_service.load(aws_account, aws_iam_role)
    # reuse the proxy dict as the Kinesis client's config
    proxy.update({
        tac.server_uri: get_splunkd_uri(),
        tac.session_key: self.getSessionKey(),
        tac.aws_account: aws_account,
        tac.aws_iam_role: aws_iam_role,
        tac.region: self.callerArgs[tac.region][0],
        tac.key_id: cred.aws_access_key_id,
        tac.secret_key: cred.aws_secret_access_key,
        'aws_session_token': cred.aws_session_token,
    })
    client = akc.KinesisClient(proxy, logger)
    for stream in client.list_streams():
        conf_info[stream].append("stream_names", stream)
    logger.info("end of listing kinesis streams")
def get_tasks(self):
    """Build description tasks, applying log level and proxy settings.

    :returns: the task list, with sources assigned.
    """
    conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                              self.metas[tac.session_key])
    tasks = self._get_description_tasks(conf_mgr)
    logging_settings = conf_mgr.get_stanza(self.conf_file, tac.log_stanza,
                                           do_reload=False)
    level = logging_settings[tac.log_level]
    # set logging level for our logger
    set_log_level(level)
    proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
    # Set proxy for loading credentials by boto3
    tacommon.set_proxy_env(proxy_info)
    for task in tasks:
        task[tac.log_level] = level
        task.update(proxy_info)
    self._assign_source(tasks)
    return tasks
def get_tasks(self):
    """Build the inspector task list, dropping disabled inputs.

    Also cleans up checkpoints for stale tasks before filtering.

    :returns: enabled tasks post-processed by ``tacommon.handle_hec``.
    """
    conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                              self.metas[tac.session_key])
    stanzas = conf_mgr.all_stanzas(self.task_file, do_reload=False)
    settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                             do_reload=False)
    proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
    # set proxy here for validating credentials
    tacommon.set_proxy_env(proxy_info)
    set_log_level(settings[tac.log_stanza][tac.log_level])
    tasks = self._get_inspector_tasks(stanzas, settings, proxy_info)
    config = dict(self.metas)
    config.update(settings[tac.global_settings])
    _cleanup_checkpoints(tasks, config)
    enabled_tasks = [
        task for task in tasks
        if not scutil.is_true(task.get('disabled'))
    ]
    return tacommon.handle_hec(enabled_tasks, "aws_inspector")