def _index_compliance_summary(self, rule_name):
    writer = self._config[tac.event_writer]
    sourcetype = self._config.get(tac.sourcetype, "aws:config:rule")
    response = self._client.get_compliance_summary_by_config_rule(
        ConfigRuleName=rule_name)
    if not tacommon.is_http_ok(response):
        logger.error(
            "Failed to collect compliance summary for "
            "rule=%s, errorcode=%s",
            rule_name, tacommon.http_code(response))
        return

    summary = response.get("ComplianceSummary")
    if not summary:
        return

    evt_time = tacommon.total_seconds(
        summary["ComplianceSummaryTimestamp"])
    summary["ComplianceSummaryTimestamp"] = "{}".format(
        summary["ComplianceSummaryTimestamp"])
    summary["ConfigRuleName"] = rule_name
    event = writer.create_event(
        index=self._config.get(tac.index, "default"),
        host=self._config.get(tac.host, ""),
        source=self._source_summary,
        sourcetype=sourcetype,
        time=evt_time,
        unbroken=False,
        done=False,
        events=summary)
    writer.write_events((event,))

def check_sqs_response(content):
    """
    Check whether an operation on an SQS queue succeeded.

    :param content: response returned by the SQS operation
    :raises SQSCollectorException: if the response indicates a failure
    """
    if not is_http_ok(content):
        err = json.dumps(content.get('Failed') or content)
        raise SQSCollectorException(err)

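# Hedged usage sketch (not part of the original module): one way check_sqs_response
# might guard a batch SQS call. The queue URL, entry id, and receipt handle are
# illustrative assumptions.
def _example_delete_with_check(sqs_client, queue_url, receipt_handle):
    resp = sqs_client.delete_message_batch(
        QueueUrl=queue_url,
        Entries=[{'Id': '1', 'ReceiptHandle': receipt_handle}],
    )
    # Raises SQSCollectorException when the HTTP status is not OK; the error
    # text includes the response's 'Failed' entries when they are present.
    check_sqs_response(resp)
    return resp
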
def publish_message(self, topic_arn, subject, message):
    resp = self._client.publish(
        TargetArn=topic_arn,
        Subject=subject,
        Message=message,
        MessageStructure='string',
    )
    if not is_http_ok(resp):
        raise SNSPublisherError(json.dumps(resp))
    return resp

def _collect_run(self, arn):
    response = self._cli.describe_assessment_runs(assessmentRunArns=[arn])
    if not tacommon.is_http_ok(response):
        return None
    run = response.get('assessmentRuns')[0]
    template_arn = run['assessmentTemplateArn']
    package_arns = run['rulesPackageArns']

    response = self._cli.describe_assessment_templates(
        assessmentTemplateArns=[template_arn])
    if not tacommon.is_http_ok(response):
        return None
    template = response.get('assessmentTemplates')[0]

    response = self._cli.describe_rules_packages(
        rulesPackageArns=package_arns)
    if not tacommon.is_http_ok(response):
        return None
    packages = response.get('rulesPackages')

    run['assessmentTemplate'] = template
    run['rulesPackages'] = packages
    return run

def _last_evaluation_times(self, rule_name=""):
    writer = self._config[tac.event_writer]
    response = self._client.describe_config_rule_evaluation_status(
        ConfigRuleNames=[rule_name])
    if not tacommon.is_http_ok(response):
        logger.error(
            "Failed to describe config rule evaluation status, "
            "errorcode=%s", tacommon.http_code(response))
        return None

    statuses = response.get("ConfigRulesEvaluationStatus")
    if not statuses:
        return None

    dkeys = [
        "LastSuccessfulInvocationTime", "LastFailedInvocationTime",
        "LastSuccessfulEvaluationTime", "LastFailedEvaluationTime",
        "FirstActivatedTime"
    ]
    sourcetype = self._config.get(tac.sourcetype, "aws:config:rule")
    last_times, events = [], []
    for status in statuses:
        if status.get("LastSuccessfulEvaluationTime"):
            evt_time = tacommon.total_seconds(
                status["LastSuccessfulEvaluationTime"])
        else:
            evt_time = ""

        ckpt_time = self._ckpt.last_evaluation_time(
            self._config[tac.region], self._config[tac.datainput],
            status["ConfigRuleName"])
        last_times.append((evt_time, ckpt_time))
        if ckpt_time == evt_time:
            continue

        for key in dkeys:
            if key in status:
                status[key] = "{}".format(status[key])

        event = writer.create_event(
            index=self._config.get(tac.index, "default"),
            host=self._config.get(tac.host, ""),
            source=self._source_status,
            sourcetype=sourcetype,
            time=evt_time,
            unbroken=False,
            done=False,
            events=status,
        )
        events.append(event)

    if events:
        writer.write_events(events)
    return last_times

def lambda_functions(config):
    client = get_lambda_client(config)
    params = {'MaxItems': 1000}
    while True:
        resp = client.list_functions(**params)
        if not is_http_ok(resp):
            logger.error('Fetch Lambda functions failed',
                         response=resp.get('Failed', resp))
        for func in resp.get('Functions', []):
            func[tac.region] = config[tac.region]
            yield json.dumps(func)
        try:
            params['Marker'] = resp['NextMarker']
        except Exception:
            break

def _index_compliance_details(self, rule_name):
    sourcetype = self._config.get(tac.sourcetype, "aws:config:rule")
    writer = self._config[tac.event_writer]
    next_token = ""
    while 1:
        response = self._client.get_compliance_details_by_config_rule(
            ConfigRuleName=rule_name, NextToken=next_token)
        if not tacommon.is_http_ok(response):
            logger.error(
                "Failed to collect compliance details for "
                "rule=%s, errorcode=%s",
                rule_name, tacommon.http_code(response))
            return

        compliances = response.get("EvaluationResults")
        if not compliances:
            return

        events = []
        for compliance in compliances:
            evt_time = compliance["ResultRecordedTime"]
            compliance["ResultRecordedTime"] = "{}".format(evt_time)
            compliance["ConfigRuleInvokedTime"] = "{}".format(
                compliance["ConfigRuleInvokedTime"])
            compliance["EvaluationResultIdentifier"][
                "OrderingTimestamp"] = "{}".format(
                    compliance["EvaluationResultIdentifier"]
                    ["OrderingTimestamp"])
            evt_time = tacommon.total_seconds(evt_time)
            event = writer.create_event(
                index=self._config.get(tac.index, "default"),
                host=self._config.get(tac.host, ""),
                source=self._source_detail,
                sourcetype=sourcetype,
                time=evt_time,
                unbroken=False,
                done=False,
                events=compliance)
            events.append(event)
        writer.write_events(events)

        next_token = response.get("NextToken")
        if not next_token:
            return

def _do_one_dimension(self, client, dimension, start_time, end_time):
    if start_time == end_time:
        return None

    # perf_start = time.time()
    logger.debug(
        "Collect dimensions=%s, start_time=%s, end_time=%s for datainput=%s",
        dimension, start_time, end_time, self._config[tac.datainput])

    for i in xrange(4):
        try:
            response = client.get_metric_statistics(
                Namespace=self._config[acc.metric_namespace],
                MetricName=dimension["MetricName"],
                Dimensions=dimension["Dimensions"],
                StartTime=start_time,
                EndTime=end_time,
                Period=self._config[acc.period],
                Statistics=self._config[acc.statistics])
        except Exception as ex:
            if "Rate exceeded" in ex.message:
                tacommon.sleep_until(random.randint(20, 60), self.stopped)
            logger.exception(
                "Failed to get metrics for namespace=%s, dimension=%s,"
                "metric_name=%s, datainput=%s, start_time=%s, "
                "end_time=%s.", self._config[acc.metric_namespace],
                dimension["Dimensions"], dimension["MetricName"],
                self._config[tac.datainput], start_time, end_time)
            self._handle_too_many_datapoints(ex, dimension)
            tacommon.sleep_until(2**(i + 1), self.stopped)
        else:
            break
    else:
        return None

    if not tacommon.is_http_ok(response):
        logger.error(
            "Failed to get metrics for namespace=%s, dimension=%s, "
            "metric_name=%s, errorcode=%s.",
            self._config[acc.metric_namespace], dimension["Dimensions"],
            dimension["MetricName"], tacommon.http_code(response))
        return None

    # logger.debug("one_dimension_cost=%s", time.time() - perf_start)
    return response.get("Datapoints")

def _list_rules(self, conf_info):
    aws_account = self.callerArgs.data['aws_account'][0]
    aws_iam_role = self.callerArgs.data.get('aws_iam_role', [None])[0]
    region_name = self.callerArgs.data['aws_region'][0]

    scheme, host, port = get_splunkd_access_info()
    service = Service(scheme=scheme, host=host, port=port,
                      token=self.getSessionKey())
    config = ConfigManager(service)
    factory = AWSCredentialsProviderFactory(config)
    provider = factory.create(aws_account, aws_iam_role)
    credentials_cache = AWSCredentialsCache(provider)
    client = credentials_cache.client('config', region_name)

    all_rules = []
    next_token = ""
    while 1:
        try:
            response = client.describe_config_rules(NextToken=next_token)
        except Exception as e:
            logger.error('Failed to describe config rules')
            msg = str(e.message)
            logger.error(msg)
            raise RestError(400, 'Failed to describe config rules: ' + msg)

        if not tacommon.is_http_ok(response):
            logger.error("Failed to describe config rules, errorcode=%s",
                         tacommon.http_code(response))
            return

        rules = response.get("ConfigRules")
        if not rules:
            break

        all_rules.extend(rule["ConfigRuleName"] for rule in rules)
        next_token = response.get("NextToken")
        if not next_token:
            break

    for rule in all_rules:
        conf_info[rule].append("rule_names", rule)

def list_metrics_by_metric_name(client, namespace, metric_name,
                                dimension_regex_filters):
    all_metrics = []
    filtered = False
    params = {
        "Namespace": namespace,
    }
    if metric_name:
        params["MetricName"] = metric_name

    num_of_metrics_fetched = 0
    while 1:
        response = client.list_metrics(**params)
        if not tacommon.is_http_ok(response):
            logger.error("Failed to list_metrics for %s, error=%s",
                         params, response)
            break

        for metric in response["Metrics"]:
            if not metric["Dimensions"]:
                continue
            num_of_metrics_fetched += 1
            if not match_dimension(metric, dimension_regex_filters):
                continue
            del metric["Namespace"]
            all_metrics.append(metric)

        token = response.get("NextToken")
        if token is None:
            break
        else:
            params["NextToken"] = token

    if num_of_metrics_fetched and not all_metrics:
        logger.warning("All dimensions of this metric were filtered out.",
                       metric_name=metric_name,
                       filtered_metrics_num=num_of_metrics_fetched,
                       ErrorCode="ConfigurationError")
        filtered = True
    return (all_metrics, filtered)

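# Hedged usage sketch (an assumption, not code from the original module): one way
# list_metrics_by_metric_name might be called against CloudWatch. The client setup
# and the shape of the regex filter dict are illustrative; the real filter format
# is whatever match_dimension(), defined elsewhere, expects.
def _example_list_ec2_cpu_metrics(region_name="us-west-2"):
    import boto3

    cloudwatch = boto3.client("cloudwatch", region_name=region_name)
    # Hypothetical filter: keep only InstanceId dimensions matching the pattern.
    regex_filters = {"InstanceId": r"i-0[0-9a-f]+"}
    metrics, filtered = list_metrics_by_metric_name(
        cloudwatch, "AWS/EC2", "CPUUtilization", regex_filters)
    if filtered:
        # every fetched dimension was rejected by the regex filters
        logger.warning("All CPUUtilization dimensions were filtered out.")
    return metrics
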
def _get_instance_metadata(self, client):
    result_key = "Reservations"
    instances_data_list = []
    params = {"DryRun": False}
    try:
        paginator = client.get_paginator('describe_instances')
        for response in paginator.paginate(**params):
            logger.debug(response)
            if not tacommon.is_http_ok(response):
                logger.error("Failed to describe instances, error=%s",
                             response)
                break
            if not response.get(result_key):
                logger.error("describe_instances returned no result")
                break
            for item in response[result_key]:
                for instance in item["Instances"]:
                    instance_data = dict()
                    # add Tags, a list of Key/Value dictionaries
                    instance_data['Tags'] = instance['Tags']
                    for name in self._metadata_name_list:
                        # skip names that are not present on the instance object
                        if name in instance:
                            instance_data[name] = instance[name]
                    instances_data_list.append(instance_data)
    except Exception:
        # When we encounter any error, just return what we have
        logger.exception("Failed to describe instances.")
    return instances_data_list

def _list_findings_by_time_window(self, begin, end):
    # boto3 does not accept unix timestamps on Windows,
    # so convert them to datetime objects by hand
    begin = datetime.datetime.utcfromtimestamp(begin)
    end = datetime.datetime.utcfromtimestamp(end)
    params = {
        "filter": {
            'creationTimeRange': {
                'beginDate': begin,
                'endDate': end
            }
        }
    }
    arns = []
    while True:
        response = self._cli.list_findings(**params)
        if not tacommon.is_http_ok(response):
            return None
        items = response.get('findingArns')
        arns.extend(items)
        next_token = response.get("nextToken")
        if next_token is None:
            return arns
        params["nextToken"] = next_token

def _do_index_data(self):
    if self._credentials.need_retire():
        self._client, self._credentials = tacommon.get_service_client(
            self._config, tac.config)

    next_token = ""
    rule_names = self._config.get("rule_names", [])
    throttling = 0
    while 1:
        try:
            response = self._client.describe_config_rules(
                ConfigRuleNames=rule_names, NextToken=next_token)
        except ClientError as e:
            error_code = e.response["Error"].get("Code", "Unknown")
            error_message = e.response["Error"].get("Message", "Unknown")
            if error_code == "NoSuchConfigRuleException" and \
                    error_message.startswith('The ConfigRule'):
                invalid_rule = error_message.split("'")[1]
                rule_names.remove(invalid_rule)
                logger.info("Ignore invalid rule and retry.",
                            invalid_rule=invalid_rule)
                if rule_names:
                    next_token = ""
                    continue
                else:
                    # An empty rule_names would list all rules,
                    # so return directly to avoid that.
                    logger.info("No valid config rule found.",
                                region=self._config[tac.region],
                                datainput=self._config[tac.datainput])
                    return
            elif error_code == "ThrottlingException":
                logger.info(
                    "Throttling happened when DescribeConfigRules, "
                    "use exponential back off logic.")
                time.sleep(2**(throttling + 1))
                throttling += 1
                # retry the request after backing off
                continue
            else:
                logger.exception(
                    "Unknown error code returned when describing config rules.",
                    region=self._config[tac.region],
                    datainput=self._config[tac.datainput])
                return
        except Exception:
            logger.exception(
                "Unknown exception happened when describing config rules.",
                region=self._config[tac.region],
                datainput=self._config[tac.datainput])
            return

        if not tacommon.is_http_ok(response):
            logger.error("Failed to describe config rules, errorcode=%s",
                         tacommon.http_code(response))
            return

        rules = response.get("ConfigRules")
        if not rules:
            return

        self._index_rules(rules)
        next_token = response.get("NextToken")
        if not next_token:
            return

def _collect_findings(self, arns):
    response = self._cli.describe_findings(findingArns=arns)
    if not tacommon.is_http_ok(response):
        return None
    return response.get('findings')

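# Hedged sketch (an assumption about how these helpers compose, not code from the
# original module): list Inspector finding ARNs for a time window, then describe
# them in small batches. The batch size of 10 is illustrative, not a documented limit.
def _example_collect_window(self, begin, end, batch_size=10):
    arns = self._list_findings_by_time_window(begin, end)
    if not arns:
        return []
    findings = []
    for start in range(0, len(arns), batch_size):
        batch = self._collect_findings(arns[start:start + batch_size])
        if batch:
            findings.extend(batch)
    return findings
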
def _do_filter_invalid_dimensions(describe_func, result_key, instance_key,
                                  id_key, metrics, tag):
    exists = set()
    params = {"DryRun": False}
    while 1:
        try:
            response = describe_func(**params)
        except Exception:
            # When we encounter any error, just return what we have
            logger.exception("Failed to describe instances for %s.", tag)
            return metrics, []

        if not tacommon.is_http_ok(response):
            logger.error("Failed to describe instances for %s, error=%s",
                         tag, response)
            return metrics, []

        if not response.get(result_key):
            break

        for instance in response[result_key]:
            if instance_key:
                for dim in instance[instance_key]:
                    exists.add(dim[id_key])
            else:
                exists.add(instance[id_key])

        token = response.get("NextToken")
        if token is None:
            break
        else:
            params["NextToken"] = token

    new_metrics, removed = [], []

    def _should_keep(metric):
        for dimension in metric["Dimensions"]:
            if dimension["Name"] == id_key:
                return dimension["Value"] in exists
        # if no dimension matches id_key, do not filter the metric out
        return True

    for m in metrics:
        if _should_keep(m):
            new_metrics.append(m)
        else:
            removed.append(m)

    if logger.isEnabledFor(logging.INFO):
        logger.info("%s total=%d, valid=%d, filtered=%d", tag, len(metrics),
                    len(new_metrics), len(metrics) - len(new_metrics))
        i, total = 0, len(removed)
        while 1:
            filtered_ids = ",".join(d["Value"] for m in removed[i:i + 100]
                                    for d in m["Dimensions"])
            if filtered_ids:
                logger.info("filtered_ids=%s", filtered_ids)
            if i >= total:
                break
            i += 100

    return new_metrics, removed

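# Hedged usage sketch (an assumption, not from the original module): filtering
# CloudWatch metrics down to EC2 instances that still exist. The key names mirror
# the EC2 DescribeInstances response shape; the client setup is illustrative.
def _example_filter_ec2_metrics(metrics, region_name="us-east-1"):
    import boto3

    ec2 = boto3.client("ec2", region_name=region_name)
    valid_metrics, _removed = _do_filter_invalid_dimensions(
        ec2.describe_instances,  # supports the DryRun/NextToken params used above
        "Reservations",          # result_key
        "Instances",             # instance_key
        "InstanceId",            # id_key
        metrics,
        "ec2_instances",         # tag used only for logging
    )
    return valid_metrics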