def get_scan_results(request_id):
    config = Config()
    main_account = Account(region=config.aws.region)
    api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)

    request_info = DDB.get_request_data(api_table, request_id)
    if not request_info:
        status_code = 404
        body = {"message": "Request id has not been found."}
    elif request_info['progress'] == request_info['total']:
        status_code = 200
        body = {
            "scan_status": "COMPLETE",
            "scan_results": collect_results(request_info, main_account)
        }
    elif time.time() - request_info['updated'] <= 300:
        status_code = 200
        body = {"scan_status": "IN_PROGRESS"}
    else:
        status_code = 200
        body = {"scan_status": "FAILED"}

    return {
        "statusCode": status_code,
        "body": json.dumps(body, indent=4, default=utility.jsonEncoder)
    }
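# Hedged usage sketch: one way a caller might poll get_scan_results() until a scan
# finishes. The direct invocation and the 10-second poll interval are assumptions
# for illustration; in the real deployment this function is expected to sit behind
# an API Gateway Lambda handler rather than being called in-process.
def wait_for_scan(request_id, poll_interval=10):
    while True:
        response = get_scan_results(request_id)
        body = json.loads(response["body"])
        # stop on 404 or once the scan reaches a terminal state
        if response["statusCode"] != 200 or body.get("scan_status") in ("COMPLETE", "FAILED"):
            return response["statusCode"], body
        time.sleep(poll_interval)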
def create_tickets_securitygroups(self):
    """ Class function to create jira tickets """
    table_name = self.config.sg.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.sg.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, SecurityGroupIssue)
        for issue in issues:
            group_id = issue.issue_id
            group_name = issue.issue_details.name
            group_region = issue.issue_details.region
            group_vpc_id = issue.issue_details.vpc_id
            tags = issue.issue_details.tags
            # issue has been already reported
            if issue.timestamps.reported is not None:
                owner = issue.jira_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue")

                    comment = (f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue "
                               f"in '{account_name} / {account_id}' account, '{group_region}' region")
                    if issue.status == IssueStatus.Whitelisted:
                        # Adding label with "whitelisted" to jira ticket.
                        jira.add_label(
                            ticket_id=issue.jira_details.ticket,
                            label=IssueStatus.Whitelisted.value
                        )
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.debug(f"Updating security group '{group_name} / {group_id}' issue")

                    comment = "Issue details are changed, please check again.\n"
                    comment += self.build_open_ports_table_jira(issue.issue_details.perms)
                    comment += JiraOperations.build_tags_table(tags)
                    jira.update_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"Security group '{group_name} / {group_id}' issue is changed "
                            f"in '{account_name} / {account_id}' account, '{group_region}' region"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}"
                            f"\n"
                            f"{self.build_open_ports_table_slack(issue.issue_details.perms)}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{group_name} / {group_id}'")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting security group '{group_name} / {group_id}' issue")
                status = RestrictionStatus(issue.issue_details.status)

                # if owner/bu/product tags exist on security group - use them
                group_owner = tags.get("owner", None)
                group_bu = tags.get("bu", None)
                group_product = tags.get("product", None)

                open_port_details = self.build_open_ports_table_jira(issue.issue_details.perms)

                account_details = (f"*Risk*: High\n\n"
                                   f"*Account Name*: {account_name}\n"
                                   f"*Account ID*: {account_id}\n"
                                   f"*SG Name*: {group_name}\n"
                                   f"*SG ID*: {group_id}\n"
                                   f"*Region*: {group_region}\n")
                account_details += f"*VPC*: {group_vpc_id}\n\n" if group_vpc_id else "\n"

                account = Account(id=account_id,
                                  name=account_name,
                                  region=group_region,
                                  role_name=self.config.aws.role_name_reporting)
                ec2_client = account.client("ec2") if account.session is not None else None
                rds_client = account.client("rds") if account.session is not None else None
                elb_client = account.client("elb") if account.session is not None else None
                elbv2_client = account.client("elbv2") if account.session is not None else None
                iam_client = account.client("iam") if account.session is not None else None

                # initialize everything referenced below so the description can be built
                # even when a client is unavailable
                sg_instance_details = instance_profile_details = ec2_owner = ec2_bu = ec2_product = None
                sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = None
                sg_public = sg_blind_public = False
                rds_instance_details = elb_instance_details = None

                if ec2_client is not None:
                    ec2_instances = EC2Operations.get_instance_details_of_sg_associated(ec2_client, group_id)
                    sg_instance_details, instance_profile_details, \
                        sg_in_use_ec2, sg_public, sg_blind_public, \
                        ec2_owner, ec2_bu, ec2_product = self.build_instances_table(iam_client, ec2_instances)

                if elb_client is not None and elbv2_client is not None:
                    try:
                        elb_instances = EC2Operations.get_elb_details_of_sg_associated(elb_client, elbv2_client, group_id)
                        elb_instance_details, sg_in_use_elb = self.build_elb_instances_table(elb_instances)
                    except Exception:
                        logging.exception(f"Failed to build ELB details for '{group_name} / {group_id}' in {account}")

                if rds_client is not None:
                    try:
                        rds_instances = RDSOperations.get_rds_instance_details_of_sg_associated(rds_client, group_id)
                        rds_instance_details, sg_in_use_rds = self.build_rds_instances_table(rds_instances)
                    except Exception:
                        logging.exception(f"Failed to build RDS details for '{group_name} / {group_id}' in {account}")

                sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds

                owner = group_owner if group_owner is not None else ec2_owner
                bu = group_bu if group_bu is not None else ec2_bu
                product = group_product if group_product is not None else ec2_product

                if bu is None:
                    bu = self.config.get_bu_by_name(group_name)

                source_description = f"has {status.value} status"
                if status == RestrictionStatus.OpenCompletely:
                    source_description = "allows access from any IP address (0.0.0.0/0, ::/0)"
                elif status == RestrictionStatus.OpenPartly:
                    source_description = "allows access from some definite public ip addresses or networks"

                if sg_public:
                    priority = "Critical"
                    summary_status = "Internet"
                    issue_description = (f"Security group has EC2 instances in public subnets "
                                         f"with public IP address attached and "
                                         f"{source_description} "
                                         f"for following ports:\n")
                    threat = (f"*Threat*: "
                              f"Instances associated with this security group are accessible via public route over Internet and "
                              f"have ingress rules which allow access to critical services which should be accessible "
                              f"only from VPN or Direct Connect. Accessing these instances via Internet can lead to leakage "
                              f"to third parties of login credentials for such services as databases/remote access. "
                              f"Open and unrestricted access from Internet increases opportunities for "
                              f"malicious activity from public internet which can potentially result in "
                              f"hacking, denial-of-service attacks, loss of data, etc. This also provides "
                              f"an ingress point to the attackers to gain backdoor access within the other "
                              f"critical services.\n")
                elif sg_blind_public:
                    priority = "Critical"
                    summary_status = "Internet"
                    issue_description = (f"Security group has EC2 instances in private subnets "
                                         f"with public IP address attached and "
                                         f"{source_description} "
                                         f"for following ports:\n")
                    threat = (f"*Threat*: "
                              f"Instances listed below can be probed by external attack vectors, which makes them "
                              f"vulnerable to blind injection based attacks: although the EC2 instances are in a "
                              f"private subnet, if the security group and NACL allow incoming access from the internet, "
                              f"traffic will reach the instances when someone probes their public IP. "
                              f"However, there will be no return traffic due to the lack of an IGW.\n")
                elif not sg_in_use:
                    priority = "Minor"
                    summary_status = "Unused"
                    issue_description = (f"Security group has no EC2 instances attached and "
                                         f"{source_description} "
                                         f"for following ports:\n")
                    threat = (f"*Threat*: "
                              f"An unused SG can be leveraged to gain control/access within the network "
                              f"if attached to any exposed instance. This unrestricted access increases "
                              f"opportunities for malicious activity (hacking, denial-of-service attacks, "
                              f"loss of data).\n")
                else:
                    priority = "Major"
                    summary_status = "Intranet"
                    issue_description = (f"Security group has EC2 instances in private subnets and "
                                         f"{source_description} "
                                         f"for following ports:\n")
                    threat = (f"*Threat*: "
                              f"Open access within the network not only provides unrestricted access to "
                              f"other servers but increases opportunities for malicious activity (hacking, "
                              f"denial-of-service attacks, loss of data) if attacker gains access to the "
                              f"services within the network, thus providing lateral movement.\n")

                tags_table = JiraOperations.build_tags_table(tags)

                issue_description = (f"{issue_description}"
                                     f"{open_port_details}"
                                     f"{threat}"
                                     f"{account_details}")

                if status == RestrictionStatus.OpenCompletely:
                    auto_remediation_date = (self.config.now + self.config.sg.issue_retention_date).date()
                    issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                issue_description += f"{tags_table}"

                issue_description += f"{sg_instance_details if sg_instance_details else ''}"
                issue_description += f"{rds_instance_details if rds_instance_details else ''}"
                issue_description += f"{elb_instance_details if elb_instance_details else ''}"
                issue_description += f"{instance_profile_details if instance_profile_details else ''}"

                issue_description += (f"*Recommendation*: "
                                      f"Allow access only for a minimum set of required ip addresses/ranges from "
                                      f"[RFC1918|https://tools.ietf.org/html/rfc1918]. ")

                if self.config.whitelisting_procedure_url:
                    issue_description += (f"For any other exceptions, please follow the "
                                          f"[whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                                          f"and provide a strong business reasoning. ")

                issue_description += "Be sure to delete overly permissive rules after creating rules that are more restrictive.\n"

                issue_summary = (f"{summary_status} open security group '{group_name}'"
                                 f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority=priority, labels=["insecure-services"],
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                issue.jira_details.public = sg_public
                issue.jira_details.blind_public = sg_blind_public
                issue.jira_details.in_use = sg_in_use

                issue.jira_details.owner = owner
                issue.jira_details.business_unit = bu
                issue.jira_details.product = product

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}"
                        f"\n"
                        f"{self.build_open_ports_table_slack(issue.issue_details.perms)}",
                    owner=owner,
                    account_id=account_id,
                    bu=bu, product=product,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def create_tickets_ebsvolumes(self):
    """ Class method to create jira tickets """
    table_name = self.config.ebsVolume.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.ebsVolume.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, EBSUnencryptedVolumeIssue)
        for issue in issues:
            volume_id = issue.issue_id
            region = issue.issue_details.region
            tags = issue.issue_details.tags
            # issue has been already reported
            if issue.timestamps.reported is not None:
                owner = issue.jira_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue")

                    comment = (f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue "
                               f"in '{account_name} / {account_id}' account, '{region}' region")
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.error(f"TODO: update jira ticket with new data: {table_name}, {account_id}, {volume_id}")
                    slack.report_issue(
                        msg=f"EBS unencrypted volume '{volume_id}' issue is changed "
                            f"in '{account_name} / {account_id}' account, '{region}' region"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{volume_id}'")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting EBS unencrypted volume '{volume_id}' issue")

                # if owner/bu/product tags exist on volume - use them
                volume_owner = tags.get("owner", None)
                volume_bu = tags.get("bu", None)
                volume_product = tags.get("product", None)

                issue_description = (f"EBS volume needs to be encrypted.\n\n"
                                     f"*Risk*: High\n\n"
                                     f"*Account Name*: {account_name}\n"
                                     f"*Account ID*: {account_id}\n"
                                     f"*Region*: {region}\n"
                                     f"*Volume ID*: {volume_id}\n")

                ec2_details = ec2_owner = ec2_bu = ec2_product = None
                if issue.issue_details.attachments:
                    account = Account(id=account_id,
                                      name=account_name,
                                      region=region,
                                      role_name=self.config.aws.role_name_reporting)
                    if account.session is not None:
                        ec2_client = account.client("ec2")
                        ec2_instances = []
                        for instance_id, state in issue.issue_details.attachments.items():
                            metadata = EC2Operations.get_instance_meta_data(ec2_client, instance_id)
                            if metadata is not None:
                                ec2_instances.append({'ec2': metadata, 'state': state})
                        ec2_details, ec2_owner, ec2_bu, ec2_product = self.build_instances_table(ec2_instances)

                owner = volume_owner if volume_owner is not None else ec2_owner
                bu = volume_bu if volume_bu is not None else ec2_bu
                product = volume_product if volume_product is not None else ec2_product

                issue_description += JiraOperations.build_tags_table(tags)
                issue_description += ec2_details if ec2_details else ''

                issue_description += "*Recommendation*: Encrypt EBS volume. "

                if self.config.whitelisting_procedure_url:
                    issue_description += (f"For any other exceptions, please follow the "
                                          f"[whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                                          f"and provide a strong business reasoning. ")

                issue_summary = (f"EBS unencrypted volume '{volume_id}'"
                                 f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority="Major", labels=["unencrypted-ebs-volumes"],
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                issue.jira_details.owner = owner
                issue.jira_details.business_unit = bu
                issue.jira_details.product = product

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                    owner=owner,
                    account_id=account_id,
                    bu=bu, product=product,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def create_tickets_cloud_trail_logging(self):
    """ Class function to create jira tickets """
    table_name = self.config.cloudtrails.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.cloudtrails.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, CloudTrailIssue)
        for issue in issues:
            region = issue.issue_id
            # issue has been already reported
            if issue.timestamps.reported is not None:
                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} '{region}' CloudTrail logging issue")

                    comment = (f"Closing {issue.status.value} issue with '{region}' CloudTrail logging in "
                               f"'{account_name} / {account_id}'")
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        account_id=account_id,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.debug(f"Updating '{region}' issue")

                    comment = "Issue details are changed, please check again.\n"
                    comment += self.build_trail_status(issue.issue_details.disabled,
                                                       issue.issue_details.delivery_errors)
                    comment += f"\n\n"
                    comment += self.build_trails_table(issue.issue_details.trails)
                    jira.update_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"CloudTrail logging '{region}' issue is changed in "
                            f"'{account_name} / {account_id}'"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        account_id=account_id,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{region}' issue")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting '{region}' CloudTrail logging issue")
                if issue.issue_details.disabled:
                    issue_summary = f"Disabled CloudTrail in '{account_name} / {account_id} / {region}'"
                    issue_description = "No enabled CloudTrails for region available."
                    recommendation = "Create CloudTrail for region"
                elif issue.issue_details.delivery_errors:
                    issue_summary = f"CloudTrail logging issues in '{account_name} / {account_id} / {region}'"
                    issue_description = "CloudTrail has issues with logging."
                    recommendation = "Check policies for CloudTrail logging"
                else:
                    raise Exception("not disabled and no errors, this should not have happened")

                issue_description = (f"{issue_description}\n\n"
                                     f"*Risk*: High\n\n"
                                     f"*Account Name*: {account_name}\n"
                                     f"*Account ID*: {account_id}\n"
                                     f"*Region*: {region}\n")
                issue_description += self.build_trail_status(issue.issue_details.disabled,
                                                             issue.issue_details.delivery_errors)
                issue_description += self.build_trails_table(issue.issue_details.trails)

                issue_description += f"\n\n*Recommendation*: {recommendation}. "

                if self.config.whitelisting_procedure_url:
                    issue_description += (f"For any other exceptions, please follow the "
                                          f"[whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                                          f"and provide a strong business reasoning. ")

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority="Major", labels=["cloud-trail-disabled"],
                        account_id=account_id,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                    account_id=account_id,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def start_scan(account_id, regions, security_features, tags, ids):
    config = Config()
    account_name = config.aws.accounts.get(account_id, None)

    if not account_id:
        return bad_request(text="account_id is required parameter")

    if account_name is None:
        return bad_request(text=f"account '{account_id}' is not defined")

    valid_security_features = [module.section for module in config.modules]
    for security_feature in security_features:
        if security_feature not in valid_security_features:
            return bad_request(
                text=f"wrong security feature - '{security_feature}', "
                     f"available choices - {valid_security_features}")

    if not security_features:
        security_features = valid_security_features

    all_regions = config.aws.regions
    for region in regions:
        if region not in all_regions:
            return bad_request(text=f"Region '{region}' is not supported")

    # empty list means we want to scan all supported regions
    if not regions:
        regions = all_regions

    if ids is not None and not isinstance(ids, list):
        return bad_request(text="'ids' parameter must be list")

    if tags is not None and not isinstance(tags, dict):
        return bad_request(text="'tags' parameter must be dict")

    main_account = Account(region=config.aws.region)
    api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)

    to_scan = []
    for security_feature in security_features:
        accounts = config.get_module_config_by_name(security_feature).accounts
        if account_id in accounts:
            to_scan.append(security_feature)

    regional_services = set(to_scan) - set(GLOBAL_SECURITY_FEATURES)
    global_services = set(to_scan).intersection(set(GLOBAL_SECURITY_FEATURES))
    total = len(regional_services) * len(regions) + len(global_services)

    request_params = {
        "account_id": account_id,
        "regions": regions,
        "security_features": to_scan,
        "tags": tags
    }

    request_id = uuid.uuid4().hex
    DDB.add_request(api_table, request_id, request_params, total)

    for security_feature in to_scan:
        topic_name = config.get_module_config_by_name(security_feature).sns_topic_name
        topic_arn = get_sns_topic_arn(config, topic_name)
        payload = {
            "account_id": account_id,
            "account_name": account_name,
            "regions": regions,
            "sns_arn": topic_arn,
            "request_id": request_id
        }
        Sns.publish(topic_arn, payload)

    response = {'request_id': request_id}
    return {
        "statusCode": 200,
        "body": json.dumps(response, indent=4) if isinstance(response, dict) else response
    }
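# Hedged usage sketch for start_scan(). All concrete values (account id, regions,
# tag filter) are hypothetical, and the security feature name must match a module
# `section` value from the configuration; it is only assumed here for illustration.
example_response = start_scan(
    account_id="123456789012",                          # hypothetical account
    regions=["us-east-1", "eu-west-1"],                 # empty list would mean "all supported regions"
    security_features=["secgrp_unrestricted_access"],   # assumed module section name
    tags={"owner": "team-security"},
    ids=None,
)
# On success, example_response["body"] is a JSON document containing the generated
# request_id, which can later be passed to get_scan_results().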
def lambda_handler(event, context):
    """ Lambda handler to evaluate iam user keys rotation """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.iamUserKeysRotation.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for IAM user keys rotation for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyRotationIssue)
        # make dictionary for fast search by id (IAM is global, so no region filtering is needed)
        open_issues = {issue.issue_id: issue for issue in open_issues}
        logging.debug(f"Users with keys to rotate in DDB:\n{open_issues.keys()}")

        checker = IAMKeyChecker(account=account,
                                now=config.now,
                                rotation_criteria_days=config.iamUserKeysRotation.rotation_criteria_days)
        if not checker.check(last_used_check_enabled=False):
            return

        for user in checker.users:
            for key in user.stale_keys:
                issue = IAMKeyRotationIssue(account_id, key.id)
                issue.issue_details.username = user.id
                issue.issue_details.create_date = key.create_date.isoformat()
                if config.iamUserKeysRotation.in_whitelist(account_id, key.id) \
                        or config.iamUserKeysRotation.in_whitelist(account_id, user.id):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {key.id}/{user.id} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from issues_list_from_db (if exists)
                # as we already checked it
                open_issues.pop(key.id, None)

        logging.debug(f"Keys to rotate in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated keys
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check IAM user keys rotation for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked IAM user keys rotation for '{account_id} ({account_name})'")
def create_tickets_s3buckets(self):
    """ Class method to create jira tickets """
    table_name = self.config.s3policy.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.aws.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, S3PolicyIssue)
        for issue in issues:
            bucket_name = issue.issue_id
            tags = issue.issue_details.tags
            policy = issue.issue_details.policy
            # issue has been already reported
            if issue.timestamps.reported is not None:
                owner = issue.issue_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' public policy issue")

                    comment = (f"Closing {issue.status.value} S3 bucket '{bucket_name}' public policy "
                               f"in '{account_name} / {account_id}' account")
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.debug(f"Updating S3 bucket '{bucket_name}' public policy issue")

                    comment = "Issue details are changed, please check again.\n"
                    # Adding new bucket policy json as attachment to Jira ticket.
                    attachment = jira.add_attachment(ticket_id=issue.jira_details.ticket,
                                                     filename=self.attachment_name(account_id, bucket_name),
                                                     text=policy)
                    if attachment is not None:
                        comment += f"New policy - [^{attachment.filename}].\n"
                    comment += JiraOperations.build_tags_table(tags)
                    jira.update_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"S3 bucket '{bucket_name}' public policy issue is changed "
                            f"in '{account_name} / {account_id}' account"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{bucket_name}'")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting S3 bucket '{bucket_name}' public policy issue")

                owner = tags.get("owner", None)
                bu = tags.get("bu", None)
                product = tags.get("product", None)

                if bu is None:
                    bu = self.config.get_bu_by_name(bucket_name)

                issue_summary = (f"S3 bucket '{bucket_name}' with public policy "
                                 f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                issue_description = (
                    f"Bucket policy allows unrestricted public access.\n\n"
                    f"*Threat*: "
                    f"This creates potential security vulnerabilities by allowing anyone to add, modify, or remove items in a bucket.\n\n"
                    f"*Risk*: High\n\n"
                    f"*Account Name*: {account_name}\n"
                    f"*Account ID*: {account_id}\n"
                    f"*S3 Bucket name*: {bucket_name}\n"
                    f"*Bucket Owner*: {owner}\n"
                    f"\n")

                auto_remediation_date = (self.config.now + self.config.s3policy.issue_retention_date).date()
                issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                issue_description += JiraOperations.build_tags_table(tags)

                issue_description += f"\n"
                issue_description += (
                    f"*Recommendation*: "
                    f"Check if global access is truly needed and "
                    f"if not - update bucket permissions to restrict access to specific private IP ranges from RFC1918."
                )

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority="Major", labels=["publics3"],
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                # Adding bucket policy json as attachment to Jira ticket.
                jira.add_attachment(ticket_id=issue.jira_details.ticket,
                                    filename=self.attachment_name(account_id, bucket_name),
                                    text=policy)

                issue.jira_details.owner = owner
                issue.jira_details.business_unit = bu
                issue.jira_details.product = product

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                    owner=owner,
                    account_id=account_id,
                    bu=bu, product=product,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def clean_ami_public_access(self):
    """ Class method to clean AMI public access which are violating aws best practices """
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(self.config.publicAMIs.ddb_table_name)

    retention_period = self.config.publicAMIs.remediation_retention_period

    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.aws.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_open_issues(ddb_table, account_id, PublicAMIIssue)
        for issue in issues:
            ami_id = issue.issue_id

            in_whitelist = self.config.publicAMIs.in_whitelist(account_id, ami_id)

            if in_whitelist:
                logging.debug(f"Skipping {ami_id} (in whitelist)")

                # Adding label with "whitelisted" to jira ticket.
                jira.add_label(
                    ticket_id=issue.jira_details.ticket,
                    label=IssueStatus.Whitelisted.value
                )
                continue

            if issue.timestamps.reported is None:
                logging.debug(f"Skipping '{ami_id}' (was not reported)")
                continue

            if issue.timestamps.remediated is not None:
                logging.debug(f"Skipping {ami_id} (has been already remediated)")
                continue

            updated_date = issue.timestamp_as_datetime
            no_of_days_issue_created = (self.config.now - updated_date).days

            if no_of_days_issue_created >= retention_period:
                owner = issue.jira_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                try:
                    account = Account(id=account_id,
                                      name=account_name,
                                      region=issue.issue_details.region,
                                      role_name=self.config.aws.role_name_reporting)
                    if account.session is None:
                        continue

                    checker = PublicAMIChecker(account=account)
                    checker.check(amis_to_check=[ami_id])

                    ami = checker.get_ami(ami_id)
                    if ami is None:
                        logging.debug(f"AMI {ami_id} was removed by user")
                    elif not ami.public_access:
                        logging.debug(f"AMI {ami.name} public access issue was remediated by user")
                    else:
                        logging.debug(f"Remediating '{ami.name}'")

                        remediation_succeed = True
                        if ami.modify_image_attribute():
                            comment = (f"AMI '{ami.name}' public access issue "
                                       f"in '{account_name} / {account_id}' account "
                                       f"was remediated by hammer")
                        else:
                            remediation_succeed = False
                            comment = (f"Failed to remediate AMI '{ami.name}' public access issue "
                                       f"in '{account_name} / {account_id}' account "
                                       f"due to some limitations. Please, check manually")

                        jira.remediate_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment,
                            reassign=remediation_succeed,
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_remediated(ddb_table, issue)
                except Exception:
                    logging.exception(f"Error occurred while updating AMI '{ami_id}' access "
                                      f"in '{account_name} / {account_id}'")
            else:
                logging.debug(f"Skipping '{ami_id}' "
                              f"({retention_period - no_of_days_issue_created} days before remediation)")
def lambda_handler(event, context):
    """ Lambda Handler to describe cloud trails enabled or not for each region """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.cloudtrails.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for CloudTrail logging issues in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, CloudTrailIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_id == region}
        logging.debug(f"CloudTrail region in DDB:\n{open_issues.keys()}")

        checker = CloudTrailChecker(account=account)
        if checker.check():
            if checker.disabled or checker.delivery_errors:
                issue = CloudTrailIssue(account_id, region)
                issue.issue_details.disabled = checker.disabled
                issue.issue_details.delivery_errors = checker.delivery_errors
                issue.add_trails(checker.trails)
                if config.cloudtrails.in_whitelist(account_id, region):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {region} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
            # issue exists in ddb and was fixed
            elif region in open_issues:
                IssueOperations.set_status_resolved(ddb_table, open_issues[region])

        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(f"Failed to check CloudTrail in '{region}' for '{account_id} ({account_name})'")

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain CloudTrail checking")

    logging.debug(f"Checked CloudTrail in '{region}' for '{account_id} ({account_name})'")
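# Hedged illustration of the SNS message shape this handler expects, derived from
# the fields read above; the concrete values and the topic name are hypothetical.
# The handler pops one region per invocation and re-publishes the remaining list
# to the same topic, so a single message fans out across all requested regions.
example_message = {
    "account_id": "123456789012",
    "account_name": "dev",
    "regions": ["us-east-1", "us-west-2"],
    "sns_arn": "arn:aws:sns:us-east-1:123456789012:hammer-describe-cloudtrails",  # assumed topic
    "request_id": "optional; present only when the scan was started via the API",
}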
def create_tickets_s3_unencrypted_buckets(self):
    """ Class method to create jira tickets """
    table_name = self.config.s3Encrypt.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.aws.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, S3EncryptionIssue)
        for issue in issues:
            bucket_name = issue.issue_id
            tags = issue.issue_details.tags
            # issue has been already reported
            if issue.timestamps.reported is not None:
                owner = issue.issue_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} S3 bucket '{bucket_name}' unencrypted issue")

                    comment = (f"Closing {issue.status.value} S3 bucket '{bucket_name}' unencrypted issue "
                               f"in '{account_name} / {account_id}' account")
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.debug(f"Updating S3 bucket '{bucket_name}' unencrypted issue")

                    comment = "Issue details are changed, please check again.\n"
                    comment += JiraOperations.build_tags_table(tags)
                    jira.update_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"S3 bucket '{bucket_name}' unencrypted issue is changed "
                            f"in '{account_name} / {account_id}' account"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{bucket_name}'")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting S3 bucket '{bucket_name}' unencrypted issue")

                owner = tags.get("owner", None)
                bu = tags.get("bu", None)
                product = tags.get("product", None)

                if bu is None:
                    bu = self.config.get_bu_by_name(bucket_name)

                issue_summary = (f"S3 bucket '{bucket_name}' unencrypted "
                                 f"in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                issue_description = (
                    f"Bucket is unencrypted.\n\n"
                    f"*Threat*: "
                    f"Based on data protection policies, data that is classified as sensitive information or "
                    f"intellectual property of the organization needs to be encrypted. Additionally, as part of the "
                    f"initiative of Encryption Everywhere, it is necessary to encrypt the data in order to ensure the "
                    f"confidentiality and integrity of the data.\n\n"
                    f"*Risk*: High\n\n"
                    f"*Account Name*: {account_name}\n"
                    f"*Account ID*: {account_id}\n"
                    f"*S3 Bucket name*: {bucket_name}\n"
                    f"*Bucket Owner*: {owner}\n"
                    f"\n")

                auto_remediation_date = (self.config.now + self.config.s3Encrypt.issue_retention_date).date()
                issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                issue_description += JiraOperations.build_tags_table(tags)

                issue_description += f"\n"
                issue_description += (
                    f"*Recommendation*: "
                    f"Encrypt the bucket by enabling server-side encryption with either "
                    f"Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS)."
                )

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority="Major", labels=["s3-unencrypted"],
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                issue.jira_details.owner = owner
                issue.jira_details.business_unit = bu
                issue.jira_details.product = product

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                    owner=owner,
                    account_id=account_id,
                    bu=bu, product=product,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def clean_security_groups(self, batch=False):
    """ Class function to clean security groups which are violating aws best practices """
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(self.config.sg.ddb_table_name)
    backup_bucket = self.config.aws.s3_backup_bucket

    retention_period = self.config.sg.remediation_retention_period

    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.sg.remediation_accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
        for issue in issues:
            group_name = issue.issue_details.name
            group_vpc_id = issue.issue_details.vpc_id
            group_id = issue.issue_id
            group_region = issue.issue_details.region
            # status = issue.jira_details.status

            name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}")
            id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id)

            if name_in_whitelist or id_in_whitelist:
                logging.debug(f"Skipping '{group_name} / {group_id}' (in whitelist)")

                # Adding label with "whitelisted" to jira ticket.
                jira.add_label(
                    ticket_id=issue.jira_details.ticket,
                    label=IssueStatus.Whitelisted.value
                )
                continue

            if issue.timestamps.reported is None:
                logging.debug(f"Skipping '{group_name} / {group_id}' (was not reported)")
                continue

            if issue.timestamps.remediated is not None:
                logging.debug(f"Skipping '{group_name} / {group_id}' (has been already remediated)")
                continue

            updated_date = issue.timestamp_as_datetime
            no_of_days_issue_created = (self.config.now - updated_date).days

            if no_of_days_issue_created >= retention_period:
                owner = issue.jira_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                try:
                    account = Account(id=account_id,
                                      name=account_name,
                                      region=group_region,
                                      role_name=self.config.aws.role_name_reporting)
                    if account.session is None:
                        continue

                    checker = SecurityGroupsChecker(account=account,
                                                    restricted_ports=self.config.sg.restricted_ports)
                    checker.check(ids=[group_id])
                    sg = checker.get_security_group(group_id)
                    if sg is None:
                        logging.debug(f"Security group '{group_name} / {group_id}' was removed by user")
                    elif sg.restricted:
                        logging.debug(f"Security group '{group_name} / {group_id}' issue was remediated by user")
                    elif sg.status != RestrictionStatus.OpenCompletely:
                        logging.debug(f"Security group '{group_name} / {group_id}' is not completely open")
                    else:
                        if not batch and \
                           not confirm(f"Do you want to remediate security group '{group_name} / {group_id}'", False):
                            continue

                        logging.debug(f"Remediating '{group_name} / {group_id}' rules")

                        backup_path = sg.backup_s3(main_account.client("s3"), backup_bucket)
                        remediation_succeed = True
                        processed = sg.restrict(RestrictionStatus.OpenCompletely)
                        if processed == 0:
                            logging.debug(f"No rules were detected to remediate in '{group_name} / {group_id}'")
                            comment = None
                        elif processed is None:
                            remediation_succeed = False
                            comment = (f"Failed to remediate security group '{group_name} / {group_id}' issue "
                                       f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                       f"due to some limitations. Please, check manually")
                        else:
                            comment = (f"Rules backup was saved to "
                                       f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                       f"Security group '{group_name} / {group_id}' `{RestrictionStatus.OpenCompletely.value}` issue "
                                       f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                       f"was remediated by hammer")

                        if comment is not None:
                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                    f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu, product=product,
                            )
                            IssueOperations.set_status_remediated(ddb_table, issue)
                except Exception:
                    logging.exception(f"Error occurred while updating security group '{group_name} / {group_id}' rules "
                                      f"in '{account_name} / {account_id} / {group_region}'")
            else:
                logging.debug(f"Skipping '{group_name} / {group_id}' "
                              f"({retention_period - no_of_days_issue_created} days before remediation)")
def create_tickets_sqs_policy(self):
    """ Class method to create jira tickets """
    table_name = self.config.sqspolicy.ddb_table_name
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(table_name)
    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.aws.accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, SQSPolicyIssue)
        for issue in issues:
            queue_url = issue.issue_id
            queue_name = issue.issue_details.name
            queue_region = issue.issue_details.region
            tags = issue.issue_details.tags
            policy = issue.issue_details.policy
            # issue has been already reported
            if issue.timestamps.reported is not None:
                owner = issue.issue_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                    logging.debug(f"Closing {issue.status.value} SQS queue '{queue_name}' public policy issue")

                    comment = (f"Closing {issue.status.value} SQS queue '{queue_name}' public policy "
                               f"in '{account_name} / {account_id}' account, '{queue_region}' region")
                    if issue.status == IssueStatus.Whitelisted:
                        # Adding label with "whitelisted" to jira ticket.
                        jira.add_label(
                            ticket_id=issue.jira_details.ticket,
                            label=IssueStatus.Whitelisted.value
                        )
                    jira.close_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_closed(ddb_table, issue)
                # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                elif issue.timestamps.updated > issue.timestamps.reported:
                    logging.debug(f"Updating SQS queue '{queue_name}' public policy issue")

                    comment = "Issue details are changed, please check again.\n"
                    # Adding new SQS queue policy json as attachment to Jira ticket.
                    attachment = jira.add_attachment(ticket_id=issue.jira_details.ticket,
                                                     filename=self.attachment_name(account_id, queue_region, queue_name),
                                                     text=policy)
                    if attachment is not None:
                        comment += f"New policy - [^{attachment.filename}].\n"
                    comment += JiraOperations.build_tags_table(tags)
                    jira.update_issue(
                        ticket_id=issue.jira_details.ticket,
                        comment=comment
                    )
                    slack.report_issue(
                        msg=f"SQS queue '{queue_name}' public policy issue is changed "
                            f"in '{account_name} / {account_id}' account, '{queue_region}' region"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                    IssueOperations.set_status_updated(ddb_table, issue)
                else:
                    logging.debug(f"No changes for '{queue_name}'")
            # issue has not been reported yet
            else:
                logging.debug(f"Reporting SQS queue '{queue_name}' public policy issue")

                owner = tags.get("owner", None)
                bu = tags.get("bu", None)
                product = tags.get("product", None)

                if bu is None:
                    bu = self.config.get_bu_by_name(queue_name)

                issue_summary = (f"SQS queue '{queue_name}' with public policy "
                                 f"in '{account_name} / {account_id}' account, '{queue_region}' region"
                                 f"{' [' + bu + ']' if bu else ''}")

                issue_description = (
                    f"Queue policy allows unrestricted public access.\n\n"
                    f"*Threat*: "
                    f"This creates potential security vulnerabilities by allowing anyone to add, modify, or remove items in an SQS queue.\n\n"
                    f"*Risk*: High\n\n"
                    f"*Account Name*: {account_name}\n"
                    f"*Account ID*: {account_id}\n"
                    f"*SQS queue url*: {queue_url}\n"
                    f"*SQS queue name*: {queue_name}\n"
                    f"*SQS queue region*: {queue_region}\n"
                    f"\n")

                auto_remediation_date = (self.config.now + self.config.sqspolicy.issue_retention_date).date()
                issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                issue_description += JiraOperations.build_tags_table(tags)

                issue_description += f"\n"
                issue_description += (
                    f"*Recommendation*: "
                    f"Check if global access is truly needed and "
                    f"if not - update SQS queue policy with "
                    f"an [*IpAddress* condition|https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-all-permissions-to-all-users-in-cidr-range] "
                    f"in order to restrict access to specific private IP ranges from [RFC1918|https://tools.ietf.org/html/rfc1918]. "
                )

                if self.config.whitelisting_procedure_url:
                    issue_description += (f"For any other exceptions, please follow the "
                                          f"[whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                                          f"and provide a strong business reasoning. ")

                try:
                    response = jira.add_issue(
                        issue_summary=issue_summary, issue_description=issue_description,
                        priority="Major", labels=["publicsqs"],
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )
                except Exception:
                    logging.exception("Failed to create jira ticket")
                    continue

                if response is not None:
                    issue.jira_details.ticket = response.ticket_id
                    issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                # Adding SQS queue policy json as attachment to Jira ticket.
                jira.add_attachment(ticket_id=issue.jira_details.ticket,
                                    filename=self.attachment_name(account_id, queue_region, queue_name),
                                    text=policy)

                issue.jira_details.owner = owner
                issue.jira_details.business_unit = bu
                issue.jira_details.product = product

                slack.report_issue(
                    msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                    owner=owner,
                    account_id=account_id,
                    bu=bu, product=product,
                )

                IssueOperations.set_status_reported(ddb_table, issue)
def lambda_handler(event, context):
    """ Lambda handler to evaluate SQS queue policy """
    set_logging(level=logging.DEBUG)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # region = payload['region']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sqspolicy.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public SQS policies in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")

        checker = SQSPolicyChecker(account=account)
        if checker.check():
            for queue in checker.queues:
                logging.debug(f"Checking {queue.name}")
                if queue.public:
                    issue = SQSPolicyIssue(account_id, queue.url)
                    issue.issue_details.tags = queue.tags
                    issue.issue_details.name = queue.name
                    issue.issue_details.region = queue.account.region
                    issue.issue_details.policy = queue.policy
                    if config.sqspolicy.in_whitelist(account_id, queue.url):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {queue.name} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(queue.url, None)

        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated queues
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check SQS policies for '{account_id} ({account_name})'")
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain SQS policy checking")

    logging.debug(f"Checked SQS policies for '{account_id} ({account_name})'")
class DDBackuper(object):
    """
    TODO: this should be replaced with DynamoDB Point-in-Time recovery
          when CloudFormation supports it.
    """
    def __init__(self):
        self.config = Config()
        self.enabled = self.config.aws.ddb_backup_enabled
        self.retention_period = self.config.aws.ddb_backup_retention
        self.account = Account(region=self.config.aws.region)
        self.ddb_client = self.account.client('dynamodb')
        self.ddb_resource = self.account.resource('dynamodb')
        self.now = datetime.now(timezone.utc)
        # used as a part of backup name
        self.today = self.now.strftime("%Y-%m-%d")

    def filter_tables(self):
        """ Return dict of hammer ddb table names mapped to their existing backups """
        hammer_tables = {}
        for module in self.config.modules:
            table_name = module.ddb_table_name
            try:
                hammer_tables[table_name] = self.ddb_client.list_backups(TableName=table_name)['BackupSummaries']
            except ClientError as err:
                if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
                    logging.error(f"Access denied in {self.account} "
                                  f"({self.ddb_client.__class__.__name__.lower()}:{err.operation_name})")
                else:
                    logging.exception(f"Failed to list '{table_name}' backups in {self.account}")
                continue
        return hammer_tables

    def today_backup_name(self, table_name):
        return f"{table_name}_{self.today}"

    def today_backup_exists(self, table_name, backups):
        """
        Return whether today's backup exists in provided list of backups.
        Check is based on backup name and self.today
        """
        return self.today_backup_name(table_name) in [backup['BackupName'] for backup in backups]

    def check_backups(self, table_name, backups):
        """ Log error if any backup status for provided list of backups is not AVAILABLE """
        for backup in backups:
            name = backup["BackupName"]
            status = backup["BackupStatus"]
            if status != "AVAILABLE":
                logging.error(f"{table_name} backup is not available: {name}")

    def launch_backup(self, table_name):
        try:
            self.ddb_client.create_backup(
                TableName=table_name,
                BackupName=self.today_backup_name(table_name))
        except ClientError as err:
            if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
                logging.error(f"Access denied in {self.account} "
                              f"({self.ddb_client.__class__.__name__.lower()}:{err.operation_name})")
            else:
                logging.exception(f"Failed to create '{table_name}' backup in {self.account}")
            return False
        return True

    def rotate_backups(self, table_name, backups):
        """
        Remove all outdated backups from provided list of backups.
        Check is based on backup creation date and retention period from config
        """
        for backup in backups:
            creation_date = backup['BackupCreationDateTime']
            name = backup['BackupName']
            arn = backup['BackupArn']
            if self.now - creation_date > self.retention_period:
                logging.debug(f"Deleting outdated backup '{name}' for '{table_name}' ({arn})")
                try:
                    self.ddb_client.delete_backup(BackupArn=arn)
                except ClientError as err:
                    if err.response['Error']['Code'] in ["AccessDenied", "UnauthorizedOperation"]:
                        logging.error(f"Access denied in {self.account} "
                                      f"({self.ddb_client.__class__.__name__.lower()}:{err.operation_name})")
                    else:
                        logging.exception(f"Failed to delete '{arn}' backup in {self.account}")

    def run(self):
        if not self.enabled:
            logging.debug("DDB backup disabled")
            return
        for table_name, backups in self.filter_tables().items():
            self.check_backups(table_name, backups)
            if not self.today_backup_exists(table_name, backups):
                logging.debug(f"Launching backup of {table_name}")
                if self.launch_backup(table_name):
                    self.rotate_backups(table_name, backups)
            else:
                logging.warning(f"Today's backup exists for {table_name}, skipping")
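# Hedged usage sketch: DDBackuper is expected to be driven by a scheduled job
# (for example a daily cron or CloudWatch Events rule). The entry point below is
# illustrative only and is not part of the class itself.
if __name__ == "__main__":
    DDBackuper().run()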