def pytest_generate_tests(metafunc):
    """
    Entrypoint for tests (built-in pytest function for dynamic generation of test cases).

    Builds two sets of parametrized cases: security groups as initially
    detected (remediated=False) and the same groups re-checked after
    restriction was applied (remediated=True).
    """
    # Launch EC2 mocking and prepare the test environment
    mock_ec2.start()
    mock_ec2.create_env(secgroups, region)

    acct = Account(region=region)

    # First pass: detect insecure groups, then restrict each of them
    pre_checker = SecurityGroupsChecker(acct, restricted_ports=restricted_ports)
    pre_checker.check()
    for group in pre_checker.groups:
        group.restrict()

    # Second pass: re-check the environment after remediation
    post_checker = SecurityGroupsChecker(acct, restricted_ports=restricted_ports)
    post_checker.check()

    # Pair every group with a flag telling whether it was already remediated
    cases = [(group, False) for group in pre_checker.groups] \
          + [(group, True) for group in post_checker.groups]
    metafunc.parametrize("group,remediated", cases, ids=ident_test)
def lambda_handler(event, context):
    """ Lambda handler to evaluate insecure services """
    set_logging(level=logging.INFO)

    try:
        # SNS-triggered invocation: the actual payload is a JSON document
        # inside the first record's SNS message
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        # malformed event — nothing to retry, just log and stop
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # DDB issues table lives in the main (hammer) account
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sg.ddb_table_name)

        # assume role in the account under check; session is None when
        # the role cannot be assumed — skip the account in that case
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for insecure services in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue
                       for issue in open_issues
                       if issue.issue_details.region == region}
        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")

        checker = SecurityGroupsChecker(account=account,
                                        restricted_ports=config.sg.restricted_ports)
        # checker.check() is falsy on failure; in that case open_issues is
        # left untouched so no issue is wrongly marked as resolved below
        if checker.check():
            for sg in checker.groups:
                logging.debug(f"Checking {sg.name} ({sg.id})")
                if not sg.restricted:
                    # TODO: move instances detection for security group from reporting to identification
                    #ec2_instances = EC2Operations.get_instance_details_of_sg_associated(account.client("ec2"), sg.id)
                    #logging.debug(f"associated ec2 instances: {ec2_instances}")
                    issue = SecurityGroupIssue(account_id, sg.id)
                    issue.issue_details.name = sg.name
                    issue.issue_details.region = sg.account.region
                    issue.issue_details.tags = sg.tags
                    issue.issue_details.status = sg.status.value
                    # record every unrestricted CIDR of every permission
                    for perm in sg.permissions:
                        for ip_range in perm.ip_ranges:
                            if not ip_range.restricted:
                                issue.add_perm(perm.protocol, perm.from_port, perm.to_port,
                                               ip_range.cidr, ip_range.status)
                    # whitelisting may match either by group name or by group id
                    if config.sg.in_whitelist(account_id, sg.name) or config.sg.in_whitelist(account_id, sg.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {sg.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(sg.id, None)

        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated security groups
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check insecure services in '{region}' for '{account_id} ({account_name})'")

    # push SNS messages until the list with regions to check is empty
    # (one region is processed per invocation; the rest are chained via SNS)
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(f"Checked insecure services in '{region}' for '{account_id} ({account_name})'")
def clean_security_groups(self, batch=False):
    """
    Class function to clean security groups which are violating aws best practices.

    Walks over accounts configured for remediation, loads their open security
    group issues from DynamoDB and, once an issue is older than the configured
    retention period, backs the group's rules up to S3 and restricts the
    completely open rules. Progress is reported to Jira and Slack.

    :param batch: when True, remediate without interactive confirmation

    :return: nothing
    """
    # NOTE(review): this method originally read `config.aws.region` and
    # `config.aws.s3_backup_bucket` through a bare `config` name while every
    # other access here uses `self.config` — unified on `self.config`.
    main_account = Account(region=self.config.aws.region)
    ddb_table = main_account.resource("dynamodb").Table(self.config.sg.ddb_table_name)
    backup_bucket = self.config.aws.s3_backup_bucket

    retention_period = self.config.sg.remediation_retention_period

    jira = JiraReporting(self.config)
    slack = SlackNotification(self.config)

    for account_id, account_name in self.config.sg.remediation_accounts.items():
        logging.debug(f"Checking '{account_name} / {account_id}'")
        issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
        for issue in issues:
            group_name = issue.issue_details.name
            group_vpc_id = issue.issue_details.vpc_id
            group_id = issue.issue_id
            group_region = issue.issue_details.region
            # status = issue.jira_details.status

            # whitelisting may match by "<vpc_id>:<name>" or by group id
            name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}")
            id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id)

            if name_in_whitelist or id_in_whitelist:
                logging.debug(f"Skipping '{group_name} / {group_id}' (in whitelist)")

                # Adding label with "whitelisted" to jira ticket.
                jira.add_label(
                    ticket_id=issue.jira_details.ticket,
                    label=IssueStatus.Whitelisted.value
                )
                continue

            if issue.timestamps.reported is None:
                logging.debug(f"Skipping '{group_name} / {group_id}' (was not reported)")
                continue

            if issue.timestamps.remediated is not None:
                logging.debug(f"Skipping '{group_name} / {group_id}' (has been already remediated)")
                continue

            updated_date = issue.timestamp_as_datetime
            no_of_days_issue_created = (self.config.now - updated_date).days

            # remediate only after the grace (retention) period has expired
            if no_of_days_issue_created >= retention_period:
                owner = issue.jira_details.owner
                bu = issue.jira_details.business_unit
                product = issue.jira_details.product

                try:
                    account = Account(id=account_id,
                                      name=account_name,
                                      region=group_region,
                                      role_name=self.config.aws.role_name_reporting)
                    # skip the issue if the remediation role cannot be assumed
                    if account.session is None:
                        continue

                    # re-check the single group right before acting on it,
                    # in case the user already fixed or removed it
                    checker = SecurityGroupsChecker(account=account,
                                                    restricted_ports=self.config.sg.restricted_ports)
                    checker.check(ids=[group_id])
                    sg = checker.get_security_group(group_id)
                    if sg is None:
                        logging.debug(f"Security group '{group_name} / {group_id}' was removed by user")
                    elif sg.restricted:
                        logging.debug(f"Security group '{group_name} / {group_id}' issue was remediated by user")
                    elif sg.status != RestrictionStatus.OpenCompletely:
                        # only completely open groups are auto-remediated
                        logging.debug(f"Security group '{group_name} / {group_id}' is not completely open")
                    else:
                        if not batch and \
                           not confirm(f"Do you want to remediate security group '{group_name} / {group_id}'", False):
                            continue

                        logging.debug(f"Remediating '{group_name} / {group_id}' rules")

                        # back rules up to S3 before touching the group
                        backup_path = sg.backup_s3(main_account.client("s3"), backup_bucket)

                        remediation_succeed = True
                        # restrict() returns: number of processed rules,
                        # or None on failure
                        processed = sg.restrict(RestrictionStatus.OpenCompletely)
                        if processed == 0:
                            logging.debug(f"No rules were detected to remediate in '{group_name} / {group_id}'")
                            comment = None
                        elif processed is None:
                            remediation_succeed = False
                            comment = (f"Failed to remediate security group '{group_name} / {group_id}' issue "
                                       f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                       f"due to some limitations. Please, check manually")
                        else:
                            comment = (f"Rules backup was saved to "
                                       f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                       f"Security group '{group_name} / {group_id}' `{RestrictionStatus.OpenCompletely.value}` issue "
                                       f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                       f"was remediated by hammer")

                        if comment is not None:
                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                    f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu, product=product,
                            )
                            IssueOperations.set_status_remediated(ddb_table, issue)
                except Exception:
                    logging.exception(f"Error occurred while updating security group '{group_name} / {group_id}' rules "
                                      f"in '{account_name} / {account_id} / {group_region}'")
            else:
                logging.debug(f"Skipping '{group_name} / {group_id}' "
                              f"({retention_period - no_of_days_issue_created} days before remediation)")