def lambda_handler(event, context):
    """ Lambda handler to evaluate public EBS snapshots """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # take the last region off the list; the remainder is re-published below
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.ebsSnapshot.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # cannot assume the identification role in the target account
        if account.session is None:
            return

        logging.debug(f"Checking for public EBS snapshots in {account}")

        # issues already open in DynamoDB for this account, indexed by
        # snapshot id and narrowed to the region being scanned
        recorded = IssueOperations.get_account_open_issues(ddb_table, account_id, EBSPublicSnapshotIssue)
        open_issues = {}
        for issue in recorded:
            if issue.issue_details.region == region:
                open_issues[issue.issue_id] = issue
        logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}")

        checker = EBSPublicSnapshotsChecker(account=account)
        if checker.check():
            for snapshot in checker.snapshots:
                if not snapshot.public:
                    continue
                issue = EBSPublicSnapshotIssue(account_id, snapshot.id)
                issue.issue_details.region = snapshot.account.region
                issue.issue_details.volume_id = snapshot.volume_id
                issue.issue_details.tags = snapshot.tags
                whitelisted = config.ebsSnapshot.in_whitelist(account_id, snapshot.id)
                issue.status = IssueStatus.Whitelisted if whitelisted else IssueStatus.Open
                logging.debug(f"Setting {snapshot.id} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # snapshot is still public - keep its record out of
                # the "resolved" sweep below
                open_issues.pop(snapshot.id, None)

            logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}")
            # records not seen during the scan belong to snapshots
            # that were removed or made private
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check public EBS snapshots in '{region}' for '{account_id} ({account_name})'")

    # re-publish the message while regions remain to be checked
    if payload['regions']:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public EBS snapshots checking")

    logging.debug(f"Checked public EBS snapshots in '{region}' for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """
    Lambda handler to evaluate public AMI issues.

    Triggered by an SNS message carrying the account to scan and the list of
    regions still to process; each invocation handles one region and then
    re-publishes the message until the region list is empty.
    """
    set_logging(level=logging.INFO)
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.publicAMIs.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # cannot assume the identification role in the target account
        if account.session is None:
            return

        logging.debug(f"Checking for Public AMI issues for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, PublicAMIIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")

        checker = PublicAMIChecker(account=account)
        if checker.check():
            for ami in checker.amis:
                logging.debug(f"Checking {ami.id}")
                if ami.public_access:
                    issue = PublicAMIIssue(account_id, ami.id)
                    issue.issue_details.tags = ami.tags
                    issue.issue_details.name = ami.name
                    issue.issue_details.region = region
                    if config.publicAMIs.in_whitelist(account_id, ami.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    # FIX: previously logged "{ami.id}/{ami.id}" (copy-paste from
                    # the key/user checker); the id is only needed once
                    logging.debug(f"Setting {ami.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(ami.id, None)

            logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated AMIs
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)

        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(f"Failed to check AMI public access for '{account_id} ({account_name})'")
        # FIX: no early return here anymore - the SNS chaining below must still
        # run so the remaining regions get scanned even when this region failed
        # (matches the EBS / security-group / CloudTrail checkers)

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public AMI checking")

    logging.debug(f"Checked AMI public access for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """ Lambda handler to evaluate insecure services """
    # Scans one region of one account for security groups that open restricted
    # ports to unrestricted CIDRs, records findings in DynamoDB, and chains
    # itself over SNS until the region list in the payload is empty.
    set_logging(level=logging.INFO)
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sg.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # no session means the identification role could not be assumed
        if account.session is None:
            return

        logging.debug(f"Checking for insecure services in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")

        checker = SecurityGroupsChecker(account=account,
                                        restricted_ports=config.sg.restricted_ports)
        if checker.check():
            for sg in checker.groups:
                logging.debug(f"Checking {sg.name} ({sg.id})")
                # sg.restricted presumably means every rule is limited to
                # allowed CIDRs - TODO confirm against SecurityGroupsChecker
                if not sg.restricted:
                    # TODO: move instances detection for security group from reporting to identification
                    #ec2_instances = EC2Operations.get_instance_details_of_sg_associated(account.client("ec2"), sg.id)
                    #logging.debug(f"associated ec2 instances: {ec2_instances}")
                    issue = SecurityGroupIssue(account_id, sg.id)
                    issue.issue_details.name = sg.name
                    issue.issue_details.region = sg.account.region
                    issue.issue_details.tags = sg.tags
                    issue.issue_details.status = sg.status.value
                    # record every offending (permission, CIDR) pair on the issue
                    for perm in sg.permissions:
                        for ip_range in perm.ip_ranges:
                            if not ip_range.restricted:
                                issue.add_perm(perm.protocol, perm.from_port, perm.to_port,
                                               ip_range.cidr, ip_range.status)
                    # a group can be whitelisted by name or by id
                    if config.sg.in_whitelist(account_id, sg.name) or config.sg.in_whitelist(account_id, sg.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {sg.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(sg.id, None)

            logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated security groups
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        # failure in one region is logged but does not stop the chain below
        logging.exception(f"Failed to check insecure services in '{region}' for '{account_id} ({account_name})'")

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(f"Checked insecure services in '{region}' for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """ Lambda handler to evaluate s3 buckets acl """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # a request_id in the payload means the scan was started via the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.s3acl.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        # cannot assume the identification role in the target account
        if account.session is None:
            return

        logging.debug(f"Checking for public S3 ACLs in {account}")

        # issues already open in DynamoDB, indexed by bucket name (S3 is global,
        # so no per-region filtering here)
        open_issues = {
            issue.issue_id: issue
            for issue in IssueOperations.get_account_open_issues(ddb_table, account_id, S3AclIssue)
        }
        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")

        checker = S3BucketsAclChecker(account=account)
        if not checker.check():
            return

        for bucket in checker.buckets:
            logging.debug(f"Checking {bucket.name}")
            if not bucket.public:
                continue
            issue = S3AclIssue(account_id, bucket.name)
            issue.issue_details.owner = bucket.owner
            issue.issue_details.public_acls = bucket.get_public_acls()
            issue.issue_details.tags = bucket.tags
            whitelisted = config.s3acl.in_whitelist(account_id, bucket.name)
            issue.status = IssueStatus.Whitelisted if whitelisted else IssueStatus.Open
            logging.debug(f"Setting {bucket.name} status {issue.status}")
            IssueOperations.update(ddb_table, issue)
            # bucket is still public - keep its record out of the sweep below
            open_issues.pop(bucket.name, None)

        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")
        # records not seen during the scan belong to buckets
        # that were deleted or made private
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)

        # report progress for API-initiated scans
        if request_id:
            api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(f"Failed to check s3 acls for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked s3 acls for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """ Lambda handler to evaluate iam user keys rotation """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.iamUserKeysRotation.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        # cannot assume the identification role in the target account
        if account.session is None:
            return

        logging.debug(f"Checking for IAM user keys rotation for {account}")

        # issues already open in DynamoDB, indexed by access key id
        # (IAM is global, so no per-region filtering here)
        open_issues = {
            issue.issue_id: issue
            for issue in IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyRotationIssue)
        }
        logging.debug(f"Users with keys to rotate in DDB:\n{open_issues.keys()}")

        checker = IAMKeyChecker(account=account,
                                now=config.now,
                                rotation_criteria_days=config.iamUserKeysRotation.rotation_criteria_days)
        if not checker.check(last_used_check_enabled=False):
            return

        whitelisted = config.iamUserKeysRotation.in_whitelist
        for user in checker.users:
            for key in user.stale_keys:
                issue = IAMKeyRotationIssue(account_id, key.id)
                issue.issue_details.username = user.id
                issue.issue_details.create_date = key.create_date.isoformat()
                # either the individual key or the whole user may be whitelisted
                if whitelisted(account_id, key.id) or whitelisted(account_id, user.id):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {key.id}/{user.id} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # key is still stale - keep its record out of the sweep below
                open_issues.pop(key.id, None)

        logging.debug(f"Keys to rotate in DDB:\n{open_issues.keys()}")
        # records not seen during the scan belong to keys that were rotated or removed
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check IAM user keys rotation for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked IAM user keys rotation for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """ Lambda Handler to describe cloud trails enabled or not for each region """
    # Unlike the resource-level checkers, the issue id here is the REGION name:
    # a region either has a healthy CloudTrail configuration or it does not.
    set_logging(level=logging.INFO)
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.cloudtrails.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # no session means the identification role could not be assumed
        if account.session is None:
            return

        logging.debug(f"Checking for CloudTrail logging issues in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, CloudTrailIssue)
        # make dictionary for fast search by id
        # and filter by current region (the issue id IS the region name)
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_id == region}
        logging.debug(f"CloudTrail region in DDB:\n{open_issues.keys()}")

        checker = CloudTrailChecker(account=account)
        if checker.check():
            if checker.disabled or checker.delivery_errors:
                # at most one issue per region, covering both failure modes
                issue = CloudTrailIssue(account_id, region)
                issue.issue_details.disabled = checker.disabled
                issue.issue_details.delivery_errors = checker.delivery_errors
                issue.add_trails(checker.trails)
                if config.cloudtrails.in_whitelist(account_id, region):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {region} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
            # issue exists in ddb and was fixed
            elif region in open_issues:
                IssueOperations.set_status_resolved(ddb_table, open_issues[region])

        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        # failure in one region is logged but does not stop the chain below
        logging.exception(f"Failed to check CloudTrail in '{region}' for '{account_id} ({account_name})'")

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain CloudTrail checking")

    logging.debug(f"Checked CloudTrail in '{region}' for '{account_id} ({account_name})'")
def lambda_handler(event, context):
    """
    Lambda handler to evaluate SQS queue policy.

    Triggered by an SNS message carrying the account to scan and the list of
    regions still to process; each invocation handles one region and then
    re-publishes the message until the region list is empty.
    """
    # FIX: use INFO like every other identification handler in this codebase;
    # DEBUG looked like a leftover from local debugging
    set_logging(level=logging.INFO)
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sqspolicy.ddb_table_name)
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # cannot assume the identification role in the target account
        if account.session is None:
            return

        logging.debug(f"Checking for public SQS policies in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")

        checker = SQSPolicyChecker(account=account)
        if checker.check():
            for queue in checker.queues:
                logging.debug(f"Checking {queue.name}")
                if queue.public:
                    # the queue URL (not the name) is used as the issue id
                    issue = SQSPolicyIssue(account_id, queue.url)
                    issue.issue_details.tags = queue.tags
                    issue.issue_details.name = queue.name
                    issue.issue_details.region = queue.account.region
                    issue.issue_details.policy = queue.policy
                    if config.sqspolicy.in_whitelist(account_id, queue.url):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {queue.name} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(queue.url, None)

            logging.debug(f"SQS in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated queues
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check SQS policies for '{account_id} ({account_name})'")
        # FIX: no early return here anymore - the SNS chaining below must still
        # run so the remaining regions get scanned even when this region failed
        # (matches the EBS / security-group / CloudTrail checkers)

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            # FIX: message previously said "insecure services"
            # (copy-paste from the security group checker)
            logging.exception("Failed to chain SQS policy checking")

    logging.debug(f"Checked SQS policies for '{account_id} ({account_name})'")