コード例 #1
0
    def add_open_issues_to_sheet(self, ddb_table, work_book, sheet_name,
                                 issue_class):
        """Fill a worksheet with every open issue of the given class.

        Iterates all configured AWS accounts, writes one row per open
        issue found in the DynamoDB table and, when neither JIRA nor
        Slack reporting is enabled, flags each issue as reported so
        that remediation can proceed.
        """
        sheet = work_book.add_sheet(sheet_name)
        # Header row mirrors the DynamoDB table field names.
        AddRecordsToSheet.add_header_data(sheet, sheet_name)

        # Reporting to other channels makes set_status_reported unnecessary;
        # compute the flag once, outside the loops.
        no_other_reporting = (not self.config.jira.enabled
                              and not self.config.slack.enabled)

        row = 0
        for acc_id, acc_name in self.config.aws.accounts.items():
            open_issues = IssueOperations.get_account_open_issues(
                ddb_table, acc_id, issue_class)
            for issue in open_issues:
                row += 1
                # One row per issue, values taken from the DynamoDB record.
                AddRecordsToSheet.add_records(sheet, sheet_name, acc_id,
                                              acc_name, issue, row)
                # Remediation requires a reported issue, so with no other
                # reporting channel available mark it as reported here.
                if no_other_reporting and issue.timestamps.reported is None:
                    IssueOperations.set_status_reported(ddb_table, issue)
コード例 #2
0
ファイル: entrypoint.py プロジェクト: 5l1v3r1/hammer-2
def collect_results(request_info, main_account):
    """Gather open issues matching an API scan request.

    Builds a response dict keyed by region (plus a 'global' key for
    region-less security features), each entry holding per-feature
    lists of matching open issues from DynamoDB.
    """
    params = request_info['request_params']
    security_features = params['security_features']
    regions = params['regions']
    scan_account_id = params['account_id']
    tags = params['tags']

    # Pre-create empty buckets: regional features under each requested
    # region, global features under the shared 'global' key.
    response = {'global': {}}
    for region in regions:
        response[region] = {}
        for feature in security_features:
            key = 'global' if feature in GLOBAL_SECURITY_FEATURES else region
            response[key][feature] = []

    config = Config()
    for security_feature in security_features:
        feature_config = config.get_module_config_by_name(security_feature)
        table = main_account.resource("dynamodb").Table(
            feature_config.ddb_table_name)
        is_global = security_feature in GLOBAL_SECURITY_FEATURES
        for issue in IssueOperations.get_account_open_issues(
                table, scan_account_id):
            if not issue.contains_tags(tags):
                continue
            if not is_global and issue.issue_details.region not in regions:
                continue
            # Issues without a region are filed under 'global'.
            bucket = issue.issue_details.region or 'global'
            response[bucket][security_feature].append({
                'id': issue.issue_id,
                'issue_details': issue.issue_details.as_dict(),
            })
    return response
コード例 #3
0
    def clean_s3bucket_policy_permissions(self, batch=False):
        """ Class method to clean S3 buckets which are violating aws best practices

        Iterates all accounts configured for s3policy remediation, loads
        open S3PolicyIssue records from DynamoDB and, once an issue has
        been open longer than the retention period, backs up and restricts
        the offending bucket policy, then updates JIRA/Slack and marks the
        issue remediated.

        :param batch: when True, remediate without interactive confirmation
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.s3policy.ddb_table_name)
        # Original policies are backed up to this bucket before restriction.
        backup_bucket = config.aws.s3_backup_bucket

        # Days an issue must stay open before automated remediation kicks in.
        retention_period = self.config.s3policy.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.s3policy.remediation_accounts.items(
        ):
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, S3PolicyIssue)
            for issue in issues:
                bucket_name = issue.issue_id

                in_whitelist = self.config.s3policy.in_whitelist(
                    account_id, bucket_name)

                if in_whitelist:
                    logging.debug(f"Skipping {bucket_name} (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(ticket_id=issue.jira_details.ticket,
                                   label=IssueStatus.Whitelisted.value)
                    continue

                # Remediation only makes sense for issues already reported.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{bucket_name}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping {bucket_name} (has been already remediated)"
                    )
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        account = Account(
                            id=account_id,
                            name=account_name,
                            role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # Re-check the bucket live before touching it, in
                        # case the user already fixed or removed it.
                        checker = S3BucketsPolicyChecker(account=account)
                        checker.check(buckets=[bucket_name])
                        s3bucket = checker.get_bucket(bucket_name)
                        if s3bucket is None:
                            # BUGFIX: s3bucket is None in this branch, so the
                            # original f"Bucket {s3bucket.name} ..." raised
                            # AttributeError; log the looked-up name instead.
                            logging.debug(
                                f"Bucket {bucket_name} was removed by user")
                        elif not s3bucket.public_by_policy:
                            logging.debug(
                                f"Bucket {s3bucket.name} policy issue was remediated by user"
                            )
                        else:
                            if not batch and \
                               not confirm(f"Do you want to remediate '{bucket_name}' S3 bucket policy", False):
                                continue

                            logging.debug(
                                f"Remediating '{s3bucket.name}' policy")

                            # Save the current policy to S3 before changing it
                            # so the owner can restore it if needed.
                            backup_path = s3bucket.backup_policy_s3(
                                main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            if s3bucket.restrict_policy():
                                comment = (
                                    f"Policy backup was saved to "
                                    f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                    f"Bucket '{s3bucket.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (
                                    f"Failed to remediate bucket '{s3bucket.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"due to some limitations. Please, check manually"
                                )

                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu,
                                product=product,
                            )
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating bucket '{bucket_name}' policy "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{bucket_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
コード例 #4
0
    def clean_sqs_policy_permissions(self):
        """ Class method to clean SQS queues which are violating aws best practices """
        # Issues are tracked per-account in a DynamoDB table; remediation
        # actions run from the main (hammer) account.
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.sqspolicy.ddb_table_name)
        # Queue policies are backed up to this S3 bucket before restriction.
        backup_bucket = config.aws.s3_backup_bucket

        # Days an issue must stay open before automated remediation kicks in.
        retention_period = self.config.sqspolicy.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, SQSPolicyIssue)
            for issue in issues:
                # issue_id holds the queue URL; details carry name and region.
                queue_url = issue.issue_id
                queue_name = issue.issue_details.name
                queue_region = issue.issue_details.region

                in_whitelist = self.config.sqspolicy.in_whitelist(
                    account_id, queue_url)

                if in_whitelist:
                    logging.debug(f"Skipping {queue_name} (in whitelist)")
                    continue

                # Remediation only makes sense for issues already reported.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{queue_name}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping {queue_name} (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        # Assume the reporting role in the queue's account/region.
                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=issue.issue_details.region,
                            role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # Re-check the queue live before touching it, in case
                        # the user already fixed or removed it.
                        checker = SQSPolicyChecker(account=account)
                        checker.check(queues=[queue_url])
                        queue = checker.get_queue(queue_name)
                        if queue is None:
                            logging.debug(
                                f"Queue {queue_name} was removed by user")
                        elif not queue.public:
                            logging.debug(
                                f"Queue {queue.name} policy issue was remediated by user"
                            )
                        else:
                            logging.debug(f"Remediating '{queue.name}' policy")

                            # Save the current policy to S3 before changing it
                            # so the owner can restore it if needed.
                            backup_path = queue.backup_policy_s3(
                                main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            if queue.restrict_policy():
                                comment = (
                                    f"Policy backup was saved to "
                                    f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                    f"Queue '{queue.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account, '{queue_region}' region "
                                    f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (
                                    f"Failed to remediate queue '{queue.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account, '{queue_region}' region "
                                    f"due to some limitations. Please, check manually"
                                )

                            # Ticket is reassigned back to the reporter only
                            # when the automated remediation succeeded.
                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu,
                                product=product,
                            )
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating queue '{queue_url}' policy "
                            f"in '{account_name} / {account_id}', '{queue_region}' region"
                        )
                else:
                    logging.debug(
                        f"Skipping '{queue_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
コード例 #5
0
def lambda_handler(event, context):
    """ Lambda handler to evaluate public ami issues

    Triggered by SNS; each invocation scans one region of one account
    for public AMIs, records issues in DynamoDB, resolves issues for
    AMIs no longer flagged, and re-publishes the SNS payload until the
    region worklist is empty.
    """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.publicAMIs.ddb_table_name)

        # Assume the identification role in the account under scan.
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for Public AMI issues for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, PublicAMIIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")

        checker = PublicAMIChecker(account=account)
        if checker.check():
            for ami in checker.amis:
                logging.debug(f"Checking {ami.id}")
                if ami.public_access:
                    issue = PublicAMIIssue(account_id, ami.id)
                    issue.issue_details.tags = ami.tags
                    issue.issue_details.name = ami.name
                    issue.issue_details.region = region
                    if config.publicAMIs.in_whitelist(account_id, ami.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    # BUGFIX: the log message interpolated ami.id twice
                    # ("{ami.id}/{ami.id}"); log the id once, matching the
                    # other issue handlers.
                    logging.debug(
                        f"Setting {ami.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(ami.id, None)

            logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated keys
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check AMI public access for '{account_id} ({account_name})'"
        )
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public AMI checking")

    logging.debug(
        f"Checked AMI public access for '{account_id} ({account_name})'")
コード例 #6
0
def lambda_handler(event, context):
    """ Lambda handler to evaluate public EBS snapshots """
    set_logging(level=logging.INFO)

    # The SNS message carries the account to scan plus a worklist of
    # regions; each invocation processes one region and re-publishes the
    # rest (see the chaining block at the bottom).
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.ebsSnapshot.ddb_table_name)

        # Assume the identification role in the account under scan.
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public EBS snapshots in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, EBSPublicSnapshotIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}")

        checker = EBSPublicSnapshotsChecker(account=account)
        if checker.check():
            for snapshot in checker.snapshots:
                if snapshot.public:
                    # Record (or refresh) an issue for every public snapshot;
                    # whitelisted snapshots keep a Whitelisted status instead
                    # of Open.
                    issue = EBSPublicSnapshotIssue(account_id, snapshot.id)
                    issue.issue_details.region = snapshot.account.region
                    issue.issue_details.volume_id = snapshot.volume_id
                    issue.issue_details.tags = snapshot.tags
                    if config.ebsSnapshot.in_whitelist(account_id,
                                                       snapshot.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(
                        f"Setting {snapshot.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(snapshot.id, None)

            logging.debug(
                f"Public EBS snapshots in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated EBS snapshots
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public EBS snapshots checking")

    logging.debug(
        f"Checked public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
    )
コード例 #7
0
    def clean_iam_access_keys(self, batch=False):
        """ Class method to remediate IAM User access keys which are not used

        Iterates all configured accounts, loads open IAMKeyInactiveIssue
        records from DynamoDB and disables access keys whose issues have
        been open longer than the retention period, then updates
        JIRA/Slack and marks the issue remediated.

        :param batch: when True, remediate without interactive confirmation
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.iamUserInactiveKeys.ddb_table_name)

        # Days an issue must stay open before automated remediation kicks in.
        retention_period = self.config.iamUserInactiveKeys.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug("* Account Name:" + account_name +
                          " :::Account ID:::" + account_id)
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, IAMKeyInactiveIssue)
            for issue in issues:
                key_id = issue.issue_id
                username = issue.issue_details.username

                # Either the user or the specific key may be whitelisted.
                user_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(
                    account_id, username)
                key_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(
                    account_id, key_id)

                if user_in_whitelist or key_in_whitelist:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (in whitelist)")
                    continue

                # Remediation only makes sense for issues already reported.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (has been already remediated)"
                    )
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate inactive access key '{key_id} / {username}'", False):
                            continue

                        account = Account(
                            id=account_id,
                            name=account_name,
                            role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        logging.debug(
                            f"Remediating inactive access key '{key_id} / {username}'"
                        )
                        remediation_succeed = True
                        try:
                            IAMOperations.disable_access_key(
                                account.client("iam"), username, key_id)
                            comment = (
                                f"Inactive access key '{key_id} / {username}' issue "
                                f"in '{account_name} / {account_id}' account "
                                f"was remediated by hammer")
                        except Exception:
                            remediation_succeed = False
                            # BUGFIX: this message was a plain string literal,
                            # so {key_id}/{username} were never interpolated;
                            # it must be an f-string.
                            logging.exception(
                                f"Failed to disable '{key_id} / {username}' inactive access key"
                            )
                            comment = (
                                f"Failed to remediate inactive access key '{key_id} / {username}' issue "
                                f"in '{account_name} / {account_id}' account "
                                f"due to some limitations. Please, check manually"
                            )

                        # Ticket is reassigned back to the reporter only when
                        # the automated remediation succeeded.
                        jira.remediate_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment,
                            reassign=remediation_succeed,
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            account_id=account_id,
                        )
                        IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while disabling '{username} / {key_id}' "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
コード例 #8
0
    def cleans3bucketunencrypted(self, batch=False):
        """ Class method to clean S3 buckets which are violating aws best practices

        Iterates all configured accounts, loads open S3EncryptionIssue
        records from DynamoDB and enables default encryption on buckets
        whose issues have been open longer than the retention period,
        then updates JIRA/Slack and marks the issue remediated.

        :param batch: when True, remediate without interactive confirmation
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(self.config.s3Encrypt.ddb_table_name)

        # Days an issue must stay open before automated remediation kicks in.
        retention_period = self.config.s3Encrypt.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3EncryptionIssue)
            for issue in issues:
                bucket_name = issue.issue_id

                in_whitelist = self.config.s3Encrypt.in_whitelist(account_id, bucket_name)
                # NOTE: the former 'in_fixlist' flag was hard-coded to True,
                # making its skip branch dead code; it has been removed.

                if in_whitelist:
                    logging.debug(f"Skipping {bucket_name} (in whitelist)")
                    continue

                # Remediation only makes sense for issues already reported.
                if issue.timestamps.reported is None:
                    logging.debug(f"Skipping '{bucket_name}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(f"Skipping {bucket_name} (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now - updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate '{bucket_name}' S3 bucket unencrypted", False):
                            continue

                        account = Account(id=account_id,
                                          name=account_name,
                                          role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # Re-check the bucket live before touching it, in case
                        # the user already fixed or removed it.
                        checker = S3EncryptionChecker(account=account)
                        checker.check(buckets=[bucket_name])
                        s3bucket = checker.get_bucket(bucket_name)

                        if s3bucket is None:
                            # BUGFIX: s3bucket is None in this branch, so the
                            # original f"Bucket {s3bucket.name} ..." raised
                            # AttributeError; log the looked-up name instead.
                            logging.debug(f"Bucket {bucket_name} was removed by user")
                        elif s3bucket.encrypted:
                            logging.debug(f"Bucket {s3bucket.name} unencrypted issue was remediated by user")
                        else:
                            logging.debug(f"Remediating '{s3bucket.name}' unencrypted")
                            remediation_succeed = True
                            if s3bucket.encrypt_bucket():
                                comment = (f"Bucket '{s3bucket.name}' unencrypted issue "
                                           f"in '{account_name} / {account_id}' account "
                                           f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (f"Failed to remediate bucket '{s3bucket.name}' unencrypted issue "
                                           f"in '{account_name} / {account_id}' account "
                                           f"due to some limitations. Please, check manually")

                            # Ticket is reassigned back to the reporter only
                            # when the automated remediation succeeded.
                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                    f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu, product=product,
                            )
                            IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(f"Error occurred while updating bucket '{bucket_name}' unencrypted "
                                          f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(f"Skipping '{bucket_name}' "
                                  f"({retention_period - no_of_days_issue_created} days before remediation)")
コード例 #9
0
    def clean_public_rds_snapshots(self, batch=False):
        """ Remediate publicly shared RDS snapshots.

        Walks open RdsPublicSnapshotIssue records in DynamoDB for every
        account configured for remediation.  An issue is acted upon only
        when it was reported, is not whitelisted, was not already
        remediated, and has been open for at least the configured retention
        period.  Remediation makes the snapshot private, then reports the
        outcome to Jira/Slack and marks the issue remediated in DDB.

        :param batch: when True, remediate without interactive confirmation
        """
        # NOTE(review): was module-level `config` here — use the instance
        # config for consistency with the rest of this method.
        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.rdsSnapshot.ddb_table_name)

        # number of days an issue must stay reported before auto-remediation
        retention_period = self.config.rdsSnapshot.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.rdsSnapshot.remediation_accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, RdsPublicSnapshotIssue)
            for issue in issues:
                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (has been already remediated)"
                    )
                    continue

                in_whitelist = self.config.rdsSnapshot.in_whitelist(
                    account_id, issue.issue_id)
                if in_whitelist:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(ticket_id=issue.jira_details.ticket,
                                   label=IssueStatus.Whitelisted.value)
                    continue

                # remediation requires the issue to have been reported first
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (was not reported)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate public RDS snapshot '{issue.issue_id}'", False):
                            continue

                        # assume reporting role in the target account
                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=issue.issue_details.region,
                            role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        remediation_succeed = True
                        try:
                            RdsSnapshotOperations.make_private(
                                account.client("rds"),
                                issue.issue_details.engine,
                                issue.issue_details.name)
                            comment = (
                                f"RDS public snapshot '{issue.issue_id}' issue "
                                f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region "
                                f"was remediated by hammer")
                        except Exception:
                            # keep going — still report the failure to Jira/Slack
                            remediation_succeed = False
                            logging.exception(
                                f"Failed to make private '{issue.issue_id}' RDS public snapshot"
                            )
                            comment = (
                                f"Failed to remediate RDS public snapshot '{issue.issue_id}' issue "
                                f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region "
                                f"due to some limitations. Please, check manually"
                            )

                        jira.remediate_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment,
                            reassign=remediation_succeed,
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu,
                            product=product,
                        )
                        IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating RDS snapshot {issue.issue_id} "
                            f"in {account_id}/{issue.issue_details.region}")
コード例 #10
0
def lambda_handler(event, context):
    """ Lambda handler to evaluate insecure services.

    Triggered by an SNS message carrying the target account id/name, an SNS
    topic ARN and a list of regions still to scan.  Scans ONE region per
    invocation, then re-publishes the shortened message to the same topic
    until the region list is empty.
    """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        # malformed event — nothing sensible to chain, so bail out
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # issues DDB table lives in the main (hammer) account
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.sg.ddb_table_name)

        # assume identification role in the target account
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for insecure services in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, SecurityGroupIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")

        checker = SecurityGroupsChecker(
            account=account, restricted_ports=config.sg.restricted_ports)
        if checker.check():
            for sg in checker.groups:
                logging.debug(f"Checking {sg.name} ({sg.id})")
                if not sg.restricted:
                    # TODO: move instances detection for security group from reporting to identification
                    #ec2_instances = EC2Operations.get_instance_details_of_sg_associated(account.client("ec2"), sg.id)
                    #logging.debug(f"associated ec2 instances: {ec2_instances}")
                    issue = SecurityGroupIssue(account_id, sg.id)
                    issue.issue_details.name = sg.name
                    issue.issue_details.region = sg.account.region
                    issue.issue_details.tags = sg.tags
                    issue.issue_details.status = sg.status.value
                    # record every unrestricted ip range of the group on the issue
                    for perm in sg.permissions:
                        for ip_range in perm.ip_ranges:
                            if not ip_range.restricted:
                                issue.add_perm(perm.protocol, perm.from_port,
                                               perm.to_port, ip_range.cidr,
                                               ip_range.status)
                    # whitelisting can match either by group name or by group id
                    if config.sg.in_whitelist(
                            account_id, sg.name) or config.sg.in_whitelist(
                                account_id, sg.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {sg.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(sg.id, None)

            logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated security groups
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        # swallow on purpose — the SNS chaining below must still happen
        # so the remaining regions get scanned
        logging.exception(
            f"Failed to check insecure services in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(
        f"Checked insecure services in '{region}' for '{account_id} ({account_name})'"
    )
コード例 #11
0
def lambda_handler(event, context):
    """ Lambda handler to evaluate s3 buckets acl.

    Triggered by an SNS message with the target account id/name; checks all
    buckets in one pass (no per-region fan-out here, unlike the regional
    checkers) and syncs the findings with the issues DynamoDB table.
    """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # issues DDB table lives in the main (hammer) account
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.s3acl.ddb_table_name)

        # assume identification role in the target account (no region —
        # S3 bucket ACLs are checked account-wide)
        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public S3 ACLs in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, S3AclIssue)
        # make dictionary for fast search by id
        # (no region filtering here — this check is account-wide)
        open_issues = {issue.issue_id: issue for issue in open_issues}
        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")

        checker = S3BucketsAclChecker(account=account)
        if not checker.check():
            return

        for bucket in checker.buckets:
            logging.debug(f"Checking {bucket.name}")
            if bucket.public:
                issue = S3AclIssue(account_id, bucket.name)
                issue.issue_details.owner = bucket.owner
                issue.issue_details.public_acls = bucket.get_public_acls()
                issue.issue_details.tags = bucket.tags
                if config.s3acl.in_whitelist(account_id, bucket.name):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {bucket.name} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from issues_list_from_db (if exists)
                # as we already checked it
                open_issues.pop(bucket.name, None)

        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated buckets
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
        # track the progress of an API-initiated scan request
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check s3 acls for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked s3 acls for '{account_id} ({account_name})'")
コード例 #12
0
def lambda_handler(event, context):
    """ Lambda handler to evaluate iam user keys rotation.

    Triggered by an SNS message with the target account id/name; detects
    access keys older than the rotation criteria and syncs the findings
    with the issues DynamoDB table.
    """
    set_logging(level=logging.INFO)

    # Extract the target account details from the SNS message that triggered us.
    try:
        message = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = message['account_id']
        account_name = message['account_name']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # Issues DDB table lives in the main (hammer) account.
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.iamUserKeysRotation.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for IAM user keys rotation for {account}")

        # Index the currently-open issues by key id so every detected stale
        # key can be matched (and removed) in O(1); whatever remains at the
        # end is treated as resolved.
        known_issues = {
            issue.issue_id: issue
            for issue in IssueOperations.get_account_open_issues(
                ddb_table, account_id, IAMKeyRotationIssue)
        }
        logging.debug(
            f"Users with keys to rotate in DDB:\n{known_issues.keys()}")

        checker = IAMKeyChecker(account=account,
                                now=config.now,
                                rotation_criteria_days=config.iamUserKeysRotation.rotation_criteria_days)
        if not checker.check(last_used_check_enabled=False):
            return

        for user in checker.users:
            for key in user.stale_keys:
                issue = IAMKeyRotationIssue(account_id, key.id)
                issue.issue_details.username = user.id
                issue.issue_details.create_date = key.create_date.isoformat()
                # whitelisting can match either the key id or the user id
                whitelisted = (
                    config.iamUserKeysRotation.in_whitelist(account_id, key.id)
                    or config.iamUserKeysRotation.in_whitelist(account_id, user.id))
                issue.status = IssueStatus.Whitelisted if whitelisted else IssueStatus.Open
                logging.debug(
                    f"Setting {key.id}/{user.id} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # seen during this scan -> not a leftover
                known_issues.pop(key.id, None)

        logging.debug(f"Keys to rotate in DDB:\n{known_issues.keys()}")
        # every unmatched issue refers to a key that no longer violates the policy
        for issue in known_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check IAM user keys rotation for '{account_id} ({account_name})'"
        )
        return

    logging.debug(
        f"Checked IAM user keys rotation for '{account_id} ({account_name})'")
コード例 #13
0
def lambda_handler(event, context):
    """ Lambda Handler to describe cloud trails enabled or not for each region.

    Triggered by an SNS message carrying the target account id/name and a
    list of regions still to scan.  Scans ONE region per invocation and
    re-publishes the shortened message until the region list is empty.
    For this check the issue id IS the region name.
    """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        # malformed event — nothing sensible to chain, so bail out
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # issues DDB table lives in the main (hammer) account
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.cloudtrails.ddb_table_name)

        # assume identification role in the target account
        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for CloudTrail logging issues in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, CloudTrailIssue)
        # make dictionary for fast search by id
        # and filter by current region (issue_id holds the region name)
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_id == region
        }
        logging.debug(f"CloudTrail region in DDB:\n{open_issues.keys()}")

        checker = CloudTrailChecker(account=account)
        if checker.check():
            if checker.disabled or checker.delivery_errors:
                # at most one issue per region for this check
                issue = CloudTrailIssue(account_id, region)
                issue.issue_details.disabled = checker.disabled
                issue.issue_details.delivery_errors = checker.delivery_errors
                issue.add_trails(checker.trails)
                if config.cloudtrails.in_whitelist(account_id, region):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {region} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
            # issue exists in ddb and was fixed
            elif region in open_issues:
                IssueOperations.set_status_resolved(ddb_table,
                                                    open_issues[region])
        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        # swallow on purpose — the SNS chaining below must still happen
        # so the remaining regions get scanned
        logging.exception(
            f"Failed to check CloudTrail in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain CloudTrail checking")

    logging.debug(
        f"Checked CloudTrail in '{region}' for '{account_id} ({account_name})'"
    )
コード例 #14
0
    def clean_ami_public_access(self):
        """ Remediate publicly shared AMIs.

        Walks open PublicAMIIssue records in DynamoDB for every configured
        account.  An issue is acted upon only when it was reported, is not
        whitelisted, was not already remediated, and has been open for at
        least the configured retention period.  The current state of the AMI
        is re-checked first so AMIs already fixed or removed by the owner are
        left alone; otherwise public launch permission is revoked, the
        outcome is reported to Jira/Slack and the issue is marked remediated.
        """
        # NOTE(review): was module-level `config` here — use the instance
        # config for consistency with the rest of this method.
        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.publicAMIs.ddb_table_name)

        # number of days an issue must stay reported before auto-remediation
        retention_period = self.config.publicAMIs.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, PublicAMIIssue)
            for issue in issues:
                ami_id = issue.issue_id

                in_whitelist = self.config.publicAMIs.in_whitelist(
                    account_id, ami_id)

                if in_whitelist:
                    logging.debug(f"Skipping {ami_id} (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(ticket_id=issue.jira_details.ticket,
                                   label=IssueStatus.Whitelisted.value)
                    continue

                # remediation requires the issue to have been reported first
                if issue.timestamps.reported is None:
                    logging.debug(f"Skipping '{ami_id}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping {ami_id} (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        # assume reporting role in the target account
                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=issue.issue_details.region,
                            role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # re-check current state so we do not touch AMIs
                        # already fixed or removed by the owner
                        checker = PublicAMIChecker(account=account)
                        checker.check(amis_to_check=[ami_id])
                        ami = checker.get_ami(ami_id)
                        if ami is None:
                            logging.debug(f"AMI {ami_id} was removed by user")
                        elif not ami.public_access:
                            logging.debug(
                                f"AMI {ami.name} public access issue was remediated by user"
                            )
                        else:
                            logging.debug(f"Remediating '{ami.name}' ")

                            remediation_succeed = True
                            if ami.modify_image_attribute():
                                comment = (
                                    f"AMI '{ami.name}' public access issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (
                                    f"Failed to remediate AMI '{ami.name}' public access issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"due to some limitations. Please, check manually"
                                )

                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu,
                                product=product,
                            )
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating AMI '{ami_id}' access "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{ami_id}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
コード例 #15
0
    def clean_security_groups(self, batch=False):
        """ Remediate security groups which violate AWS best practices.

        Walks open SecurityGroupIssue records in DynamoDB for every account
        configured for remediation.  An issue is acted upon only when it was
        reported, is not whitelisted, was not already remediated, and has
        been open for at least the configured retention period.  The group's
        current state is re-checked first; only completely open groups are
        touched.  Rules are backed up to S3 before being restricted, then the
        outcome is reported to Jira/Slack and the issue is marked remediated.

        :param batch: when True, remediate without interactive confirmation
        """
        # NOTE(review): was module-level `config` in the next two lines —
        # use the instance config for consistency with the rest of the method.
        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(self.config.sg.ddb_table_name)
        backup_bucket = self.config.aws.s3_backup_bucket

        # number of days an issue must stay reported before auto-remediation
        retention_period = self.config.sg.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.sg.remediation_accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
            for issue in issues:
                group_name = issue.issue_details.name
                group_vpc_id = issue.issue_details.vpc_id
                group_id = issue.issue_id
                group_region = issue.issue_details.region

                # whitelisting can match either "<vpc_id>:<name>" or the group id
                name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}")
                id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id)

                if name_in_whitelist or id_in_whitelist:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(
                        ticket_id=issue.jira_details.ticket,
                        label=IssueStatus.Whitelisted.value
                    )
                    continue

                # remediation requires the issue to have been reported first
                if issue.timestamps.reported is None:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now - updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        # assume reporting role in the target account
                        account = Account(id=account_id,
                                          name=account_name,
                                          region=group_region,
                                          role_name=self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # re-check current state so we do not touch groups
                        # already fixed or removed by the owner
                        checker = SecurityGroupsChecker(account=account,
                                                        restricted_ports=self.config.sg.restricted_ports)
                        checker.check(ids=[group_id])
                        sg = checker.get_security_group(group_id)
                        if sg is None:
                            logging.debug(f"Security group '{group_name} / {group_id}' was removed by user")
                        elif sg.restricted:
                            logging.debug(f"Security group '{group_name} / {group_id}' issue was remediated by user")
                        elif sg.status != RestrictionStatus.OpenCompletely:
                            logging.debug(f"Security group '{group_name} / {group_id}' is not completely open")
                        else:
                            if not batch and \
                               not confirm(f"Do you want to remediate security group '{group_name} / {group_id}'", False):
                                continue

                            logging.debug(f"Remediating '{group_name} / {group_id}' rules")

                            # back up current rules to S3 before touching them
                            backup_path = sg.backup_s3(main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            processed = sg.restrict(RestrictionStatus.OpenCompletely)
                            if processed == 0:
                                # nothing to do — skip Jira/Slack reporting below
                                logging.debug(f"No rules were detected to remediate in '{group_name} / {group_id}'")
                                comment = None
                            elif processed is None:
                                remediation_succeed = False
                                comment = (f"Failed to remediate security group '{group_name} / {group_id}' issue "
                                           f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                           f"due to some limitations. Please, check manually")
                            else:
                                comment = (f"Rules backup was saved to "
                                           f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                           f"Security group '{group_name} / {group_id}' `{RestrictionStatus.OpenCompletely.value}` issue "
                                           f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                           f"was remediated by hammer")

                            if comment is not None:
                                jira.remediate_issue(
                                    ticket_id=issue.jira_details.ticket,
                                    comment=comment,
                                    reassign=remediation_succeed,
                                )
                                slack.report_issue(
                                    msg=f"{comment}"
                                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                    owner=owner,
                                    account_id=account_id,
                                    bu=bu, product=product,
                                )
                                IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(f"Error occurred while updating security group '{group_name} / {group_id}' rules "
                                          f"in '{account_name} / {account_id} / {group_region}'")
                else:
                    logging.debug(f"Skipping '{group_name} / {group_id}' "
                                  f"({retention_period - no_of_days_issue_created} days before remediation)")
コード例 #16
0
def lambda_handler(event, context):
    """Lambda handler to evaluate SQS queue policies for one account/region.

    Triggered by an SNS message whose payload carries the account to scan and
    the list of regions still to process. The handler:
      1. pops one region from the payload and scans it for publicly
         accessible SQS queue policies,
      2. records new/updated issues in DynamoDB and resolves issues for
         queues that are no longer public,
      3. re-publishes the payload to SNS while regions remain, chaining
         itself until the whole region list is processed.

    :param event: SNS-wrapped event; the message body is a JSON payload with
                  'account_id', 'account_name', 'regions' and 'sns_arn' keys
    :param context: Lambda context object (unused)
    :return: None; all failures are logged, never raised to the runtime
    """
    set_logging(level=logging.DEBUG)

    # Parse the SNS-delivered payload; a malformed event is logged and dropped.
    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process; the remaining regions
        # are re-published to SNS at the end of this handler
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        # issues DDB table lives in the main (hammer) account
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sqspolicy.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        # no session means the identification role could not be assumed - nothing to scan
        if account.session is None:
            return

        logging.debug(f"Checking for public SQS policies in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")

        checker = SQSPolicyChecker(account=account)
        if checker.check():
            for queue in checker.queues:
                logging.debug(f"Checking {queue.name}")
                # only public queues produce/refresh issues
                if not queue.public:
                    continue
                issue = SQSPolicyIssue(account_id, queue.url)
                issue.issue_details.tags = queue.tags
                issue.issue_details.name = queue.name
                issue.issue_details.region = queue.account.region
                issue.issue_details.policy = queue.policy
                if config.sqspolicy.in_whitelist(account_id, queue.url):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {queue.name} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from open_issues (if exists)
                # as we already checked it
                open_issues.pop(queue.url, None)

        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated queues
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check SQS policies for '{account_id} ({account_name})'")
        return

    # push SNS messages until the list with regions to check is empty
    if payload['regions']:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(f"Checked SQS policies for '{account_id} ({account_name})'")