Example 1
def lambda_handler(event, context):
    """CloudFormation custom-resource handler: find the latest AMI for PRODUCT_CODE.

    On "Delete" requests it reports SUCCESS immediately (this resource
    provisions nothing, so there is nothing to clean up).  Otherwise it
    describes marketplace images filtered by product code in the requested
    region and responds with the newest image id as ``{'Id': <ami-id>}``.
    """
    set_logging(level=logging.DEBUG)

    logging.debug(f"Got request\n{jsonDumps(event)}")

    # Nothing to tear down for this custom resource - always succeed on delete.
    if event.get('RequestType', "") == "Delete":
        send_response(event, context, "SUCCESS")
        return

    region = event.get('ResourceProperties', {}).get('Region', None)
    if region is None:
        logging.error("Failed to get region from event")
        send_response(event, context, "FAILED")
        # Bug fix: without this return, execution continued past the FAILED
        # response and called Account(region=None) below.
        return

    try:
        account = Account(region=region)
        ec2 = account.client(
            'ec2', config=botocore.config.Config(retries={'max_attempts': 3}))
        images = ec2.describe_images(Filters=[{
            "Name": "product-code",
            "Values": [PRODUCT_CODE]
        }])['Images']
    except Exception:
        logging.exception("Failed to describe images")
        send_response(event, context, "FAILED")
        return

    if len(images) == 0:
        logging.error("No images were found")
        send_response(event, context, "FAILED")
        return

    # 'CreationDate' is an ISO-8601 timestamp string, so a lexicographic
    # sort is chronological; the last element is the newest image.
    latest = sorted(images, key=itemgetter('CreationDate'))[-1]['ImageId']
    logging.info(f"Latest '{PRODUCT_CODE}' AMI id - '{latest}'")
    send_response(event, context, "SUCCESS", {'Id': latest})
    def clean_s3bucket_policy_permissions(self, batch=False):
        """Remediate S3 buckets whose bucket policy makes them public.

        Iterates over the configured remediation accounts, loads open
        S3PolicyIssue records from DynamoDB and, for each issue older than
        the retention period, backs up the bucket policy to S3 and then
        restricts it.  Jira and Slack are updated with the outcome.

        :param batch: when True, remediate without interactive confirmation.
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.s3policy.ddb_table_name)
        backup_bucket = config.aws.s3_backup_bucket

        # Days an issue must stay open before automatic remediation kicks in.
        retention_period = self.config.s3policy.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.s3policy.remediation_accounts.items(
        ):
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, S3PolicyIssue)
            for issue in issues:
                bucket_name = issue.issue_id

                in_whitelist = self.config.s3policy.in_whitelist(
                    account_id, bucket_name)
                #in_fixlist = self.config.s3policy.in_fixnow(account_id, bucket_name)

                if in_whitelist:
                    logging.debug(f"Skipping {bucket_name} (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(ticket_id=issue.jira_details.ticket,
                                   label=IssueStatus.Whitelisted.value)
                    continue
                # if not in_fixlist:
                #     logging.debug(f"Skipping {bucket_name} (not in fixlist)")
                #     continue

                # Only remediate issues that were already reported to the owner.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{bucket_name}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping {bucket_name} (has been already remediated)"
                    )
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        account = Account(
                            id=account_id,
                            name=account_name,
                            role_name=self.config.aws.role_name_reporting)
                        # No session means the reporting role could not be
                        # assumed in this account; skip the issue.
                        if account.session is None:
                            continue

                        # Re-check the bucket so we do not touch issues the
                        # user already fixed (or buckets already removed).
                        checker = S3BucketsPolicyChecker(account=account)
                        checker.check(buckets=[bucket_name])
                        s3bucket = checker.get_bucket(bucket_name)
                        if s3bucket is None:
                            # Bug fix: s3bucket is None in this branch, so the
                            # original f"Bucket {s3bucket.name} ..." raised
                            # AttributeError; use bucket_name instead.
                            logging.debug(
                                f"Bucket {bucket_name} was removed by user")
                        elif not s3bucket.public_by_policy:
                            logging.debug(
                                f"Bucket {s3bucket.name} policy issue was remediated by user"
                            )
                        else:
                            if not batch and \
                               not confirm(f"Do you want to remediate '{bucket_name}' S3 bucket policy", False):
                                continue

                            logging.debug(
                                f"Remediating '{s3bucket.name}' policy")

                            # Back up the current policy to S3 before
                            # restricting it, so it can be restored manually.
                            backup_path = s3bucket.backup_policy_s3(
                                main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            if s3bucket.restrict_policy():
                                comment = (
                                    f"Policy backup was saved to "
                                    f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                    f"Bucket '{s3bucket.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (
                                    f"Failed to remediate bucket '{s3bucket.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account "
                                    f"due to some limitations. Please, check manually"
                                )

                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu,
                                product=product,
                            )
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating bucket '{bucket_name}' policy "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{bucket_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
    def clean_sqs_policy_permissions(self):
        """Remediate SQS queues whose access policy makes them public.

        Iterates over all configured AWS accounts, loads open SQSPolicyIssue
        records from DynamoDB and, for each issue older than the retention
        period, backs up the queue policy to S3 and then restricts it.
        Jira and Slack are updated with the outcome.
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.sqspolicy.ddb_table_name)
        backup_bucket = config.aws.s3_backup_bucket

        # Days an issue must stay open before automatic remediation kicks in.
        retention_period = self.config.sqspolicy.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, SQSPolicyIssue)
            for issue in issues:
                # issue_id holds the queue URL; name/region come from details.
                queue_url = issue.issue_id
                queue_name = issue.issue_details.name
                queue_region = issue.issue_details.region

                in_whitelist = self.config.sqspolicy.in_whitelist(
                    account_id, queue_url)

                if in_whitelist:
                    logging.debug(f"Skipping {queue_name} (in whitelist)")
                    continue

                # Only remediate issues that were already reported to the owner.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{queue_name}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping {queue_name} (has been already remediated)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=issue.issue_details.region,
                            role_name=self.config.aws.role_name_reporting)
                        # No session means the reporting role could not be
                        # assumed in this account; skip the issue.
                        if account.session is None:
                            continue

                        # Re-check the queue so we do not touch issues the
                        # user already fixed (or queues already removed).
                        checker = SQSPolicyChecker(account=account)
                        checker.check(queues=[queue_url])
                        queue = checker.get_queue(queue_name)
                        if queue is None:
                            logging.debug(
                                f"Queue {queue_name} was removed by user")
                        elif not queue.public:
                            logging.debug(
                                f"Queue {queue.name} policy issue was remediated by user"
                            )
                        else:
                            logging.debug(f"Remediating '{queue.name}' policy")

                            # Back up the current policy to S3 before
                            # restricting it, so it can be restored manually.
                            backup_path = queue.backup_policy_s3(
                                main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            if queue.restrict_policy():
                                comment = (
                                    f"Policy backup was saved to "
                                    f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                    f"Queue '{queue.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account, '{queue_region}' region "
                                    f"was remediated by hammer")
                            else:
                                remediation_succeed = False
                                comment = (
                                    f"Failed to remediate queue '{queue.name}' policy issue "
                                    f"in '{account_name} / {account_id}' account, '{queue_region}' region "
                                    f"due to some limitations. Please, check manually"
                                )

                            jira.remediate_issue(
                                ticket_id=issue.jira_details.ticket,
                                comment=comment,
                                reassign=remediation_succeed,
                            )
                            slack.report_issue(
                                msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                owner=owner,
                                account_id=account_id,
                                bu=bu,
                                product=product,
                            )
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating queue '{queue_url}' policy "
                            f"in '{account_name} / {account_id}', '{queue_region}' region"
                        )
                else:
                    logging.debug(
                        f"Skipping '{queue_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
Example 4
    def clean_iam_access_keys(self, batch=False):
        """Disable IAM user access keys that are reported as inactive/unused.

        Iterates over all configured AWS accounts, loads open
        IAMKeyInactiveIssue records from DynamoDB and, for each issue older
        than the retention period, disables the key via IAM.  Jira and Slack
        are updated with the outcome.

        :param batch: when True, remediate without interactive confirmation.
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.iamUserInactiveKeys.ddb_table_name)

        # Days an issue must stay open before automatic remediation kicks in.
        retention_period = self.config.iamUserInactiveKeys.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.aws.accounts.items():
            logging.debug("* Account Name:" + account_name +
                          " :::Account ID:::" + account_id)
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, IAMKeyInactiveIssue)
            for issue in issues:
                key_id = issue.issue_id
                username = issue.issue_details.username

                # Either the user or the specific key may be whitelisted.
                user_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(
                    account_id, username)
                key_in_whitelist = self.config.iamUserInactiveKeys.in_whitelist(
                    account_id, key_id)

                if user_in_whitelist or key_in_whitelist:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (in whitelist)")
                    continue

                # Only remediate issues that were already reported to the owner.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' (has been already remediated)"
                    )
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate inactive access key '{key_id} / {username}'", False):
                            continue

                        account = Account(
                            id=account_id,
                            name=account_name,
                            role_name=self.config.aws.role_name_reporting)
                        # No session means the reporting role could not be
                        # assumed in this account; skip the issue.
                        if account.session is None:
                            continue

                        logging.debug(
                            f"Remediating inactive access key '{key_id} / {username}'"
                        )
                        remediation_succeed = True
                        try:
                            IAMOperations.disable_access_key(
                                account.client("iam"), username, key_id)
                            comment = (
                                f"Inactive access key '{key_id} / {username}' issue "
                                f"in '{account_name} / {account_id}' account "
                                f"was remediated by hammer")
                        except Exception:
                            remediation_succeed = False
                            # Bug fix: the original message was missing the
                            # 'f' prefix, logging literal '{key_id}' text.
                            logging.exception(
                                f"Failed to disable '{key_id} / {username}' inactive access key"
                            )
                            comment = (
                                f"Failed to remediate inactive access key '{key_id} / {username}' issue "
                                f"in '{account_name} / {account_id}' account "
                                f"due to some limitations. Please, check manually"
                            )

                        jira.remediate_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment,
                            reassign=remediation_succeed,
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            account_id=account_id,
                        )
                        IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while disabling '{username} / {key_id}' "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{key_id} / {username}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
Example 5
    def clean_public_rds_snapshots(self, batch=False):
        """Remediate RDS snapshots that are shared publicly.

        Iterates over the configured remediation accounts, loads open
        RdsPublicSnapshotIssue records from DynamoDB and, for each issue
        older than the retention period, makes the snapshot private.  Jira
        and Slack are updated with the outcome.

        :param batch: when True, remediate without interactive confirmation.
        """
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            self.config.rdsSnapshot.ddb_table_name)
        #backup_bucket = config.aws.s3_backup_bucket

        # Days an issue must stay open before automatic remediation kicks in.
        retention_period = self.config.rdsSnapshot.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.rdsSnapshot.remediation_accounts.items(
        ):
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(
                ddb_table, account_id, RdsPublicSnapshotIssue)
            for issue in issues:
                if issue.timestamps.remediated is not None:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (has been already remediated)"
                    )
                    continue

                in_whitelist = self.config.rdsSnapshot.in_whitelist(
                    account_id, issue.issue_id)
                if in_whitelist:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(ticket_id=issue.jira_details.ticket,
                                   label=IssueStatus.Whitelisted.value)
                    continue

                # Only remediate issues that were already reported to the owner.
                if issue.timestamps.reported is None:
                    logging.debug(
                        f"Skipping '{issue.issue_id}' (was not reported)")
                    continue

                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now -
                                            updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        if not batch and \
                           not confirm(f"Do you want to remediate public RDS snapshot '{issue.issue_id}'", False):
                            continue

                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=issue.issue_details.region,
                            role_name=self.config.aws.role_name_reporting)
                        # No session means the reporting role could not be
                        # assumed in this account; skip the issue.
                        if account.session is None:
                            continue

                        remediation_succeed = True
                        try:
                            RdsSnapshotOperations.make_private(
                                account.client("rds"),
                                issue.issue_details.engine,
                                issue.issue_details.name)
                            comment = (
                                f"RDS public snapshot '{issue.issue_id}' issue "
                                f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region "
                                f"was remediated by hammer")
                        except Exception:
                            remediation_succeed = False
                            logging.exception(
                                f"Failed to make private '{issue.issue_id}' RDS public snapshot"
                            )
                            comment = (
                                f"Failed to remediate RDS public snapshot '{issue.issue_id}' issue "
                                f"in '{account_name} / {account_id}' account, '{issue.issue_details.region}' region "
                                f"due to some limitations. Please, check manually"
                            )

                        jira.remediate_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment,
                            reassign=remediation_succeed,
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu,
                            product=product,
                        )
                        IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating RDS snapshot {issue.issue_id} "
                            f"in {account_id}/{issue.issue_details.region}")
                else:
                    # Consistency fix: every sibling remediation method logs
                    # why a not-yet-due issue is skipped; this one did not.
                    logging.debug(
                        f"Skipping '{issue.issue_id}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )
Example 6
    def create_tickets_securitygroups(self):
        """ Class function to create jira tickets """
        table_name = self.config.sg.ddb_table_name

        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(table_name)
        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.sg.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, SecurityGroupIssue)
            for issue in issues:
                group_id = issue.issue_id
                group_name = issue.issue_details.name
                group_region = issue.issue_details.region
                group_vpc_id = issue.issue_details.vpc_id
                tags = issue.issue_details.tags
                # issue has been already reported
                if issue.timestamps.reported is not None:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:
                        logging.debug(f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue")

                        comment = (f"Closing {issue.status.value} security group '{group_name} / {group_id}' issue "
                                   f"in '{account_name} / {account_id}' account, '{group_region}' region")
                        if issue.status == IssueStatus.Whitelisted:
                            # Adding label with "whitelisted" to jira ticket.
                            jira.add_label(
                                ticket_id=issue.jira_details.ticket,
                                label=IssueStatus.Whitelisted.value
                            )
                        jira.close_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment
                        )
                        slack.report_issue(
                            msg=f"{comment}"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_closed(ddb_table, issue)
                    # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                    elif issue.timestamps.updated > issue.timestamps.reported:
                        logging.debug(f"Updating security group '{group_name} / {group_id}' issue")

                        comment = "Issue details are changed, please check again.\n"
                        comment += self.build_open_ports_table_jira(issue.issue_details.perms)
                        comment += JiraOperations.build_tags_table(tags)
                        jira.update_issue(
                            ticket_id=issue.jira_details.ticket,
                            comment=comment
                        )
                        slack.report_issue(
                            msg=f"Security group '{group_name} / {group_id}' issue is changed "
                                f"in '{account_name} / {account_id}' account, '{group_region}' region"
                                f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}"
                                f"\n"
                                f"{self.build_open_ports_table_slack(issue.issue_details.perms)}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                        IssueOperations.set_status_updated(ddb_table, issue)
                    else:
                        logging.debug(f"No changes for '{group_name} / {group_id}'")
                # issue has not been reported yet
                else:
                    logging.debug(f"Reporting security group '{group_name} / {group_id}' issue")

                    status = RestrictionStatus(issue.issue_details.status)
                    # if owner/bu/product tags exist on security group - use it
                    group_owner = tags.get("owner", None)
                    group_bu = tags.get("bu", None)
                    group_product = tags.get("product", None)

                    open_port_details = self.build_open_ports_table_jira(issue.issue_details.perms)

                    account_details = (f"*Risk*: High\n\n"
                                       f"*Account Name*: {account_name}\n"
                                       f"*Account ID*: {account_id}\n"
                                       f"*SG Name*: {group_name}\n"
                                       f"*SG ID*: {group_id}\n"
                                       f"*Region*: {group_region}\n")

                    account_details += f"*VPC*: {group_vpc_id}\n\n" if group_vpc_id else "\n"

                    account = Account(id=account_id,
                                      name=account_name,
                                      region=group_region,
                                      role_name=self.config.aws.role_name_reporting)
                    ec2_client = account.client("ec2") if account.session is not None else None

                    sg_instance_details = ec2_owner = ec2_bu = ec2_product = None
                    sg_in_use = sg_in_use_ec2 = sg_in_use_elb = sg_in_use_rds = None
                    sg_public = sg_blind_public = False

                    rds_client = account.client("rds") if account.session is not None else None
                    elb_client = account.client("elb") if account.session is not None else None
                    elbv2_client = account.client("elbv2") if account.session is not None else None

                    iam_client = account.client("iam") if account.session is not None else None

                    rds_instance_details = elb_instance_details = None

                    if ec2_client is not None:
                        ec2_instances = EC2Operations.get_instance_details_of_sg_associated(ec2_client, group_id)
                        sg_instance_details, instance_profile_details,\
                            sg_in_use_ec2, sg_public, sg_blind_public, \
                            ec2_owner, ec2_bu, ec2_product = self.build_instances_table(iam_client, ec2_instances)

                    if elb_client is not None and elbv2_client is not None:
                        try:
                            elb_instances = EC2Operations.get_elb_details_of_sg_associated(elb_client, elbv2_client, group_id)
                            elb_instance_details, sg_in_use_elb = self.build_elb_instances_table(elb_instances)
                        except Exception:
                            logging.exception(f"Failed to build ELB details for '{group_name} / {group_id}' in {account}")

                    if rds_client is not None:
                        try:
                            rds_instances = RDSOperations.get_rds_instance_details_of_sg_associated(rds_client, group_id)
                            rds_instance_details, sg_in_use_rds = self.build_rds_instances_table(rds_instances)
                        except Exception:
                            logging.exception(f"Failed to build RDS details for '{group_name} / {group_id}' in {account}")

                    sg_in_use = sg_in_use_ec2 or sg_in_use_elb or sg_in_use_rds

                    owner = group_owner if group_owner is not None else ec2_owner
                    bu = group_bu if group_bu is not None else ec2_bu
                    product = group_product if group_product is not None else ec2_product

                    if bu is None:
                        bu = self.config.get_bu_by_name(group_name)

                    source_description = f"has {status.value} status"
                    if status == RestrictionStatus.OpenCompletely:
                        source_description = "allows access from any IP address (0.0.0.0/0, ::/0)"
                    elif status == RestrictionStatus.OpenPartly:
                        source_description = "allows access from some definite public ip addresses or networks"

                    if sg_public:
                        priority = "Critical"
                        summary_status = "Internet"
                        issue_description = (f"Security group has EC2 instances in public subnets "
                                             f"with public IP address attached and "
                                             f"{source_description} "
                                             f"for following ports:\n")
                        threat = (
                            f"*Threat*: "
                            f"Instances associated with this security group are accessible via public route over Internet and "
                            f"have ingress rules which allows access to critical services which should be accessible "
                            f"only from VPN or Direct Connect. Accessing these instances via Internet can lead to leakage "
                            f"to third parties of login credentials for such services as databases/remote access."
                            f"Open and Unrestricted access from Internet increases opportunities for "
                            f"malicious activity from public internet which can potentially result into "
                            f"hacking, denial-of-service attacks, loss of data, etc. This also provides "
                            f"an ingress point to the attackers to gain backdoor access within the other "
                            f"critical services.\n"
                        )
                    elif sg_blind_public:
                        priority = "Critical"
                        summary_status = "Internet"
                        issue_description = (f"Security group has EC2 instances in private subnets "
                                             f"with public IP address attached and "
                                             f"{source_description} "
                                             f"for following ports:\n")
                        threat = (f"*Threat*: "
                                  f"Instances listed below can be probed by external attack vectors and "
                                  f"make them vulnerable to blind injection based attacks, as although "
                                  f"the EC2 instances is in a private subnet, if security group and NACL "
                                  f"are allowing access from the internet incoming, traffic will reach "
                                  f"instances when someone is probing the public IP of the instances. "
                                  f"However, there will be no return traffic due to the lack of an IGW.\n")
                    elif not sg_in_use:
                        priority = "Minor"
                        summary_status = "Unused"
                        issue_description = (f"Security group has no EC2 instances attached and "
                                             f"{source_description} "
                                             f"for following ports:\n")
                        threat = (f"*Threat*: "
                                  f"An unused SG can be leveraged to gain control/access within the network "
                                  f"if attached to any exposed instance. This unrestricted access increases "
                                  f"opportunities for malicious activity (hacking, denial-of-service attacks, "
                                  f"loss of data).\n")
                    else:
                        priority = "Major"
                        summary_status = "Intranet"
                        issue_description = (
                            f"Security group has EC2 instances in in private subnets and "
                            f"{source_description} "
                            f"for following ports:\n")
                        threat = (f"*Threat*: "
                                  f"Open access within the network not only provides unrestricted access to "
                                  f"other servers but increases opportunities for malicious activity (hacking, "
                                  f"denial-of-service attacks, loss of data) if attacker gains access to the "
                                  f"services within the network, thus providing lateral movement.\n")

                    tags_table = JiraOperations.build_tags_table(tags)

                    issue_description = (
                        f"{issue_description}"
                        f"{open_port_details}"
                        f"{threat}"
                        f"{account_details}")

                    if status == RestrictionStatus.OpenCompletely:
                        auto_remediation_date = (self.config.now + self.config.sg.issue_retention_date).date()
                        issue_description += f"\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\n\n"

                    issue_description += f"{tags_table}"

                    issue_description += f"{sg_instance_details if sg_instance_details else ''}"

                    issue_description += f"{rds_instance_details if rds_instance_details else ''}"

                    issue_description += f"{elb_instance_details if elb_instance_details else ''}"

                    issue_description += f"{instance_profile_details if instance_profile_details else ''}"

                    issue_description += (
                        f"*Recommendation*: "
                        f"Allow access only for a minimum set of required ip addresses/ranges from [RFC1918|https://tools.ietf.org/html/rfc1918]. "
                    )

                    if self.config.whitelisting_procedure_url:
                        issue_description += (f"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                                              f"and provide a strong business reasoning. ")

                    issue_description += f"Be sure to delete overly permissive rules after creating rules that are more restrictive.\n"

                    issue_summary = (f"{summary_status} open security group '{group_name}'"
                                     f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}")

                    try:
                        response = jira.add_issue(
                            issue_summary=issue_summary, issue_description=issue_description,
                            priority=priority, labels=["insecure-services"],
                            owner=owner,
                            account_id=account_id,
                            bu=bu, product=product,
                        )
                    except Exception:
                        logging.exception("Failed to create jira ticket")
                        continue

                    if response is not None:
                        issue.jira_details.ticket = response.ticket_id
                        issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                    issue.jira_details.public = sg_public
                    issue.jira_details.blind_public = sg_blind_public
                    issue.jira_details.in_use = sg_in_use
                    issue.jira_details.owner = owner
                    issue.jira_details.business_unit = bu
                    issue.jira_details.product = product

                    slack.report_issue(
                        msg=f"Discovered {issue_summary}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}"
                            f"\n"
                            f"{self.build_open_ports_table_slack(issue.issue_details.perms)}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu, product=product,
                    )

                    IssueOperations.set_status_reported(ddb_table, issue)
    def create_tickets_ebsvolumes(self):
        """
        Class method to create jira tickets for unencrypted EBS volume issues.

        For every account in `config.ebsVolume.accounts` this walks all
        not-closed `EBSUnencryptedVolumeIssue` records in DynamoDB and:
          * closes the JIRA ticket (and notifies Slack) when the issue has
            become resolved or whitelisted;
          * notifies Slack when an already reported issue has changed;
          * creates a new JIRA ticket and Slack notification for issues that
            have not been reported yet, enriched with volume tags and details
            of attached EC2 instances.
        """
        table_name = self.config.ebsVolume.ddb_table_name

        main_account = Account(region=self.config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(table_name)
        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.ebsVolume.accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_not_closed_issues(
                ddb_table, account_id, EBSUnencryptedVolumeIssue)
            for issue in issues:
                volume_id = issue.issue_id
                region = issue.issue_details.region
                tags = issue.issue_details.tags
                # issue has been already reported
                if issue.timestamps.reported is not None:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    if issue.status in [
                            IssueStatus.Resolved, IssueStatus.Whitelisted
                    ]:
                        logging.debug(
                            f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue"
                        )

                        comment = (
                            f"Closing {issue.status.value} EBS unencrypted volume '{volume_id}' issue "
                            f"in '{account_name} / {account_id}' account, '{region}' region"
                        )
                        jira.close_issue(ticket_id=issue.jira_details.ticket,
                                         comment=comment)
                        slack.report_issue(
                            msg=f"{comment}"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu,
                            product=product,
                        )
                        IssueOperations.set_status_closed(ddb_table, issue)
                    # issue.status != IssueStatus.Closed (should be IssueStatus.Open)
                    elif issue.timestamps.updated > issue.timestamps.reported:
                        logging.error(
                            f"TODO: update jira ticket with new data: {table_name}, {account_id}, {volume_id}"
                        )
                        slack.report_issue(
                            msg=f"EBS unencrypted volume '{volume_id}' issue is changed "
                            f"in '{account_name} / {account_id}' account, '{region}' region"
                            f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                            owner=owner,
                            account_id=account_id,
                            bu=bu,
                            product=product,
                        )
                        IssueOperations.set_status_updated(ddb_table, issue)
                    else:
                        logging.debug(f"No changes for '{volume_id}'")
                # issue has not been reported yet
                else:
                    logging.debug(
                        f"Reporting EBS unencrypted volume '{volume_id}' issue"
                    )

                    # if owner/bu/product tags exist on volume - use it
                    volume_owner = tags.get("owner", None)
                    volume_bu = tags.get("bu", None)
                    volume_product = tags.get("product", None)

                    issue_description = (
                        f"EBS volume needs to be encrypted.\n\n"
                        f"*Risk*: High\n\n"
                        f"*Account Name*: {account_name}\n"
                        f"*Account ID*: {account_id}\n"
                        f"*Region*: {region}\n"
                        f"*Volume ID*: {volume_id}\n")

                    # collect details of attached EC2 instances (if any) to
                    # enrich the ticket and to provide fallback owner/bu/product
                    ec2_details = ec2_owner = ec2_bu = ec2_product = None
                    if issue.issue_details.attachments:
                        account = Account(
                            id=account_id,
                            name=account_name,
                            region=region,
                            role_name=self.config.aws.role_name_reporting)

                        if account.session is not None:
                            ec2_client = account.client("ec2")
                            ec2_instances = []
                            for instance_id, state in issue.issue_details.attachments.items(
                            ):
                                metadata = EC2Operations.get_instance_meta_data(
                                    ec2_client, instance_id)
                                if metadata is not None:
                                    ec2_instances.append({
                                        'ec2': metadata,
                                        'state': state
                                    })
                            ec2_details, ec2_owner, ec2_bu, ec2_product = self.build_instances_table(
                                ec2_instances)

                    # volume tags take precedence over instance-derived values
                    owner = volume_owner if volume_owner is not None else ec2_owner
                    bu = volume_bu if volume_bu is not None else ec2_bu
                    product = volume_product if volume_product is not None else ec2_product

                    issue_description += JiraOperations.build_tags_table(tags)

                    issue_description += ec2_details if ec2_details else ''

                    issue_description += "*Recommendation*: Encrypt EBS volume. "

                    if self.config.whitelisting_procedure_url:
                        issue_description += (
                            f"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] "
                            f"and provide a strong business reasoning. ")

                    # NOTE: the first fragment no longer ends with a space --
                    # the original produced a double space in the summary,
                    # inconsistent with the other ticket summaries in this file
                    issue_summary = (
                        f"EBS unencrypted volume '{volume_id}'"
                        f" in '{account_name} / {account_id}' account{' [' + bu + ']' if bu else ''}"
                    )

                    try:
                        response = jira.add_issue(
                            issue_summary=issue_summary,
                            issue_description=issue_description,
                            priority="Major",
                            labels=["unencrypted-ebs-volumes"],
                            owner=owner,
                            account_id=account_id,
                            bu=bu,
                            product=product,
                        )
                    except Exception:
                        logging.exception("Failed to create jira ticket")
                        continue

                    if response is not None:
                        issue.jira_details.ticket = response.ticket_id
                        issue.jira_details.ticket_assignee_id = response.ticket_assignee_id

                    issue.jira_details.owner = owner
                    issue.jira_details.business_unit = bu
                    issue.jira_details.product = product

                    slack.report_issue(
                        msg=f"Discovered {issue_summary}"
                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                        owner=owner,
                        account_id=account_id,
                        bu=bu,
                        product=product,
                    )

                    IssueOperations.set_status_reported(ddb_table, issue)
# Example n. 8
    def clean_security_groups(self, batch=False):
        """
        Class function to clean security groups which are violating aws best practices.

        Iterates over open `SecurityGroupIssue` records for every remediation
        account. An issue is remediated only when it has been reported, has not
        been remediated yet, is not whitelisted, and has stayed open for at
        least the configured retention period. Remediation re-checks the group,
        backs up its rules to S3, restricts the `OpenCompletely` rules, then
        updates the JIRA ticket and posts to Slack.

        :param batch: when True, remediate without interactive confirmation
        """
        # NOTE(review): module-level `config` is used for region and backup
        # bucket while `self.config` is used everywhere else in this method;
        # the same mix appears in sibling clean_* methods - confirm intentional.
        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(self.config.sg.ddb_table_name)
        backup_bucket = config.aws.s3_backup_bucket

        # days an issue must stay open before automatic remediation kicks in
        retention_period = self.config.sg.remediation_retention_period

        jira = JiraReporting(self.config)
        slack = SlackNotification(self.config)

        for account_id, account_name in self.config.sg.remediation_accounts.items():
            logging.debug(f"Checking '{account_name} / {account_id}'")
            issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue)
            for issue in issues:
                group_name = issue.issue_details.name
                group_vpc_id = issue.issue_details.vpc_id
                group_id = issue.issue_id
                group_region = issue.issue_details.region
                # status = issue.jira_details.status

                # whitelisting can target either "<vpc_id>:<name>" or the group id
                name_in_whitelist = self.config.sg.in_whitelist(account_id, f"{group_vpc_id}:{group_name}")
                id_in_whitelist = self.config.sg.in_whitelist(account_id, group_id)

                if name_in_whitelist or id_in_whitelist:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (in whitelist)")

                    # Adding label with "whitelisted" to jira ticket.
                    jira.add_label(
                        ticket_id=issue.jira_details.ticket,
                        label=IssueStatus.Whitelisted.value
                    )
                    continue

                if issue.timestamps.reported is None:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (was not reported)")
                    continue

                if issue.timestamps.remediated is not None:
                    logging.debug(f"Skipping '{group_name} / {group_id}' (has been already remediated)")
                    continue

                # age of the issue, counted from its last update timestamp
                updated_date = issue.timestamp_as_datetime
                no_of_days_issue_created = (self.config.now - updated_date).days

                if no_of_days_issue_created >= retention_period:
                    owner = issue.jira_details.owner
                    bu = issue.jira_details.business_unit
                    product = issue.jira_details.product

                    try:
                        account = Account(id=account_id,
                                          name=account_name,
                                          region=group_region,
                                          role_name = self.config.aws.role_name_reporting)
                        if account.session is None:
                            continue

                        # re-check the group's current state before touching it:
                        # it may have been deleted or fixed by the user meanwhile
                        checker = SecurityGroupsChecker(account=account,
                                                        restricted_ports=self.config.sg.restricted_ports)
                        checker.check(ids=[group_id])
                        sg = checker.get_security_group(group_id)
                        if sg is None:
                            logging.debug(f"Security group '{group_name} / {group_id}' was removed by user")
                        elif sg.restricted:
                            logging.debug(f"Security group '{group_name} / {group_id}' issue was remediated by user")
                        elif sg.status != RestrictionStatus.OpenCompletely:
                            # only completely open (0.0.0.0/0, ::/0) groups are auto-remediated
                            logging.debug(f"Security group '{group_name} / {group_id}' is not completely open")
                        else:
                            # interactive mode asks for confirmation per group
                            if not batch and \
                               not confirm(f"Do you want to remediate security group '{group_name} / {group_id}'", False):
                                continue

                            logging.debug(f"Remediating '{group_name} / {group_id}' rules")

                            # back up current rules to S3 before modifying anything
                            backup_path = sg.backup_s3(main_account.client("s3"), backup_bucket)
                            remediation_succeed = True
                            # restrict() returns the number of processed rules,
                            # or None when remediation could not be performed
                            processed = sg.restrict(RestrictionStatus.OpenCompletely)
                            if processed == 0:
                                logging.debug(f"No rules were detected to remediate in '{group_name} / {group_id}'")
                                comment = None
                            elif processed is None:
                                remediation_succeed = False
                                comment = (f"Failed to remediate security group '{group_name} / {group_id}' issue "
                                           f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                           f"due to some limitations. Please, check manually")
                            else:
                                comment = (f"Rules backup was saved to "
                                           f"[{backup_path}|https://s3.console.aws.amazon.com/s3/object/{backup_bucket}/{backup_path}]. "
                                           f"Security group '{group_name} / {group_id}' `{RestrictionStatus.OpenCompletely.value}` issue "
                                           f"in '{account_name} / {account_id}' account, '{group_region}' region "
                                           f"was remediated by hammer")

                            # comment is None when nothing was changed, so the
                            # ticket/DDB record is left untouched in that case
                            if comment is not None:
                                jira.remediate_issue(
                                    ticket_id=issue.jira_details.ticket,
                                    comment=comment,
                                    reassign=remediation_succeed,
                                )
                                slack.report_issue(
                                    msg=f"{comment}"
                                        f"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}",
                                    owner=owner,
                                    account_id=account_id,
                                    bu=bu, product=product,
                                )
                                IssueOperations.set_status_remediated(ddb_table, issue)
                    except Exception:
                        logging.exception(f"Error occurred while updating security group '{group_name} / {group_id}' rules "
                                          f"in '{account_name} / {account_id} / {group_region}'")
                else:
                    logging.debug(f"Skipping '{group_name} / {group_id}' "
                                  f"({retention_period - no_of_days_issue_created} days before remediation)")
# Example n. 9
class DDBackuper(object):
    """
    Daily on-demand backups for hammer DynamoDB tables.

    Creates one backup per table per day (named `<table>_<YYYY-MM-DD>`) and
    prunes backups older than the configured retention period.

    TODO: this should be replaced with DynamoDB Point-in-Time recovery when CloudFormattion will support it.
    """
    def __init__(self):
        self.config = Config()
        self.enabled = self.config.aws.ddb_backup_enabled
        self.retention_period = self.config.aws.ddb_backup_retention
        self.account = Account(region=self.config.aws.region)
        self.ddb_client = self.account.client('dynamodb')
        self.ddb_resource = self.account.resource('dynamodb')
        self.now = datetime.now(timezone.utc)
        # used as a part of backup name
        self.today = self.now.strftime("%Y-%m-%d")

    def _log_ddb_client_error(self, err, failure_msg):
        """ Log a DynamoDB ClientError: short error line for access denial, full traceback otherwise """
        if err.response['Error']['Code'] in ("AccessDenied", "UnauthorizedOperation"):
            logging.error(
                f"Access denied in {self.account} "
                f"({self.ddb_client.__class__.__name__.lower()}:{err.operation_name})"
            )
        else:
            logging.exception(failure_msg)

    def filter_tables(self):
        """ Return list of hammer ddb tables with existing backups """
        hammer_tables = {}
        for module in self.config.modules:
            table_name = module.ddb_table_name
            try:
                summaries = self.ddb_client.list_backups(
                    TableName=table_name)['BackupSummaries']
            except ClientError as err:
                self._log_ddb_client_error(
                    err, f"Failed to list '{table_name}' backups in {self.account}")
                continue
            hammer_tables[table_name] = summaries
        return hammer_tables

    def today_backup_name(self, table_name):
        """ Return the backup name for `table_name` for the current day """
        return f"{table_name}_{self.today}"

    def today_backup_exists(self, table_name, backups):
        """ Returns if today's backup exists in provided list of backups
            Check is based on backup name and self.today
        """
        expected = self.today_backup_name(table_name)
        return any(backup['BackupName'] == expected for backup in backups)

    def check_backups(self, table_name, backups):
        """ Log error if any backup status for provided list of backups is not AVAILABLE """
        for backup in backups:
            if backup["BackupStatus"] != "AVAILABLE":
                logging.error(f"{table_name} backup is not available: {backup['BackupName']}")

    def launch_backup(self, table_name):
        """ Start today's on-demand backup for `table_name`; return True on success """
        try:
            self.ddb_client.create_backup(
                TableName=table_name,
                BackupName=self.today_backup_name(table_name))
        except ClientError as err:
            self._log_ddb_client_error(
                err, f"Failed to create '{table_name}' backup in {self.account}")
            return False
        return True

    def rotate_backups(self, table_name, backups):
        """ Removes all outdated backups from provided list of backups
            Check is based on backup creation date and retention period from config
        """
        for backup in backups:
            # keep backups still within the retention window
            if self.now - backup['BackupCreationDateTime'] <= self.retention_period:
                continue
            name = backup['BackupName']
            arn = backup['BackupArn']
            logging.debug(
                f"Deleting outdated backup '{name}' for '{table_name}' ({arn})"
            )
            try:
                self.ddb_client.delete_backup(BackupArn=arn)
            except ClientError as err:
                self._log_ddb_client_error(
                    err, f"Failed to delete '{arn}' backup in {self.account}")

    def run(self):
        """ Entry point: verify, launch and rotate backups for all hammer tables (no-op when disabled) """
        if not self.enabled:
            logging.debug("DDB backup disabled")
            return
        for table_name, backups in self.filter_tables().items():
            self.check_backups(table_name, backups)
            if self.today_backup_exists(table_name, backups):
                logging.warning(
                    f"Today backup exists for {table_name}, skipping")
                continue
            logging.debug(f"Launching backup of {table_name}")
            # rotation happens only after a successful new backup was launched
            if self.launch_backup(table_name):
                self.rotate_backups(table_name, backups)