Example #1
def lambda_handler(event, context):
    """ Lambda handler to initiate to find inactive keys for IAM users """
    set_logging(level=logging.INFO)
    logging.debug("Initiating IAM user inactive keys checking")

    try:
        sns_arn = os.environ["SNS_IAM_USER_INACTIVE_KEYS_ARN"]
        config = Config()

        if not config.iamUserInactiveKeys.enabled:
            logging.debug("IAM user inactive keys checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate IAM user inactive keys check"
        )
        for account_id, account_name in config.iamUserInactiveKeys.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
            }
            logging.debug(
                f"Initiating IAM user inactive keys checking for '{account_name}'"
            )
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception(
            "Error occurred while initiating IAM user inactive keys check")
        return

    logging.debug("IAM user inactive keys checking initiation done")
Example #2
def lambda_handler(event, context):
    set_logging(level=logging.DEBUG)

    logging.debug(f"Got request\n{jsonDumps(event)}")

    if event.get('RequestType', "") == "Delete":
        send_response(event, context, "SUCCESS")
        return

    region = event.get('ResourceProperties', {}).get('Region', None)
    if region is None:
        logging.error("Failed to get region from event")
        send_response(event, context, "FAILED")
        return

    try:
        account = Account(region=region)
        ec2 = account.client(
            'ec2', config=botocore.config.Config(retries={'max_attempts': 3}))
        images = ec2.describe_images(Filters=[{
            "Name": "product-code",
            "Values": [PRODUCT_CODE]
        }])['Images']
    except Exception:
        logging.exception("Failed to describe images")
        send_response(event, context, "FAILED")
        return

    if len(images) == 0:
        logging.error("No images were found")
        send_response(event, context, "FAILED")
        return

    latest = sorted(images, key=itemgetter('CreationDate'))[-1]['ImageId']
    logging.info(f"Latest '{PRODUCT_CODE}' AMI id - '{latest}'")
    send_response(event, context, "SUCCESS", {'Id': latest})
Example #3
def lambda_handler(event, context):
    set_logging(level=logging.DEBUG)

    config = Config()

    #logging.debug("Client token: " + event['authorizationToken'])
    logging.debug("Method ARN: " + event['methodArn'])

    if event['authorizationToken'] != config.api.token:
        raise Exception('Unauthorized')

    principalId = 'hammer-api-user'

    tmp = event['methodArn'].split(':')
    apiGatewayArnTmp = tmp[5].split('/')
    awsAccountId = tmp[4]

    policy = AuthPolicy(principalId, awsAccountId)
    policy.restApiId = apiGatewayArnTmp[0]
    policy.region = tmp[3]
    policy.stage = apiGatewayArnTmp[1]
    # quick hack to allow GET calls to /identify/{request_id} (request_id is a hex string)
    # TODO: rewrite this in a more generic way
    if len(apiGatewayArnTmp) == 5:
        full_path = '/identify/' + apiGatewayArnTmp[4]
        policy.allowMethod(HttpVerb.GET, full_path)
    policy.allowMethod(HttpVerb.POST, '/identify')
    policy.allowMethod(HttpVerb.POST, '/remediate')

    authResponse = policy.build()

    logging.debug(jsonDumps(authResponse))

    return authResponse
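
The ARN arithmetic above is easier to follow with a concrete method ARN (the value below is hypothetical):

# arn:aws:execute-api:{region}:{account}:{restApiId}/{stage}/{verb}/{path...}
arn = "arn:aws:execute-api:eu-west-1:123456789012:a1b2c3d4e5/prod/GET/identify/deadbeef"

tmp = arn.split(':')                  # tmp[3] -> "eu-west-1", tmp[4] -> "123456789012"
apiGatewayArnTmp = tmp[5].split('/')
# -> ["a1b2c3d4e5", "prod", "GET", "identify", "deadbeef"]
# len() == 5 here, so the handler above allows GET /identify/deadbeef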
Example #4
def lambda_handler(event, context):
    """ Lambda handler to initiate to find security groups unrestricted access """
    set_logging(level=logging.INFO)
    logging.debug("Initiating CloudTrail checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.cloudtrails.enabled:
            logging.debug("CloudTrail checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate CloudTrail check")
        for account_id, account_name in config.cloudtrails.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
                "regions": config.aws.regions,
                "sns_arn": sns_arn
            }
            logging.debug(
                f"Initiating CloudTrail checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception(
            "Error occurred while initiating CloudTrail checking")
        return

    logging.debug("CloudTrail checking initiation done")
Example #5
def lambda_handler(event, context):
    """ Lambda handler to initiate to find S3 bucket public access in ACL """
    set_logging(level=logging.INFO)
    logging.debug("Initiating S3 acls checking")

    try:
        sns_arn = os.environ["SNS_S3_ACL_ARN"]
        config = Config()

        if not config.s3acl.enabled:
            logging.debug("S3 acls checking disabled")
            return

        logging.debug("Iterating over each account to initiate s3 acls check")
        for account_id, account_name in config.s3acl.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
            }
            logging.debug(f"Initiating s3 acls checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of S3 acl checking")
        return

    logging.debug("S3 acls checking initiation done")
Example #6
def lambda_handler(event, context):
    """ Lambda handler to initiate to find public RDS snapshots """
    set_logging(level=logging.INFO)
    logging.debug("Initiating public RDS snapshots checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.rdsSnapshot.enabled:
            logging.debug("Public RDS snapshots checking disabled")
            return

        logging.debug("Iterating each account to initiate RDS snapshots checking")
        for account_id, account_name in config.rdsSnapshot.accounts.items():
            payload = {"account_id": account_id,
                       "account_name": account_name,
                       "regions": config.aws.regions,
                       "sns_arn": sns_arn
                      }
            logging.debug(f"Initiating public RDS snapshots checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of public RDS snapshots checking")
        return

    logging.debug("Public RDS snapshot checking initiation done")
Example #7
def lambda_handler(event, context):
    """ Lambda handler to initiate to find unencrypted EBS volumes """
    set_logging(level=logging.INFO)
    logging.debug("Initiating unencrypted EBS volumes checking")

    try:
        sns_arn = os.environ["SNS_EBS_VOLUMES_ARN"]
        config = Config()

        if not config.ebsVolume.enabled:
            logging.debug("Unencrypted EBS volumes checking disabled")
            return

        logging.debug("Iterating over each account to initiate unencrypted EBS volumes checking")
        for account_id, account_name in config.ebsVolume.accounts.items():
            payload = {"account_id": account_id,
                       "account_name": account_name,
                       "regions": config.aws.regions,
                       "sns_arn": sns_arn
                      }
            logging.debug(f"Initiating unencrypted EBS volume checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of unencrypted EBS volumes checking")
        return

    logging.debug("Unencrypted EBS volume checking initiation done")
Example #8
def lambda_handler(event, context):
    """ Lambda handler to initiate to find SQS public access in policy """
    set_logging(level=logging.INFO)
    logging.debug("Initiating SQS policies checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.sqspolicy.enabled:
            logging.debug("SQS policies checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate SQS policies check")
        for account_id, account_name in config.sqspolicy.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
                "regions": config.aws.regions,
                "sns_arn": sns_arn
            }
            logging.debug(
                f"Initiating SQS policies checking for '{account_name}'")
            Sns.publish(sns_arn, payload)

    except Exception:
        logging.exception(
            "Error occurred while initiating SQS policy checking")
        return

    logging.debug("SQS policies checking initiation done")
Example #9
def lambda_handler(event, context):
    set_logging(level=logging.INFO)
    parser = LogsParser()

    #logging.debug(f"get event\n{event}")
    if 'awslogs' in event:
        parser.logs_event(event)
    elif 'Records' in event:
        parser.sns_event(event)
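
The two branches match the two event shapes this lambda can receive: CloudWatch Logs subscription events carry a base64-encoded, gzip-compressed payload under `awslogs.data`, while SNS deliveries arrive under `Records`. A sketch of how `LogsParser.logs_event` presumably unpacks the former (the helper name below is illustrative):

import base64
import gzip
import json


def decode_awslogs(event):
    """Decode a CloudWatch Logs subscription event into its individual log events."""
    raw = base64.b64decode(event["awslogs"]["data"])
    data = json.loads(gzip.decompress(raw))
    return data["logEvents"]  # each entry has 'id', 'timestamp' and 'message'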
Example #10
def lambda_handler(event, context):
    set_logging(level=logging.DEBUG)

    try:
        backuper = DDBackuper()
        backuper.run()
    except Exception:
        logging.exception(f"Failed to backup DDB tables")
        return
Example #11
def lambda_handler(event, context):
    """ Lambda handler to evaluate public EBS snapshots """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.ebsSnapshot.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public EBS snapshots in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, EBSPublicSnapshotIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}")

        checker = EBSPublicSnapshotsChecker(account=account)
        if checker.check():
            for snapshot in checker.snapshots:
                if snapshot.public:
                    issue = EBSPublicSnapshotIssue(account_id, snapshot.id)
                    issue.issue_details.region = snapshot.account.region
                    issue.issue_details.volume_id = snapshot.volume_id
                    issue.issue_details.tags = snapshot.tags
                    if config.ebsSnapshot.in_whitelist(account_id,
                                                       snapshot.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(
                        f"Setting {snapshot.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(snapshot.id, None)

            logging.debug(
                f"Public EBS snapshots in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated EBS snapshots
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public EBS snapshots checking")

    logging.debug(
        f"Checked public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
    )
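
This handler and the similar ones below (Examples #12, #15, #16, #17) share a self-chaining pattern: each invocation pops one region off the payload and, if any regions remain, republishes the shrunken payload to the same topic, keeping per-invocation work within the lambda timeout. Distilled into a sketch (the helper name is assumed; `Sns.publish` as sketched after Example #1):

def check_next_region(payload, process_region):
    """Handle one region per invocation, then re-queue the remainder."""
    region = payload['regions'].pop()             # take the last region from the list
    process_region(region, payload)               # run the actual check for it
    if payload['regions']:                        # regions left to process?
        Sns.publish(payload["sns_arn"], payload)  # re-trigger this lambda via SNS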
Example #12
def lambda_handler(event, context):
    """ Lambda handler to evaluate insecure services """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.sg.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for insecure services in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, SecurityGroupIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")

        checker = SecurityGroupsChecker(
            account=account, restricted_ports=config.sg.restricted_ports)
        if checker.check():
            for sg in checker.groups:
                logging.debug(f"Checking {sg.name} ({sg.id})")
                if not sg.restricted:
                    # TODO: move instances detection for security group from reporting to identification
                    #ec2_instances = EC2Operations.get_instance_details_of_sg_associated(account.client("ec2"), sg.id)
                    #logging.debug(f"associated ec2 instances: {ec2_instances}")
                    issue = SecurityGroupIssue(account_id, sg.id)
                    issue.issue_details.name = sg.name
                    issue.issue_details.region = sg.account.region
                    issue.issue_details.tags = sg.tags
                    issue.issue_details.status = sg.status.value
                    for perm in sg.permissions:
                        for ip_range in perm.ip_ranges:
                            if not ip_range.restricted:
                                issue.add_perm(perm.protocol, perm.from_port,
                                               perm.to_port, ip_range.cidr,
                                               ip_range.status)
                    if config.sg.in_whitelist(
                            account_id, sg.name) or config.sg.in_whitelist(
                                account_id, sg.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {sg.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(sg.id, None)

            logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated security groups
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check insecure services in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(
        f"Checked insecure services in '{region}' for '{account_id} ({account_name})'"
    )
Example #13
def lambda_handler(event, context):
    """ Lambda handler to evaluate s3 buckets acl """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.s3acl.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public S3 ACLs in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, S3AclIssue)
        # make dictionary for fast search by id
        # (S3 buckets are global, so there is no region to filter by here)
        open_issues = {issue.issue_id: issue for issue in open_issues}
        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")

        checker = S3BucketsAclChecker(account=account)
        if not checker.check():
            return

        for bucket in checker.buckets:
            logging.debug(f"Checking {bucket.name}")
            if bucket.public:
                issue = S3AclIssue(account_id, bucket.name)
                issue.issue_details.owner = bucket.owner
                issue.issue_details.public_acls = bucket.get_public_acls()
                issue.issue_details.tags = bucket.tags
                if config.s3acl.in_whitelist(account_id, bucket.name):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {bucket.name} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from issues_list_from_db (if exists)
                # as we already checked it
                open_issues.pop(bucket.name, None)

        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated buckets
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check s3 acls for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked s3 acls for '{account_id} ({account_name})'")
Example #14
def log_requests(handler):  # enclosing decorator; only the inner wrapper appeared in the excerpt
    def wrapper(event, context):
        set_logging(level=logging.DEBUG)
        logging.debug(f"request:\n{json.dumps(event, indent=4)}")
        response = handler(event, context)
        logging.debug(f"response:\n{json.dumps(response, indent=4)}")
        return response
    return wrapper
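
Applied as a decorator (the handler below is hypothetical):

@log_requests
def lambda_handler(event, context):
    return {"statusCode": 200}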
Example #15
def lambda_handler(event, context):
    """ Lambda handler to evaluate SQS queue policy """
    set_logging(level=logging.DEBUG)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # region = payload['region']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(config.sqspolicy.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public SQS policies in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}
        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")

        checker = SQSPolicyChecker(account=account)
        if checker.check():
            for queue in checker.queues:
                logging.debug(f"Checking {queue.name}")
                if queue.public:
                    issue = SQSPolicyIssue(account_id, queue.url)
                    issue.issue_details.tags = queue.tags
                    issue.issue_details.name = queue.name
                    issue.issue_details.region = queue.account.region
                    issue.issue_details.policy = queue.policy
                    if config.sqspolicy.in_whitelist(account_id, queue.url):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {queue.name} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(queue.url, None)

        logging.debug(f"SQS in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated queues
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(f"Failed to check SQS policies for '{account_id} ({account_name})'")
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(f"Checked SQS policies for '{account_id} ({account_name})'")
Example #16
def lambda_handler(event, context):
    """ Lambda Handler to describe cloud trails enabled or not for each region """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.cloudtrails.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for CloudTrail logging issues in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, CloudTrailIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_id == region
        }
        logging.debug(f"CloudTrail region in DDB:\n{open_issues.keys()}")

        checker = CloudTrailChecker(account=account)
        if checker.check():
            if checker.disabled or checker.delivery_errors:
                issue = CloudTrailIssue(account_id, region)
                issue.issue_details.disabled = checker.disabled
                issue.issue_details.delivery_errors = checker.delivery_errors
                issue.add_trails(checker.trails)
                if config.cloudtrails.in_whitelist(account_id, region):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {region} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
            # issue exists in ddb and was fixed
            elif region in open_issues:
                IssueOperations.set_status_resolved(ddb_table,
                                                    open_issues[region])
        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check CloudTrail in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain CloudTrail checking")

    logging.debug(
        f"Checked CloudTrail in '{region}' for '{account_id} ({account_name})'"
    )
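
`CloudTrailChecker` is not shown; its `disabled` and `delivery_errors` flags map naturally onto the CloudTrail status API. A sketch of how a region could be classified, assuming boto3 and a session for the target account:

import boto3


def cloudtrail_region_status(session, region):
    """Return (disabled, delivery_errors) for CloudTrail in a single region."""
    client = session.client("cloudtrail", region_name=region)
    trails = client.describe_trails()["trailList"]
    statuses = [client.get_trail_status(Name=t["TrailARN"]) for t in trails]
    disabled = not any(s["IsLogging"] for s in statuses)  # no trail is logging at all
    delivery_errors = any(s.get("LatestDeliveryError") for s in statuses)
    return disabled, delivery_errors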
Example #17
def lambda_handler(event, context):
    """ Lambda handler to evaluate public ami issues"""
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.publicAMIs.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for Public AMI issues for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, PublicAMIIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")

        checker = PublicAMIChecker(account=account)
        if checker.check():
            for ami in checker.amis:
                logging.debug(f"Checking {ami.id}")
                if ami.public_access:
                    issue = PublicAMIIssue(account_id, ami.id)
                    issue.issue_details.tags = ami.tags
                    issue.issue_details.name = ami.name
                    issue.issue_details.region = region
                    if config.publicAMIs.in_whitelist(account_id, ami.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(
                        f"Setting {ami.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(ami.id, None)

            logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated AMIs
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check AMI public access for '{account_id} ({account_name})'"
        )
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public AMI checking")

    logging.debug(
        f"Checked AMI public access for '{account_id} ({account_name})'")
Example #18
def lambda_handler(event, context):
    """ Lambda handler to evaluate iam user keys rotation """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.iamUserKeysRotation.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for IAM user keys rotation for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, IAMKeyRotationIssue)
        # make dictionary for fast search by id
        # (IAM is global, so there is no region to filter by here)
        open_issues = {issue.issue_id: issue for issue in open_issues}
        logging.debug(
            f"Users with keys to rotate in DDB:\n{open_issues.keys()}")

        checker = IAMKeyChecker(
            account=account,
            now=config.now,
            rotation_criteria_days=config.iamUserKeysRotation.rotation_criteria_days)
        if not checker.check(last_used_check_enabled=False):
            return

        for user in checker.users:
            for key in user.stale_keys:
                issue = IAMKeyRotationIssue(account_id, key.id)
                issue.issue_details.username = user.id
                issue.issue_details.create_date = key.create_date.isoformat()
                if config.iamUserKeysRotation.in_whitelist(
                        account_id,
                        key.id) or config.iamUserKeysRotation.in_whitelist(
                            account_id, user.id):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(
                    f"Setting {key.id}/{user.id} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from issues_list_from_db (if exists)
                # as we already checked it
                open_issues.pop(key.id, None)

        logging.debug(f"Keys to rotate in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated keys
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check IAM user keys rotation for '{account_id} ({account_name})'"
        )
        return

    logging.debug(
        f"Checked IAM user keys rotation for '{account_id} ({account_name})'")
Example #19
                            IssueOperations.set_status_remediated(
                                ddb_table, issue)
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating bucket '{bucket_name}' policy "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{bucket_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )


if __name__ == "__main__":
    module_name = sys.modules[__name__].__loader__.name
    set_logging(level=logging.DEBUG,
                logfile=f"/var/log/hammer/{module_name}.log")
    config = Config()
    add_cw_logging(config.local.log_group,
                   log_stream=module_name,
                   level=logging.DEBUG,
                   region=config.aws.region)
    try:
        si = SingletonInstance(module_name)
    except SingletonInstanceException:
        logging.error(
            f"Another instance of '{module_name}' is already running, quitting"
        )
        sys.exit(1)

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch',
Example #20
def pytest_sessionstart(session):
    if session.config.option.verbose > 2:
        set_logging(level=logging.DEBUG)  #, logfile="tests.log")

    mock_sts().start()
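
`mock_sts` comes from moto; starting it in `pytest_sessionstart` means every STS call in the test session, notably `assume_role`, is answered locally with canned values instead of hitting AWS. A quick illustration, assuming a moto version that still exposes per-service mocks:

import boto3
from moto import mock_sts

mock_sts().start()
sts = boto3.client("sts", region_name="us-east-1")
print(sts.get_caller_identity()["Account"])  # canned account id, no AWS call made
creds = sts.assume_role(RoleArn="arn:aws:iam::123456789012:role/any",  # fake ARN
                        RoleSessionName="test")["Credentials"]
print(creds["AccessKeyId"])                  # fake credentials from the mock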