Example #1
    def run(self, *args, **kwargs):
        """Entry point for the scheduler

        Args:
            *args: Optional arguments
            **kwargs: Optional keyword arguments

        Returns:
            None
        """
        accounts = list(AWSAccount.get_all(include_disabled=False).values())

        # S3 Bucket config
        s3_acl = get_template('cloudtrail_s3_bucket_policy.json')
        s3_bucket_name = self.dbconfig.get('bucket_name', self.ns)
        s3_bucket_region = self.dbconfig.get('bucket_region', self.ns,
                                             'us-west-2')
        s3_bucket_account = AWSAccount.get(
            self.dbconfig.get('bucket_account', self.ns))
        CloudTrail.create_s3_bucket(s3_bucket_name, s3_bucket_region,
                                    s3_bucket_account, s3_acl)

        self.validate_sqs_policy(accounts)

        for account in accounts:
            ct = CloudTrail(account, s3_bucket_name, s3_bucket_region,
                            self.log)
            ct.run()
Example #2
def process_action(resource, action, action_issuer='unknown'):
    """Process an audit action for a resource, if possible

    Args:
        resource (:obj:`Resource`): A resource object to perform the action on
        action (`str`): Type of action to perform (`kill` or `stop`)
        action_issuer (`str`): The issuer of the action
    Returns:
        `ActionStatus`
    """
    from cinq_collector_aws import AWSRegionCollector

    func_action = action_mapper[resource.resource_type][action]
    extra_info = {}
    action_status = ActionStatus.UNKNOWN

    if func_action:
        if action_mapper[resource.resource_type]['service_name'] == 'lambda':
            # Lambda-backed actions (currently RDS) run through a central
            # collector account and region rather than the resource's account
            client = get_aws_session(
                AWSAccount.get(dbconfig.get('rds_collector_account', AWSRegionCollector.ns, ''))
            ).client(
                'lambda',
                region_name=dbconfig.get('rds_collector_region', AWSRegionCollector.ns, '')
            )
        else:
            client = get_aws_session(AWSAccount(resource.account)).client(
                action_mapper[resource.resource_type]['service_name'],
                region_name=resource.location)
        try:
            logger.info(
                f'Trying to {action} resource {resource.id} for account {resource.account.account_name} / region {resource.location}'
            )
            action_status, extra_info = func_action(client, resource)
            if action_status == ActionStatus.SUCCEED:
                Enforcement.create(resource.account.account_id, resource.id,
                                   action, datetime.now(), extra_info)
        except Exception as ex:
            action_status = ActionStatus.FAILED
            logger.exception('Failed to apply action {} to {}: {}'.format(
                action, resource.id, ex))
        finally:
            # Always audit the attempted action, even when it failed
            auditlog(
                event='{}.{}.{}.{}'.format(
                    action_issuer, resource.resource_type, action, action_status),
                actor=action_issuer,
                data={
                    'resource_id': resource.id,
                    'account_name': resource.account.account_name,
                    'location': resource.location,
                    'info': extra_info
                }
            )
        # Returning here, rather than inside finally, avoids suppressing
        # in-flight exceptions such as KeyboardInterrupt
        return action_status
    else:
        logger.error('Failed to apply action {} to {}: Not supported'.format(
            action, resource.id))
        return ActionStatus.FAILED
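
Neither `action_mapper` nor the per-action handlers are shown in these examples. Below is a minimal sketch of the structure the lookup above assumes; the resource-type keys and handler names are hypothetical, but each handler must accept `(client, resource)` and return an `(ActionStatus, dict)` pair, and Lambda-backed types carry `'service_name': 'lambda'` as the RDS branch above requires.

# Hypothetical sketch of the action_mapper structure assumed above
action_mapper = {
    'aws_ec2_instance': {
        'service_name': 'ec2',
        'stop': stop_ec2_instance,        # callable(client, resource) -> (ActionStatus, dict)
        'kill': terminate_ec2_instance
    },
    'aws_rds_instance': {
        'service_name': 'lambda',         # routed through the central collector Lambda
        'stop': stop_rds_instance,
        'kill': kill_rds_instance
    }
}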
Example #3
def operate_rds_instance(client, resource, action):
    """Ask the central action-taker Lambda to apply `action` to an RDS instance

    Returns:
        `tuple` of (`ActionStatus`, `dict`)
    """
    resource_info = {
        'platform': 'AWS',
        'accountId': AWSAccount(resource.account).account_number,
        'accountName': resource.account.account_name,
        'action': action,
        'region': resource.location,
        'resourceId': resource.instance_name,
        'resourceType': 'rds',
        'resourceSubType': resource.engine
    }

    logger.info('Auditor Request Payload: {}'.format(resource_info))

    response = client.invoke(
        FunctionName=dbconfig.get('action_taker_arn', NS_AUDITOR_REQUIRED_TAGS, ''),
        Payload=json.dumps(resource_info).encode('utf-8')
    )

    response_payload = json.loads(response['Payload'].read().decode('utf-8'))
    logger.info('Server Response Payload: {}'.format(response_payload))

    if response_payload.get('success'):
        return ActionStatus.SUCCEED, response_payload.get('data', {})
    else:
        return ActionStatus.FAILED, {
            'message': response_payload.get('message')
        }
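
The branching above implies a simple response contract from the action-taker Lambda: a `success` flag plus either `data` or `message`. The payloads below are hypothetical examples consistent with that contract, not captures of real responses.

# Hypothetical response payloads matching the success/failure branch above
success_response = {'success': True, 'data': {'instance_state': 'stopping'}}
failure_response = {'success': False, 'message': 'DBInstance not found'}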
Example #4
    def run(self, *args, **kwargs):
        """Iterate through all AWS accounts and apply roles and policies from Github

        Args:
            *args: Optional list of arguments
            **kwargs: Optional list of keyword arguments

        Returns:
            `None`
        """
        accounts = list(AWSAccount.get_all(include_disabled=False).values())
        self.manage_policies(accounts)
Example #5
    def __init__(self, account):
        super().__init__()

        if isinstance(account, str):
            account = AWSAccount.get(account)

        if not isinstance(account, AWSAccount):
            raise InquisitorError('The AWS Collector only supports AWS Accounts, got {}'.format(
                account.__class__.__name__
            ))

        self.account = account
        self.session = get_aws_session(self.account)
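
A short usage sketch of the coercion above; the class name `AWSCollector` and the account name are hypothetical. The point is that a string is resolved via `AWSAccount.get`, while an `AWSAccount` instance passes through unchanged:

# Both forms are equivalent (class and account names are hypothetical)
collector = AWSCollector('prod-account')
collector = AWSCollector(AWSAccount.get('prod-account'))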
Example #6
    def get(self):
        rfc26 = []
        accounts = list(AWSAccount.get_all(include_disabled=False).values())
        instances_by_account = self._get_instances_by_account()
        issues_by_account = self._get_issues_by_account()
        buckets_by_account = self._get_buckets_by_account()

        for acct in accounts:
            missing_tags = issues_by_account[acct.account_name]
            total_instances = instances_by_account[acct.account_name]
            total_buckets = buckets_by_account[acct.account_name]
            taggable_resources = total_instances + total_buckets

            if missing_tags == 0:
                pct = 0
            elif taggable_resources:
                pct = float(missing_tags) / taggable_resources * 100
            else:
                pct = 100

            rfc26.append({
                'accountName': acct.account_name,
                'compliantResources': taggable_resources - missing_tags,
                'totalInstances': total_instances,
                'totalBuckets': total_buckets,
                'taggableResources': taggable_resources,
                'percent': 100 - pct
            })

        instances = self._get_instance_counts()
        instances_by_states = self._get_instances_by_state()
        instance_states = {x[0]: x[1] for x in instances_by_states}

        if instances:
            public_ips = float(self._get_public_ip_instances()) / instances * 100
        else:
            public_ips = 0

        return self.make_response({
            'message': None,
            'stats': {
                'ec2Instances': {
                    'total': instances,
                    'running': instance_states.get('running', 0),
                    'stopped': instance_states.get('stopped', 0)
                },
                'instancesWithPublicIps': public_ips,
                'rfc26Compliance': rfc26
            }
        })
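
A worked example of the compliance math above, with hypothetical counts: 5 missing-tag issues across 40 instances and 10 buckets is 10% non-compliance, reported as 90% compliant.

# Hypothetical worked example of the rfc26 percentage math
missing_tags = 5
taggable_resources = 40 + 10                           # instances + buckets
pct = float(missing_tags) / taggable_resources * 100   # 10.0
percent = 100 - pct                                    # 90.0, the reported compliance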
Example #7
def process_action(resource, action, resource_type):
    """Process an audit action for a resource, if possible

    Args:
        resource (:obj:`Resource`): A resource object to perform the action on
        action (`str`): Type of action to perform (`kill` or `stop`)
        resource_type (`str`): Type of the resource

    Returns:
        The result from the action function, or `False` if the action is not supported
    """
    func_action = action_mapper[resource_type][action]
    if func_action:
        session = get_aws_session(AWSAccount(resource.account))
        client = session.client(
            action_mapper[resource_type]['service_name'],
            region_name=resource.location
        )
        return func_action(client, resource)

    return False
Example #8
    def __init__(self, account, bucket_name, bucket_region, logger):
        self.account = account
        self.bucket_region = bucket_region
        self.bucket_name = bucket_name
        self.log = logger

        # Config settings
        self.global_ct_region = dbconfig.get('global_cloudtrail_region',
                                             self.ns, 'us-west-2')
        self.topic_name = dbconfig.get('sns_topic_name', self.ns,
                                       'cloudtrail-log-notification')
        self.trail_name = dbconfig.get('trail_name', self.ns)

        sqs_queue_name = dbconfig.get('sqs_queue_name', self.ns)
        sqs_queue_region = dbconfig.get('sqs_queue_region', self.ns)
        sqs_account = AWSAccount.get(dbconfig.get('sqs_queue_account',
                                                  self.ns))

        self.sqs_queue = 'arn:aws:sqs:{}:{}:{}'.format(
            sqs_queue_region, sqs_account.account_number, sqs_queue_name)

        self.session = get_aws_session(account)
Example #9
    def run(self):
        """Main entry point for the auditor worker.

        Returns:
            `None`
        """
        # Loop through all accounts that are marked as enabled
        accounts = list(AWSAccount.get_all(include_disabled=False).values())
        for account in accounts:
            self.log.debug('Updating VPC Flow Logs for {}'.format(account))

            self.session = get_aws_session(account)
            role_arn = self.confirm_iam_role(account)
            # region specific
            for aws_region in AWS_REGIONS:
                try:
                    vpc_list = VPC.get_all(account, aws_region).values()
                    need_vpc_flow_logs = [
                        x for x in vpc_list
                        if x.vpc_flow_logs_status != 'ACTIVE'
                    ]

                    for vpc in need_vpc_flow_logs:
                        if self.confirm_cw_log(account, aws_region, vpc.id):
                            self.create_vpc_flow_logs(account, aws_region,
                                                      vpc.id, role_arn)
                        else:
                            self.log.info(
                                'Failed to confirm log group for {}/{}/{}'.format(
                                    account, aws_region, vpc.id))

                except Exception:
                    self.log.exception(
                        'Failed processing VPCs for {}/{}.'.format(
                            account, aws_region))

            db.session.commit()
Example #10
def process_action(resource, action, action_issuer='unknown'):
    """Process an audit action for a resource, if possible

    Args:
        resource (:obj:`Resource`): A resource object to perform the action on
        action (`str`): Type of action to perform (`kill` or `stop`)
        action_issuer (`str`): The issuer of the action
    Returns:
        `ActionStatus`
    """
    func_action = action_mapper[resource.resource_type][action]
    if func_action:
        client = get_aws_session(AWSAccount(resource.account)).client(
            action_mapper[resource.resource_type]['service_name'],
            region_name=resource.location
        )
        action_status = ActionStatus.UNKNOWN
        try:
            action_status, metrics = func_action(client, resource)
            Enforcement.create(resource.account.account_name, resource.id, action, datetime.now(), metrics)
        except Exception as ex:
            action_status = ActionStatus.FAILED
            logger.error('Failed to apply action {} to {}: {}'.format(action, resource.id, ex))
        finally:
            # Always audit the attempted action, even when it failed
            auditlog(
                event='{}.{}.{}.{}'.format(action_issuer, resource.resource_type, action, action_status),
                actor=action_issuer,
                data={
                    'resource_id': resource.id,
                    'account_name': resource.account.account_name,
                    'location': resource.location
                }
            )
        # Returning here, rather than inside finally, avoids suppressing
        # in-flight exceptions such as KeyboardInterrupt
        return action_status
    else:
        logger.error('Failed to apply action {} to {}: Not supported'.format(action, resource.id))
        return ActionStatus.FAILED
Example #11
    def validate_sqs_policy(self, accounts):
        """Given a list of accounts, ensures that the SQS policy allows all the accounts to write to the queue

        Args:
            accounts (`list` of :obj:`Account`): List of accounts

        Returns:
            `None`
        """
        sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)
        sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)
        sqs_account = AWSAccount.get(
            self.dbconfig.get('sqs_queue_account', self.ns))
        session = get_aws_session(sqs_account)

        sqs = session.client('sqs', region_name=sqs_queue_region)
        sqs_queue_url = sqs.get_queue_url(
            QueueName=sqs_queue_name,
            QueueOwnerAWSAccountId=sqs_account.account_number)
        sqs_attribs = sqs.get_queue_attributes(
            QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])

        policy = json.loads(sqs_attribs['Attributes']['Policy'])

        for account in accounts:
            arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)
            source_arns = policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']
            if arn not in source_arns:
                self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))
                source_arns.append(arn)

        sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'],
                                 Attributes={'Policy': json.dumps(policy)})
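
The method above assumes the queue policy's first statement carries a `ForAnyValue:ArnEquals` condition listing the SNS topic ARNs allowed to send to the queue. The sketch below shows that assumed document shape; the account numbers, region, and queue name are hypothetical.

# Hypothetical queue policy shape assumed by validate_sqs_policy
policy = {
    'Version': '2012-10-17',
    'Statement': [{
        'Effect': 'Allow',
        'Principal': '*',
        'Action': 'sqs:SendMessage',
        'Resource': 'arn:aws:sqs:us-west-2:210987654321:cloudtrail-queue',
        'Condition': {
            'ForAnyValue:ArnEquals': {
                'aws:SourceArn': [
                    'arn:aws:sns:*:123456789012:cloudtrail-log-notification'
                ]
            }
        }
    }]
}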
Example #12
    def update_rds_databases(self):
        """Update list of RDS Databases for the account / region

        Returns:
            `None`
        """
        self.log.info('Updating RDS Databases for {} / {}'.format(
            self.account, self.region
        ))
        # All RDS resources are polled via a Lambda collector in a central account
        rds_collector_account = AWSAccount.get(self.rds_collector_account)
        rds_session = get_aws_session(rds_collector_account)
        # Existing RDS resources come from database
        existing_rds_dbs = RDSInstance.get_all(self.account, self.region)

        try:
            # Special session pinned to a single account for Lambda invocation so we
            # don't have to manage lambdas in every account & region
            lambda_client = rds_session.client('lambda', region_name=self.rds_collector_region)

            # The AWS Config Lambda will collect all the non-compliant resources for all regions
            # within the account
            input_payload = json.dumps({
                'account_id': self.account.account_number,
                'region': self.region,
                'role': self.rds_role,
                'config_rule_name': self.rds_config_rule_name
            }).encode('utf-8')
            response = lambda_client.invoke(
                FunctionName=self.rds_function_name,
                InvocationType='RequestResponse',
                Payload=input_payload
            )
            response_payload = json.loads(response['Payload'].read().decode('utf-8'))
            if response_payload['success']:
                rds_dbs = response_payload['data']
                if rds_dbs:
                    for db_instance in rds_dbs:
                        tags = {t['Key']: t['Value'] for t in db_instance['tags'] or []}
                        properties = {
                            'tags': tags,
                            'metrics': None,
                            'engine': db_instance['engine'],
                            'creation_date': db_instance['creation_date']
                        }
                        if db_instance['resource_name'] in existing_rds_dbs:
                            rds = existing_rds_dbs[db_instance['resource_name']]
                            if rds.update(db_instance, properties):
                                self.log.debug('Change detected for RDS instance {}/{} '
                                               .format(db_instance['resource_name'], properties))
                        else:
                            RDSInstance.create(
                                db_instance['resource_name'],
                                account_id=self.account.account_id,
                                location=db_instance['region'],
                                properties=properties,
                                tags=tags
                            )
                # Remove RDS instances that exist in the database but were
                # not returned by the collector
                collected_ids = {database['resource_name'] for database in rds_dbs}
                existing_ids = set(existing_rds_dbs)

                for resource_id in existing_ids - collected_ids:
                    db.session.delete(existing_rds_dbs[resource_id].resource)
                    self.log.debug('Removed RDS instance {}/{}'.format(
                        self.account.account_name,
                        resource_id
                    ))
                db.session.commit()

            else:
                self.log.error('RDS Lambda Execution Failed / {} / {} / {}'.format(
                    self.account.account_name, self.region, response_payload))

        except Exception as e:
            self.log.exception('There was a problem during RDS collection for {}/{}/{}'.format(
                self.account.account_name, self.region, e
            ))
            db.session.rollback()
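
The parsing above implies the collector Lambda returns a `success` flag and a `data` list whose entries carry at least `resource_name`, `region`, `engine`, `creation_date`, and a `tags` list of Key/Value pairs. A hypothetical payload consistent with those reads:

# Hypothetical collector Lambda response matching the fields read above
example_response = {
    'success': True,
    'data': [{
        'resource_name': 'prod-postgres-01',
        'region': 'us-west-2',
        'engine': 'postgres',
        'creation_date': '2018-06-01T00:00:00Z',
        'tags': [{'Key': 'owner', 'Value': 'team@example.com'}]
    }]
}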
Example #13
def delete_s3_bucket(client, resource):
    """Delete an empty S3 bucket, or apply lifecycle and deny-upload policies
    so the bucket drains and can be deleted on a later run.
    """
    try:
        session = get_aws_session(AWSAccount(resource.account))
        bucket = session.resource('s3', resource.location).Bucket(resource.resource_id)
        days_until_expiry = dbconfig.get('lifecycle_expiration_days', NS_AUDITOR_REQUIRED_TAGS, 3)
        # A separate rule for Object Markers is needed and can't be combined
        # into a single rule, per the AWS API
        lifecycle_policy = {
            'Rules': [
                {
                    'ID': 'cinqRemoveObjectsAndVersions',
                    'Status': 'Enabled',
                    'Filter': {'Prefix': ''},
                    'NoncurrentVersionExpiration': {'NoncurrentDays': days_until_expiry},
                    'Expiration': {
                        'Date': datetime.utcnow().replace(
                            hour=0, minute=0, second=0, microsecond=0
                        ) + timedelta(days=days_until_expiry)
                    },
                    'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': days_until_expiry}
                },
                {
                    'ID': 'cinqRemoveDeletedExpiredMarkers',
                    'Status': 'Enabled',
                    'Filter': {'Prefix': ''},
                    'Expiration': {'ExpiredObjectDeleteMarker': True}
                }
            ]
        }

        bucket_policy = {
            'Version': '2012-10-17',
            'Id': 'PutObjPolicy',
            'Statement': [
                {'Sid': 'cinqDenyObjectUploads',
                 'Effect': 'Deny',
                 'Principal': '*',
                 'Action': ['s3:PutObject', 's3:GetObject'],
                 'Resource': 'arn:aws:s3:::{}/*'.format(resource.resource_id)
                 }
            ]
        }

        metrics = {'Unavailable': 'Unavailable'}
        for prop in resource.properties:
            if prop.name == "metrics":
                metrics = prop.value

        objects = list(bucket.objects.limit(count=1))
        versions = list(bucket.object_versions.limit(count=1))
        if not objects and not versions:
            bucket.delete()
            logger.info('Deleted s3 bucket {} in {}'.format(resource.resource_id, resource.account))
            Enforcement.create(resource.account_id, resource.resource_id, 'DELETED',
                               datetime.now(), metrics)
            auditlog(
                event='required_tags.s3.terminate',
                actor=NS_AUDITOR_REQUIRED_TAGS,
                data={
                    'resource_id': resource.resource_id,
                    'account_name': resource.account.account_name,
                    'location': resource.location
                }
            )
            return True

        else:
            try:
                rules = bucket.LifecycleConfiguration().rules
                for rule in rules:
                    if rule['ID'] == 'cinqRemoveDeletedExpiredMarkers':
                        rules_exists = True
                        break
                else:
                    rules_exists = False
            except ClientError:
                rules_exists = False

            try:
                current_bucket_policy = bucket.Policy().policy
            except ClientError as error:
                if error.response['Error']['Code'] == 'NoSuchBucketPolicy':
                    current_bucket_policy = 'missing'
                else:
                    # Re-raise so current_bucket_policy is never left unbound
                    raise

            try:
                if not rules_exists:
                    # Metrics were captured above, before the lifecycle
                    # policy starts removing objects
                    bucket.LifecycleConfiguration().put(LifecycleConfiguration=lifecycle_policy)
                    logger.info('Added policies to delete bucket contents in s3 bucket {} in {}'.format(
                        resource.resource_id,
                        resource.account
                    ))
                    Enforcement.create(resource.account_id, resource.resource_id, 'LIFECYCLE_APPLIED',
                                       datetime.now(), metrics)

                if 'cinqDenyObjectUploads' not in current_bucket_policy:
                    bucket.Policy().put(Policy=json.dumps(bucket_policy))
                    logger.info('Added policy to prevent putObject in s3 bucket {} in {}'.format(
                        resource.resource_id,
                        resource.account
                    ))

            except ClientError as error:
                logger.error(
                    'Problem applying the bucket policy or lifecycle configuration to bucket {} / account {} / {}'
                    .format(resource.resource_id, resource.account_id, error.response['Error']['Code']))

            if rules_exists and 'cinqDenyObjectUploads' in current_bucket_policy:
                # We're waiting for the lifecycle policy to delete data
                raise ResourceActionError({'msg': 'wait_for_deletion'})

    except ResourceActionError:
        # Let the wait_for_deletion signal propagate untouched
        raise
    except Exception as error:
        logger.info(
            'Failed to delete s3 bucket {} in {}, error is {}'.format(resource.resource_id, resource.account, error))

        raise ResourceKillError(
            'Failed to delete s3 bucket {} in {}. Reason: {}'.format(resource.resource_id, resource.account, error)
        )
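
The two exception types give callers distinct signals: `ResourceActionError` means the bucket is still draining under the lifecycle policy and the action should be retried on a later run, while `ResourceKillError` is a hard failure. A minimal, hypothetical caller:

# Hypothetical caller illustrating the two error contracts above
try:
    delete_s3_bucket(client, resource)
except ResourceActionError:
    pass  # lifecycle policy is still emptying the bucket; retry on the next run
except ResourceKillError as error:
    logger.error('Giving up on bucket {}: {}'.format(resource.resource_id, error))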