Example no. 1
    async def _fetch(self, service, regions=None, excluded_regions=None):
        try:
            print_info('Fetching resources for the {} service'.format(
                format_service_name(service)))
            service_config = getattr(self, service)
            # call fetch method for the service
            if 'fetch_all' in dir(service_config):
                method_args = {}

                if regions:
                    method_args['regions'] = regions
                if excluded_regions:
                    method_args['excluded_regions'] = excluded_regions

                if self._is_provider('aws'):
                    if service != 'iam':
                        method_args['partition_name'] = get_partition_name(
                            self.credentials.session)

                await service_config.fetch_all(**method_args)
                if hasattr(service_config, 'finalize'):
                    await service_config.finalize()
            else:
                print_debug('No method to fetch service %s.' % service)
        except Exception as e:
            print_exception(f'Could not fetch {service} configuration: {e}')
Example no. 2
def connect_service(service, credentials, region_name=None, config=None, silent=False):
    """
    Instantiates an AWS API client

    :param service:                         Service targeted, e.g. ec2
    :param credentials:                     Dictionary with the access key ID, secret key and session token
    :param region_name:                     Region desired, e.g. us-east-2
    :param config:                          Configuration (optional)
    :param silent:                          Whether or not to print messages

    :return:                                The boto3 API client, or None if instantiation failed
    """
    api_client = None
    try:
        client_params = {'service_name': service.lower()}
        session_params = {'aws_access_key_id': credentials.get('access_key'),
                          'aws_secret_access_key': credentials.get('secret_key'),
                          'aws_session_token': credentials.get('token')}
        if region_name:
            client_params['region_name'] = region_name
            session_params['region_name'] = region_name
        if config:
            client_params['config'] = config
        aws_session = boto3.session.Session(**session_params)
        if not silent:
            info_message = 'Connecting to AWS %s' % service
            if region_name:
                info_message = info_message + ' in %s' % region_name
            print_info('%s...' % info_message)
        api_client = aws_session.client(**client_params)
    except Exception as e:
        print_exception(e)
    return api_client
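
A minimal usage sketch for connect_service above, assuming a credentials dictionary shaped like the one the function reads (the 'access_key', 'secret_key' and 'token' keys); the values are placeholders, not real credentials.

# Hypothetical usage of connect_service; credential values are placeholders
credentials = {
    'access_key': 'AKIAEXAMPLE',      # placeholder access key ID
    'secret_key': 'example-secret',   # placeholder secret access key
    'token': None,                    # session token, if any
}
ec2_client = connect_service('ec2', credentials, region_name='us-east-1')
if ec2_client:
    # Any EC2 API call works here; describe_regions is a cheap smoke test
    print([r['RegionName'] for r in ec2_client.describe_regions()['Regions']])
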
Example no. 3
    async def _fetch(self, service, regions):
        try:
            print_info('Fetching resources for the {} service'.format(
                format_service_name(service)))
            service_config = getattr(self, service)
            # call fetch method for the service
            if 'fetch_all' in dir(service_config):
                method_args = {
                    'credentials': self.credentials,
                    'regions': regions
                }

                if self._is_provider('aws'):
                    if service != 'iam':
                        method_args['partition_name'] = get_partition_name(
                            self.credentials)

                await service_config.fetch_all(**method_args)
                if hasattr(service_config, 'finalize'):
                    await service_config.finalize()
            else:
                print_debug('No method to fetch service %s.' % service)
        except Exception as e:
            print_error('Error: could not fetch %s configuration.' % service)
            print_exception(e)
Example no. 4
    async def get_projects(self):
        try:

            # All projects to which the user / Service Account has access to
            if self.all_projects:
                return await self._get_projects_recursively(
                    parent_type='all', parent_id=None)
            # Project passed through the CLI
            elif self.project_id:
                return await self._get_projects_recursively(
                    parent_type='project', parent_id=self.project_id)
            # Folder passed through the CLI
            elif self.folder_id:
                return await self._get_projects_recursively(
                    parent_type='folder', parent_id=self.folder_id)
            # Organization passed through the CLI
            elif self.organization_id:
                return await self._get_projects_recursively(
                    parent_type='organization', parent_id=self.organization_id)
            # Project inferred from default configuration
            elif self.default_project_id:
                return await self._get_projects_recursively(
                    parent_type='project', parent_id=self.default_project_id)
            # None of the above: log it and return an empty list
            else:
                print_info(
                    "Could not infer the Projects to scan and no default Project ID was found.")
                return []

        except Exception as e:
            print_exception(f'Failed to retrieve projects: {e}')
            return []
Example no. 5
 def _match_instances_and_roles(self):
     print_info('Matching EC2 instances and IAM roles')
     ec2_config = self.services['ec2']
     iam_config = self.services['iam']
     role_instances = {}
     for r in ec2_config['regions']:
         for v in ec2_config['regions'][r]['vpcs']:
             if 'instances' in ec2_config['regions'][r]['vpcs'][v]:
                 for i in ec2_config['regions'][r]['vpcs'][v]['instances']:
                     instance_profile = ec2_config['regions'][r]['vpcs'][v][
                         'instances'][i]['IamInstanceProfile']
                     instance_profile_id = instance_profile[
                         'Id'] if instance_profile else None
                     if instance_profile_id:
                         manage_dictionary(role_instances,
                                           instance_profile_id, [])
                         role_instances[instance_profile_id].append(i)
     for role_id in iam_config['roles']:
         iam_config['roles'][role_id]['instances_count'] = 0
         for instance_profile_id in iam_config['roles'][role_id][
                 'instance_profiles']:
             if instance_profile_id in role_instances:
                 iam_config['roles'][role_id]['instance_profiles'][instance_profile_id]['instances'] = \
                     role_instances[instance_profile_id]
                 iam_config['roles'][role_id]['instances_count'] += len(
                     role_instances[instance_profile_id])
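
For reference, a minimal, hypothetical shape of the two dictionaries the method above traverses; every identifier is invented.

# Hypothetical inputs for _match_instances_and_roles; all IDs are made up
ec2_config = {
    'regions': {
        'us-east-1': {
            'vpcs': {
                'vpc-12345678': {
                    'instances': {
                        'i-aaaa': {'IamInstanceProfile': {'Id': 'AIPAEXAMPLE'}},
                        'i-bbbb': {'IamInstanceProfile': None},
                    }
                }
            }
        }
    }
}
iam_config = {
    'roles': {
        'AROAEXAMPLE': {'instance_profiles': {'AIPAEXAMPLE': {}}}
    }
}
# After the method runs, iam_config['roles']['AROAEXAMPLE'] gains
# 'instances_count' == 1 and its matching profile lists instance 'i-aaaa'
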
Example no. 6
    def save_to_file(self, content, file_type, force_write, debug):
        config_path, first_line = get_filename(file_type, self.report_name,
                                               self.report_dir)
        print_info('Saving data to %s' % config_path)
        try:
            with self.__open_file(config_path, force_write) as f:
                if first_line:
                    print('%s' % first_line, file=f)
                results = json.dumps(content,
                                     indent=4 if debug else None,
                                     separators=(',', ': '),
                                     sort_keys=True,
                                     cls=ScoutJsonEncoder)
                print('%s' % results, file=f)
                if file_type == 'RESULTS':
                    timestamp = datetime.datetime.now().strftime(
                        "%d-%m-%y %H:%M:%S")
                    store_custom_format(json.loads(results), config_path,
                                        self.report_name, force_write,
                                        content.account_id, timestamp)

        except AttributeError:
            # __open_file returned None
            pass
        except Exception as e:
            print_exception(e)
Example no. 7
    def _get_projects(self):
        # All projects to which the user / Service Account has access to
        if self.all_projects:
            self.projects = self._gcp_facade_get_projects(
                parent_type='all', parent_id=None)

        # Project passed through the CLI
        elif self.project_id:
            self.projects = self._gcp_facade_get_projects(
                parent_type='project', parent_id=self.project_id)

        # Folder passed through the CLI
        elif self.folder_id:
            self.projects = self._gcp_facade_get_projects(
                parent_type='folder', parent_id=self.folder_id)

        # Organization passed through the CLI
        elif self.organization_id:
            self.projects = self._gcp_facade_get_projects(
                parent_type='organization', parent_id=self.organization_id)

        # Project inferred from default configuration
        elif self.credentials.default_project_id:
            self.projects = self._gcp_facade_get_projects(
                parent_type='project', parent_id=self.credentials.default_project_id)

        # None of the above: log that no projects could be inferred
        else:
            print_info(
                "Could not infer the Projects to scan and no default Project ID was found.")
Example no. 8
 def create_html_report(self, force_write):
     contents = ''
     # Use the script corresponding to the result format
     contents += self.get_content_from_file('/%s_format.html' % self.result_format)
     # Use all scripts under html/partials/
     contents += self.get_content_from_folder('partials')
     contents += self.get_content_from_folder('partials/%s' % self.provider)
     # Use all scripts under html/summaries/
     contents += self.get_content_from_folder('summaries')
     contents += self.get_content_from_folder('summaries/%s' % self.provider)
     new_file, first_line = get_filename('REPORT', self.report_name, self.report_dir)
     print_info('Creating %s' % new_file)
     if prompt_for_overwrite(new_file, force_write):
         if os.path.exists(new_file):
             os.remove(new_file)
         with open(os.path.join(self.html_data_path, 'report.html')) as f:
             with open(new_file, 'wt') as nf:
                 for line in f:
                     newline = line
                     newline = newline.replace('<!-- CONTENTS PLACEHOLDER -->', contents)
                     newline = newline.replace('<!-- RESULTS PLACEHOLDER -->',
                                               get_filename('RESULTS',
                                                            self.report_name,
                                                            self.report_dir,
                                                            relative_path=True)[0])
                     newline = newline.replace('<!-- EXCEPTIONS PLACEHOLDER -->',
                                               get_filename('EXCEPTIONS',
                                                            self.report_name,
                                                            self.report_dir,
                                                            relative_path=True)[0])
                     newline = newline.replace('<!-- SQLITE JS PLACEHOLDER -->',
                                               '{}/sqlite.js'.format(DEFAULT_INCLUDES_DIRECTORY))
                     nf.write(newline)
     return new_file
Example no. 9
 def create_html_report(self, force_write):
     contents = ''
     # Use all scripts under html/partials/
     contents += self.get_content_from('partials')
     contents += self.get_content_from('partials/%s' % self.provider)
     # Use all scripts under html/summaries/
     contents += self.get_content_from('summaries')
     contents += self.get_content_from('summaries/%s' % self.provider)
     new_file, first_line = get_filename('HTMLREPORT', self.profile,
                                         self.report_dir)
     print_info('Creating %s' % new_file)
     if prompt_4_overwrite(new_file, force_write):
         if os.path.exists(new_file):
             os.remove(new_file)
         with open(os.path.join(self.html_data_path, self.html_root)) as f:
             with open(new_file, 'wt') as nf:
                 for line in f:
                     newline = line
                     if self.profile != 'default':
                         newline = newline.replace(
                             DEFAULT_RESULT_FILE,
                             DEFAULT_RESULT_FILE.replace(
                                 '.js', '-%s.js' % self.profile))
                         newline = newline.replace(
                             DEFAULT_EXCEPTIONS_FILE,
                             DEFAULT_EXCEPTIONS_FILE.replace(
                                 '.js', '-%s.js' % self.profile))
                     newline = newline.replace('<!-- PLACEHOLDER -->',
                                               contents)
                     nf.write(newline)
     return new_file
Example no. 10
    def _process_cloudtrail_trails(cloudtrail_config):
        print_info('Processing CloudTrail config')
        global_events_logging = []
        data_logging_trails_count = 0
        for region in cloudtrail_config['regions']:
            for trail_id in cloudtrail_config['regions'][region]['trails']:
                trail = cloudtrail_config['regions'][region]['trails'][
                    trail_id]
                if 'HomeRegion' in trail and trail['HomeRegion'] != region:
                    # Part of a multi-region trail, skip until we find the whole object
                    continue
                if trail['IncludeGlobalServiceEvents'] and trail['IsLogging']:
                    global_events_logging.append((
                        region,
                        trail_id,
                    ))
                # Any wildcard logging?
                if trail.get('wildcard_data_logging', False):
                    data_logging_trails_count += 1

        cloudtrail_config[
            'data_logging_trails_count'] = data_logging_trails_count
        cloudtrail_config['IncludeGlobalServiceEvents'] = len(
            global_events_logging) > 0
        cloudtrail_config['DuplicatedGlobalServiceEvents'] = len(
            global_events_logging) > 1
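
A small, hypothetical driver for the method above, showing how the summary flags come out for a single logging trail that records global service events; the structure and IDs are invented.

# Hypothetical input: one trail, seen from its home region
cloudtrail_config = {
    'regions': {
        'us-east-1': {
            'trails': {
                'trail-1': {
                    'HomeRegion': 'us-east-1',
                    'IncludeGlobalServiceEvents': True,
                    'IsLogging': True,
                    'wildcard_data_logging': True,
                }
            }
        }
    }
}
_process_cloudtrail_trails(cloudtrail_config)
# cloudtrail_config['data_logging_trails_count'] == 1
# cloudtrail_config['IncludeGlobalServiceEvents'] is True (one trail logs them)
# cloudtrail_config['DuplicatedGlobalServiceEvents'] is False (only one trail)
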
Example no. 11
    async def is_api_enabled(self, project_id, service):
        """
        Given a project ID and service name, this method tries to determine if the service's API is enabled
        """

        serviceusage_client = self._build_arbitrary_client('serviceusage',
                                                           'v1',
                                                           force_new=True)
        services = serviceusage_client.services()
        try:
            request = services.list(parent=f'projects/{project_id}')
            services_response = await GCPFacadeUtils.get_all(
                'services', request, services)
        except Exception as e:
            print_exception(
                f'Could not fetch the state of services for project \"{project_id}\", '
                f'including {format_service_name(service.lower())} in the execution',
                {'exception': e})
            return True

        # These are hardcoded endpoint correspondences as there's no easy way to do this.
        if service == 'IAM':
            endpoint = 'iam'
        elif service == 'KMS':
            endpoint = 'cloudkms'
        elif service == 'CloudStorage':
            endpoint = 'storage-component'
        elif service == 'CloudSQL':
            endpoint = 'sql-component'
        elif service == 'ComputeEngine':
            endpoint = 'compute'
        elif service == 'KubernetesEngine':
            endpoint = 'container'
        elif service == 'StackdriverLogging':
            endpoint = 'logging'
        elif service == 'StackdriverMonitoring':
            endpoint = 'monitoring'
        else:
            print_debug(
                'Could not validate the state of the {} API for project \"{}\", '
                'including it in the execution'.format(
                    format_service_name(service.lower()), project_id))
            return True

        for s in services_response:
            if endpoint in s.get('name'):
                if s.get('state') == 'ENABLED':
                    return True
                else:
                    print_info(
                        '{} API not enabled for project \"{}\", skipping'.
                        format(format_service_name(service.lower()),
                               project_id))
                    return False

        print_error(
            f'Could not validate the state of the {format_service_name(service.lower())} API '
            f'for project \"{project_id}\", including it in the execution')
        return True
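
The if/elif chain above is equivalent to a dictionary lookup; a sketch of that alternative, using only the correspondences already listed in the method:

# Equivalent dictionary form of the hardcoded service-to-endpoint mapping
API_ENDPOINTS = {
    'IAM': 'iam',
    'KMS': 'cloudkms',
    'CloudStorage': 'storage-component',
    'CloudSQL': 'sql-component',
    'ComputeEngine': 'compute',
    'KubernetesEngine': 'container',
    'StackdriverLogging': 'logging',
    'StackdriverMonitoring': 'monitoring',
}
endpoint = API_ENDPOINTS.get(service)  # None for services that aren't mapped
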
Example no. 12
def run_from_cli():
    parser = ScoutSuiteArgumentParser()
    args = parser.parse_args()

    # Convert the namespace to a dictionary so missing arguments return None instead of raising
    args = args.__dict__

    try:
        return run(
            provider=args.get('provider'),
            # AWS
            profile=args.get('profile'),
            # Azure
            user_account=args.get('user_account'),
            service_account=args.get('service_account'),
            cli=args.get('cli'),
            msi=args.get('msi'),
            service_principal=args.get('service_principal'),
            file_auth=args.get('file_auth'),
            tenant_id=args.get('tenant_id'),
            subscription_id=args.get('subscription_id'),
            client_id=args.get('client_id'),
            client_secret=args.get('client_secret'),
            username=args.get('username'),
            password=args.get('password'),
            # GCP
            project_id=args.get('project_id'),
            folder_id=args.get('folder_id'),
            organization_id=args.get('organization_id'),
            all_projects=args.get('all_projects'),
            # General
            report_name=args.get('report_name'),
            report_dir=args.get('report_dir'),
            timestamp=args.get('timestamp'),
            services=args.get('services'),
            skipped_services=args.get('skipped_services'),
            result_format=args.get('result_format'),
            database_name=args.get('database_name'),
            host_ip=args.get('host_ip'),
            host_port=args.get('host_port'),
            max_workers=args.get('max_workers'),
            regions=args.get('regions'),
            fetch_local=args.get('fetch_local'),
            update=args.get('update'),
            ip_ranges=args.get('ip_ranges'),
            ip_ranges_name_key=args.get('ip_ranges_name_key'),
            ruleset=args.get('ruleset'),
            exceptions=args.get('exceptions'),
            force_write=args.get('force_write'),
            debug=args.get('debug'),
            quiet=args.get('quiet'),
            log_file=args.get('log_file'),
            no_browser=args.get('no_browser'),
            programmatic_execution=False)
    except (KeyboardInterrupt, SystemExit):
        print_info('Exiting')
Example no. 13
def link_elastic_ips_callback2(ec2_config, current_config, path, current_path,
                               instance_id, callback_args):
    if instance_id == callback_args['instance_id']:
        if 'PublicIpAddress' not in current_config:
            current_config['PublicIpAddress'] = callback_args['elastic_ip']
        elif current_config['PublicIpAddress'] != callback_args['elastic_ip']:
            print_info(
                'Warning: public IP address exists (%s) for an instance associated with an elastic IP (%s)'
                % (current_config['PublicIpAddress'],
                   callback_args['elastic_ip']))
Example no. 14
def upload_findigs_to_securityhub(session, formatted_findings_list):
    try:
        if formatted_findings_list:
            print_info('Batch uploading {} findings'.format(len(formatted_findings_list)))
            securityhub = session.client('securityhub')
            response = securityhub.batch_import_findings(Findings=formatted_findings_list)
            print_info('Upload completed, {} succeeded, {} failed'.format(response.get('SuccessCount'),
                                                                          response.get('FailedCount')))
            return response
    except Exception as e:
        print_exception('Unable to upload findings to Security Hub: {}'.format(e))
Example no. 15
 def save_to_file(self, config, config_type, force_write, _debug):
     config_path, first_line = get_filename(config_type, self.report_name, self.report_dir, file_extension="db")
     print_info('Saving data to %s' % config_path)
     try:
         with self.__open_file(config_path, force_write) as database:
             result_dict = self.to_dict(config)
             for k, v in result_dict.items():
                 database[k] = v
             database.commit()
     except Exception as e:
         print_exception(e)
Example no. 16
    def _build_services_list(supported_services, services, skipped_services):

        # Ensure the requested and skipped services exist, otherwise log an error
        error = False
        for service in services + skipped_services:
            if service not in supported_services:
                print_error('Service \"{}\" does not exist, skipping'.format(service))
                error = True
        if error:
            print_info('Available services are: {}'.format(str(list(supported_services)).strip('[]')))

        return [s for s in supported_services if (services == [] or s in services) and s not in skipped_services]
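
A worked example of the filtering rule in the return statement, calling the method as a plain function with made-up service names:

# Hypothetical call; 's3' is requested but also skipped, so only 'ec2' survives
supported = ['ec2', 'iam', 's3', 'rds']
print(_build_services_list(supported, services=['ec2', 's3'], skipped_services=['s3']))
# -> ['ec2']
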
Example no. 17
async def run_concurrently(function, backoff_seconds=15):
    try:
        async with asyncio.get_event_loop().throttler:
            return await run_function_concurrently(function)
    except Exception as e:
        # Determine whether the exception is due to API throttling
        if is_throttled(e):
            print_info('Hitting API rate limiting, will retry in {}s'.format(backoff_seconds))
            await asyncio.sleep(backoff_seconds)
            return await run_concurrently(function, backoff_seconds + 15)
        else:
            raise
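
A usage sketch, assuming the throttler, is_throttled and run_function_concurrently helpers from the surrounding module: the function expects a zero-argument callable, so an API call is typically wrapped in a lambda. Note the backoff grows by 15 seconds per retry, i.e. linearly rather than exponentially.

# Hypothetical call site: wrap the blocking API call in a zero-argument lambda
buckets = await run_concurrently(lambda: api_client.list_buckets())
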
Example no. 18
 def save_to_file(self, config, file_type, force_write, debug):
     config_path, first_line = get_filename(file_type, self.profile, self.report_dir)
     print_info('Saving data to %s' % config_path)
     try:
         with self.__open_file(config_path, force_write) as f:
             if first_line:
                 print('%s' % first_line, file=f)
             print('%s' % json.dumps(config, indent=4 if debug else None, separators=(',', ': '), sort_keys=True, cls=Scout2Encoder), file=f)
     except AttributeError:
         # __open_file returned None
         pass
     except Exception as e:
         print_exception(e)
Example no. 19
async def run_concurrently(function, backoff_seconds=15):
    try:
        async with asyncio.get_event_loop().throttler:
            return await run_function_concurrently(function)
    except Exception as e:
        # Determine whether the exception is due to API throttling
        if is_throttled(e):
            source_file = inspect.getsourcefile(function)
            source_file_line = inspect.getsourcelines(function)[1]
            print_info(f'Hitting API rate limiting ({"/".join(source_file.split("/")[-2:])} L{source_file_line}), will retry in {backoff_seconds}s')
            await asyncio.sleep(backoff_seconds)
            return await run_concurrently(function, backoff_seconds + 15)
        else:
            raise
Example no. 20
def run(profile, file):
    session = boto3.Session(profile_name=profile)
    # Test querying for current user
    get_caller_identity(session)
    print_info('Authenticated with profile {}'.format(profile))

    try:
        with open(file) as f:
            formatted_findings_list = process_results_file(f,
                                                           session.region_name)
    except Exception as e:
        print_exception('Unable to open file {}: {}'.format(file, e))
        return  # without a findings list there is nothing to upload

    upload_findigs_to_securityhub(session, formatted_findings_list)
Example no. 21
    def _new_go_to_and_do(self, current_config, path, current_path, callbacks):
        """
        Recursively go to a target and execute a callback
        """
        try:

            key = path.pop(0)
            if not current_config:
                current_config = self.config
            if not current_path:
                current_path = []
            keys = key.split('.')
            if len(keys) > 1:
                while True:
                    key = keys.pop(0)
                    if not len(keys):
                        break
                    current_path.append(key)
                    current_config = current_config[key]
            if key in current_config:
                current_path.append(key)
                for (i, value) in enumerate(list(current_config[key])):
                    if len(path) == 0:
                        for callback_info in callbacks:
                            callback_name = callback_info[0]

                            # callback = globals()[callback_name]
                            callback = getattr(self, callback_name)

                            callback_args = callback_info[1]
                            if type(current_config[key]) == dict and type(
                                    value) != dict and type(value) != list:
                                callback(current_config[key][value], path,
                                         current_path, value, callback_args)
                            else:
                                callback(current_config, path, current_path,
                                         value, callback_args)
                    else:
                        tmp = copy.deepcopy(current_path)
                        try:
                            tmp.append(value)
                            self._new_go_to_and_do(current_config[key][value],
                                                   copy.deepcopy(path), tmp,
                                                   callbacks)
                        except Exception:
                            tmp.pop()
                            tmp.append(i)
                            self._new_go_to_and_do(current_config[key][i],
                                                   copy.deepcopy(path), tmp,
                                                   callbacks)
        except Exception as e:
            print_exception(e)
            print_info('Path: %s' % str(current_path))
            print_info('Key = %s' % (str(key) if 'key' in locals() else 'not defined'))
            print_info('Value = %s' % (str(value) if 'value' in locals() else 'not defined'))
            print_info('Path = %s' % path)
Example no. 22
    def __open_file(self, config_filename, force_write, quiet=False):
        """

        :param config_filename:
        :param force_write:
        :param quiet:
        :return:
        """
        if not quiet:
            print_info('Saving config...')
        if prompt_4_overwrite(config_filename, force_write):
            try:
                config_dirname = os.path.dirname(config_filename)
                if not os.path.isdir(config_dirname):
                    os.makedirs(config_dirname)
                return open(config_filename, 'wt')
            except Exception as e:
                print_exception(e)
        else:
            return None
Example no. 23
def save_blob_as_json(filename, blob, force_write):
    """
    Creates/Modifies file and saves python object as JSON

    :param filename:                    Path of the destination file
    :param blob:                        Python object to serialize as JSON
    :param force_write:                 Overwrite an existing file without prompting

    :return:
    """
    try:
        if prompt_overwrite(filename, force_write):
            with open(filename, 'wt') as f:
                # Write the serialized blob to the file rather than the console
                print('%s' % json.dumps(blob,
                                        indent=4,
                                        separators=(',', ': '),
                                        sort_keys=True,
                                        cls=CustomJSONEncoder), file=f)
    except Exception as e:
        print_exception(e)
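
A minimal usage sketch; the path and object are placeholders, and force_write=True skips the overwrite prompt.

# Hypothetical usage of save_blob_as_json
save_blob_as_json('/tmp/example.json', {'hello': 'world'}, force_write=True)
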
Example no. 24
    def parse_buckets(self, bucket, params):
        """
        Parse a single S3 bucket

        TODO:
        - CORS
        - Lifecycle
        - Notification ?
        - Get bucket's policy

        :param bucket:
        :param params:
        :return:
        """
        bucket['name'] = bucket.pop('Name')
        api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])]

        bucket['CreationDate'] = str(bucket['CreationDate'])
        bucket['region'] = get_s3_bucket_location(api_client, bucket['name'])
        # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...
        if bucket['region'] == 'EU':
            bucket['region'] = 'eu-west-1'
        # h4ck :: S3 is global but region-aware...
        if bucket['region'] not in params['api_clients']:
            print_info('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region']))
            self.buckets_count -= 1
            return

        api_client = params['api_clients'][bucket['region']]
        get_s3_bucket_logging(api_client, bucket['name'], bucket)
        get_s3_bucket_versioning(api_client, bucket['name'], bucket)
        get_s3_bucket_webhosting(api_client, bucket['name'], bucket)
        get_s3_bucket_default_encryption(api_client, bucket['name'], bucket)
        bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket)
        get_s3_bucket_policy(api_client, bucket['name'], bucket)
        get_s3_bucket_secure_transport(api_client, bucket['name'], bucket)
        # If requested, get key properties
        bucket['id'] = self.get_non_provider_id(bucket['name'])
        self.buckets[bucket['id']] = bucket
Example no. 25
    async def get_projects(self):
        try:

            # resourcemanager_client = self._get_client()
            # request = resourcemanager_client.projects().list()
            # projects_group = resourcemanager_client.projects()
            # return await GCPFacadeUtils.get_all('projects', request, projects_group)

            # All projects to which the user / Service Account has access to
            if self.all_projects:
                return await self._get_projects_recursively(parent_type='all',
                                                            parent_id=None)
            # Project passed through the CLI
            elif self.project_id:
                return await self._get_projects_recursively(
                    parent_type='project', parent_id=self.project_id)
            # Folder passed through the CLI
            elif self.folder_id:
                return await self._get_projects_recursively(
                    parent_type='folder', parent_id=self.folder_id)
            # Organization passed through the CLI
            elif self.organization_id:
                return await self._get_projects_recursively(
                    parent_type='organization', parent_id=self.organization_id)
            # Project inferred from default configuration
            elif self.default_project_id:
                return await self._get_projects_recursively(
                    parent_type='project', parent_id=self.default_project_id)
            # None of the above: log it and return an empty list
            else:
                print_info(
                    "Could not infer the Projects to scan and no default Project ID was found."
                )
                return []

        except Exception as e:
            print_exception('Failed to retrieve projects: {}'.format(e))
            return []
Example no. 26
def analyze_ec2_config(ec2_info, aws_account_id, force_write):
    try:
        print_info('Analyzing EC2 config... ', new_line=False)
        # Custom EC2 analysis
        #        check_for_elastic_ip(ec2_info)
        # FIXME - commented for now as this method doesn't seem to be defined anywhere
        # list_network_attack_surface(ec2_info, 'attack_surface', 'PublicIpAddress')
        # TODO: make this optional, commented out for now
        # list_network_attack_surface(ec2_info, 'private_attack_surface', 'PrivateIpAddress')
        print_info('Success')
    except Exception as e:
        print_info('Error')
        print_exception(e)
Example no. 27
    def fetch_all(self, credentials, regions=None, partition_name='aws', targets=None):
        """
        :param credentials:             Credentials to authenticate with
        :param regions:                 Name of regions to fetch data from
        :param partition_name:          AWS partition to connect to
        :param targets:                 Type of resources to be fetched; defaults to all.
        :return:
        """
        regions = [] if regions is None else regions
        global status, formatted_string

        # Initialize targets
        if not targets:
            targets = type(self).targets
        print_info('Fetching %s config...' % format_service_name(self.service))
        formatted_string = None

        # FIXME the below should be moved into each provider's code

        # Connect to the service
        if self._is_provider('aws'):
            if self.service in ['s3']:  # S3 namespace is global but APIs aren't....
                api_clients = {}
                for region in build_region_list(self.service, regions, partition_name):
                    api_clients[region] = connect_service('s3', credentials, region, silent=True)
                api_client = api_clients[list(api_clients.keys())[0]]
            elif self.service == 'route53domains':
                api_client = connect_service(self.service, credentials, 'us-east-1',
                                             silent=True)  # TODO: use partition's default region
            else:
                api_client = connect_service(self.service, credentials, silent=True)

        elif self._is_provider('gcp'):
            api_client = gcp_connect_service(service=self.service, credentials=credentials)

        elif self._is_provider('azure'):
            api_client = azure_connect_service(service=self.service, credentials=credentials)

        # Threading to fetch & parse resources (queue consumer)
        params = {'api_client': api_client}

        if self._is_provider('aws'):
            if self.service in ['s3']:
                params['api_clients'] = api_clients

        # Threading to parse resources (queue feeder)
        target_queue = self._init_threading(self.__fetch_target, params, self.thread_config['parse'])

        # Threading to list resources (queue feeder)
        params = {'api_client': api_client, 'q': target_queue}

        if self._is_provider('aws'):
            if self.service in ['s3']:
                params['api_clients'] = api_clients

        service_queue = self._init_threading(self.__fetch_service, params, self.thread_config['list'])

        # Init display
        self.fetchstatuslogger = FetchStatusLogger(targets)

        # Go
        for target in targets:
            service_queue.put(target)

        # Blocks until all items in the queue have been gotten and processed.
        service_queue.join()
        target_queue.join()

        # Show completion and force newline
        if self._is_provider('aws'):
            # Show completion and force newline
            if self.service != 'iam':
                self.fetchstatuslogger.show(True)
        else:
            self.fetchstatuslogger.show(True)

        # Threads should stop running as queues are empty
        self.run_target_threads = False
        self.run_service_threads = False
        # Put x items in the queues to ensure threads run one last time (and exit)
        for i in range(self.thread_config['parse']):
            target_queue.put(None)
        for j in range(self.thread_config['list']):
            service_queue.put(None)
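
The shutdown sequence at the end relies on a common queue pattern: worker threads block on get(), so putting one sentinel per worker wakes each thread one last time so it can exit. A self-contained sketch of that pattern, independent of the Scout2 classes (which use run flags instead of breaking on the sentinel):

import queue
import threading

def worker(q):
    while True:
        item = q.get()
        if item is None:   # sentinel: this worker should exit
            q.task_done()
            break
        print('processing', item)
        q.task_done()

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q,)) for _ in range(2)]
for t in threads:
    t.start()
for item in ['a', 'b', 'c']:
    q.put(item)
q.join()          # blocks until every real item has been processed
for _ in threads:
    q.put(None)   # one sentinel per worker
for t in threads:
    t.join()
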
Example no. 28
    def fetch_all(self,
                  credentials,
                  regions=None,
                  partition_name='aws',
                  targets=None):
        """
        Fetch all the configuration supported by Scout2 for a given service

        :param credentials:             Credentials to authenticate with
        :param regions:                 Name of regions to fetch data from
        :param partition_name:          AWS partition to connect to
        :param targets:                 Type of resources to be fetched; defaults to all.

        """
        # Initialize targets
        # Tweak params
        regions = [] if regions is None else regions
        realtargets = ()
        if not targets:
            targets = self.targets
        for i, target in enumerate(targets['first_region']):
            params = self.tweak_params(target[3], credentials)
            realtargets = realtargets + (
                (target[0], target[1], target[2], params, target[4]), )
        targets['first_region'] = realtargets
        realtargets = ()
        for i, target in enumerate(targets['other_regions']):
            params = self.tweak_params(target[3], credentials)
            realtargets = realtargets + (
                (target[0], target[1], target[2], params, target[4]), )
        targets['other_regions'] = realtargets

        print_info('Fetching %s config...' % format_service_name(self.service))
        self.fetchstatuslogger = FetchStatusLogger(targets['first_region'],
                                                   True)
        api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()

        # Init regions
        regions = build_region_list(
            api_service, regions,
            partition_name)  # TODO: move this code within this class
        self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)

        # Threading to fetch & parse resources (queue consumer)
        q = self._init_threading(self._fetch_target, {},
                                 self.thread_config['parse'])

        # Threading to list resources (queue feeder)
        qr = self._init_threading(
            self._fetch_region, {
                'api_service': api_service,
                'credentials': credentials,
                'q': q,
                'targets': ()
            }, self.thread_config['list'])

        # Go
        for i, region in enumerate(regions):
            qr.put((region, targets['first_region']
                    if i == 0 else targets['other_regions']))

        # Blocks until all items in the queue have been gotten and processed.
        qr.join()
        q.join()

        # Show completion and force newline
        self.fetchstatuslogger.show(True)

        # Threads should stop running as queues are empty
        self.run_qr_threads = False
        self.run_q_threads = False
        # Put x items in the queues to ensure threads run one last time (and exit)
        for i in range(self.thread_config['parse']):
            q.put(None)
        for j in range(self.thread_config['list']):
            qr.put(None)
Example no. 29
def main(args):
    # Configure the debug level
    config_debug_level(args.debug)

    # FIXME check that all requirements are installed
    # # Check version of opinel
    # if not check_requirements(os.path.realpath(__file__)):
    #     return 42

    # Support multiple environments
    for profile_name in args.profile:

        # Load the config
        try:
            # FIXME this is specific to AWS
            report_file_name = 'aws-%s' % profile_name
            report = Scout2Report('aws', report_file_name, args.report_dir, args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            services = aws_config['service_list']
        except Exception as e:
            print_exception(e)
            print_error('Error, failed to load the configuration for profile %s' % profile_name)
            continue

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            rule_filename = args.config
            ruleset = TmpRuleset(environment_name=args.profile[0],
                                 cloud_provider='aws',
                                 rule_dirs=[os.getcwd()],
                                 rule_filename=args.config,
                                 rule_args=args.config_args)
        elif len(args.path) > 0:
            # Create a local tmp rule
            rule_dict = {'description': 'artifact'}
            rule_dict['path'] = args.path[0]
            rule_dict['conditions'] = []
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()], rule_filename=rule_filename, rule_args=[])
        else:
            print_error(
                'Error, you must provide either a rule configuration file or the path to the resources targeted.')
            continue

        # FIXME is broken in Scout Suite, only handles AWS
        cloud_provider = get_provider(provider='aws',
                                      profile=args.profile[0])

        # Process the rule
        pe = ProcessingEngine(ruleset)
        pe.run(cloud_provider, skip_dashboard=True)

        # Retrieve items
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key]['items']

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                   'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir, '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            except Exception:
                # 4. Print the object name
                rule.keys = ['name']

        # Prepare the output format
        (lines, template) = format_listall_output(args.format_file[0], None, args.format, rule)

        # Print the output
        print_info(generate_listall_output(lines, resources, aws_config, template, []))
Example no. 30
async def _run(provider,
               # AWS
               profile,
               aws_access_key_id,
               aws_secret_access_key,
               aws_session_token,
               # Azure
               user_account, service_account,
               cli, msi, service_principal, file_auth, tenant_id, subscription_id,
               client_id, client_secret,
               username, password,
               # GCP
               project_id, folder_id, organization_id, all_projects,
               # Aliyun
               access_key_id, access_key_secret,
               # General
               report_name, report_dir,
               timestamp,
               services, skipped_services,
               result_format,
               database_name, host_ip, host_port,
               regions,
               excluded_regions,
               fetch_local, update,
               ip_ranges, ip_ranges_name_key,
               ruleset, exceptions,
               force_write,
               debug,
               quiet,
               log_file,
               no_browser,
               programmatic_execution,
               **kwargs):
    """
    Run a scout job.
    """

    # Configure the debug level
    set_logger_configuration(debug, quiet, log_file)

    print_info('Launching Scout')

    print_info('Authenticating to cloud provider')
    auth_strategy = get_authentication_strategy(provider)
    try:
        credentials = auth_strategy.authenticate(profile=profile,
                                                 aws_access_key_id=aws_access_key_id,
                                                 aws_secret_access_key=aws_secret_access_key,
                                                 aws_session_token=aws_session_token,
                                                 user_account=user_account,
                                                 service_account=service_account,
                                                 cli=cli,
                                                 msi=msi,
                                                 service_principal=service_principal,
                                                 file_auth=file_auth,
                                                 tenant_id=tenant_id,
                                                 subscription_id=subscription_id,
                                                 client_id=client_id,
                                                 client_secret=client_secret,
                                                 username=username,
                                                 password=password,
                                                 access_key_id=access_key_id,
                                                 access_key_secret=access_key_secret,
                                                 programmatic_execution=programmatic_execution)

        if not credentials:
            return 101
    except Exception as e:
        print_exception('Authentication failure: {}'.format(e))
        return 101

    # Create a cloud provider object
    cloud_provider = get_provider(provider=provider,
                                  profile=profile,
                                  project_id=project_id,
                                  folder_id=folder_id,
                                  organization_id=organization_id,
                                  all_projects=all_projects,
                                  report_dir=report_dir,
                                  timestamp=timestamp,
                                  services=services,
                                  skipped_services=skipped_services,
                                  credentials=credentials)

    # Create a new report
    report_name = report_name if report_name else cloud_provider.get_report_name()
    report = ScoutReport(cloud_provider.provider_code,
                         report_name,
                         report_dir,
                         timestamp,
                         result_format=result_format)

    if database_name:
        database_file, _ = get_filename('RESULTS', report_name, report_dir, file_extension="db")
        Server.init(database_file, host_ip, host_port)
        return

    # Complete run, including pulling data from provider
    if not fetch_local:

        # Fetch data from provider APIs
        try:
            print_info('Gathering data from APIs')
            await cloud_provider.fetch(regions=regions, excluded_regions=excluded_regions)
        except KeyboardInterrupt:
            print_info('\nCancelled by user')
            return 130

        # Update means we reload the whole config and overwrite part of it
        if update:
            print_info('Updating existing data')
            current_run_services = copy.deepcopy(cloud_provider.services)
            last_run_dict = report.encoder.load_from_file('RESULTS')
            cloud_provider.services = last_run_dict['services']
            for service in cloud_provider.service_list:
                cloud_provider.services[service] = current_run_services[service]

    # Partial run, using pre-pulled data
    else:
        print_info('Using local data')
        # Reload to flatten everything into a python dictionary
        last_run_dict = report.encoder.load_from_file('RESULTS')
        for key in last_run_dict:
            setattr(cloud_provider, key, last_run_dict[key])

    # Pre processing
    cloud_provider.preprocessing(
        ip_ranges, ip_ranges_name_key)

    # Analyze config
    print_info('Running rule engine')
    finding_rules = Ruleset(cloud_provider=cloud_provider.provider_code,
                            environment_name=cloud_provider.environment,
                            filename=ruleset,
                            ip_ranges=ip_ranges,
                            account_id=cloud_provider.account_id)
    processing_engine = ProcessingEngine(finding_rules)
    processing_engine.run(cloud_provider)

    # Create display filters
    print_info('Applying display filters')
    filter_rules = Ruleset(cloud_provider=cloud_provider.provider_code,
                           environment_name=cloud_provider.environment,
                           rule_type='filters',
                           account_id=cloud_provider.account_id)
    processing_engine = ProcessingEngine(filter_rules)
    processing_engine.run(cloud_provider)

    # Handle exceptions
    if exceptions:
        print_info('Applying exceptions')
        try:
            exceptions = RuleExceptions(exceptions)
            exceptions.process(cloud_provider)
            exceptions = exceptions.exceptions
        except Exception as e:
            print_exception('Failed to load exceptions: {}'.format(e))
            exceptions = {}
    else:
        exceptions = {}

    run_parameters = {
        'services': services,
        'skipped_services': skipped_services,
        'regions': regions,
        'excluded_regions': excluded_regions,
    }
    # Finalize
    cloud_provider.postprocessing(report.current_time, finding_rules, run_parameters)

    # Save config and create HTML report
    html_report_path = report.save(
        cloud_provider, exceptions, force_write, debug)

    # Open the report by default
    if not no_browser:
        print_info('Opening the HTML report')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    if ERRORS_LIST:  # errors were handled during execution
        return 200
    else:
        return 0