Esempio n. 1
0
    def setup(self):
        """Enable debug logging and resolve paths to the ruleset test fixtures."""
        config_debug_level(True)
        # Resolve everything relative to this test module's directory.
        here = os.path.dirname(os.path.realpath(__file__))
        self.test_dir = here
        self.test_ruleset_001 = os.path.join(here, 'data/test-ruleset.json')
        self.test_ruleset_002 = os.path.join(here, 'data/test-ruleset-absolute-path.json')
Esempio n. 2
0
class TestScoutSNSUtilsClass:
    """Smoke tests for the Scout SNS helper functions."""

    # NOTE: runs once at class-creation time, not before each test.
    config_debug_level(True)

    def test_get_sns_region(self):
        """Fetch SNS data for a single region (us-east-1)."""
        # TODO: change to us-east-1
        creds = read_creds('default')
        config = {'regions': {'us-east-1': {}}}
        params = {
            'region': 'us-east-1',
            'creds': creds,
            'sns_config': config
        }
        get_sns_region(params=params)

    def test_get_sns_info(self):
        """Fetch SNS data across multiple regions and partitions.

        Exercises get_sns_info, which runs get_sns_region in multiple
        threads: once over us-east-1/us-west-1 in the 'aws' partition,
        once in 'aws-us-gov' (an empty region intersection).
        """
        creds = read_creds('default')
        config = {
            'regions': {
                'us-east-1': {},
                'us-west-1': {}
            }
        }  # , 'cn-north-1': {}}}
        regions = ['us-east-1', 'us-west-1']
        get_sns_info(creds, config, regions, 'aws')
        get_sns_info(creds, config, regions, 'aws-us-gov')

    #        get_sns_info(credentials, service_config, ['us-gov-west-1'], 'aws-us-gov')

    def test_sns_status_init(self):
        """Smoke test: status display initializes without raising."""
        sns_status_init()

    def test_sns_status(self):
        """Smoke test: status display accepts both flags and the default."""
        for flag in (True, False):
            sns_status(flag)
        sns_status()

    def test_formatted_status(self):
        """Smoke test: formatted status handles both boolean variants."""
        formatted_status(1, 42, True)
        formatted_status(42, 1, False)
Esempio n. 3
0
def main(args):
    """Run the 'listall' command: for each profile, evaluate one rule and
    print the matching resources.

    :param args: parsed CLI namespace (profile, config, config_args, path,
                 keys, keys_file, format, format_file, report_dir, timestamp,
                 debug)
    :return: None (errors for a profile are reported and skipped)
    """
    # Configure the debug level
    config_debug_level(args.debug)

    # FIXME check that all requirements are installed
    # # Check version of opinel
    # if not check_requirements(os.path.realpath(__file__)):
    #     return 42

    # Support multiple environments
    for profile_name in args.profile:

        # Load the previously-saved scan configuration for this profile
        try:
            # FIXME this is specific to AWS
            report_file_name = 'aws-%s' % profile_name
            report = Scout2Report('aws', report_file_name, args.report_dir, args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            # Also validates that the loaded config has a 'service_list' key.
            services = aws_config['service_list']
        except Exception as e:
            print_exception(e)
            print_error('Error, failed to load the configuration for profile %s' % profile_name)
            continue

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            # 1. A rule configuration file was provided on the CLI
            rule_filename = args.config
            ruleset = TmpRuleset(environment_name=args.profile[0],
                                 cloud_provider='aws',
                                 rule_dirs=[os.getcwd()],
                                 rule_filename=args.config,
                                 rule_args=args.config_args)
        elif args.path:
            # 2. Build a throwaway rule targeting the requested resource path
            rule_dict = {'description': 'artifact',
                         'path': args.path[0],
                         'conditions': []}
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()], rule_filename=rule_filename, rule_args=[])
        else:
            print_error(
                'Error, you must provide either a rule configuration file or the path to the resources targeted.')
            continue

        # FIXME is broken in Scout Suite, only handles AWS
        cloud_provider = get_provider(provider='aws',
                                      profile=args.profile[0])

        # Process the rule
        pe = ProcessingEngine(ruleset)
        pe.run(cloud_provider, skip_dashboard=True)

        # Retrieve the items matched by the rule
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key]['items']

        # Set the keys to output
        if args.keys:
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif args.keys_file:
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                   'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir, '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            # Narrowed from a bare 'except:' which also swallowed
            # SystemExit and KeyboardInterrupt.
            except Exception:
                # 4. Fall back to printing the object name
                rule.keys = ['name']

        # Prepare the output format
        (lines, template) = format_listall_output(args.format_file[0], None, args.format, rule)

        # Print the output
        print_info(generate_listall_output(lines, resources, aws_config, template, []))
 def setup(self):
     """Enable debug logging and initialize per-test fixtures."""
     config_debug_level(True)
     # Counters track how many rules were found / tested / verified.
     self.rule_counters = {name: 0 for name in ('found', 'tested', 'verified')}
     self.test_dir = os.path.dirname(os.path.realpath(__file__))
Esempio n. 5
0
def main(args=None):
    """
    Run a complete Scout Suite scan: authenticate, fetch data from the
    cloud provider, run the rulesets, and generate the HTML report.

    :param args: pre-parsed argument namespace; when None, the CLI
                 arguments are parsed with ScoutSuiteArgumentParser
    :return: 0 on success, 401 on authentication failure,
             130 when cancelled by the user
    """
    if not args:
        parser = ScoutSuiteArgumentParser()
        args = parser.parse_args()

    # Use the dictionary form so that missing keys yield None instead of a crash
    args = args.__dict__

    # Configure the debug level
    config_debug_level(args.get('debug'))

    # Create a cloud provider object
    cloud_provider = get_provider(
        provider=args.get('provider'),
        profile=args.get('profile'),
        project_id=args.get('project_id'),
        folder_id=args.get('folder_id'),
        organization_id=args.get('organization_id'),
        all_projects=args.get('all_projects'),
        report_dir=args.get('report_dir'),
        timestamp=args.get('timestamp'),
        services=args.get('services'),
        skipped_services=args.get('skipped_services'),
        thread_config=args.get('thread_config'))

    report_file_name = generate_report_name(cloud_provider.provider_code, args)

    # TODO: move this to after authentication, so that the report can be more specific to what's being scanned.
    # For example if scanning with a GCP service account, the SA email can only be known after authenticating...
    # Create a new report
    report = Scout2Report(args.get('provider'), report_file_name,
                          args.get('report_dir'), args.get('timestamp'))

    # Complete run, including pulling data from provider
    if not args.get('fetch_local'):
        # Authenticate to the cloud provider
        authenticated = cloud_provider.authenticate(
            profile=args.get('profile'),
            user_account=args.get('user_account'),
            service_account=args.get('service_account'),
            cli=args.get('cli'),
            msi=args.get('msi'),
            service_principal=args.get('service_principal'),
            file_auth=args.get('file_auth'),
            tenant_id=args.get('tenant_id'),
            subscription_id=args.get('subscription_id'),
            client_id=args.get('client_id'),
            client_secret=args.get('client_secret'),
            username=args.get('username'),
            password=args.get('password'))

        if not authenticated:
            return 401

        # Fetch data from provider APIs
        try:
            cloud_provider.fetch(regions=args.get('regions'))
        except KeyboardInterrupt:
            print_info('\nCancelled by user')
            return 130

        # Update means we reload the whole config and overwrite part of it
        if args.get('update'):
            current_run_services = copy.deepcopy(cloud_provider.services)
            last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
            cloud_provider.services = last_run_dict['services']
            for service in cloud_provider.service_list:
                cloud_provider.services[service] = current_run_services[
                    service]

    # Partial run, using pre-pulled data
    else:
        # Reload to flatten everything into a python dictionary
        last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
        for key in last_run_dict:
            setattr(cloud_provider, key, last_run_dict[key])

    # Pre processing
    cloud_provider.preprocessing(args.get('ip_ranges'),
                                 args.get('ip_ranges_name_key'))

    # Analyze config: run the finding rules against the fetched data
    finding_rules = Ruleset(environment_name=args.get('profile'),
                            cloud_provider=args.get('provider'),
                            filename=args.get('ruleset'),
                            ip_ranges=args.get('ip_ranges'),
                            aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(finding_rules)
    processing_engine.run(cloud_provider)

    # Create display filters
    filter_rules = Ruleset(cloud_provider=args.get('provider'),
                           filename='filters.json',
                           rule_type='filters',
                           aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(filter_rules)
    processing_engine.run(cloud_provider)

    # Handle exceptions (best-effort: a missing/invalid file is not fatal)
    try:
        exceptions = RuleExceptions(args.get('profile'),
                                    args.get('exceptions')[0])
        exceptions.process(cloud_provider)
        exceptions = exceptions.exceptions
    except Exception:
        print_debug(
            'Warning, failed to load exceptions. The file may not exist or may have an invalid format.'
        )
        exceptions = {}

    # Finalize
    cloud_provider.postprocessing(report.current_time, finding_rules)

    # TODO: this is AWS-specific - move to postprocessing?
    # This is partially implemented
    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(args.get('profile'))[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(
                os.path.expanduser('~/.aws/recipes/%s/organization.json' %
                                   profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    setattr(cloud_provider, 'organization', org)
    except Exception:
        # Best-effort: organization data is optional, ignore any failure
        pass

    # Save config and create HTML report
    html_report_path = report.save(cloud_provider, exceptions,
                                   args.get('force_write'), args.get('debug'))

    # Open the report by default
    if not args.get('no_browser'):
        print_info('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    return 0