def main():

    # Parse arguments
    parser = RulesArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Load ruleset
    ruleset = Ruleset(filename=args.base_ruleset, name=args.ruleset_name, load_rules=False, rules_dir=args.rules_dir)

    # Generate the HTML generator
    ruleset_generator = RulesetGenerator(args.ruleset_name, args.generator_dir)
    ruleset.ruleset_generator_metadata = Scout2Config('default', None, None, [], []).metadata
    ruleset_generator_path = ruleset_generator.save(ruleset, args.force_write, args.debug)

    # Open the HTML ruleset generator in a browser
    printInfo('Starting the HTML ruleset generator...')
    url = 'file://%s' % os.path.abspath(ruleset_generator_path)
    webbrowser.open(url, new=2)
def test_ruleset_class(self):
    # Nominal ruleset: rules are indexed by file name and parsed into Rule objects
    test001 = Ruleset(filename=self.test_ruleset_001)
    assert ('iam-password-policy-no-expiration.json' in test001.rules)
    assert (type(test001.rules['iam-password-policy-no-expiration.json']) == list)
    assert (type(test001.rules['iam-password-policy-no-expiration.json'][0]) == Rule)
    assert (hasattr(test001.rules['iam-password-policy-no-expiration.json'][0], 'path'))
    for rule in test001.rules:
        printDebug(test001.rules[rule][0].to_string())

    test002 = Ruleset(filename=self.test_ruleset_002)
    for rule in test002.rules:
        printDebug(test002.rules[rule][0].to_string())

    # Missing and invalid ruleset files must not raise
    test003 = Ruleset(filename='tests/data/no-such-file.json')
    assert (test003.rules == [])
    test004 = Ruleset(filename='tests/data/invalid-file.json')
    test005 = Ruleset(filename=self.test_ruleset_001, ruleset_generator=True)
def test_all_finding_rules(self):
    test_dir = os.path.dirname(os.path.realpath(__file__))
    test_ruleset_file_name = os.path.join(test_dir, 'data/ruleset-test.json')
    with open(os.path.join(test_dir, '../AWSScout2/rules/data/rulesets/default.json'), 'rt') as f:
        ruleset = json.load(f)
    rule_counters = {'found': 0, 'tested': 0, 'verified': 0}
    for file_name in ruleset['rules']:
        rule_counters['found'] += 1
        test_config_file_name = os.path.join(test_dir, 'data/rule-configs/%s' % file_name)
        if not os.path.isfile(test_config_file_name):
            continue
        rule_counters['tested'] += 1
        # Build a one-rule ruleset for the rule under test
        test_ruleset = {'rules': {}, 'about': 'regression test'}
        test_ruleset['rules'][file_name] = []
        rule = ruleset['rules'][file_name][0]
        rule['enabled'] = True
        test_ruleset['rules'][file_name].append(rule)
        with open(test_ruleset_file_name, 'wt') as f:
            f.write(json.dumps(test_ruleset, indent=4))
        # printError('Ruleset ::')
        # printError(str(test_ruleset))
        # Run the processing engine against the saved test configuration
        rules = Ruleset(filename=test_ruleset_file_name)
        pe = ProcessingEngine(rules)
        with open(test_config_file_name, 'rt') as f:
            aws_config = json.load(f)
        pe.run(aws_config)
        # Compare the reported findings against the expected results, if any
        service = file_name.split('-')[0]
        findings = aws_config['services'][service]['findings']
        findings = findings[list(findings.keys())[0]]['items']
        test_result_file_name = os.path.join(test_dir, 'data/rule-results/%s' % file_name)
        if not os.path.isfile(test_result_file_name):
            printError('Expected findings:')
            printError(json.dumps(findings, indent=4))
            continue
        rule_counters['verified'] += 1
        with open(test_result_file_name, 'rt') as f:
            items = json.load(f)
        try:
            assert (set(sorted(findings)) == set(sorted(items)))
        except Exception as e:
            printError('Expected items:\n %s' % json.dumps(sorted(items)))
            printError('Reported items:\n %s' % json.dumps(sorted(findings)))
            assert (False)
    printError('Existing rules: %d' % rule_counters['found'])
    printError('Processed rules: %d' % rule_counters['tested'])
    printError('Verified rules: %d' % rule_counters['verified'])
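# For reference, the single-rule ruleset that the loop above writes to
# data/ruleset-test.json has the shape sketched below. The rule file name is
# illustrative; in the test the rule body is copied verbatim from default.json
# with only 'enabled' forced to True.
example_test_ruleset = {
    'about': 'regression test',
    'rules': {
        # a single entry, keyed by the rule's file name
        'iam-password-policy-no-expiration.json': [
            {
                # ...attributes copied from the default ruleset entry...
                'enabled': True,
            }
        ]
    }
}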
def test_search_ruleset(self):
    test201 = Ruleset().search_ruleset('test', no_prompt=True)
def test_find_file(self):
    test101 = Ruleset().find_file(self.test_ruleset_001)
    test102 = Ruleset().find_file('default')
def main():

    # Parse arguments
    parser = Scout2ArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Set the profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    if not args.fetch_local:
        credentials = read_creds(args.profile[0], args.csv_credentials, args.mfa_serial, args.mfa_code)
        if credentials['AccessKeyId'] is None:
            return 42

    # Create a new Scout2 config
    report = Scout2Report(profile_name, args.report_dir, args.timestamp)
    aws_config = Scout2Config(profile_name, args.report_dir, args.timestamp, args.services, args.skipped_services, args.thread_config)

    if not args.fetch_local:
        # Fetch data from AWS APIs if not running a local analysis
        try:
            aws_config.fetch(credentials, regions=args.regions, partition_name=get_partition_name(credentials))
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130
        aws_config = report.jsrw.to_dict(aws_config)

        # Set the account ID
        aws_config['aws_account_id'] = get_aws_account_id(credentials)

        # Update means we reload the whole config and overwrite part of it
        if args.update:
            new_aws_config = copy.deepcopy(aws_config)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            for service in new_aws_config['service_list']:
                # Per service only for now, may add per region & per VPC later...
                aws_config['services'][service] = new_aws_config['services'][service]
            # Update the metadata too
            aws_config['metadata'] = Scout2Config('default', None, None, [], []).metadata
    else:
        # Reload to flatten everything into a python dictionary
        aws_config = report.jsrw.load_from_file(AWSCONFIG)

    # Pre processing
    preprocessing(aws_config, args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    finding_rules = Ruleset(profile_name, filename=args.ruleset, ip_ranges=args.ip_ranges, aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(finding_rules)
    pe.run(aws_config)

    # Create display filters
    filter_rules = Ruleset(filename='filters.json', rule_type='filters', aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(filter_rules)
    pe.run(aws_config)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(profile_name, args.exceptions[0])
        exceptions.process(aws_config)
        exceptions = exceptions.exceptions
    except Exception as e:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    postprocessing(aws_config, report.current_time, finding_rules)

    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(profile_name)[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(os.path.expanduser('~/.aws/recipes/%s/organization.json' % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    aws_config['organization'] = org
    except:
        pass

    if args.json:
        printInfo('Writing to results.json')
        with open('results.json', 'w') as fp:
            json.dump(aws_config, fp, default=json_helper)
        sys.exit()

    # Save config and create HTML report
    html_report_path = report.save(aws_config, exceptions, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)
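# The results.json dump above passes default=json_helper, a serializer defined
# elsewhere in Scout2 and only referenced here. Below is a minimal sketch of
# such a fallback (an assumption, not Scout2's actual json_helper): render
# datetimes as ISO 8601 strings and fall back to str() for any other value
# json cannot encode natively.
import datetime

def json_helper_sketch(obj):
    # Hypothetical default= serializer for json.dump(aws_config, fp, default=...)
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    return str(obj)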
def main():

    # Parse arguments
    parser = Scout2ArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Set the profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    if not args.fetch_local:
        credentials = read_creds(args.profile[0], args.csv_credentials, args.mfa_serial, args.mfa_code)
        if credentials['AccessKeyId'] is None:
            return 42

    # Create a new Scout2 config
    report = Scout2Report(profile_name, args.report_dir, args.timestamp)
    aws_config = Scout2Config(profile_name, args.report_dir, args.timestamp, args.services, args.skipped_services)

    if not args.fetch_local:
        # Fetch data from AWS APIs if not running a local analysis
        try:
            aws_config.fetch(credentials, regions=args.regions, partition_name=args.partition_name)
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130
        aws_config = report.jsrw.to_dict(aws_config)

        # Update means we reload the whole config and overwrite part of it
        if args.update:
            new_aws_config = copy.deepcopy(aws_config)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            for service in new_aws_config['service_list']:
                # Per service only for now, may add per region & per VPC later...
                aws_config['services'][service] = new_aws_config['services'][service]
    else:
        # Reload to flatten everything into a python dictionary
        aws_config = report.jsrw.load_from_file(AWSCONFIG)

    # Pre processing
    preprocessing(aws_config, args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    ruleset = Ruleset(profile_name, filename=args.ruleset, ip_ranges=args.ip_ranges)
    ruleset.analyze(aws_config)

    # Create display filters
    filters = Ruleset(filename='filters.json', rule_type='filters')
    filters.analyze(aws_config)

    # Handle exceptions
    process_exceptions(aws_config, args.exceptions[0])

    # Finalize
    postprocessing(aws_config, report.current_time, ruleset)

    # Save config and create HTML report
    html_report_path = report.save(aws_config, {}, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)
def main():

    # Parse arguments
    parser = ListallArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Support multiple environments
    for profile_name in args.profile:

        # Load the config
        report = Scout2Report(profile_name, args.report_dir, args.timestamp)
        aws_config = report.jsrw.load_from_file(AWSCONFIG)
        services = aws_config['service_list']

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            ruleset = Ruleset(filename='sample', load_rules=False)
            ruleset.ruleset['rules'][0]['filename'] = args.config
            ruleset.init_rules(services, args.ip_ranges, '', False)  # aws_config['aws_account_id, False)
            # Need to set the arguments values
            args.config_args
        else:
            # TODO:
            #args = args
            #config = {}
            #config['conditions'] = args.conditions if hasattr(args, 'conditions') else []
            #config['mapping'] = args.mapping if hasattr(args, 'mapping') else []
            pass

        # Get single rule... TODO: clean
        tmp = ruleset.rules.pop(list(ruleset.rules.keys())[0])
        rule = tmp.pop(list(tmp.keys())[0])

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule['keys'] = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule['keys'] = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule['keys'] += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                # Note: 'config' is not defined in this branch yet (see TODO above);
                # the except below then falls back to printing the object name
                target_path = config['display_path'] if 'display_path' in config else config['path']
                with open('listall-configs/%s.json' % target_path) as f:
                    rule['keys'] = json.load(f)['keys']
            except:
                # 4. Print the object name
                rule['keys'] = ['name']

        # Recursion
        if len(args.path):
            rule['path'] = args.path[0]
        target_path = rule['path'].split('.')
        current_path = []
        resources = recurse(aws_config['services'], aws_config['services'], target_path, current_path, rule)

        # Prepare the output format
        (lines, template) = format_listall_output(args.format_file, 'foo', args.format, rule)

        # Print the output
        printInfo(generate_listall_output(lines, resources, aws_config, template, []))
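# The recurse() call above walks aws_config['services'] along the dotted path
# passed via --path. The helper below is NOT Scout2's recurse, only a simplified
# illustration of that kind of traversal; it assumes that an 'id' path component
# means "iterate over every key at this level" and that any other component is a
# literal dictionary key.
def recurse_sketch(config, path, keys=('name',)):
    if not path:
        # Leaf reached: keep only the requested output keys when the leaf is a dict
        return [{k: config.get(k) for k in keys}] if isinstance(config, dict) else [config]
    head, rest = path[0], path[1:]
    results = []
    if head == 'id' and isinstance(config, dict):
        for value in config.values():
            results += recurse_sketch(value, rest, keys)
    elif isinstance(config, dict) and head in config:
        results += recurse_sketch(config[head], rest, keys)
    return results

# Example (hypothetical path and report layout):
# recurse_sketch(aws_config['services'], 'ec2.regions.id.vpcs.id.instances.id'.split('.'))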