def get_stackset_ready_accounts(credentials, account_ids, quiet=True):
    """
    Verify which AWS accounts have been configured for CloudFormation stack sets
    by attempting to assume the stack set execution role in each of them.

    :param credentials:     AWS credentials to use when calling sts:AssumeRole
    :param account_ids:     List of AWS account IDs to check for stack set configuration
    :param quiet:           Whether to suppress informational output

    :return:                List of account IDs in which assuming the stack set execution role worked
    """
    api_client = connect_service('sts', credentials, silent=True)
    configured_account_ids = []
    for account_id in account_ids:
        try:
            role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
            api_client.assume_role(
                RoleArn=role_arn,
                RoleSessionName='opinel-get_stackset_ready_accounts')
            configured_account_ids.append(account_id)
        except Exception:
            # Accounts in which the role cannot be assumed are simply skipped
            pass
    if len(configured_account_ids) != len(account_ids) and not quiet:
        printInfo('Only %d of these accounts have the necessary stack set execution role:'
                  % len(configured_account_ids))
        printDebug(str(configured_account_ids))
    return configured_account_ids
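# A minimal usage sketch (not part of the original code), assuming opinel's
# read_creds() helper is importable; the profile name and account IDs below
# are placeholders.
def example_get_stackset_ready_accounts():
    from opinel.utils.credentials import read_creds  # assumed opinel module layout
    credentials = read_creds('default')
    return get_stackset_ready_accounts(credentials,
                                       ['111111111111', '222222222222'],
                                       quiet=False)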
def find_profiles_in_file(filename, names=None, quiet=True):
    # Avoid a mutable default argument
    names = [] if names is None else names
    profiles = []
    if not isinstance(names, list):
        names = [names]
    if not quiet:
        printDebug('Searching for profiles matching %s in %s ... ' % (str(names), filename))
    name_filters = []
    for name in names:
        name_filters.append(re.compile('^%s$' % name))
    if os.path.isfile(filename):
        with open(filename, 'rt') as f:
            aws_credentials = f.read()
            existing_profiles = re_profile_name.findall(aws_credentials)
            profile_count = len(existing_profiles) - 1
            for i, profile in enumerate(existing_profiles):
                matching_profile = False
                raw_profile = None
                for name_filter in name_filters:
                    if name_filter.match(profile[2]):
                        matching_profile = True
                        i1 = aws_credentials.index(profile[0])
                        if i < profile_count:
                            i2 = aws_credentials.index(existing_profiles[i + 1][0])
                            raw_profile = aws_credentials[i1:i2]
                        else:
                            raw_profile = aws_credentials[i1:]
                if len(name_filters) == 0 or matching_profile:
                    profiles.append(
                        AWSProfile(filename=filename, raw_profile=raw_profile, name=profile[2]))
    return profiles
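# Illustrative sketch: search the standard AWS credentials file for profiles
# matching a regex. The pattern 'dev-.*' is a placeholder.
def example_find_profiles_in_file():
    import os
    return find_profiles_in_file(os.path.expanduser('~/.aws/credentials'),
                                 names=['dev-.*'], quiet=False)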
def __init__(self, environment_name='default', cloud_provider='aws', filename=None, name=None,
             rules_dir=None, rule_type='findings', ip_ranges=None, aws_account_id=None,
             ruleset_generator=False):
    rules_dir = [] if rules_dir is None else rules_dir
    ip_ranges = [] if ip_ranges is None else ip_ranges
    self.rules_data_path = os.path.dirname(
        os.path.dirname(os.path.abspath(__file__))) + '/providers/%s/rules' % cloud_provider
    self.environment_name = environment_name
    self.rule_type = rule_type
    # Ruleset filename
    self.filename = self.find_file(filename, provider=cloud_provider)
    if not self.filename:
        self.search_ruleset(environment_name)
    printDebug('Loading ruleset %s' % self.filename)
    self.name = os.path.basename(self.filename).replace('.json', '') if not name else name
    self.load(self.rule_type)
    self.shared_init(ruleset_generator, rules_dir, aws_account_id, ip_ranges)
def create_stack_instances(api_client, stack_set_name, account_ids, regions, quiet=False):
    """
    Create stack instances for a stack set in the given accounts and regions.

    :param api_client:      AWS CloudFormation API client
    :param stack_set_name:  Name of the stack set
    :param account_ids:     List of target AWS account IDs
    :param regions:         List of target regions
    :param quiet:           Whether to suppress informational output

    :return:                The ID of the stack set operation that was started
    """
    operation_preferences = {
        'FailureTolerancePercentage': 100,
        'MaxConcurrentPercentage': 100
    }
    if not quiet:
        printInfo('Creating stack instances in %d regions and %d accounts...'
                  % (len(regions), len(account_ids)))
        printDebug(' %s' % ', '.join(regions))
    response = api_client.create_stack_instances(
        StackSetName=stack_set_name,
        Accounts=account_ids,
        Regions=regions,
        OperationPreferences=operation_preferences)
    if not quiet:
        printInfo('Successfully started operation Id %s' % response['OperationId'])
    return response['OperationId']
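# A minimal sketch, assuming boto3 credentials are configured in the
# environment; the stack set name, account ID, and regions are placeholders.
def example_create_stack_instances():
    import boto3
    api_client = boto3.client('cloudformation')
    return create_stack_instances(api_client, 'my-stack-set',
                                  ['111111111111'], ['us-east-1', 'eu-west-1'])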
def fetch(self, credentials, services=None, regions=None):
    services = [] if services is None else services
    regions = [] if regions is None else regions
    for service in vars(self):
        try:
            # Skip services not requested by the caller
            if services != [] and service not in services:
                continue
            service_config = getattr(self, service)
            # Call the fetch method for the service
            if 'fetch_all' in dir(service_config):
                method_args = {}
                method_args['credentials'] = credentials
                method_args['regions'] = regions
                if self._is_provider('aws'):
                    if service != 'iam':
                        method_args['partition_name'] = get_partition_name(credentials)
                service_config.fetch_all(**method_args)
                if hasattr(service_config, 'finalize'):
                    service_config.finalize()
            else:
                printDebug('No method to fetch service %s.' % service)
        except Exception as e:
            printError('Error: could not fetch %s configuration.' % service)
            printException(e)
def delete_resources(self, resource_type):
    resources = copy.deepcopy(self.cleanup[resource_type])
    while True:
        unmodifiable_resource = False
        remaining_resources = []
        printDebug('Deleting the following %s: %s' % (resource_type, str(resources)))
        time.sleep(5)
        for resource in resources:
            if resource_type == 'groups':
                errors = []
                try:
                    self.api_client.delete_group(GroupName=resource)
                except Exception:
                    errors = ['EntityTemporarilyUnmodifiable']
            else:
                method = globals()['delete_%s' % resource_type[:-1]]
                errors = method(self.api_client, resource)
            if len(errors):
                printDebug('Errors when deleting %s' % resource)
                remaining_resources.append(resource)
                for handled_code in ['EntityTemporarilyUnmodifiable', 'DeleteConflict']:
                    if handled_code in errors:
                        unmodifiable_resource = True
                    else:
                        printError('Failed to delete %s %s' % (resource_type[:-1], resource))
                        assert False
        resources = copy.deepcopy(remaining_resources)
        if not unmodifiable_resource:
            break
def assert_create(self, resource_type, resource_data, error_count):
    assert len(resource_data['errors']) == error_count
    nameattr = '%sname' % resource_type[:-1]
    if error_count == 0:
        printDebug('Successfully created %s %s' % (resource_type[:-1], resource_data[nameattr]))
        self.cleanup[resource_type].append(resource_data[nameattr])
def run(self, cloud_provider, skip_dashboard=False):
    # Clean up existing findings
    for service in cloud_provider.services:
        cloud_provider.services[service][self.ruleset.rule_type] = {}
    # Process each rule
    for finding_path in self.rules:
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
            finding_path = rule.path
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(cloud_provider.services[service], self.ruleset.rule_type, {})
            # Alias to the findings dictionary to avoid repeated long subscripts
            findings = cloud_provider.services[service][self.ruleset.rule_type]
            findings[rule.key] = {}
            findings[rule.key]['description'] = rule.description
            findings[rule.key]['path'] = rule.path
            for attr in ['level', 'id_suffix', 'display_path']:
                if hasattr(rule, attr):
                    findings[rule.key][attr] = getattr(rule, attr)
            try:
                setattr(rule, 'checked_items', 0)
                findings[rule.key]['items'] = recurse(cloud_provider.services,
                                                      cloud_provider.services, path, [], rule, True)
                if skip_dashboard:
                    continue
                findings[rule.key]['dashboard_name'] = rule.dashboard_name
                findings[rule.key]['checked_items'] = rule.checked_items
                findings[rule.key]['flagged_items'] = len(findings[rule.key]['items'])
                findings[rule.key]['service'] = rule.service
                findings[rule.key]['rationale'] = rule.rationale if hasattr(rule, 'rationale') \
                    else 'No description available.'
            except Exception as e:
                printException(e)
                printError('Failed to process rule defined in %s' % rule.filename)
                # Fallback if rule processing failed, to ensure report creation and data dump still happen
                findings[rule.key]['checked_items'] = 0
                findings[rule.key]['flagged_items'] = 0
def sort_vpc_flow_logs_callback(self, current_config, path, current_path, flow_log_id, callback_args):
    attached_resource = current_config['ResourceId']
    if attached_resource.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:4], ['vpcs', attached_resource])
        try:
            attached_vpc = get_object_at(self, vpc_path)
        except Exception:
            printDebug('It appears that the flow log %s is attached to a resource '
                       'that was previously deleted (%s).' % (flow_log_id, attached_resource))
            return
        manage_dictionary(attached_vpc, 'flow_logs', [])
        if flow_log_id not in attached_vpc['flow_logs']:
            attached_vpc['flow_logs'].append(flow_log_id)
        for subnet_id in attached_vpc['subnets']:
            manage_dictionary(attached_vpc['subnets'][subnet_id], 'flow_logs', [])
            if flow_log_id not in attached_vpc['subnets'][subnet_id]['flow_logs']:
                attached_vpc['subnets'][subnet_id]['flow_logs'].append(flow_log_id)
    elif attached_resource.startswith('subnet-'):
        subnet_path = combine_paths(current_path[0:4],
                                    ['vpcs', self.subnet_map[attached_resource]['vpc_id'],
                                     'subnets', attached_resource])
        subnet = get_object_at(self, subnet_path)
        manage_dictionary(subnet, 'flow_logs', [])
        if flow_log_id not in subnet['flow_logs']:
            subnet['flow_logs'].append(flow_log_id)
        # TODO this is pre-merge (from Loic) code
        # all_vpcs = get_object_at(self, combine_paths(current_path[0:2], ['vpcs']))
        # for vpc in self.services['vpc']:
        #     if attached_resource in all_vpcs[vpc]['subnets']:
        #         manage_dictionary(all_vpcs[vpc]['subnets'][attached_resource], 'flow_logs', [])
        #         if flow_log_id not in all_vpcs[vpc]['subnets'][attached_resource]['flow_logs']:
        #             all_vpcs[vpc]['subnets'][attached_resource]['flow_logs'].append(flow_log_id)
        #         break
    else:
        printError('Resource %s attached to flow logs is not handled' % attached_resource)
def sort_vpc_flow_logs_callback(vpc_config, current_config, path, current_path, flow_log_id, callback_args):
    attached_resource = current_config['ResourceId']
    if attached_resource.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:2], ['vpcs', attached_resource])
        try:
            attached_vpc = get_object_at(vpc_config, vpc_path)
        except Exception:
            printDebug('It appears that the flow log %s is attached to a resource '
                       'that was previously deleted (%s).' % (flow_log_id, attached_resource))
            return
        manage_dictionary(attached_vpc, 'flow_logs', [])
        if flow_log_id not in attached_vpc['flow_logs']:
            attached_vpc['flow_logs'].append(flow_log_id)
        for subnet_id in attached_vpc['subnets']:
            manage_dictionary(attached_vpc['subnets'][subnet_id], 'flow_logs', [])
            if flow_log_id not in attached_vpc['subnets'][subnet_id]['flow_logs']:
                attached_vpc['subnets'][subnet_id]['flow_logs'].append(flow_log_id)
    elif attached_resource.startswith('subnet-'):
        all_vpcs = get_object_at(vpc_config, combine_paths(current_path[0:2], ['vpcs']))
        for vpc in all_vpcs:
            if attached_resource in all_vpcs[vpc]['subnets']:
                manage_dictionary(all_vpcs[vpc]['subnets'][attached_resource], 'flow_logs', [])
                if flow_log_id not in all_vpcs[vpc]['subnets'][attached_resource]['flow_logs']:
                    all_vpcs[vpc]['subnets'][attached_resource]['flow_logs'].append(flow_log_id)
                break
    else:
        printError('Resource %s attached to flow logs is not handled' % attached_resource)
def get_organization_accounts(api_client, quiet=True):
    # List all accounts in the organization
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(org_accounts))
        printDebug(str(org_accounts))
    return org_accounts
def test_list(self):
    profiles = AWSProfiles.list()
    printDebug(str(profiles))
    assert (set(['l01cd3v-1', 'l01cd3v-2', 'l01cd3v-role1', 'l01cd3v-role2',
                 'l01cd3v-role3', 'l01cd3v-3', 'l01cd3v-4', 'testprofile',
                 'scout2fortravis']) == set(profiles))
    profiles = AWSProfiles.list(names='l01cd3v-role.*')
    printDebug(str(profiles))
    assert (set(['l01cd3v-role1', 'l01cd3v-role2', 'l01cd3v-role3']) == set(profiles))
    profiles = AWSProfiles.list(names='.*1')
    assert (set(['l01cd3v-1', 'l01cd3v-role1']) == set(profiles))
def test_list(self):
    profiles = AWSProfiles.list()
    assert (set([
        'l01cd3v-1', 'l01cd3v-2', 'l01cd3v-role1', 'l01cd3v-role2',
        'l01cd3v-3', 'l01cd3v-4'
    ]) == set(profiles))
    profiles = AWSProfiles.list(names='l01cd3v-role.*')
    printDebug(str(profiles))
    assert (set(['l01cd3v-role1', 'l01cd3v-role2']) == set(profiles))
    profiles = AWSProfiles.list(names='.*1')
    assert (set(['l01cd3v-1', 'l01cd3v-role1']) == set(profiles))
def run(self, aws_config):
    for finding_path in self.rules:
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
            finding_path = rule.path
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(aws_config['services'][service], self.ruleset.rule_type, {})
            # Alias to the findings dictionary to avoid repeated long subscripts
            findings = aws_config['services'][service][self.ruleset.rule_type]
            findings[rule.key] = {}
            findings[rule.key]['description'] = rule.description
            findings[rule.key]['path'] = rule.path
            for attr in ['level', 'id_suffix', 'display_path']:
                if hasattr(rule, attr):
                    findings[rule.key][attr] = getattr(rule, attr)
            try:
                setattr(rule, 'checked_items', 0)
                findings[rule.key]['items'] = recurse(aws_config['services'],
                                                      aws_config['services'], path, [], rule, True)
                findings[rule.key]['dashboard_name'] = rule.dashboard_name
                findings[rule.key]['checked_items'] = rule.checked_items
                findings[rule.key]['flagged_items'] = len(findings[rule.key]['items'])
                findings[rule.key]['service'] = rule.service
                findings[rule.key]['rationale'] = rule.rationale if hasattr(rule, 'rationale') else 'N/A'
            except Exception as e:
                printException(e)
                printError('Failed to process rule defined in %s' % rule.filename)
                # Fallback if rule processing failed, to ensure report creation and data dump still happen
                findings[rule.key]['checked_items'] = 0
                findings[rule.key]['flagged_items'] = 0
def __init__(self, environment_name='default', filename=None, name=None, rules_dir=None,
             rule_type='findings', ip_ranges=None, aws_account_id=None, ruleset_generator=False):
    # Avoid mutable default arguments
    rules_dir = [] if rules_dir is None else rules_dir
    ip_ranges = [] if ip_ranges is None else ip_ranges
    self.rules_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    self.environment_name = environment_name
    self.rule_type = rule_type
    # Ruleset filename
    self.filename = self.find_file(filename)
    if not self.filename:
        self.search_ruleset(environment_name)
    printDebug('Loading ruleset %s' % self.filename)
    self.name = os.path.basename(self.filename).replace('.json', '') if not name else name
    self.load(self.rule_type)
    self.shared_init(ruleset_generator, rules_dir, aws_account_id, ip_ranges)
def wait_for_stack_set(api_client, stack_set_name, timeout=60, increment=5):
    printDebug('Waiting for stack set %s to be ready...' % stack_set_name)
    timer = 0
    while True:
        if timer >= timeout:
            printError('Timed out.')
            break
        printError('Checking the stack set\'s status...')
        time.sleep(increment)
        timer += increment
        info = api_client.describe_stack_set(StackSetName=stack_set_name)
        if info['StackSet']['Status'] == 'ACTIVE':
            break
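# Sketch of the intended flow, assuming a boto3 CloudFormation client and a
# template prepared elsewhere; the stack set name and template path are
# placeholders.
def example_wait_for_stack_set():
    import boto3
    api_client = boto3.client('cloudformation')
    with open('templates/my-stack-set.json', 'rt') as f:  # placeholder path
        template_body = f.read()
    api_client.create_stack_set(StackSetName='my-stack-set', TemplateBody=template_body)
    wait_for_stack_set(api_client, 'my-stack-set', timeout=120)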
def test_ruleset_class(self):
    test001 = Ruleset(filename=self.test_ruleset_001)
    assert ('iam-password-policy-no-expiration.json' in test001.rules)
    assert (type(test001.rules['iam-password-policy-no-expiration.json']) == list)
    # Parenthesized so that the type itself is checked, not the truthiness of a comparison
    assert (type(test001.rules['iam-password-policy-no-expiration.json'][0]) == Rule)
    assert (hasattr(test001.rules['iam-password-policy-no-expiration.json'][0], 'path'))
    for rule in test001.rules:
        printDebug(test001.rules[rule][0].to_string())
    test002 = Ruleset(filename=self.test_ruleset_002)
    for rule in test002.rules:
        printDebug(test002.rules[rule][0].to_string())
    test003 = Ruleset(filename='tests/data/no-such-file.json')
    assert (test003.rules == [])
    test004 = Ruleset(filename='tests/data/invalid-file.json')
    test005 = Ruleset(filename=self.test_ruleset_001, ruleset_generator=True)
def process(self, cloud_provider):
    for service in self.exceptions:
        for rule in self.exceptions[service]:
            filtered_items = []
            if rule not in cloud_provider.services[service]['findings']:
                printDebug('Warning: key error should not be happening')
                continue
            for item in cloud_provider.services[service]['findings'][rule]['items']:
                if item not in self.exceptions[service][rule]:
                    filtered_items.append(item)
            cloud_provider.services[service]['findings'][rule]['items'] = filtered_items
            cloud_provider.services[service]['findings'][rule]['flagged_items'] = \
                len(cloud_provider.services[service]['findings'][rule]['items'])
def process(self, aws_config):
    for service in self.exceptions:
        for rule in self.exceptions[service]:
            filtered_items = []
            if rule not in aws_config['services'][service]['findings']:
                printDebug('Warning: key error should not be happening')
                continue
            for item in aws_config['services'][service]['findings'][rule]['items']:
                if item not in self.exceptions[service][rule]:
                    filtered_items.append(item)
            aws_config['services'][service]['findings'][rule]['items'] = filtered_items
            aws_config['services'][service]['findings'][rule]['flagged_items'] = \
                len(aws_config['services'][service]['findings'][rule]['items'])
def get_organization_accounts(api_client, exceptions=None, quiet=True):
    # Avoid a mutable default argument
    exceptions = [] if exceptions is None else exceptions
    # List all accounts in the organization
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(org_accounts))
        for account in org_accounts:
            printDebug(str(account))
    if len(exceptions):
        filtered_accounts = []
        for account in org_accounts:
            if account['Id'] not in exceptions:
                filtered_accounts.append(account)
        org_accounts = filtered_accounts
    return org_accounts
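# A minimal sketch using a boto3 Organizations client from the management
# account; the excluded account ID is a placeholder.
def example_get_organization_accounts():
    import boto3
    api_client = boto3.client('organizations')
    return get_organization_accounts(api_client,
                                     exceptions=['999999999999'], quiet=False)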
def test_ruleset_class(self):
    test001 = Ruleset(filename=self.test_ruleset_001)
    assert ('iam-password-policy-no-expiration.json' in test001.rules)
    assert (type(test001.rules['iam-password-policy-no-expiration.json']) == list)
    # Parenthesized so that the type itself is checked, not the truthiness of a comparison
    assert (type(test001.rules['iam-password-policy-no-expiration.json'][0]) == Rule)
    assert (hasattr(test001.rules['iam-password-policy-no-expiration.json'][0], 'path'))
    for rule in test001.rules:
        printDebug(test001.rules[rule][0].to_string())
    test002 = Ruleset(filename=self.test_ruleset_002)
    for rule in test002.rules:
        printDebug(test002.rules[rule][0].to_string())
    test003 = Ruleset(filename='tests/data/no-such-file.json')
    assert (test003.rules == [])
    test004 = Ruleset(filename='tests/data/invalid-file.json')
    test005 = Ruleset(filename=self.test_ruleset_001, ruleset_generator=True)
def test_ruleset_class(self):
    test001 = Ruleset(filename=self.test_ruleset_001)
    assert (os.path.isdir(test001.rules_data_path))
    assert (os.path.isfile(test001.filename))
    assert (test001.name == "test-ruleset")
    assert (test001.about == "regression test")
    test_file_key = 'iam-password-policy-no-expiration.json'
    assert (test_file_key in test001.rules)
    assert (type(test001.rules[test_file_key]) == list)
    # Parenthesized so that the type itself is checked, not the truthiness of a comparison
    assert (type(test001.rules[test_file_key][0]) == Rule)
    assert (hasattr(test001.rules[test_file_key][0], 'path'))
    for rule in test001.rules:
        printDebug(test001.rules[rule][0].to_string())
    assert (test_file_key in test001.rule_definitions)
    assert (test001.rule_definitions[test_file_key].description == "Password expiration disabled")
    for rule_def in test001.rule_definitions:
        printDebug(str(test001.rule_definitions[rule_def]))
    test002 = Ruleset(filename=self.test_ruleset_002)
    for rule in test002.rules:
        printDebug(test002.rules[rule][0].to_string())
    test005 = Ruleset(filename=self.test_ruleset_001, ruleset_generator=True)
def test_ruleset_class(self, printError):
    # printError is expected to be a mock injected by a patch decorator on this test
    test001 = Ruleset(filename=self.test_ruleset_001)
    assert (os.path.isdir(test001.rules_data_path))
    assert (os.path.isfile(test001.filename))
    assert (test001.name == "test-ruleset")
    assert (test001.about == "regression test")
    test_file_key = 'iam-password-policy-no-expiration.json'
    assert (test_file_key in test001.rules)
    assert (type(test001.rules[test_file_key]) == list)
    # Parenthesized so that the type itself is checked, not the truthiness of a comparison
    assert (type(test001.rules[test_file_key][0]) == Rule)
    assert (hasattr(test001.rules[test_file_key][0], 'path'))
    for rule in test001.rules:
        printDebug(test001.rules[rule][0].to_string())
    assert (test_file_key in test001.rule_definitions)
    assert (test001.rule_definitions[test_file_key].description == "Password expiration disabled")
    for rule_def in test001.rule_definitions:
        printDebug(str(test001.rule_definitions[rule_def]))
    assert (printError.call_count == 0)
    test002 = Ruleset(filename=self.test_ruleset_002)
    for rule in test002.rules:
        printDebug(test002.rules[rule][0].to_string())
    assert (printError.call_count == 1)  # is this expected ??
    assert ("test-ruleset-absolute-path.json does not exist."
            in printError.call_args_list[0][0][0])
    test005 = Ruleset(filename=self.test_ruleset_001, ruleset_generator=True)
def run(self, aws_config, skip_dashboard=False):
    # Clean up existing findings
    for service in aws_config['services']:
        aws_config['services'][service][self.ruleset.rule_type] = {}
    # Process each rule
    for finding_path in self.rules:
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
            finding_path = rule.path
            path = finding_path.split('.')
            service = path[0]
            manage_dictionary(aws_config['services'][service], self.ruleset.rule_type, {})
            # Alias to the findings dictionary to avoid repeated long subscripts
            findings = aws_config['services'][service][self.ruleset.rule_type]
            findings[rule.key] = {}
            findings[rule.key]['description'] = rule.description
            findings[rule.key]['path'] = rule.path
            for attr in ['level', 'id_suffix', 'display_path']:
                if hasattr(rule, attr):
                    findings[rule.key][attr] = getattr(rule, attr)
            try:
                setattr(rule, 'checked_items', 0)
                findings[rule.key]['items'] = recurse(aws_config['services'],
                                                      aws_config['services'], path, [], rule, True)
                if skip_dashboard:
                    continue
                findings[rule.key]['dashboard_name'] = rule.dashboard_name
                findings[rule.key]['checked_items'] = rule.checked_items
                findings[rule.key]['flagged_items'] = len(findings[rule.key]['items'])
                findings[rule.key]['service'] = rule.service
                findings[rule.key]['rationale'] = rule.rationale if hasattr(rule, 'rationale') else 'N/A'
            except Exception as e:
                printException(e)
                printError('Failed to process rule defined in %s' % rule.filename)
                # Fallback if rule processing failed, to ensure report creation and data dump still happen
                findings[rule.key]['checked_items'] = 0
                findings[rule.key]['flagged_items'] = 0
def __init__(self, environment_name='default', filename=None, name=None, rules_dir=None,
             rule_type='findings', ip_ranges=None, aws_account_id=None, ruleset_generator=False):
    # Avoid mutable default arguments
    rules_dir = [] if rules_dir is None else rules_dir
    ip_ranges = [] if ip_ranges is None else ip_ranges
    self.rules_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    self.environment_name = environment_name
    self.rule_type = rule_type
    # Ruleset filename
    self.filename = self.find_file(filename)
    if not self.filename:
        self.search_ruleset(environment_name)
    printDebug('Loading ruleset %s' % self.filename)
    self.name = os.path.basename(self.filename).replace('.json', '') if not name else name
    self.load(self.rule_type)
    self.shared_init(ruleset_generator, rules_dir, aws_account_id, ip_ranges)
def delete_stack_set(api_client, stack_set_name, timeout=60 * 5):
    """
    Delete a stack set, first deleting any of its stack instances.
    """
    printDebug('Deleting stack set %s' % stack_set_name)
    # Check for instances
    stack_instances = handle_truncated_response(
        api_client.list_stack_instances,
        {'StackSetName': stack_set_name},
        ['Summaries'])['Summaries']
    account_ids = []
    regions = []
    if len(stack_instances) > 0:
        for si in stack_instances:
            if si['Account'] not in account_ids:
                account_ids.append(si['Account'])
            if si['Region'] not in regions:
                regions.append(si['Region'])
        operation_id = api_client.delete_stack_instances(
            StackSetName=stack_set_name,
            Accounts=account_ids,
            Regions=regions,
            RetainStacks=False)['OperationId']
        wait_for_operation(api_client, stack_set_name, operation_id)
    api_client.delete_stack_set(StackSetName=stack_set_name)
def prepare_cloudformation_params(stack_name, template_path, template_parameters,
                                  resource_type, tags=None, need_on_failure=False):
    """
    Prepare the parameters dictionary passed to CloudFormation API calls.

    :param stack_name:          Name of the stack or stack set
    :param template_path:       Path to the CloudFormation template file
    :param template_parameters: Flat list of parameter keys and values, in alternating order
    :param resource_type:       Resource type (e.g. 'Stack' or 'StackSet'), used to build the name key
    :param tags:                Optional list of tags to apply
    :param need_on_failure:     Whether to set OnFailure to ROLLBACK

    :return:                    Dictionary of parameters for the CloudFormation API call
    """
    # Avoid a mutable default argument
    tags = [] if tags is None else tags
    printDebug('Reading CloudFormation template from %s' % template_path)
    template_body = read_file(template_path)
    params = {}
    params['%sName' % resource_type] = stack_name
    params['TemplateBody'] = template_body
    if len(template_parameters):
        params['Parameters'] = []
        it = iter(template_parameters)
        for param in it:
            printError('Param:: %s' % param)
            params['Parameters'].append({
                'ParameterKey': param,
                'ParameterValue': next(it)
            })
    if len(tags):
        params['Tags'] = tags
    if re_iam_capability.match(template_body):
        params['Capabilities'] = ['CAPABILITY_NAMED_IAM']
    if need_on_failure:
        params['OnFailure'] = 'ROLLBACK'
    return params
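# Worked example (sketch): template_parameters is consumed pairwise, so
# ['KeyA', 'valueA', 'KeyB', 'valueB'] yields two ParameterKey/ParameterValue
# entries. All names and paths below are placeholders.
def example_prepare_cloudformation_params():
    return prepare_cloudformation_params(
        stack_name='my-stack',
        template_path='templates/my-template.json',
        template_parameters=['AdministratorAccountId', '111111111111',
                             'Env', 'prod'],
        resource_type='Stack',
        tags=[{'Key': 'owner', 'Value': 'security'}])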
def __init__(self, environment_name='default', filename=None, name=None, services=None,
             rule_type='findings', rules_dir=None, ip_ranges=None, aws_account_id=None,
             ruleset_generator=False):
    # Avoid mutable default arguments
    services = [] if services is None else services
    ip_ranges = [] if ip_ranges is None else ip_ranges
    self.rules_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    self.environment_name = environment_name
    self.rule_type = rule_type
    # Ruleset filename
    self.filename = self.find_file(filename)
    if not self.filename:
        self.search_ruleset(environment_name)
    printDebug('Loading ruleset %s' % self.filename)
    self.name = os.path.basename(self.filename).replace('.json', '') if not name else name
    # Load ruleset
    self.load(self.rule_type)
    # Load rule definitions
    self.load_rule_definitions(ruleset_generator)
    # Prepare the rules
    params = {}
    params['aws_account_id'] = aws_account_id
    if ruleset_generator:
        self.prepare_rules(attributes=['description', 'key', 'rationale'], params=params)
    else:
        self.prepare_rules(ip_ranges=ip_ranges, params=params)
def wait_for_operation(api_client, stack_set_name, operation_id, timeout=5 * 60, increment=5):
    printDebug('Waiting for operation %s on stack set %s...' % (operation_id, stack_set_name))
    timer = 0
    status = ''
    while True:
        if timer >= timeout:
            printError('Timed out.')
            break
        info = api_client.describe_stack_set_operation(
            StackSetName=stack_set_name, OperationId=operation_id)
        status = info['StackSetOperation']['Status']
        if status not in ['RUNNING', 'STOPPING']:
            break
        printError('Operation status is \'%s\'... waiting %d seconds until next check...'
                   % (status, increment))
        time.sleep(increment)
        timer += increment
    return 'Operation %s is %s' % (operation_id, status)
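# Typical flow (sketch): start an operation, then block until it settles. The
# stack set name, account ID, and region are placeholders; boto3 credentials
# are assumed to be configured.
def example_wait_for_operation():
    import boto3
    api_client = boto3.client('cloudformation')
    operation_id = create_stack_instances(api_client, 'my-stack-set',
                                          ['111111111111'], ['us-east-1'])
    return wait_for_operation(api_client, 'my-stack-set', operation_id)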
def sort_vpc_flow_logs_callback(aws_config, current_config, path, current_path, flow_log_id, callback_args):
    attached_resource = current_config['ResourceId']
    if attached_resource.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:4], ['vpcs', attached_resource])
        try:
            attached_vpc = get_object_at(aws_config, vpc_path)
        except Exception:
            printDebug('It appears that the flow log %s is attached to a resource '
                       'that was previously deleted (%s).' % (flow_log_id, attached_resource))
            return
        manage_dictionary(attached_vpc, 'flow_logs', [])
        if flow_log_id not in attached_vpc['flow_logs']:
            attached_vpc['flow_logs'].append(flow_log_id)
        for subnet_id in attached_vpc['subnets']:
            manage_dictionary(attached_vpc['subnets'][subnet_id], 'flow_logs', [])
            if flow_log_id not in attached_vpc['subnets'][subnet_id]['flow_logs']:
                attached_vpc['subnets'][subnet_id]['flow_logs'].append(flow_log_id)
    elif attached_resource.startswith('subnet-'):
        # subnet_map is assumed to be a subnet-to-VPC lookup built elsewhere in this module
        subnet_path = combine_paths(current_path[0:4],
                                    ['vpcs', subnet_map[attached_resource]['vpc_id'],
                                     'subnets', attached_resource])
        subnet = get_object_at(aws_config, subnet_path)
        manage_dictionary(subnet, 'flow_logs', [])
        if flow_log_id not in subnet['flow_logs']:
            subnet['flow_logs'].append(flow_log_id)
    else:
        printError('Resource %s attached to flow logs is not handled' % attached_resource)
def test_list(self):
    profiles = sorted(set(AWSProfiles.list()))
    printDebug(str(profiles))
    testprofiles = sorted(
        set([
            'l01cd3v-1', 'l01cd3v-2', 'l01cd3v-role1', 'l01cd3v-role2',
            'l01cd3v-role3', 'l01cd3v-role4', 'l01cd3v-3', 'l01cd3v-4',
            'testprofile', 'scout2fortravis', 'scout2fortraviswithexternalid'
        ]))
    printDebug(str(testprofiles))
    assert (testprofiles == profiles)
    profiles = AWSProfiles.list(names='l01cd3v-role.*')
    printDebug(str(profiles))
    assert (set([
        'l01cd3v-role1', 'l01cd3v-role2', 'l01cd3v-role3', 'l01cd3v-role4'
    ]) == set(profiles))
    profiles = AWSProfiles.list(names='.*1')
    assert (set(['l01cd3v-1', 'l01cd3v-role1']) == set(profiles))
def main():
    # Parse arguments
    parser = Scout2ArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Set the profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    if not args.fetch_local:
        credentials = read_creds(args.profile[0], args.csv_credentials, args.mfa_serial, args.mfa_code)
        if credentials['AccessKeyId'] is None:
            return 42

    # Create a new Scout2 config
    report = Scout2Report(profile_name, args.report_dir, args.timestamp)
    aws_config = Scout2Config(profile_name, args.report_dir, args.timestamp,
                              args.services, args.skipped_services, args.thread_config)

    if not args.fetch_local:
        # Fetch data from AWS APIs if not running a local analysis
        try:
            aws_config.fetch(credentials, regions=args.regions,
                             partition_name=get_partition_name(credentials))
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130
        aws_config = report.jsrw.to_dict(aws_config)
        # Set the account ID
        aws_config['aws_account_id'] = get_aws_account_id(credentials)

    # Update means we reload the whole config and overwrite part of it
    if args.update:
        new_aws_config = copy.deepcopy(aws_config)
        aws_config = report.jsrw.load_from_file(AWSCONFIG)
        for service in new_aws_config['service_list']:
            # Per service only for now, may add per region & per VPC later...
            aws_config['services'][service] = new_aws_config['services'][service]
        # Update the metadata too
        aws_config['metadata'] = Scout2Config('default', None, None, [], []).metadata
    else:
        # Reload to flatten everything into a python dictionary
        aws_config = report.jsrw.load_from_file(AWSCONFIG)

    # Pre processing
    preprocessing(aws_config, args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    finding_rules = Ruleset(profile_name, filename=args.ruleset,
                            ip_ranges=args.ip_ranges,
                            aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(finding_rules)
    pe.run(aws_config)

    # Create display filters
    filter_rules = Ruleset(filename='filters.json', rule_type='filters',
                           aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(filter_rules)
    pe.run(aws_config)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(profile_name, args.exceptions[0])
        exceptions.process(aws_config)
        exceptions = exceptions.exceptions
    except Exception:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    postprocessing(aws_config, report.current_time, finding_rules)

    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(profile_name)[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(
                os.path.expanduser('~/.aws/recipes/%s/organization.json'
                                   % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    aws_config['organization'] = org
    except Exception:
        pass

    # Save config and create HTML report
    html_report_path = report.save(aws_config, exceptions, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    return 0
def main():
    # Parse arguments
    parser = Scout2ArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Set the profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    if not args.fetch_local:
        credentials = read_creds(args.profile[0], args.csv_credentials, args.mfa_serial, args.mfa_code)
        if credentials['AccessKeyId'] is None:
            return 42

    # Create a new Scout2 config
    report = Scout2Report(profile_name, args.report_dir, args.timestamp)
    aws_config = Scout2Config(profile_name, args.report_dir, args.timestamp,
                              args.services, args.skipped_services, args.thread_config)

    if not args.fetch_local:
        # Fetch data from AWS APIs if not running a local analysis
        try:
            aws_config.fetch(credentials, regions=args.regions,
                             partition_name=get_partition_name(credentials))
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130
        aws_config = report.jsrw.to_dict(aws_config)
        # Set the account ID
        aws_config['aws_account_id'] = get_aws_account_id(credentials)

    # Update means we reload the whole config and overwrite part of it
    if args.update:
        new_aws_config = copy.deepcopy(aws_config)
        aws_config = report.jsrw.load_from_file(AWSCONFIG)
        for service in new_aws_config['service_list']:
            # Per service only for now, may add per region & per VPC later...
            aws_config['services'][service] = new_aws_config['services'][service]
        # Update the metadata too
        aws_config['metadata'] = Scout2Config('default', None, None, [], []).metadata
    else:
        # Reload to flatten everything into a python dictionary
        aws_config = report.jsrw.load_from_file(AWSCONFIG)

    # Pre processing
    preprocessing(aws_config, args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    finding_rules = Ruleset(profile_name, filename=args.ruleset,
                            ip_ranges=args.ip_ranges,
                            aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(finding_rules)
    pe.run(aws_config)

    # Create display filters
    filter_rules = Ruleset(filename='filters.json', rule_type='filters',
                           aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(filter_rules)
    pe.run(aws_config)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(profile_name, args.exceptions[0])
        exceptions.process(aws_config)
        exceptions = exceptions.exceptions
    except Exception:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    postprocessing(aws_config, report.current_time, finding_rules)

    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(profile_name)[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(
                os.path.expanduser('~/.aws/recipes/%s/organization.json'
                                   % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    aws_config['organization'] = org
    except Exception:
        pass

    if args.json:
        printInfo('Writing to results.json')
        with open('results.json', 'w') as fp:
            json.dump(aws_config, fp, default=json_helper)
        sys.exit()

    # Save config and create HTML report
    html_report_path = report.save(aws_config, exceptions, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)
def main(passed_args=None):
    """
    Main method that runs a scan

    :return:
    """
    # FIXME check that all requirements are installed
    # # Check version of opinel
    # requirements_file_path = '%s/requirements.txt' % os.path.dirname(sys.modules['__main__'].__file__)
    # if not check_requirements(requirements_file_path):
    #     return 42

    # Parse arguments
    parser = ScoutSuiteArgumentParser()
    if passed_args:
        args = parser.parse_args(passed_args)
    else:
        args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Create a cloud provider object
    cloud_provider = get_provider(provider=args.provider,
                                  profile=args.profile[0],
                                  project_id=args.project_id,
                                  folder_id=args.folder_id,
                                  organization_id=args.organization_id,
                                  report_dir=args.report_dir,
                                  timestamp=args.timestamp,
                                  services=args.services,
                                  skipped_services=args.skipped_services,
                                  thread_config=args.thread_config)

    if cloud_provider.provider_code == 'aws':
        if args.profile:
            report_file_name = 'aws-%s' % args.profile[0]
        else:
            report_file_name = 'aws'
    if cloud_provider.provider_code == 'gcp':
        if args.project_id:
            report_file_name = 'gcp-%s' % args.project_id
        elif args.organization_id:
            report_file_name = 'gcp-%s' % args.organization_id
        elif args.folder_id:
            report_file_name = 'gcp-%s' % args.folder_id
        else:
            report_file_name = 'gcp'
    if cloud_provider.provider_code == 'azure':
        report_file_name = 'azure'

    # Create a new report
    report = Scout2Report(args.provider, report_file_name, args.report_dir, args.timestamp)

    # Complete run, including pulling data from provider
    if not args.fetch_local:
        # Authenticate to the cloud provider
        authenticated = cloud_provider.authenticate(profile=args.profile[0],
                                                    csv_credentials=args.csv_credentials,
                                                    mfa_serial=args.mfa_serial,
                                                    mfa_code=args.mfa_code,
                                                    key_file=args.key_file,
                                                    user_account=args.user_account,
                                                    service_account=args.service_account,
                                                    azure_cli=args.azure_cli,
                                                    azure_msi=args.azure_msi,
                                                    azure_service_principal=args.azure_service_principal,
                                                    azure_file_auth=args.azure_file_auth,
                                                    azure_user_credentials=args.azure_user_credentials)
        if not authenticated:
            return 42

        # Fetch data from provider APIs
        try:
            cloud_provider.fetch(regions=args.regions)
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130

        # Update means we reload the whole config and overwrite part of it
        if args.update:
            current_run_services = copy.deepcopy(cloud_provider.services)
            last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
            cloud_provider.services = last_run_dict['services']
            for service in cloud_provider.service_list:
                cloud_provider.services[service] = current_run_services[service]

    # Partial run, using pre-pulled data
    else:
        # Reload to flatten everything into a python dictionary
        last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
        for key in last_run_dict:
            setattr(cloud_provider, key, last_run_dict[key])

    # Pre processing
    cloud_provider.preprocessing(args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    finding_rules = Ruleset(environment_name=args.profile[0],
                            cloud_provider=args.provider,
                            filename=args.ruleset,
                            ip_ranges=args.ip_ranges,
                            aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(finding_rules)
    processing_engine.run(cloud_provider)

    # Create display filters
    filter_rules = Ruleset(cloud_provider=args.provider,
                           filename='filters.json',
                           rule_type='filters',
                           aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(filter_rules)
    processing_engine.run(cloud_provider)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(args.profile[0], args.exceptions[0])
        exceptions.process(cloud_provider)
        exceptions = exceptions.exceptions
    except Exception:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    cloud_provider.postprocessing(report.current_time, finding_rules)

    # TODO this is AWS-specific - move to postprocessing?
    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(args.profile[0])[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(
                os.path.expanduser('~/.aws/recipes/%s/organization.json'
                                   % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    setattr(cloud_provider, 'organization', org)
    except Exception:
        pass

    # Save config and create HTML report
    html_report_path = report.save(cloud_provider, exceptions, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    return 0
def main(args=None):
    """
    Main method that runs a scan

    :return:
    """
    if not args:
        parser = ScoutSuiteArgumentParser()
        args = parser.parse_args()

    # Get the dictionary, so that missing arguments yield None instead of a crash
    args = args.__dict__

    # Configure the debug level
    configPrintException(args.get('debug'))

    # Create a cloud provider object
    cloud_provider = get_provider(provider=args.get('provider'),
                                  profile=args.get('profile'),
                                  project_id=args.get('project_id'),
                                  folder_id=args.get('folder_id'),
                                  organization_id=args.get('organization_id'),
                                  all_projects=args.get('all_projects'),
                                  report_dir=args.get('report_dir'),
                                  timestamp=args.get('timestamp'),
                                  services=args.get('services'),
                                  skipped_services=args.get('skipped_services'),
                                  thread_config=args.get('thread_config'))

    report_file_name = generate_report_name(cloud_provider.provider_code, args)

    # TODO move this to after authentication, so that the report can be more specific to what's being scanned.
    # For example if scanning with a GCP service account, the SA email can only be known after authenticating...
    # Create a new report
    report = Scout2Report(args.get('provider'), report_file_name,
                          args.get('report_dir'), args.get('timestamp'))

    # Complete run, including pulling data from provider
    if not args.get('fetch_local'):
        # Authenticate to the cloud provider
        authenticated = cloud_provider.authenticate(profile=args.get('profile'),
                                                    csv_credentials=args.get('csv_credentials'),
                                                    mfa_serial=args.get('mfa_serial'),
                                                    mfa_code=args.get('mfa_code'),
                                                    user_account=args.get('user_account'),
                                                    service_account=args.get('service_account'),
                                                    cli=args.get('cli'),
                                                    msi=args.get('msi'),
                                                    service_principal=args.get('service_principal'),
                                                    file_auth=args.get('file_auth'),
                                                    tenant_id=args.get('tenant_id'),
                                                    subscription_id=args.get('subscription_id'),
                                                    client_id=args.get('client_id'),
                                                    client_secret=args.get('client_secret'),
                                                    username=args.get('username'),
                                                    password=args.get('password'))
        if not authenticated:
            return 42

        # Fetch data from provider APIs
        try:
            cloud_provider.fetch(regions=args.get('regions'))
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130

        # Update means we reload the whole config and overwrite part of it
        if args.get('update'):
            current_run_services = copy.deepcopy(cloud_provider.services)
            last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
            cloud_provider.services = last_run_dict['services']
            for service in cloud_provider.service_list:
                cloud_provider.services[service] = current_run_services[service]

    # Partial run, using pre-pulled data
    else:
        # Reload to flatten everything into a python dictionary
        last_run_dict = report.jsrw.load_from_file(AWSCONFIG)
        for key in last_run_dict:
            setattr(cloud_provider, key, last_run_dict[key])

    # Pre processing
    cloud_provider.preprocessing(args.get('ip_ranges'), args.get('ip_ranges_name_key'))

    # Analyze config
    finding_rules = Ruleset(environment_name=args.get('profile'),
                            cloud_provider=args.get('provider'),
                            filename=args.get('ruleset'),
                            ip_ranges=args.get('ip_ranges'),
                            aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(finding_rules)
    processing_engine.run(cloud_provider)

    # Create display filters
    filter_rules = Ruleset(cloud_provider=args.get('provider'),
                           filename='filters.json',
                           rule_type='filters',
                           aws_account_id=cloud_provider.aws_account_id)
    processing_engine = ProcessingEngine(filter_rules)
    processing_engine.run(cloud_provider)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(args.get('profile'), args.get('exceptions')[0])
        exceptions.process(cloud_provider)
        exceptions = exceptions.exceptions
    except Exception:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    cloud_provider.postprocessing(report.current_time, finding_rules)

    # TODO this is AWS-specific - move to postprocessing?
    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(args.get('profile'))[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(
                os.path.expanduser('~/.aws/recipes/%s/organization.json'
                                   % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    setattr(cloud_provider, 'organization', org)
    except Exception:
        pass

    # Save config and create HTML report
    html_report_path = report.save(cloud_provider, exceptions,
                                   args.get('force_write'), args.get('debug'))

    # Open the report by default
    if not args.get('no_browser'):
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    return 0