def resource_older_than_template(resource_type, resource, template_filename):
    resource_modification_time = 0
    # Check if there is a need for an update
    if resource_type == 'stack':
        resource_modification_time = resource['LastUpdatedTime'] if 'LastUpdatedTime' in resource else resource['CreationTime']
        print('Resource time: %s' % resource_modification_time)
    else:
        printInfo(json.dumps(resource, indent=4))
        for tag in resource['Tags']:
            if tag['Key'] == 'LastUpdatedTime':
                resource_modification_time = datetime.datetime.utcfromtimestamp(float(tag['Value'])).replace(tzinfo=pytz.utc)
                break
        print('Resource time: %s' % resource_modification_time)
    template_modification_time = get_template_modification_time(template_filename)
    print('Template time: %s' % template_modification_time)
    if resource_modification_time == 0 or resource_modification_time < template_modification_time:
        print('The template for %s was modified after the %s was last updated.' % (resource['%sName' % snake_to_camel(resource_type)], snake_to_words(resource_type)))
        return True
    else:
        return False
def get_value_at(all_info, current_path, key, to_string=False):
    """
    Get value located at a given path

    :param all_info:
    :param current_path:
    :param key:
    :param to_string:
    :return:
    """
    keys = key.split('.')
    if keys[-1] == 'id':
        target_obj = current_path[len(keys) - 1]
    else:
        if key == 'this':
            target_path = current_path
        elif '.' in key:
            target_path = []
            for i, key in enumerate(keys):
                if key == 'id':
                    target_path.append(current_path[i])
                else:
                    target_path.append(key)
            if len(keys) > len(current_path):
                target_path = target_path + keys[len(target_path):]
        else:
            target_path = copy.deepcopy(current_path)
            target_path.append(key)
        target_obj = all_info
        for p in target_path:
            try:
                if type(target_obj) == list and type(target_obj[0]) == dict:
                    target_obj = target_obj[int(p)]
                elif type(target_obj) == list:
                    target_obj = p
                elif p == '':
                    target_obj = target_obj
                else:
                    try:
                        target_obj = target_obj[p]
                    except Exception as e:
                        printInfo('Info: %s\nPath: %s\nKey: %s' % (str(all_info), str(current_path), str(key)))
                        printException(e)
                        raise Exception
            except Exception as e:
                printInfo('Info: %s\nPath: %s\nKey: %s' % (str(all_info), str(current_path), str(key)))
                printException(e)
                raise Exception
    if to_string:
        return str(target_obj)
    else:
        return target_obj
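# Usage sketch (added for illustration, not part of the original module): how
# get_value_at resolves keys against a nested configuration. The sample data,
# path, and keys below are made up.
def _example_get_value_at():
    all_info = {'services': {'ec2': {'regions': {'us-east-1': {'instances_count': 3}}}}}
    current_path = ['services', 'ec2', 'regions', 'us-east-1']
    # 'this' returns the object located at the current path
    region_config = get_value_at(all_info, current_path, 'this')
    # A plain key is appended to the current path before resolution
    count = get_value_at(all_info, current_path, 'instances_count')
    print(region_config, count)  # {'instances_count': 3} 3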
def html_generator(self, output_dir, metadata, force_write, debug):
    """
    :param output_dir:
    :param metadata:
    :param force_write:
    :param debug:
    :return:
    """
    # Prepare the output directories
    prepare_html_output_dir(output_dir)
    # Create the JS include file
    printInfo('Preparing the HTML ruleset generator...')
    js_ruleset = {}
    js_ruleset['name'] = self.name
    js_ruleset['available_rules'] = self.available_rules
    js_ruleset['services'] = list(sorted(set(self.services)))
    js_ruleset['ruleset_generator_metadata'] = metadata
    save_config_to_file(self.environment_name, js_ruleset, 'ruleset', output_dir, force_write, debug)
    # Create the HTML generator
    html_generator = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../rules/data/ruleset-generator.html')
    dst_html_generator = os.path.join(output_dir, 'ruleset-generator.html')
    shutil.copyfile(html_generator, dst_html_generator)
    return dst_html_generator
def match_instances_and_roles(aws_config):
    """
    Match EC2 instances with the IAM roles they can assume via their instance profiles

    :param aws_config:
    """
    printInfo('Matching EC2 instances and IAM roles...')
    ec2_config = aws_config['services']['ec2']
    iam_config = aws_config['services']['iam']
    role_instances = {}
    for r in ec2_config['regions']:
        for v in ec2_config['regions'][r]['vpcs']:
            if 'instances' in ec2_config['regions'][r]['vpcs'][v]:
                for i in ec2_config['regions'][r]['vpcs'][v]['instances']:
                    instance_profile = ec2_config['regions'][r]['vpcs'][v]['instances'][i]['IamInstanceProfile']
                    instance_profile_id = instance_profile['Id'] if instance_profile else None
                    if instance_profile_id:
                        manage_dictionary(role_instances, instance_profile_id, [])
                        role_instances[instance_profile_id].append(i)
    for role_id in iam_config['roles']:
        iam_config['roles'][role_id]['instances_count'] = 0
        for instance_profile_id in iam_config['roles'][role_id]['instance_profiles']:
            if instance_profile_id in role_instances:
                iam_config['roles'][role_id]['instance_profiles'][instance_profile_id]['instances'] = role_instances[instance_profile_id]
                iam_config['roles'][role_id]['instances_count'] += len(role_instances[instance_profile_id])
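# Hedged sketch of the minimal aws_config shape match_instances_and_roles expects;
# every identifier below (instance ID, profile ID, role ID) is illustrative.
def _example_match_instances_and_roles():
    aws_config = {'services': {
        'ec2': {'regions': {'us-east-1': {'vpcs': {'vpc-123': {'instances': {
            'i-abc': {'IamInstanceProfile': {'Id': 'AIPAEXAMPLE'}}}}}}}},
        'iam': {'roles': {'AROAEXAMPLE': {'instance_profiles': {'AIPAEXAMPLE': {}}}}}}}
    match_instances_and_roles(aws_config)
    # The role is now linked to the instance that carries its instance profile
    print(aws_config['services']['iam']['roles']['AROAEXAMPLE']['instances_count'])  # 1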
def _process_cloudtrail_trails(self, cloudtrail_config):
    printInfo('Processing CloudTrail config...')
    global_events_logging = []
    data_logging_trails_count = 0
    for region in cloudtrail_config['regions']:
        for trail_id in cloudtrail_config['regions'][region]['trails']:
            trail = cloudtrail_config['regions'][region]['trails'][trail_id]
            if 'HomeRegion' in trail and trail['HomeRegion'] != region:
                # Part of a multi-region trail, skip until we find the whole object
                continue
            if trail['IncludeGlobalServiceEvents'] == True and trail['IsLogging'] == True:
                global_events_logging.append((region, trail_id,))
            # Any wildcard logging?
            if trail.get('wildcard_data_logging', False):
                data_logging_trails_count += 1
    cloudtrail_config['data_logging_trails_count'] = data_logging_trails_count
    cloudtrail_config['IncludeGlobalServiceEvents'] = False if (len(global_events_logging) == 0) else True
    cloudtrail_config['DuplicatedGlobalServiceEvents'] = True if (len(global_events_logging) > 1) else False
def cloudformation_wait(api_client, resource_type, resource_name, operation_id=None, timeout=5 * 60, increment=5):
    if resource_type == 'stack':
        callback = api_client.describe_stacks
        params = {'StackName': resource_name}
    elif resource_type == 'stack_set':
        params = {'StackSetName': resource_name}
        if operation_id:
            callback = api_client.describe_stack_set_operation
            params['OperationId'] = operation_id
            resource_type = 'operation'
        else:
            callback = api_client.describe_stack_set
    else:
        printError('Unknown resource type: %s' % resource_type)
        return
    timer = 0
    while True:
        if timer >= timeout:
            printError('Timed out.')
            break
        rc, status = still_running(callback, params, resource_type)
        if rc == False:
            printInfo('Status: %s' % status)
            break
        printInfo('Status: %s... waiting %d seconds until next check...' % (status, increment))
        timer += increment
        time.sleep(increment)
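# Usage sketch (illustrative, assumes configured AWS credentials and an existing
# stack named 'my-stack'): polling a stack via a boto3 CloudFormation client.
def _example_cloudformation_wait():
    import boto3
    api_client = boto3.client('cloudformation', region_name='us-east-1')
    cloudformation_wait(api_client, 'stack', 'my-stack', timeout=10 * 60, increment=10)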
def create_html_report(self, force_write):
    contents = ''
    # Use all scripts under html/partials/
    contents += self.get_content_from('partials')
    contents += self.get_content_from('partials/%s' % self.provider)
    # Use all scripts under html/summaries/
    contents += self.get_content_from('summaries')
    contents += self.get_content_from('summaries/%s' % self.provider)
    new_file, first_line = get_filename(HTMLREPORT, self.profile, self.report_dir)
    printInfo('Creating %s ...' % new_file)
    if prompt_4_overwrite(new_file, force_write):
        if os.path.exists(new_file):
            os.remove(new_file)
        with open(os.path.join(self.html_data_path, self.html_root)) as f:
            with open(new_file, 'wt') as nf:
                for line in f:
                    newline = line
                    if self.profile != 'default':
                        newline = newline.replace(AWSCONFIG_FILE, AWSCONFIG_FILE.replace('.js', '-%s.js' % self.profile))
                        newline = newline.replace(EXCEPTIONS_FILE, EXCEPTIONS_FILE.replace('.js', '-%s.js' % self.profile))
                    newline = newline.replace('<!-- PLACEHOLDER -->', contents)
                    nf.write(newline)
    return new_file
def link_elastic_ips_callback2(ec2_config, current_config, path, current_path, instance_id, callback_args):
    if instance_id == callback_args['instance_id']:
        if 'PublicIpAddress' not in current_config:
            current_config['PublicIpAddress'] = callback_args['elastic_ip']
        elif current_config['PublicIpAddress'] != callback_args['elastic_ip']:
            printInfo('Warning: public IP address exists (%s) for an instance associated with an elastic IP (%s)' %
                      (current_config['PublicIpAddress'], callback_args['elastic_ip']))
def create_or_update_stack(api_client, stack_name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion=False):
    """
    :param api_client:
    :param stack_name:
    :param template_path:
    :param template_parameters: List of parameter keys and values
    :param quiet:
    :return:
    """
    try:
        stack = api_client.describe_stacks(StackName=stack_name)
        printInfo('Stack already exists... ', newLine=False)
        stack_id = update_stack(api_client, stack_name, template_path, template_parameters, quiet, wait_for_completion)
    except Exception as e:
        if hasattr(e, 'response') and type(e.response) == dict and 'Error' in e.response and e.response['Error']['Code'] == 'ValidationError':
            stack_id = create_stack(api_client, stack_name, template_path, template_parameters, tags, quiet, wait_for_completion)
        else:
            stack_id = None
            printException(e)
    return stack_id
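# Usage sketch (illustrative): create the stack if it does not exist, update it
# otherwise. The stack name and template path are assumptions.
def _example_create_or_update_stack():
    import boto3
    api_client = boto3.client('cloudformation', region_name='us-east-1')
    stack_id = create_or_update_stack(api_client, 'my-stack', './templates/my-stack.json',
                                      wait_for_completion=True)
    print(stack_id)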
def get_stackset_ready_accounts(credentials, account_ids, quiet=True):
    """
    Verify which AWS accounts have been configured for CloudFormation stack set
    by attempting to assume the stack set execution role

    :param credentials: AWS credentials to use when calling sts:assumerole
    :param account_ids: List of AWS accounts to check for stack set configuration
    :return: List of account IDs in which assuming the stack set execution role worked
    """
    api_client = connect_service('sts', credentials, silent=True)
    configured_account_ids = []
    for account_id in account_ids:
        try:
            role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
            api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts')
            configured_account_ids.append(account_id)
        except Exception as e:
            pass
    if len(configured_account_ids) != len(account_ids) and not quiet:
        printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids))
        printDebug(str(configured_account_ids))
    return configured_account_ids
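# Usage sketch (illustrative): checking two hypothetical member accounts. read_creds
# comes from the same package and loads credentials for the named profile.
def _example_get_stackset_ready_accounts():
    credentials = read_creds('default')
    ready = get_stackset_ready_accounts(credentials, ['111111111111', '222222222222'], quiet=False)
    print(ready)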
def main():
    # Parse arguments
    parser = RulesArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Load ruleset
    ruleset = Ruleset(filename=args.base_ruleset, name=args.ruleset_name, load_rules=False, rules_dir=args.rules_dir)

    # Generate the HTML generator
    ruleset_generator = RulesetGenerator(args.ruleset_name, args.generator_dir)
    ruleset.ruleset_generator_metadata = Scout2Config('default', None, None, [], []).metadata
    ruleset_generator_path = ruleset_generator.save(ruleset, args.force_write, args.debug)

    # Open the HTML ruleset generator in a browser
    printInfo('Starting the HTML ruleset generator...')
    url = 'file://%s' % os.path.abspath(ruleset_generator_path)
    webbrowser.open(url, new=2)
def create_stack_instances(api_client, stack_set_name, account_ids, regions, quiet=False):
    """
    :param api_client:
    :param stack_set_name:
    :param account_ids:
    :param regions:
    :return:
    """
    operation_preferences = {'FailureTolerancePercentage': 100, 'MaxConcurrentPercentage': 100}
    if not quiet:
        printInfo('Creating stack instances in %d regions and %d accounts...' % (len(regions), len(account_ids)))
        printDebug(' %s' % ', '.join(regions))
    response = api_client.create_stack_instances(StackSetName=stack_set_name, Accounts=account_ids,
                                                 Regions=regions, OperationPreferences=operation_preferences)
    if not quiet:
        printInfo('Successfully started operation Id %s' % response['OperationId'])
    return response['OperationId']
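# Usage sketch (illustrative): deploy instances of an existing stack set, then block
# until the operation completes using cloudformation_wait defined above.
def _example_create_stack_instances():
    import boto3
    api_client = boto3.client('cloudformation', region_name='us-east-1')
    operation_id = create_stack_instances(api_client, 'my-stack-set', ['111111111111'],
                                          ['us-east-1', 'eu-west-1'])
    cloudformation_wait(api_client, 'stack_set', 'my-stack-set', operation_id)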
def parse_buckets(self, bucket, params):
    """
    Parse a single S3 bucket

    TODO
    """
    bucket['name'] = bucket.pop('Name')
    api_client = params['api_clients']['us-east-1']
    bucket['CreationDate'] = str(bucket['CreationDate'])
    bucket['region'] = get_s3_bucket_location(api_client, bucket['name'])
    # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...
    if bucket['region'] == 'EU':
        bucket['region'] = 'eu-west-1'
    # h4ck :: S3 is global but region-aware...
    if bucket['region'] not in params['api_clients']:
        printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region']))
        self.buckets_count -= 1
        return
    api_client = params['api_clients'][bucket['region']]
    get_s3_bucket_logging(api_client, bucket['name'], bucket)
    get_s3_bucket_versioning(api_client, bucket['name'], bucket)
    get_s3_bucket_webhosting(api_client, bucket['name'], bucket)
    bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket)
    # TODO:
    # CORS
    # Lifecycle
    # Notification ?
    # Get bucket's policy
    get_s3_bucket_policy(api_client, bucket['name'], bucket)
    # If requested, get key properties
    #if params['check_encryption'] or params['check_acls']:
    #    get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'], params['check_acls'])
    bucket['id'] = self.get_non_aws_id(bucket['name'])
    self.buckets[bucket['id']] = bucket
def fetch_all(self, credentials, regions=[], partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service

    :param credentials: AWS credentials to use for the API calls
    :param regions: Name of regions to fetch data from
    :param partition_name: AWS partition to connect to
    :param targets: Type of resources to be fetched; defaults to all.
    """
    global status, formatted_string
    # Initialize targets
    if not targets:
        targets = type(self).targets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    formatted_string = None
    api_service = self.service.lower()
    # Connect to the service
    if self.service in ['s3']:
        # S3 namespace is global but APIs aren't....
        api_clients = {}
        for region in build_region_list(self.service, regions, partition_name):
            api_clients[region] = connect_service('s3', credentials, region)
        api_client = api_clients[list(api_clients.keys())[0]]
    elif self.service == 'route53domains':
        api_client = connect_service(self.service, credentials, 'us-east-1')  # TODO: use partition's default region
    else:
        api_client = connect_service(self.service, credentials)
    # Threading to fetch & parse resources (queue consumer)
    params = {'api_client': api_client}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    q = self._init_threading(self.__fetch_target, params, 20)
    # Threading to list resources (queue feeder)
    params = {'api_client': api_client, 'q': q}
    if self.service in ['s3']:
        params['api_clients'] = api_clients
    qt = self._init_threading(self.__fetch_service, params, 10)
    # Init display
    self.fetchstatuslogger = FetchStatusLogger(targets)
    # Go
    for target in targets:
        qt.put(target)
    # Join
    qt.join()
    q.join()
    # Show completion and force newline
    if self.service != 'iam':
        self.fetchstatuslogger.show(True)
def get_organization_accounts(api_client, quiet=True):
    # List all accounts in the organization
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(org_accounts))
        printDebug(str(org_accounts))
    return org_accounts
def enable_mfa(iam_client, user, qrcode_file=None):
    """
    Create and activate an MFA virtual device

    :param iam_client:
    :param user:
    :param qrcode_file:
    :return:
    """
    mfa_serial = ''
    tmp_qrcode_file = None
    try:
        printInfo('Enabling MFA for user \'%s\'...' % user)
        mfa_device = iam_client.create_virtual_mfa_device(VirtualMFADeviceName=user)['VirtualMFADevice']
        mfa_serial = mfa_device['SerialNumber']
        mfa_png = mfa_device['QRCodePNG']
        mfa_seed = mfa_device['Base32StringSeed']
        tmp_qrcode_file = display_qr_code(mfa_png, mfa_seed)
        if qrcode_file != None:
            with open(qrcode_file, 'wt') as f:
                f.write(mfa_png)
        while True:
            mfa_code1 = prompt_4_mfa_code()
            mfa_code2 = prompt_4_mfa_code(activate=True)
            if mfa_code1 == 'q' or mfa_code2 == 'q':
                try:
                    delete_virtual_mfa_device(iam_client, mfa_serial)
                except Exception as e:
                    printException(e)
                    pass
                raise Exception
            try:
                iam_client.enable_mfa_device(UserName=user, SerialNumber=mfa_serial,
                                             AuthenticationCode1=mfa_code1, AuthenticationCode2=mfa_code2)
                printInfo('Successfully enabled MFA for \'%s\'. The device\'s ARN is \'%s\'.' % (user, mfa_serial))
                break
            except Exception as e:
                printException(e)
                pass
    except Exception as e:
        printException(e)
        # We shouldn't return normally because if we've gotten here
        # the user has potentially not set up the MFA device
        # correctly, so we don't want to e.g. write the .no-mfa
        # credentials file or anything.
        raise
    finally:
        if tmp_qrcode_file is not None:
            # This is a tempfile.NamedTemporaryFile, so simply closing
            # it will also unlink it.
            tmp_qrcode_file.close()
    return mfa_serial
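# Usage sketch (illustrative; interactive): provisions a virtual MFA device for a
# hypothetical user 'alice' and prompts for two consecutive MFA codes on stdin.
def _example_enable_mfa():
    import boto3
    iam_client = boto3.client('iam')
    mfa_serial = enable_mfa(iam_client, 'alice', qrcode_file='./alice-mfa.png')
    print('MFA device ARN: %s' % mfa_serial)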
def fetch_all(self, credentials, regions=[], partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service

    :param credentials: AWS credentials to use for the API calls
    :param regions: Name of regions to fetch data from
    :param partition_name: AWS partition to connect to
    :param targets: Type of resources to be fetched; defaults to all.
    """
    # Initialize targets
    if not targets:
        try:
            targets = type(self).targets  # TODO: remove this case eventually
        except:
            targets = self.targets
    # Tweak params
    realtargets = ()
    for i, target in enumerate(targets):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets, True)
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)  # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, 20)
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': targets},
                              10)
    # Go
    for region in regions:
        qr.put(region)
    # Join
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
def show_profiles_from_aws_credentials_file(credentials_files=[aws_credentials_file, aws_config_file]):
    """
    Show profile names from ~/.aws/credentials

    :param credentials_files:
    :return:
    """
    profiles = get_profiles_from_aws_credentials_file(credentials_files)
    for profile in set(profiles):
        printInfo(' * %s' % profile)
def postprocessing(aws_config):
    for service in aws_config['services']:
        method_name = '%s_postprocessing' % service
        if method_name in globals():
            try:
                printInfo('Post-processing %s config...' % format_service_name(service))
                method = globals()[method_name]
                method(aws_config)
            except Exception as e:
                printException(e)
                pass
def show_access_keys(iam_client, user_name):
    """
    :param iam_client:
    :param user_name:
    :return:
    """
    keys = get_access_keys(iam_client, user_name)
    printInfo('User \'%s\' currently has %s access keys:' % (user_name, len(keys)))
    for key in keys:
        printInfo('\t%s (%s)' % (key['AccessKeyId'], key['Status']))
def go_to_and_do(aws_config, current_config, path, current_path, callback, callback_args=None):
    """
    Recursively go to a target and execute a callback

    :param aws_config:
    :param current_config:
    :param path:
    :param current_path:
    :param callback:
    :param callback_args:
    :return:
    """
    try:
        key = path.pop(0)
        if not current_config:
            current_config = aws_config
        if not current_path:
            current_path = []
        keys = key.split('.')
        if len(keys) > 1:
            while True:
                key = keys.pop(0)
                if not len(keys):
                    break
                current_path.append(key)
                current_config = current_config[key]
        if key in current_config:
            current_path.append(key)
            for (i, value) in enumerate(list(current_config[key])):
                if len(path) == 0:
                    if type(current_config[key]) == dict and type(value) != dict and type(value) != list:
                        callback(aws_config, current_config[key][value], path, current_path, value, callback_args)
                    else:
                        callback(aws_config, current_config, path, current_path, value, callback_args)
                else:
                    tmp = copy.deepcopy(current_path)
                    try:
                        tmp.append(value)
                        go_to_and_do(aws_config, current_config[key][value], copy.deepcopy(path), tmp, callback, callback_args)
                    except:
                        tmp.pop()
                        tmp.append(i)
                        go_to_and_do(aws_config, current_config[key][i], copy.deepcopy(path), tmp, callback, callback_args)
    except Exception as e:
        printException(e)
        if 'i' in locals():
            printInfo('Index: %s' % str(i))
        printInfo('Path: %s' % str(current_path))
        printInfo('Key = %s' % (str(key) if 'key' in locals() else 'not defined'))
        printInfo('Value = %s' % (str(value) if 'value' in locals() else 'not defined'))
        printInfo('Path = %s' % path)
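# Usage sketch (illustrative): walk every EC2 instance in the configuration and run a
# callback on each, mirroring the traversal used by the elastic IP callbacks above.
def _example_walk_instances(aws_config):
    def visit_instance(aws_config, current_config, path, current_path, instance_id, callback_args):
        printInfo('Visited instance %s' % instance_id)
    ec2_config = aws_config['services']['ec2']
    go_to_and_do(aws_config, ec2_config, ['regions', 'vpcs', 'instances'],
                 ['services', 'ec2'], visit_instance)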
def process_cloudtrail_trails(cloudtrail_config):
    printInfo('Processing CloudTrail config...')
    global_events_logging = []
    for region in cloudtrail_config['regions']:
        for trail_id in cloudtrail_config['regions'][region]['trails']:
            trail = cloudtrail_config['regions'][region]['trails'][trail_id]
            if 'HomeRegion' in trail and trail['HomeRegion'] != region:
                # Part of a multi-region trail, skip until we find the whole object
                continue
            if trail['IncludeGlobalServiceEvents'] == True and trail['IsLogging'] == True:
                global_events_logging.append((region, trail_id,))
    cloudtrail_config['IncludeGlobalServiceEvents'] = False if (len(global_events_logging) == 0) else True
    cloudtrail_config['DuplicatedGlobalServiceEvents'] = True if (len(global_events_logging) > 1) else False
def add_user_to_group(iam_client, user, group, quiet=False):
    """
    Add an IAM user to an IAM group

    :param iam_client:
    :param user:
    :param group:
    :param quiet:
    :return:
    """
    if not quiet:
        printInfo('Adding user to group %s...' % group)
    iam_client.add_user_to_group(GroupName=group, UserName=user)
def _go_to_and_do(self, current_config, path, current_path, callback, callback_args=None):
    """
    Recursively go to a target and execute a callback
    """
    try:
        key = path.pop(0)
        if not current_config:
            current_config = self.config
        if not current_path:
            current_path = []
        keys = key.split('.')
        if len(keys) > 1:
            while True:
                key = keys.pop(0)
                if not len(keys):
                    break
                current_path.append(key)
                current_config = current_config[key]
        # if hasattr(current_config, key):
        if key in current_config:
            current_path.append(key)
            # current_config_key = getattr(current_config, key)
            current_config_key = current_config[key]
            for (i, value) in enumerate(list(current_config_key)):
                if len(path) == 0:
                    if type(current_config_key) == dict and type(value) != dict and type(value) != list:
                        callback(current_config_key[value], path, current_path, value, callback_args)
                    else:
                        callback(current_config, path, current_path, value, callback_args)
                else:
                    tmp = copy.deepcopy(current_path)
                    try:
                        tmp.append(value)
                        self._go_to_and_do(current_config_key[value], copy.deepcopy(path), tmp, callback, callback_args)
                    except:
                        tmp.pop()
                        tmp.append(i)
                        self._go_to_and_do(current_config_key[i], copy.deepcopy(path), tmp, callback, callback_args)
    except Exception as e:
        printException(e)
        printInfo('Path: %s' % str(current_path))
        printInfo('Key = %s' % (str(key) if 'key' in locals() else 'not defined'))
        printInfo('Value = %s' % (str(value) if 'value' in locals() else 'not defined'))
        printInfo('Path = %s' % path)
def main():
    # Parse arguments
    parser = OpinelArgumentParser(os.path.basename(__file__))
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('common-groups', default=[], nargs='+',
                        help='List of groups each IAM user should belong to.')
    parser.add_argument('category-groups', default=[], nargs='+',
                        help='List of category groups; each IAM user must belong to one.')
    parser.add_argument('category-regex', default=[], nargs='+',
                        help='List of regex enabling auto-assignment of category groups.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Get profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Connect to IAM
    iam_client = connect_service('iam', credentials)
    if not iam_client:
        return 42

    # Create groups
    for group in args.category_groups + args.common_groups:
        try:
            printInfo('Creating group %s...' % group)
            iam_client.create_group(GroupName=group)
        except Exception as e:
            if e.response['Error']['Code'] != 'EntityAlreadyExists':
                printException(e)
def delete_virtual_mfa_device(iam_client, mfa_serial):
    """
    Delete a virtual MFA device given its serial number

    :param iam_client:
    :param mfa_serial:
    :return:
    """
    try:
        printInfo('Deleting MFA device %s...' % mfa_serial)
        iam_client.delete_virtual_mfa_device(SerialNumber=mfa_serial)
    except Exception as e:
        printException(e)
        printError('Failed to delete MFA device %s' % mfa_serial)
        pass
def get_organization_accounts(api_client, exceptions=[], quiet=True):
    # List all accounts in the organization
    org_accounts = handle_truncated_response(api_client.list_accounts, {}, ['Accounts'])['Accounts']
    if not quiet:
        printInfo('Found %d accounts in the organization.' % len(org_accounts))
        for account in org_accounts:
            printDebug(str(account))
    if len(exceptions):
        filtered_accounts = []
        for account in org_accounts:
            if account['Id'] not in exceptions:
                filtered_accounts.append(account)
        org_accounts = filtered_accounts
    return org_accounts
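# Usage sketch (illustrative): list member accounts while excluding one known account
# ID. Assumes credentials with organizations:ListAccounts permission.
def _example_get_organization_accounts():
    import boto3
    api_client = boto3.client('organizations')
    accounts = get_organization_accounts(api_client, exceptions=['111111111111'], quiet=False)
    print([account['Id'] for account in accounts])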
def fetch_all(self, credentials, regions=[], partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service

    :param credentials: AWS credentials to use for the API calls
    :param regions: Name of regions to fetch data from
    :param partition_name: AWS partition to connect to
    :param targets: Type of resources to be fetched; defaults to all.
    """
    # Initialize targets
    # Tweak params
    realtargets = ()
    if not targets:
        targets = self.targets
    for i, target in enumerate(targets['first_region']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['first_region'] = realtargets
    realtargets = ()
    for i, target in enumerate(targets['other_regions']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['other_regions'] = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)  # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service, 'credentials': credentials, 'q': q, 'targets': ()},
                              self.thread_config['list'])
    # Go
    for i, region in enumerate(regions):
        qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
    # Join
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
def download_object(q, params):
    bucket_name = params['Bucket']
    s3_client = params['S3Client']
    while True:
        key, tries = q.get()
        filename = os.path.join(download_folder, key.split('/')[-1])
        dst = re.sub(r'\.(\w*)?$', '', filename)
        if (not os.path.exists(filename) and not os.path.exists(dst)) or \
           (os.path.exists(filename) and os.path.getsize(filename) == 0) or \
           (os.path.exists(dst) and os.path.getsize(dst) == 0):
            try:
                s3_client.download_file(bucket_name, key, filename)
            except Exception as e:
                if tries < 2:
                    q.put([key, tries + 1])
                    printInfo('Error downloading %s; re-queued.' % filename)
                else:
                    printException(e)
                    printInfo('Error downloading %s; discarded.' % filename)
        q.task_done()
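# Usage sketch (illustrative): feed download_object from a queue drained by worker
# threads. The bucket name and folder are assumptions; download_object reads the
# module-level download_folder global, so the sketch sets it first.
def _example_download_objects(keys):
    import boto3
    from queue import Queue
    from threading import Thread
    global download_folder
    download_folder = './downloads'
    os.makedirs(download_folder, exist_ok=True)
    q = Queue()
    params = {'Bucket': 'my-bucket', 'S3Client': boto3.client('s3')}
    for _ in range(4):
        worker = Thread(target=download_object, args=(q, params))
        worker.daemon = True
        worker.start()
    for key in keys:
        q.put([key, 0])
    q.join()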
def process_cloudtrail_trails(cloudtrail_config):
    printInfo('Processing CloudTrail config...')
    global_events_logging = []
    data_logging_trails_count = 0
    for region in cloudtrail_config['regions']:
        for trail_id in cloudtrail_config['regions'][region]['trails']:
            trail = cloudtrail_config['regions'][region]['trails'][trail_id]
            if 'HomeRegion' in trail and trail['HomeRegion'] != region:
                # Part of a multi-region trail, skip until we find the whole object
                continue
            if trail['IncludeGlobalServiceEvents'] == True and trail['IsLogging'] == True:
                global_events_logging.append((region, trail_id,))
            # Any wildcard logging?
            if trail.get('wildcard_data_logging', False):
                data_logging_trails_count += 1
    cloudtrail_config['data_logging_trails_count'] = data_logging_trails_count
    cloudtrail_config['IncludeGlobalServiceEvents'] = False if (len(global_events_logging) == 0) else True
    cloudtrail_config['DuplicatedGlobalServiceEvents'] = True if (len(global_events_logging) > 1) else False
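# Hedged sketch of the minimal cloudtrail_config shape this post-processing expects;
# the region and trail names are illustrative.
def _example_process_cloudtrail_trails():
    cloudtrail_config = {'regions': {'us-east-1': {'trails': {'my-trail': {
        'IncludeGlobalServiceEvents': True, 'IsLogging': True,
        'wildcard_data_logging': True}}}}}
    process_cloudtrail_trails(cloudtrail_config)
    print(cloudtrail_config['IncludeGlobalServiceEvents'])   # True
    print(cloudtrail_config['data_logging_trails_count'])    # 1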
def parse_buckets(self, bucket, params):
    """
    Parse a single S3 bucket

    TODO:
    - CORS
    - Lifecycle
    - Notification ?
    - Get bucket's policy

    :param bucket:
    :param params:
    :return:
    """
    bucket['name'] = bucket.pop('Name')
    api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])]
    bucket['CreationDate'] = str(bucket['CreationDate'])
    bucket['region'] = get_s3_bucket_location(api_client, bucket['name'])
    # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland...
    if bucket['region'] == 'EU':
        bucket['region'] = 'eu-west-1'
    # h4ck :: S3 is global but region-aware...
    if bucket['region'] not in params['api_clients']:
        printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region']))
        self.buckets_count -= 1
        return
    api_client = params['api_clients'][bucket['region']]
    get_s3_bucket_logging(api_client, bucket['name'], bucket)
    get_s3_bucket_versioning(api_client, bucket['name'], bucket)
    get_s3_bucket_webhosting(api_client, bucket['name'], bucket)
    get_s3_bucket_default_encryption(api_client, bucket['name'], bucket)
    bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket)
    get_s3_bucket_policy(api_client, bucket['name'], bucket)
    get_s3_bucket_secure_transport(api_client, bucket['name'], bucket)
    # If requested, get key properties
    #if params['check_encryption'] or params['check_acls']:
    #    get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'], params['check_acls'])
    bucket['id'] = self.get_non_aws_id(bucket['name'])
    self.buckets[bucket['id']] = bucket
def update_cloudformation_resource_from_template(api_client, resource_type, name, template_path, template_parameters=[], tags=[], quiet=False, wait_for_completion=False):
    """
    :param api_client:
    :param resource_type:
    :param name:
    :param template_path:
    :param template_parameters:
    :param quiet:
    :return:
    """
    try:
        update = getattr(api_client, 'update_%s' % resource_type)
        api_resource_type = snake_to_camel(resource_type)
        # Add a timestamp
        tags.append({'Key': 'OpinelTimestamp', 'Value': str(time.time())})
        params = prepare_cloudformation_params(name, template_path, template_parameters, api_resource_type, tags)
        if not quiet:
            printInfo('Updating the %s...' % resource_type, newLine=False)
        response = update(**params)
        operation_id = response['OperationId'] if resource_type == 'stack_set' else None
        if wait_for_completion:
            cloudformation_wait(api_client, resource_type, name, operation_id)
    except Exception as e:
        if api_resource_type == 'Stack' and hasattr(e, 'response') and type(e.response) == dict and \
           e.response['Error']['Code'] == 'ValidationError' and \
           e.response['Error']['Message'] == 'No updates are to be performed.':
            printInfo(' Already up to date.')
        else:
            printException(e)
            printError(' Failed.')
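# Usage sketch (illustrative): push a local template update to an existing stack set
# and wait for the operation to finish. Names and paths are assumptions.
def _example_update_stack_set():
    import boto3
    api_client = boto3.client('cloudformation', region_name='us-east-1')
    update_cloudformation_resource_from_template(api_client, 'stack_set', 'my-stack-set',
                                                 './templates/my-stack-set.json',
                                                 wait_for_completion=True)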
def __open_file(self, config_filename, force_write, quiet=False):
    """
    :param config_filename:
    :param force_write:
    :param quiet:
    :return:
    """
    if not quiet:
        printInfo('Saving config...')
    if prompt_4_overwrite(config_filename, force_write):
        try:
            config_dirname = os.path.dirname(config_filename)
            if not os.path.isdir(config_dirname):
                os.makedirs(config_dirname)
            return open(config_filename, 'wt')
        except Exception as e:
            printException(e)
    else:
        return None
def analyze_ec2_config(ec2_info, aws_account_id, force_write):
    try:
        printInfo('Analyzing EC2 config... ', newLine=False)
        # Tweaks
        link_elastic_ips(ec2_info)
        add_security_group_name_to_ec2_grants(ec2_info, aws_account_id)
        # Custom EC2 analysis
        # check_for_elastic_ip(ec2_info)
        list_network_attack_surface(ec2_info, 'attack_surface', 'PublicIpAddress')
        # TODO: make this optional, commented out for now
        # list_network_attack_surface(ec2_info, 'private_attack_surface', 'PrivateIpAddress')
        printInfo('Success')
    except Exception as e:
        printInfo('Error')
        printException(e)
def main():
    # Parse arguments
    parser = Scout2ArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Set the profile name
    profile_name = args.profile[0]

    # Search for AWS credentials
    if not args.fetch_local:
        credentials = read_creds(args.profile[0], args.csv_credentials, args.mfa_serial, args.mfa_code)
        if credentials['AccessKeyId'] is None:
            return 42

    # Create a new Scout2 config
    report = Scout2Report(profile_name, args.report_dir, args.timestamp)
    aws_config = Scout2Config(profile_name, args.report_dir, args.timestamp, args.services, args.skipped_services, args.thread_config)

    if not args.fetch_local:
        # Fetch data from AWS APIs if not running a local analysis
        try:
            aws_config.fetch(credentials, regions=args.regions, partition_name=get_partition_name(credentials))
        except KeyboardInterrupt:
            printInfo('\nCancelled by user')
            return 130
        aws_config = report.jsrw.to_dict(aws_config)
        # Set the account ID
        aws_config['aws_account_id'] = get_aws_account_id(credentials)

    # Update means we reload the whole config and overwrite part of it
    if args.update == True:
        new_aws_config = copy.deepcopy(aws_config)
        aws_config = report.jsrw.load_from_file(AWSCONFIG)
        for service in new_aws_config['service_list']:
            # Per service only for now, may add per region & per VPC later...
            aws_config['services'][service] = new_aws_config['services'][service]
        # Update the metadata too
        aws_config['metadata'] = Scout2Config('default', None, None, [], []).metadata
    else:
        # Reload to flatten everything into a python dictionary
        aws_config = report.jsrw.load_from_file(AWSCONFIG)

    # Pre processing
    preprocessing(aws_config, args.ip_ranges, args.ip_ranges_name_key)

    # Analyze config
    finding_rules = Ruleset(profile_name, filename=args.ruleset, ip_ranges=args.ip_ranges,
                            aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(finding_rules)
    pe.run(aws_config)

    # Create display filters
    filter_rules = Ruleset(filename='filters.json', rule_type='filters',
                           aws_account_id=aws_config['aws_account_id'])
    pe = ProcessingEngine(filter_rules)
    pe.run(aws_config)

    # Handle exceptions
    try:
        exceptions = RuleExceptions(profile_name, args.exceptions[0])
        exceptions.process(aws_config)
        exceptions = exceptions.exceptions
    except Exception as e:
        printDebug('Warning, failed to load exceptions. The file may not exist or may have an invalid format.')
        exceptions = {}

    # Finalize
    postprocessing(aws_config, report.current_time, finding_rules)

    # Get organization data if it exists
    try:
        profile = AWSProfiles.get(profile_name)[0]
        if 'source_profile' in profile.attributes:
            organization_info_file = os.path.join(os.path.expanduser('~/.aws/recipes/%s/organization.json' % profile.attributes['source_profile']))
            if os.path.isfile(organization_info_file):
                with open(organization_info_file, 'rt') as f:
                    org = {}
                    accounts = json.load(f)
                    for account in accounts:
                        account_id = account.pop('Id')
                        org[account_id] = account
                    aws_config['organization'] = org
    except:
        pass

    # Save config and create HTML report
    html_report_path = report.save(aws_config, exceptions, args.force_write, args.debug)

    # Open the report by default
    if not args.no_browser:
        printInfo('Opening the HTML report...')
        url = 'file://%s' % os.path.abspath(html_report_path)
        webbrowser.open(url, new=2)

    return 0
def main():
    # Parse arguments
    parser = ListallArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Support multiple environments
    for profile_name in args.profile:
        # Load the config
        try:
            report = Scout2Report(profile_name, args.report_dir, args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            services = aws_config['service_list']
        except Exception as e:
            printException(e)
            printError('Error, failed to load the configuration for profile %s' % profile_name)
            continue

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            rule_filename = args.config
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()], rule_filename=args.config, rule_args=args.config_args)
        elif len(args.path) > 0:
            # Create a local tmp rule
            rule_dict = {'description': 'artifact'}
            rule_dict['path'] = args.path[0]
            rule_dict['conditions'] = []
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()], rule_filename=rule_filename, rule_args=[])
        else:
            printError('Error, you must provide either a rule configuration file or the path to the resources targeted.')
            continue

        # Process the rule
        pe = ProcessingEngine(ruleset)
        pe.run(aws_config, skip_dashboard=True)

        # Retrieve items
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key]['items']

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir, '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            except:
                # 4. Print the object name
                rule.keys = ['name']

        # Prepare the output format
        (lines, template) = format_listall_output(args.format_file[0], None, args.format, rule)

        # Print the output
        printInfo(generate_listall_output(lines, resources, aws_config, template, []))