def fetch(self, credentials, services=None, regions=None, partition_name=''):
    """
    Fetch the configuration of every service attribute attached to this object.

    :param credentials:     AWS credentials dict passed to each service's fetch_all()
    :param services:        Optional list of service names to fetch (all when None/empty)
    :param regions:         Optional list of regions to restrict the fetch to
    :param partition_name:  AWS partition name (e.g. 'aws', 'aws-cn')
    :return:                None
    """
    # Bug fix: avoid mutable default arguments -- a shared list default would
    # persist state across calls
    services = [] if services is None else services
    regions = [] if regions is None else regions
    for service in vars(self):
        try:
            # Only fetch the services explicitly requested (empty list == all)
            if services != [] and service not in services:
                continue
            service_config = getattr(self, service)
            if 'fetch_all' in dir(service_config):
                method_args = {}
                method_args['credentials'] = credentials
                # IAM is a global service: no regions / partition arguments
                if service != 'iam':
                    method_args['regions'] = regions
                    method_args['partition_name'] = partition_name
                service_config.fetch_all(**method_args)
                if hasattr(service_config, 'finalize'):
                    service_config.finalize()
        except Exception as e:
            printError('Error: could not fetch %s configuration.' % service)
            printException(e)
def fetch_credential_report(self, credentials, ignore_exception = False):
    """
    Fetch the IAM credential report for the account.

    Generates the report via the IAM API, then downloads and parses the CSV
    content into ``self.credential_report`` (a dict keyed by user name).

    :param credentials:       AWS credentials dict used to create the IAM client
    :param ignore_exception:  When True, fail silently; report generation is
                              asynchronous and may not be ready on first call
    :return:                  None
    """
    iam_report = {}
    try:
        api_client = connect_service('iam', credentials, silent = True)
        response = api_client.generate_credential_report()
        # The report is generated asynchronously; bail out when not ready yet
        if response['State'] != 'COMPLETE':
            if not ignore_exception:
                printError('Failed to generate a credential report.')
            return
        report = api_client.get_credential_report()['Content']
        lines = report.splitlines()
        # First CSV line holds the column names; each following line is one user
        keys = lines[0].decode('utf-8').split(',')
        for line in lines[1:]:
            values = line.decode('utf-8').split(',')
            # values[0] is the user name, used as the report key
            manage_dictionary(iam_report, values[0], {})
            for key, value in zip(keys, values):
                iam_report[values[0]][key] = value
        self.credential_report = iam_report
        self.fetchstatuslogger.counts['credential_report']['fetched'] = 1
    except Exception as e:
        if ignore_exception:
            return
        printError('Failed to download a credential report.')
        printException(e)
def load(self, rule_type, quiet=False):
    """
    Open a JSON file defining a ruleset and load it into this Ruleset object.

    :param rule_type:  Type of rule, forwarded to handle_rule_versions()
    :param quiet:      When True, do not print an error if the file is missing
    :return:           None
    """
    if self.filename and os.path.exists(self.filename):
        try:
            with open(self.filename, 'rt') as f:
                ruleset = json.load(f)
                self.about = ruleset['about'] if 'about' in ruleset else ''
                self.rules = {}
                for filename in ruleset['rules']:
                    self.rules[filename] = []
                    for rule in ruleset['rules'][filename]:
                        self.handle_rule_versions(filename, rule_type, rule)
        except Exception as e:
            printException(e)
            printError('Error: ruleset file %s contains malformed JSON.' % self.filename)
            # Bug fix: keep self.rules a dict for type consistency with the
            # success path (it was previously reset to a list here)
            self.rules = {}
            self.about = ''
    else:
        # Same type-consistency fix: self.rules is always a dict
        self.rules = {}
        if not quiet:
            printError('Error: the file %s does not exist.' % self.filename)
def __get_inline_policies(self, api_client, iam_resource_type, resource_id, resource_name):
    """
    Fetch the inline policies attached to a given IAM resource.

    :param api_client:          IAM API client
    :param iam_resource_type:   'user', 'group', or 'role' (lowercase, singular)
    :param resource_id:         Internal ID of the resource, forwarded to permission parsing
    :param resource_name:       Name of the resource, as expected by the IAM API
    :return:                    Dict of policies keyed by a locally-generated policy ID
    """
    fetched_policies = {}
    # Build the API method names from the resource type, e.g. get_user_policy
    get_policy_method = getattr(api_client, 'get_' + iam_resource_type + '_policy')
    list_policy_method = getattr(api_client, 'list_' + iam_resource_type + '_policies')
    args = {}
    # The IAM API expects e.g. UserName / GroupName / RoleName
    args[iam_resource_type.title() + 'Name'] = resource_name
    try:
        policy_names = list_policy_method(**args)['PolicyNames']
    except Exception as e:
        # Throttling errors are re-raised so the caller can retry later
        if is_throttled(e):
            raise e
        else:
            printException(e)
            return fetched_policies
    try:
        for policy_name in policy_names:
            args['PolicyName'] = policy_name
            policy_document = get_policy_method(**args)['PolicyDocument']
            policy_id = self.get_non_aws_id(policy_name)
            manage_dictionary(fetched_policies, policy_id, {})
            fetched_policies[policy_id]['PolicyDocument'] = policy_document
            fetched_policies[policy_id]['name'] = policy_name
            # Record which resource the policy's permissions apply to
            self.__parse_permissions(policy_id, policy_document, 'inline_policies', iam_resource_type + 's', resource_id)
    except Exception as e:
        if is_throttled(e):
            raise e
        else:
            printException(e)
    return fetched_policies
def get_value_at(all_info, current_path, key, to_string=False):
    """
    Get the value located at a given path within the configuration object.

    :param all_info:      Full configuration object to walk
    :param current_path:  List of keys describing the current location
    :param key:           Dotted key expression; 'id' components are resolved
                          against current_path, 'this' means the current path
    :param to_string:     When True, return str(value) instead of the raw value
    :return:              The value found at the resolved target path
    """
    keys = key.split('.')
    if keys[-1] == 'id':
        # Shortcut: the requested value is itself a path component
        target_obj = current_path[len(keys)-1]
    else:
        if key == 'this':
            target_path = current_path
        elif '.' in key:
            # Build the target path, replacing each 'id' with the value found
            # at the same depth in current_path
            target_path = []
            for i, key in enumerate(keys):
                if key == 'id':
                    target_path.append(current_path[i])
                else:
                    target_path.append(key)
            if len(keys) > len(current_path):
                # NOTE(review): target_path already has len(keys) entries here,
                # so this slice is empty and the append is a no-op -- confirm intent
                target_path = target_path + keys[len(target_path):]
        else:
            # Single relative key: resolve it under the current path
            target_path = copy.deepcopy(current_path)
            target_path.append(key)
        target_obj = all_info
        for p in target_path:
            try:
                if type(target_obj) == list and type(target_obj[0]) == dict:
                    # List of objects: p is expected to be an index
                    target_obj = target_obj[int(p)]
                elif type(target_obj) == list:
                    # List of scalars: the path component is the value itself
                    target_obj = p
                elif p == '':
                    # Empty component: stay at the current object
                    target_obj = target_obj
                else:
                    try:
                        target_obj = target_obj[p]
                    except Exception as e:
                        printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key)))
                        printException(e)
                        raise Exception
            except Exception as e:
                printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key)))
                printException(e)
                raise Exception
    if to_string:
        return str(target_obj)
    else:
        return target_obj
def _fetch_targets(self, api_client, q, target):
    '''
    Make an API call defined in metadata.json and queue the returned resources.

    Each resource is handled by the matching "parse_[object name]" method when
    one exists, falling back to the generic store_target().

    :param api_client:  Service API client
    :param q:           Queue receiving (callback, region, target) work items
    :param target:      Tuple describing the list call, as read from metadata.json
    :return:            None
    '''
    # Handle & format the target type
    target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
    list_method = getattr(api_client, list_method_name)
    try:
        targets = handle_truncated_response(list_method, list_params, [response_attribute])[response_attribute]
    except Exception as e:
        if not ignore_list_error:
            printException(e)
        targets = []
    setattr(self, '%s_count' % target_type, len(targets))
    self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
    region = api_client._client_config.region_name
    # Queue resources
    for target in targets:
        # call callback methods
        try:
            callback = getattr(self, 'parse_%s' % target_type[0:-1])
        except AttributeError:
            # Bug fix: catch only the missing-parser case instead of a bare
            # except that would also hide unrelated errors
            callback = self.store_target
            target['scout2_target_type'] = target_type
        if q:
            # Add to the queue
            q.put((callback, region, target))
def update_metadata(aws_config):
    """
    Refresh the metadata section of the config: ensure every resource has a
    full_path and script entry, and recompute per-resource counts from the
    fetched service data.

    :param aws_config:  Full configuration dictionary (modified in place)
    :return:            None
    """
    service_map = {}
    for service_group in aws_config['metadata']:
        for service in aws_config['metadata'][service_group]:
            if service not in aws_config['service_list']:
                continue
            service_metadata = aws_config['metadata'][service_group][service]
            if 'hidden' in service_metadata and service_metadata['hidden'] == True:
                continue
            if 'resources' not in service_metadata:
                continue
            service_map[service] = service_group
            for resource in service_metadata['resources']:
                resource_metadata = service_metadata['resources'][resource]
                # full_path defaults to path when not explicitly set
                if not 'full_path' in resource_metadata:
                    resource_metadata['full_path'] = resource_metadata['path']
                # Script is the full path minus "id" components (TODO: change that)
                if not 'script' in resource_metadata:
                    resource_metadata['script'] = '.'.join([x for x in resource_metadata['full_path'].split('.') if x != 'id'])
                # Update counts
                count = '%s_count' % resource
                service_config = aws_config['services'][service]
                if service_config and resource != 'regions':
                    if 'regions' in service_config.keys():
                        # Region-scoped resource: sum the per-region counts
                        resource_metadata['count'] = 0
                        for region in service_config['regions']:
                            if count in service_config['regions'][region].keys():
                                resource_metadata['count'] += service_config['regions'][region][count]
                    else:
                        # Global resource: read the count directly off the service
                        try:
                            resource_metadata['count'] = service_config[count]
                        except Exception as e:
                            printException(e)
def save_to_file(self, config, config_type, force_write, debug):
    """
    Serialize a configuration object to its JSON report file.

    :param config:       Configuration object to serialize
    :param config_type:  Report type, used to derive the output file name
    :param force_write:  When True, overwrite an existing file without prompting
    :param debug:        When True, pretty-print the JSON output
    :return:             None
    """
    config_path, first_line = get_filename(config_type, self.profile, self.report_dir)
    print('Saving data to %s' % config_path)
    try:
        with self.__open_file(config_path, force_write, False) as f:
            # Some report formats start with a JS variable-assignment line
            if first_line:
                print('%s' % first_line, file=f)
            indentation = 4 if debug else None
            serialized = json.dumps(config, indent=indentation, separators=(',', ': '), sort_keys=True, cls=Scout2Encoder)
            print('%s' % serialized, file=f)
    except Exception as e:
        printException(e)
def postprocessing(aws_config):
    """
    Run each service's optional post-processing hook.

    Looks up a module-level "<service>_postprocessing" function for every
    fetched service and invokes it with the full configuration.

    :param aws_config:  Full configuration dictionary (modified in place)
    :return:            None
    """
    for service in aws_config['services']:
        method_name = '%s_postprocessing' % service
        if method_name not in globals():
            continue
        try:
            printInfo('Post-processing %s config...' % format_service_name(service))
            globals()[method_name](aws_config)
        except Exception as e:
            printException(e)
def new_go_to_and_do(aws_config, current_config, path, current_path, callbacks):
    """
    Recursively go to a target and execute a callback.

    :param aws_config:      Full configuration dictionary
    :param current_config:  Subtree currently being walked (defaults to aws_config)
    :param path:            Remaining path components to follow (consumed in place)
    :param current_path:    Path components already followed
    :param callbacks:       List of (callback_name, callback_args) to run at the target
    :return:                None
    """
    try:
        key = path.pop(0)
        if not current_config:
            current_config = aws_config
        if not current_path:
            current_path = []
        # A dotted key encodes several levels: descend through all but the last
        keys = key.split('.')
        if len(keys) > 1:
            while True:
                key = keys.pop(0)
                if not len(keys):
                    break
                current_path.append(key)
                current_config = current_config[key]
        if key in current_config:
            current_path.append(key)
            for (i, value) in enumerate(list(current_config[key])):
                if len(path) == 0:
                    # Reached the target: run every callback
                    for callback_info in callbacks:
                        callback_name = callback_info[0]
                        callback = globals()[callback_name]
                        callback_args = callback_info[1]
                        # Bug fix: the original tested type(x == dict), which is
                        # always bool (truthy); the intent is to check that the
                        # container itself is a dict of scalar-keyed entries
                        if type(current_config[key]) == dict and type(value) != dict and type(value) != list:
                            callback(aws_config, current_config[key][value], path, current_path, value, callback_args)
                        else:
                            callback(aws_config, current_config, path, current_path, value, callback_args)
                else:
                    # Not at the target yet: recurse, trying dict access first
                    # and falling back to list indexing
                    tmp = copy.deepcopy(current_path)
                    try:
                        tmp.append(value)
                        new_go_to_and_do(aws_config, current_config[key][value], copy.deepcopy(path), tmp, callbacks)
                    except:
                        tmp.pop()
                        tmp.append(i)
                        new_go_to_and_do(aws_config, current_config[key][i], copy.deepcopy(path), tmp, callbacks)
    except Exception as e:
        printException(e)
        printInfo('Path: %s' % str(current_path))
        printInfo('Key = %s' % str(key))
        printInfo('Value = %s' % str(value))
        printInfo('Path = %s' % path)
def analyze_ec2_config(ec2_info, aws_account_id, force_write):
    """
    Run EC2-specific analysis passes on the fetched configuration.

    :param ec2_info:        EC2 configuration subtree (modified in place)
    :param aws_account_id:  Current AWS account ID, used to resolve security group grants
    :param force_write:     Unused here; kept for interface compatibility with callers
    :return:                None
    """
    try:
        printInfo('Analyzing EC2 config... ', newLine = False)
        # Tweaks
        link_elastic_ips(ec2_info)
        add_security_group_name_to_ec2_grants(ec2_info, aws_account_id)
        # Custom EC2 analysis
        # check_for_elastic_ip(ec2_info)
        list_network_attack_surface(ec2_info, 'attack_surface', 'PublicIpAddress')
        # TODO: make this optional, commented out for now
        # list_network_attack_surface(ec2_info, 'private_attack_surface', 'PrivateIpAddress')
        printInfo('Success')
    except Exception as e:
        printInfo('Error')
        printException(e)
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single DB parameter group and store it by name, recording each
    parameter's value and source.

    :param global_params:    Parameters shared across all regions (unused here)
    :param region:           Region the parameter group belongs to
    :param parameter_group:  Raw parameter group object (modified in place)
    :return:                 None
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    try:
        request_params = {'DBParameterGroupName': parameter_group['name']}
        parameters = handle_truncated_response(api_client.describe_db_parameters, request_params, ['Parameters'])['Parameters']
        for parameter in parameters:
            details = {}
            details['value'] = parameter['ParameterValue'] if 'ParameterValue' in parameter else None
            details['source'] = parameter['Source']
            manage_dictionary(parameter_group, 'parameters', {})
            parameter_group['parameters'][parameter['ParameterName']] = details
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % parameter_group['name'])
    # Save
    self.parameter_groups[parameter_group['name']] = parameter_group
def gunzip_file(q, params):
    """
    Worker thread: gunzip downloaded files in place.

    Pulls file names from the queue, decompresses "*.gz" files next to the
    original, and removes the compressed source on success.

    :param q:       Queue of file names (relative to download_folder)
    :param params:  Unused; kept for the threaded-worker interface
    :return:        Never returns (runs until the process exits)
    """
    while True:
        src = q.get()
        src = os.path.join(download_folder, src)
        try:
            # Strip the final extension to build the destination name
            dst = re.sub(r'\.(\w*)?$', '', src)
            if src.endswith('.gz'):
                with gzip.open(src, 'rb') as f1:
                    file_contents = f1.read()
                # Bug fix: gzip yields bytes, so the destination must be opened
                # in binary mode ('wt' raised TypeError on the bytes payload)
                with open(dst, 'wb') as f2:
                    f2.write(file_contents)
                os.remove(src)
        except Exception as e:
            printException(e)
        finally:
            q.task_done()
def __fetch_target(self, q, params):
    """
    Worker thread: parse queued targets until the process exits.

    :param q:       Queue of (target_type, target) tuples
    :param params:  Extra parameters forwarded to each parse_<type> method
    :return:        Never returns
    """
    global status
    try:
        while True:
            try:
                target_type, target = q.get()
                # Dispatch to the type-specific parser, e.g. parse_instances
                parser = getattr(self, 'parse_%s' % target_type)
                parser(target, params)
                self.fetchstatuslogger.counts[target_type]['fetched'] += 1
                self.fetchstatuslogger.show()
            except Exception as e:
                printException(e)
            finally:
                q.task_done()
    except Exception as e:
        printException(e)
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, ACLs, etc...) for a bucket.

    :param api_client:        S3 API client
    :param bucket_name:       Name of the bucket to inspect
    :param bucket:            Bucket object updated in place with key details
    :param check_encryption:  When True, fetch each key's server-side encryption settings
    :param check_acls:        When True, fetch each key's grantees
    :return:                  None
    """
    bucket['keys'] = []
    keys = handle_truncated_response(api_client.list_objects, {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(keys['Contents'])
    key_count = 0
    # FIXME - commented for now as this method doesn't seem to be defined anywhere'
    # update_status(key_count, bucket['keys_count'], 'keys')
    for key in keys['Contents']:
        key_count += 1
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            try:
                # The encryption configuration is only accessible via an HTTP header,
                # only returned when requesting one object at a time...
                k = api_client.get_object(Bucket=bucket_name, Key=key['name'])
                key['ServerSideEncryption'] = k['ServerSideEncryption'] if 'ServerSideEncryption' in k else None
                key['SSEKMSKeyId'] = k['SSEKMSKeyId'] if 'SSEKMSKeyId' in k else None
            except Exception as e:
                # A failed get_object drops the key from the results entirely
                printException(e)
                continue
        if check_acls:
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket, key_name=key['name'])
            except Exception as e:
                # NOTE(review): ACL failures are silently swallowed and the key
                # is dropped from the results -- confirm this is intentional
                continue
        # Save it
        bucket['keys'].append(key)
def pass_conditions(all_info, current_path, conditions, unknown_as_pass_condition=False):
    """
    Check whether a set of rule conditions passes.

    :param all_info:                  Full configuration object
    :param current_path:              Path of the resource currently being tested
    :param conditions:                [operator, condition, ...] where operator is
                                      'and'/'or' and each condition is either a nested
                                      condition list or a (path, test, values) triple
    :param unknown_as_pass_condition: Consider an undetermined condition as passed
    :return:                          True if the conditions pass, False otherwise
    """
    if len(conditions) == 0:
        return True
    # Bug fix: read the operator without pop()ing it so the caller's condition
    # list is left intact and can be re-evaluated for other resources
    condition_operator = conditions[0]
    for condition in conditions[1:]:
        if condition[0] in condition_operators:
            # Nested condition group: recurse
            res = pass_conditions(all_info, current_path, condition, unknown_as_pass_condition)
        else:
            # Conditions are formed as "path to value", "type of test", "value(s) for test"
            path_to_value, test_name, test_values = condition
            path_to_value = fix_path_string(all_info, current_path, path_to_value)
            target_obj = get_value_at(all_info, current_path, path_to_value)
            if type(test_values) != list:
                # Test values may dynamically reference another config value
                dynamic_value = re_get_value_at.match(test_values)
                if dynamic_value:
                    test_values = get_value_at(all_info, current_path, dynamic_value.groups()[0], True)
            try:
                res = pass_condition(target_obj, test_name, test_values)
            except Exception as e:
                res = True if unknown_as_pass_condition else False
                printError('Unable to process testcase \'%s\' on value \'%s\', interpreted as %s.' % (test_name, str(target_obj), res))
                printException(e, True)
        # Quick exit and + false
        if condition_operator == 'and' and not res:
            return False
        # Quick exit or + true
        if condition_operator == 'or' and res:
            return True
    # Exhausted all conditions: 'or' means none passed, 'and' means none failed
    if condition_operator == 'or':
        return False
    else:
        return True
def load(self):
    """
    Load the definition of the rule, searching in the specified rule dirs
    first, then in the built-in definitions.

    :return: None
    """
    file_name_valid = False
    rule_type_valid = False
    # Look for a locally-defined rule
    for rule_dir in self.rule_dirs:
        file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name
        if os.path.isfile(file_path):
            self.file_path = file_path
            file_name_valid = True
            break
    # Look for a built-in rule
    if not file_name_valid:
        # A file name prefixed with a rule type already encodes where it lives
        # directly under the rules data directory
        for rule_type in self.rule_types:
            if self.file_name.startswith(rule_type):
                self.file_path = os.path.join(self.rules_data_path, self.file_name)
                rule_type_valid = True
                file_name_valid = True
                break
        if not rule_type_valid:
            # Otherwise, try each rule-type subdirectory in turn
            for rule_type in self.rule_types:
                self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name)
                if os.path.isfile(self.file_path):
                    file_name_valid = True
                    break
        else:
            # NOTE(review): file_name_valid is already True in this branch, so
            # this re-check is redundant -- confirm before simplifying
            if os.path.isfile(self.file_path):
                file_name_valid = True
    if not file_name_valid:
        printError('Error: could not find %s' % self.file_name)
    else:
        try:
            with open(self.file_path, 'rt') as f:
                self.string_definition = f.read()
                self.load_from_string_definition()
        except Exception as e:
            printException(e)
            printError('Failed to load rule defined in %s' % file_path)
def _update_metadata(self):
    """
    Refresh the metadata section: ensure every resource has full_path and
    script entries, and recompute per-resource counts from the fetched data.

    :return: None
    """
    service_map = {}
    for service_group in self.metadata:
        for service in self.metadata[service_group]:
            if service not in self.service_list:
                continue
            service_metadata = self.metadata[service_group][service]
            if 'hidden' in service_metadata and service_metadata['hidden'] == True:
                continue
            if 'resources' not in service_metadata:
                continue
            service_map[service] = service_group
            for resource in service_metadata['resources']:
                resource_metadata = service_metadata['resources'][resource]
                # full_path = path if needed
                if not 'full_path' in resource_metadata:
                    resource_metadata['full_path'] = resource_metadata['path']
                # Script is the full path minus "id" (TODO: change that)
                if not 'script' in resource_metadata:
                    resource_metadata['script'] = '.'.join([x for x in resource_metadata['full_path'].split('.') if x != 'id'])
                # Update counts
                service_config = self.services[service]
                if not service_config:
                    continue
                count = '%s_count' % resource
                if resource != 'regions':
                    if 'regions' in service_config.keys() and isinstance(service_config['regions'], dict):
                        # Region-scoped resource: sum the per-region counts
                        resource_metadata['count'] = 0
                        for region in service_config['regions']:
                            if count in service_config['regions'][region].keys():
                                resource_metadata['count'] += service_config['regions'][region][count]
                    else:
                        # Global resource: read the count directly off the service
                        try:
                            resource_metadata['count'] = service_config[count]
                        except Exception as e:
                            printException(e)
                else:
                    # The 'regions' pseudo-resource counts the regions themselves
                    resource_metadata['count'] = len(service_config['regions'])
def download_object(q, params):
    """
    Worker thread: download S3 objects from the queue, re-queueing each
    failure once before discarding it.

    :param q:       Queue of (key, tries) work items
    :param params:  Dict providing 'Bucket' and 'S3Client'
    :return:        Never returns
    """
    bucket_name = params['Bucket']
    s3_client = params['S3Client']
    while True:
        key, tries = q.get()
        filename = os.path.join(download_folder, key.split('/')[-1])
        dst = re.sub(r'\.(\w*)?$', '', filename)
        # Skip objects already downloaded (compressed or gunzipped form),
        # unless the existing file is empty
        missing = not os.path.exists(filename) and not os.path.exists(dst)
        empty_src = os.path.exists(filename) and os.path.getsize(filename) == 0
        empty_dst = os.path.exists(dst) and os.path.getsize(dst) == 0
        if missing or empty_src or empty_dst:
            try:
                s3_client.download_file(bucket_name, key, filename)
            except Exception as e:
                if tries < 2:
                    # Transient failure: give the object one more chance
                    q.put([key, tries + 1])
                    printInfo('Error downloading %s; re-queued.' % filename)
                else:
                    printException(e)
                    printInfo('Error downloading %s; discarded.' % filename)
        q.task_done()
def get_zones(self, client, project):
    """
    Return the list of zone names for a project, caching the result on the
    instance so the API is only queried once.

    :param client:   GCE API client
    :param project:  Project identifier
    :return:         List of zone names, or None on unexpected errors
    """
    try:
        if self.zones:
            return self.zones
        response = client.zones().list(project=project).execute()
        self.zones = [zone['name'] for zone in response['items']]
        return self.zones
    except HttpError as e:
        # API-level errors are propagated to the caller
        raise e
    except Exception as e:
        printException(e)
        return None
def threaded_per_region(q, params):
    """
    Helper for multithreading on a per-region basis.

    Pulls region names from the queue and invokes params['method'] with the
    params dict updated to carry the current region.

    :param q:       Queue of region names
    :param params:  Dict holding the worker 'method' and its arguments
    :return:        Never returns
    """
    while True:
        try:
            params['region'] = q.get()
            params['method'](params)
        except Exception as e:
            printException(e)
        finally:
            q.task_done()
def get_group_membership(q, params):
    """
    Worker thread: resolve IAM group membership for queued users.

    :param q:       Queue of user objects (dicts with a 'UserName' entry)
    :param params:  Dict providing 'iam_client' and the shared 'user_info' dict
    :return:        Never returns
    """
    iam_client = params['iam_client']
    user_info = params['user_info']
    while True:
        try:
            user = q.get()
            user_name = user['UserName']
            groups = iam_client.list_groups_for_user(UserName=user_name)['Groups']
            # Record only the group names for this user
            user_info[user_name] = {}
            user_info[user_name]['groups'] = [group['GroupName'] for group in groups]
            show_status(user_info, newline=False)
        except Exception as e:
            printException(e)
        finally:
            q.task_done()
def save_blob_as_json(filename, blob, force_write, debug):
    """
    Create/modify a file and save a Python object as JSON.

    :param filename:     Destination file path
    :param blob:         Object to serialize
    :param force_write:  When True, overwrite an existing file without prompting
    :param debug:        When True, pretty-print the JSON output
    :return:             None
    """
    try:
        if prompt_4_overwrite(filename, force_write):
            indentation = 4 if debug else None
            with open(filename, 'wt') as f:
                serialized = json.dumps(blob, indent=indentation, separators=(',', ': '), sort_keys=True, cls=CustomJSONEncoder)
                print('%s' % serialized, file=f)
    except Exception as e:
        printException(e)
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single DB parameter group, keeping only modifiable parameters,
    and store it under a locally-generated ID.

    :param global_params:    Parameters shared across all regions (unused here)
    :param region:           Region the parameter group belongs to
    :param parameter_group:  Raw parameter group object (modified in place)
    :return:                 None
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    api_client = api_clients[region]
    try:
        request_params = {'DBParameterGroupName': parameter_group['name']}
        parameters = handle_truncated_response(api_client.describe_db_parameters, request_params, ['Parameters'])['Parameters']
        manage_dictionary(parameter_group, 'parameters', {})
        for parameter in parameters:
            # Discard non-modifiable parameters
            if not parameter['IsModifiable']:
                continue
            parameter_name = parameter.pop('ParameterName')
            parameter_group['parameters'][parameter_name] = parameter
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % parameter_group['name'])
    # Save
    parameter_group_id = self.get_non_aws_id(parameter_group['name'])
    self.parameter_groups[parameter_group_id] = parameter_group
def update_cloudformation_resource_from_template(api_client, resource_type, name, template_path, template_parameters=None, tags=None, quiet=False, wait_for_completion=False):
    """
    Update an existing CloudFormation stack or stack set from a template.

    :param api_client:           CloudFormation API client
    :param resource_type:        'stack' or 'stack_set'
    :param name:                 Name of the resource to update
    :param template_path:        Path to the CloudFormation template
    :param template_parameters:  Optional list of template parameters
    :param tags:                 Optional list of tags to apply
    :param quiet:                When True, suppress progress messages
    :param wait_for_completion:  When True, block until the update finishes
    :return:                     None
    """
    # Bug fix: avoid mutable default arguments -- the original appended the
    # timestamp tag to the shared default list, so it grew on every call
    template_parameters = [] if template_parameters is None else template_parameters
    tags = list(tags) if tags else []
    # Initialized up front so the except clause never sees an unbound name
    api_resource_type = None
    try:
        update = getattr(api_client, 'update_%s' % resource_type)
        api_resource_type = snake_to_camel(resource_type)
        # Add a timestamp tag so each update is distinguishable
        tags.append({'Key': 'OpinelTimestamp', 'Value': str(time.time())})
        params = prepare_cloudformation_params(name, template_path, template_parameters, api_resource_type, tags)
        if not quiet:
            printInfo('Updating the %s...' % resource_type, newLine=False)
        response = update(**params)
        # Only stack set updates return an operation ID to wait on
        operation_id = response['OperationId'] if resource_type == 'stack_set' else None
        if wait_for_completion:
            cloudformation_wait(api_client, resource_type, name, operation_id)
    except Exception as e:
        # "No updates" from a stack update is not an error. Bug fix: the
        # original tested type(e.response == dict), which is always true
        if api_resource_type == 'Stack' and hasattr(e, 'response') and isinstance(e.response, dict) and e.response['Error']['Code'] == 'ValidationError' and e.response['Error']['Message'] == 'No updates are to be performed.':
            printInfo(' Already up to date.')
        else:
            printException(e)
            printError(' Failed.')
def _fetch_region(self, q, params):
    """
    Worker thread: fetch all targets for queued (region, targets) work items.

    :param q:       Queue of (region, targets) tuples
    :param params:  Dict providing 'api_service', 'credentials' and the target queue 'q'
    :return:        Never returns
    """
    global api_clients
    try:
        while True:
            try:
                region, targets = q.get()
                self.init_region_config(region)
                # One API client per region, shared through the api_clients global
                client = connect_service(params['api_service'], params['credentials'], region, silent = True)
                api_clients[region] = client
                # TODO : something here for single_region stuff
                self.regions[region].fetch_all(client, self.fetchstatuslogger, params['q'], targets)
                self.fetchstatuslogger.counts['regions']['fetched'] += 1
            except Exception as e:
                printException(e)
            finally:
                q.task_done()
    except Exception as e:
        printException(e)
def __open_file(self, config_filename, force_write, quiet=False):
    """
    Open the report file for writing, creating parent directories as needed.

    :param config_filename:  Path of the file to open
    :param force_write:      When True, overwrite an existing file without prompting
    :param quiet:            When True, do not print the progress message
    :return:                 Writable file object, or None if declined/failed
    """
    if not quiet:
        printInfo('Saving config...')
    if not prompt_4_overwrite(config_filename, force_write):
        return None
    try:
        config_dirname = os.path.dirname(config_filename)
        if not os.path.isdir(config_dirname):
            os.makedirs(config_dirname)
        return open(config_filename, 'wt')
    except Exception as e:
        printException(e)
def _fetch_target(self, q, params):
    """
    Worker thread: run queued (callback, region, target) work items.

    :param q:       Queue of (method, region, target) tuples
    :param params:  Extra parameters forwarded to each callback
    :return:        Never returns
    """
    try:
        while True:
            try:
                method, region, target = q.get()
                # store_target items carry their type on the target itself;
                # parse_<type> methods encode it in their name
                target_type = target['scout2_target_type'] if method.__name__ == 'store_target' else method.__name__.replace('parse_', '') + 's'
                method(params, region, target)
                self.fetchstatuslogger.counts[target_type]['fetched'] += 1
                self.fetchstatuslogger.show()
            except Exception as e:
                printException(e)
            finally:
                q.task_done()
    except Exception as e:
        printException(e)
def connect_service(service, credentials, region_name=None, config=None, silent=False):
    """
    Instantiate an AWS API client.

    :param service:      AWS service name (e.g. 'iam', 'ec2')
    :param credentials:  Dict with AccessKeyId / SecretAccessKey / SessionToken
    :param region_name:  Optional region for both the session and the client
    :param config:       Optional botocore client configuration
    :param silent:       When True, do not print the connection message
    :return:             The API client, or None on failure
    """
    api_client = None
    try:
        session_params = {
            'aws_access_key_id': credentials['AccessKeyId'],
            'aws_secret_access_key': credentials['SecretAccessKey'],
            'aws_session_token': credentials['SessionToken'],
        }
        client_params = {'service_name': service.lower()}
        if region_name:
            client_params['region_name'] = region_name
            session_params['region_name'] = region_name
        if config:
            client_params['config'] = config
        aws_session = boto3.session.Session(**session_params)
        if not silent:
            info_message = 'Connecting to AWS %s' % service
            if region_name:
                info_message = info_message + ' in %s' % region_name
            printInfo('%s...' % info_message)
        api_client = aws_session.client(**client_params)
    except Exception as e:
        printException(e)
    return api_client
def authenticate(self, key_file=None, user_account=None, service_account=None, **kargs):
    """
    Authenticate against the cloud provider using a service principal.

    NOTE(review): despite the GCP-oriented signature and the original
    google-auth docstring, this implementation authenticates to *Azure* using
    environment variables (SUBSCRIPTION_ID, TENANT_ID, CLIENT, KEY) -- confirm
    which provider this class is meant to target.

    :param key_file:         Unused in this Azure-based implementation
    :param user_account:     Unused in this Azure-based implementation
    :param service_account:  Unused in this Azure-based implementation
    :return:                 True on success, False on failure
    """
    try:
        # TODO this is temporary
        import os
        # Azure subscription
        SUBSCRIPTION_ID = os.environ['SUBSCRIPTION_ID']
        # Tenant ID for your Azure Subscription
        TENANT_ID = os.environ['TENANT_ID']
        # Your Service Principal App ID
        CLIENT = os.environ['CLIENT']
        # Your Service Principal Password
        KEY = os.environ['KEY']
        # TODO this is for AWS -- the attribute name predates multi-cloud support
        self.aws_account_id = TENANT_ID
        credentials = ServicePrincipalCredentials(client_id=CLIENT, secret=KEY, tenant=TENANT_ID)
        self.credentials = AzureCredentials(credentials, SUBSCRIPTION_ID)
        return True
    except Exception as e:
        printError('Failed to authenticate to Azure')
        printException(e)
        return False
def _fetch_region(self, q, params):
    """
    Worker thread: fetch all configured targets for queued regions.

    :param q:       Queue of region names
    :param params:  Dict providing 'api_service', 'credentials', 'targets'
                    and the target queue 'q'
    :return:        Never returns
    """
    global api_clients
    try:
        while True:
            try:
                region = q.get()
                self.init_region_config(region)
                # One API client per region, shared through the api_clients global
                client = connect_service(params['api_service'], params['credentials'], region)
                api_clients[region] = client
                self.regions[region].fetch_all(client, self.fetchstatuslogger, params['q'], params['targets'])
                self.fetchstatuslogger.counts['regions']['fetched'] += 1
            except Exception as e:
                printException(e)
            finally:
                q.task_done()
    except Exception as e:
        printException(e)
def _fetch_targets(self, api_client, q, target, list_params):
    """
    Make the list API call described by ``target`` and queue each returned
    resource with its parse callback.

    :param api_client:   Service API client
    :param q:            Queue receiving (callback, region, target) work items
    :param target:       Tuple: (target_type, response_attribute, list_method_name,
                         list_params, ignore_list_error)
    :param list_params:  NOTE(review): immediately overwritten by the unpacking
                         below -- this parameter is effectively unused; confirm
                         before removing it from the signature
    :return:             None
    """
    # Handle & format the target type
    target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
    list_method = getattr(api_client, list_method_name)
    try:
        targets = handle_truncated_response(list_method, list_params, [response_attribute])[response_attribute]
    except Exception as e:
        if not ignore_list_error:
            printException(e)
        targets = []
    setattr(self, '%s_count' % target_type, len(targets))
    self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
    region = api_client._client_config.region_name
    # Queue resources
    for target in targets:
        # Singularize the target type to find the parser, e.g. parse_instance
        callback = getattr(self, 'parse_%s' % target_type[0:-1])
        if q:
            # Add to the queue
            q.put((callback, region, target))
def get_zones(self, client, project):
    """
    Return a list of all the zones for a project.

    Uses an instance attribute to cache the list so the API is queried once.

    :param client:   GCE API client
    :param project:  Project identifier
    :return:         List of zone names, or None on unexpected errors
    """
    try:
        if self.zones:
            return self.zones
        items = client.zones().list(project=project).execute()['items']
        self.zones = [zone['name'] for zone in items]
        return self.zones
    except HttpError as e:
        # API errors bubble up to the caller
        raise e
    except Exception as e:
        printException(e)
        return None
def run(self, aws_config, skip_dashboard = False):
    """
    Run every enabled rule of the ruleset against the fetched configuration.

    Results are stored under aws_config['services'][<service>][<rule_type>],
    keyed by rule, with the matched items and dashboard counters.

    :param aws_config:      Full configuration dictionary (modified in place)
    :param skip_dashboard:  When True, do not compute dashboard counters
    :return:                None
    """
    # Clean up existing findings
    for service in aws_config['services']:
        aws_config['services'][service][self.ruleset.rule_type] = {}
    # Process each rule
    for finding_path in self.rules:
        for rule in self.rules[finding_path]:
            if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                continue
            printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
            finding_path = rule.path
            path = finding_path.split('.')
            # The first path component is the service name
            service = path[0]
            manage_dictionary(aws_config['services'][service], self.ruleset.rule_type, {})
            aws_config['services'][service][self.ruleset.rule_type][rule.key] = {}
            aws_config['services'][service][self.ruleset.rule_type][rule.key]['description'] = rule.description
            aws_config['services'][service][self.ruleset.rule_type][rule.key]['path'] = rule.path
            # Copy over the optional rule attributes
            for attr in ['level', 'id_suffix', 'display_path']:
                if hasattr(rule, attr):
                    aws_config['services'][service][self.ruleset.rule_type][rule.key][attr] = getattr(rule, attr)
            try:
                setattr(rule, 'checked_items', 0)
                # recurse() walks the config and returns the items flagged by the rule
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['items'] = recurse(aws_config['services'], aws_config['services'], path, [], rule, True)
                if skip_dashboard:
                    continue
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['dashboard_name'] = rule.dashboard_name
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['checked_items'] = rule.checked_items
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['flagged_items'] = len(aws_config['services'][service][self.ruleset.rule_type][rule.key]['items'])
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['service'] = rule.service
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['rationale'] = rule.rationale if hasattr(rule, 'rationale') else 'N/A'
            except Exception as e:
                printException(e)
                printError('Failed to process rule defined in %s' % rule.filename)
                # Fallback if process rule failed to ensure report creation and data dump still happen
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['checked_items'] = 0
                aws_config['services'][service][self.ruleset.rule_type][rule.key]['flagged_items'] = 0
def get_policies(iam_client, managed_policies, resource_type, resource_name):
    """
    Fetch all policy documents that apply to an IAM resource: attached managed
    policies, inline policies, and (for users) the policies of their groups.

    :param iam_client:        IAM API client
    :param managed_policies:  Cache of managed policy documents, updated in place
    :param resource_type:     'user', 'group', or 'role' (lowercase, singular)
    :param resource_name:     Name of the resource
    :return:                  List of policy documents
    """
    print('Fetching policies for IAM %s %s...' % (resource_type, resource_name))
    fetched_policies = []
    # Managed policies
    list_policy_method = getattr(iam_client, 'list_attached_' + resource_type + '_policies')
    args = {}
    args[resource_type.title() + 'Name'] = resource_name
    try:
        policies = list_policy_method(**args)['AttachedPolicies']
        for policy in policies:
            try:
                policy_arn = policy['PolicyArn']
                fetched_policies.append(get_managed_policy_document(iam_client, policy_arn, managed_policies))
            except Exception as e:
                printException(e)
    except Exception as e:
        printException(e)
    # Inline policies
    get_policy_method = getattr(iam_client, 'get_' + resource_type + '_policy')
    list_policy_method = getattr(iam_client, 'list_' + resource_type + '_policies')
    # Bug fix: the original reused the 'PolicyNames' key string as the list
    # variable; when the list call failed it then iterated over the characters
    # of that string. Start from an empty list instead.
    policy_names = []
    try:
        policy_names = list_policy_method(**args)['PolicyNames']
    except Exception as e:
        printException(e)
    for policy_name in policy_names:
        try:
            args['PolicyName'] = policy_name
            policy_document = get_policy_method(**args)['PolicyDocument']
            fetched_policies.append(policy_document)
        except Exception as e:
            printException(e)
    # Group policies (for users only)
    if resource_type == 'user':
        for group in iam_client.list_groups_for_user(UserName=resource_name)['Groups']:
            fetched_policies = fetched_policies + get_policies(iam_client, managed_policies, 'group', group['GroupName'])
    return fetched_policies
def read_creds_from_aws_credentials_file(profile_name, credentials_file=aws_credentials_file):
    """
    Read credentials for a given profile from an AWS credentials file.

    :param profile_name:      Name of the profile section to read
    :param credentials_file:  Path of the credentials file (defaults to the
                              standard ~/.aws/credentials location)
    :return:                  Credentials dict (entries stay at their init_creds()
                              defaults when not found in the file)
    """
    credentials = init_creds()
    profile_found = False
    try:
        # Make sure the ~.aws folder exists
        if not os.path.exists(aws_config_dir):
            os.makedirs(aws_config_dir)
        with open(credentials_file, 'rt') as cf:
            for line in cf:
                # Track which profile section we are currently inside
                profile_line = re_profile_name.match(line)
                if profile_line:
                    if profile_line.groups()[0] == profile_name:
                        profile_found = True
                    else:
                        profile_found = False
                if profile_found:
                    if re_access_key.match(line):
                        credentials['AccessKeyId'] = line.split("=")[1].strip()
                    elif re_secret_key.match(line):
                        credentials['SecretAccessKey'] = line.split("=")[1].strip()
                    elif re_mfa_serial.match(line):
                        credentials['SerialNumber'] = (line.split('=')[1]).strip()
                    elif re_session_token.match(line) or re_security_token.match(line):
                        # Token values may themselves contain '=' characters,
                        # so re-join everything after the first one
                        credentials['SessionToken'] = ('='.join(x for x in line.split('=')[1:])).strip()
                    elif re_expiration.match(line):
                        credentials['Expiration'] = ('='.join(x for x in line.split('=')[1:])).strip()
    except Exception as e:
        # Silent if error is due to no ~/.aws/credentials file (errno 2 == ENOENT)
        if not hasattr(e, 'errno') or e.errno != 2:
            printException(e)
    return credentials
def create_groups(iam_client, groups):
    """
    Create a number of IAM groups, silently handling the case where the
    group already exists.

    :param iam_client: AWS API client for IAM
    :param groups: Group name (str) or list of group names to create
    :return: List of {'groupname': ..., 'errors': ...} dicts, one per group,
             where 'errors' lists the IAM actions that failed
    """
    groups_data = []
    if not isinstance(groups, list):
        # Accept a single group name as a convenience
        groups = [groups]
    for group in groups:
        errors = []
        try:
            printInfo('Creating group %s...' % group)
            iam_client.create_group(GroupName=group)
        except Exception as e:
            # An already-existing group is not an error
            # NOTE(review): assumes e is a botocore ClientError with a
            # .response attribute — other exception types would raise here
            if e.response['Error']['Code'] != 'EntityAlreadyExists':
                printException(e)
                errors.append('iam:creategroup')
        groups_data.append({'groupname': group, 'errors': errors})
    return groups_data
def get_regions(self, client, project):
    """
    Return the list of all region names for a project, caching the result
    on the instance so the API is only queried once.

    :param client: GCP compute API client
    :param project: Project identifier
    :return: List of region names, or None when a non-HTTP error occurs
    :raises HttpError: re-raised unchanged so callers can handle API errors
    """
    try:
        # Serve from the instance-level cache when already populated
        if self.regions:
            return self.regions
        regions_list = [region['name'] for region in
                        client.regions().list(project=project).execute()['items']]
        self.regions = regions_list
        return regions_list
    except HttpError:
        # Fixed: bare raise preserves the original traceback ("raise e" did
        # not under Python 2)
        raise
    except Exception as e:
        printException(e)
        return None
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single RDS DB parameter group, keeping only the parameters that
    are modifiable, and store it on the config object.

    :param global_params:
    :param region: Region whose API client should be used
    :param parameter_group: Raw parameter group dict from the RDS API
    """
    parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
    parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
    client = api_clients[region]
    try:
        params = handle_truncated_response(
            client.describe_db_parameters,
            {'DBParameterGroupName': parameter_group['name']},
            ['Parameters'])['Parameters']
        manage_dictionary(parameter_group, 'parameters', {})
        for param in params:
            # Discard non-modifiable parameters
            if param['IsModifiable']:
                param_name = param.pop('ParameterName')
                parameter_group['parameters'][param_name] = param
    except Exception as e:
        printException(e)
        printError('Failed fetching DB parameters for %s' % parameter_group['name'])
    # Save
    group_id = self.get_non_provider_id(parameter_group['name'])
    self.parameter_groups[group_id] = parameter_group
def __get_inline_policies(self, api_client, iam_resource_type, resource_id, resource_name):
    """
    Fetch every inline policy attached to a given IAM resource and parse
    the permissions it grants.

    :param api_client: AWS API client for IAM
    :param iam_resource_type: 'user', 'group', or 'role'
    :param resource_id: Internal identifier of the resource
    :param resource_name: Name of the resource
    :return: Dict of policies keyed by internal policy id
    """
    policies = {}
    get_policy = getattr(api_client, 'get_' + iam_resource_type + '_policy')
    list_policies = getattr(api_client, 'list_' + iam_resource_type + '_policies')
    api_args = {iam_resource_type.title() + 'Name': resource_name}
    try:
        names = list_policies(**api_args)['PolicyNames']
    except Exception as e:
        printException(e)
        return policies
    try:
        for policy_name in names:
            api_args['PolicyName'] = policy_name
            document = get_policy(**api_args)['PolicyDocument']
            policy_id = self.get_non_aws_id(policy_name)
            manage_dictionary(policies, policy_id, {})
            policies[policy_id]['PolicyDocument'] = document
            policies[policy_id]['name'] = policy_name
            self.__parse_permissions(policy_id, document, 'inline_policies',
                                     iam_resource_type + 's', resource_id)
    except Exception as e:
        printException(e)
    return policies
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls):
    """
    Get key-specific information (server-side encryption, acls, etc...)

    :param api_client:
    :param bucket_name:
    :param bucket:
    :param check_encryption:
    :param check_acls:
    :return:
    """
    bucket['keys'] = []
    listing = handle_truncated_response(api_client.list_objects,
                                        {'Bucket': bucket_name}, ['Contents'])
    bucket['keys_count'] = len(listing['Contents'])
    update_status(0, bucket['keys_count'], 'keys')
    for processed, key in enumerate(listing['Contents'], start=1):
        key['name'] = key.pop('Key')
        key['LastModified'] = str(key['LastModified'])
        if check_encryption:
            try:
                # The encryption configuration is only accessible via an HTTP header, only returned when requesting one object at a time...
                obj = api_client.get_object(Bucket=bucket_name, Key=key['name'])
                key['ServerSideEncryption'] = obj.get('ServerSideEncryption', None)
                key['SSEKMSKeyId'] = obj.get('SSEKMSKeyId', None)
            except Exception as e:
                printException(e)
                continue
        if check_acls:
            try:
                key['grantees'] = get_s3_acls(api_client, bucket_name, bucket,
                                              key_name=key['name'])
            except Exception as e:
                continue
        # Save it
        bucket['keys'].append(key)
        update_status(processed, bucket['keys_count'], 'keys')
def read_profile_from_aws_config_file(profile_name, config_file = aws_config_file):
    """
    Read a role-profile definition from the AWS config file.

    :param profile_name: Name of the profile section to read
    :param config_file: Path to the AWS config file
    :return: Tuple (role_arn, source_profile, mfa_serial, external_id)
    """
    role_arn = None
    source_profile = 'default'
    mfa_serial = None
    external_id = None
    in_profile = False
    try:
        with open(config_file, 'rt') as handle:
            for line in handle:
                header = re_profile_name.match(line)
                if header:
                    # Sections may be declared as "[profile name]": keep the last word
                    in_profile = (header.groups()[0].split()[-1] == profile_name)
                if not in_profile:
                    continue
                if re_role_arn.match(line):
                    role_arn = line.split('=')[1].strip()
                elif re_source_profile.match(line):
                    source_profile = line.split('=')[1].strip()
                elif re_mfa_serial.match(line):
                    mfa_serial = line.split('=')[1].strip()
                elif re_external_id.match(line):
                    external_id = line.split('=')[1].strip()
    except Exception as e:
        # Silent if error is due to no .aws/config file (errno 2)
        if not hasattr(e, 'errno') or e.errno != 2:
            printException(e)
    return role_arn, source_profile, mfa_serial, external_id
def enable_mfa(iam_client, user, qrcode_file=None):
    """
    Create and activate an MFA virtual device

    :param iam_client: AWS API client for IAM
    :param user: Name of the IAM user the device is created for
    :param qrcode_file: Optional path where the QR code PNG is also saved
    :return: Serial number (ARN) of the newly enabled MFA device
    :raises Exception: when the user aborts or device creation fails
    """
    mfa_serial = ''
    tmp_qrcode_file = None
    try:
        printInfo('Enabling MFA for user \'%s\'...' % user)
        mfa_device = iam_client.create_virtual_mfa_device(
            VirtualMFADeviceName=user)['VirtualMFADevice']
        mfa_serial = mfa_device['SerialNumber']
        mfa_png = mfa_device['QRCodePNG']
        mfa_seed = mfa_device['Base32StringSeed']
        tmp_qrcode_file = display_qr_code(mfa_png, mfa_seed)
        # Fixed: "!= None" replaced with the idiomatic identity test
        if qrcode_file is not None:
            # NOTE(review): QRCodePNG is binary image data; writing in text
            # mode ('wt') may fail on Python 3 — confirm and consider 'wb'
            with open(qrcode_file, 'wt') as f:
                f.write(mfa_png)
        while True:
            mfa_code1 = prompt_4_mfa_code()
            mfa_code2 = prompt_4_mfa_code(activate=True)
            if mfa_code1 == 'q' or mfa_code2 == 'q':
                # User gave up: best-effort cleanup of the unactivated device
                try:
                    delete_virtual_mfa_device(iam_client, mfa_serial)
                except Exception as e:
                    printException(e)
                raise Exception
            try:
                iam_client.enable_mfa_device(UserName=user,
                                             SerialNumber=mfa_serial,
                                             AuthenticationCode1=mfa_code1,
                                             AuthenticationCode2=mfa_code2)
                # Fixed: message typo ("Succesfully ... for for")
                printInfo(
                    'Successfully enabled MFA for \'%s\'. The device\'s ARN is \'%s\'.'
                    % (user, mfa_serial))
                break
            except Exception as e:
                # Wrong or expired codes: report and prompt again
                printException(e)
    except Exception as e:
        printException(e)
        # We shouldn't return normally because if we've gotten here
        # the user has potentially not set up the MFA device
        # correctly, so we don't want to e.g. write the .no-mfa
        # credentials file or anything.
        raise
    finally:
        if tmp_qrcode_file is not None:
            # This is a tempfile.NamedTemporaryFile, so simply closing
            # it will also unlink it.
            tmp_qrcode_file.close()
    return mfa_serial
def _fetch_targets(self, api_client, q, target):
    '''
    Make an API call defined in metadata.json. Parse the returned object
    as implemented in the "parse_[object name]" method, or store it as-is
    when no parser exists.

    :param api_client: Service API client
    :param q: Work queue the discovered targets are pushed onto (may be falsy)
    :param target: Tuple (target_type, response_attribute, list_method_name,
                   list_params, ignore_list_error)
    :return:
    '''
    # Handle & format the target type
    target_type, response_attribute, list_method_name, list_params, ignore_list_error = target
    list_method = getattr(api_client, list_method_name)
    try:
        targets = handle_truncated_response(
            list_method, list_params, [response_attribute])[response_attribute]
    except Exception as e:
        if not ignore_list_error:
            printException(e)
        targets = []
    setattr(self, '%s_count' % target_type, len(targets))
    self.fetchstatuslogger.counts[target_type]['discovered'] += len(targets)
    region = api_client._client_config.region_name
    # Queue resources
    for target in targets:
        # Use the parse_<singular type> callback when one is defined,
        # falling back to generic storage otherwise
        try:
            callback = getattr(self, 'parse_%s' % target_type[0:-1])
        except AttributeError:
            # Fixed: was a bare "except:" that also swallowed
            # KeyboardInterrupt/SystemExit
            callback = self.store_target
        target['scout2_target_type'] = target_type
        if q:
            # Add to the queue
            q.put((callback, region, target))
def _fetch_target(self, q, params):
    """
    Worker loop: pull (method, region, target) work items off the queue,
    run the parser, and track per-type fetch counts. Throttled API calls
    are re-queued with a pristine copy of the target.
    """
    try:
        while True:
            try:
                method, region, target = q.get()
                # Pristine copy so the item can be re-queued if throttled
                backup = copy.deepcopy(target)
                target_type = (target['scout2_target_type']
                               if method.__name__ == 'store_target'
                               else method.__name__.replace('parse_', '') + 's')
                method(params, region, target)
                self.fetchstatuslogger.counts[target_type]['fetched'] += 1
                self.fetchstatuslogger.show()
            except Exception as e:
                if is_throttled(e):
                    q.put((method, region, backup))
                else:
                    printException(e)
            finally:
                q.task_done()
    except Exception as e:
        printException(e)
def main():
    """
    Entry point: list resources matching a single rule for one or more AWS
    profiles, using a previously generated Scout2 report.

    :return: 42 when the opinel requirements check fails, None otherwise
    """
    # Parse arguments
    parser = ListallArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Support multiple environments
    for profile_name in args.profile:
        # Load the config; the 'service_list' lookup doubles as a sanity
        # check that the report file has the expected structure
        try:
            report = Scout2Report(profile_name, args.report_dir, args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            services = aws_config['service_list']
        except Exception as e:
            printException(e)
            printError('Error, failed to load the configuration for profile %s' % profile_name)
            continue

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            rule_filename = args.config
            ruleset = TmpRuleset(rule_dirs = [os.getcwd()], rule_filename = args.config, rule_args = args.config_args)
        elif len(args.path) > 0:
            # Create a local tmp rule targeting the requested path
            rule_dict = {'description': 'artifact'}
            rule_dict['path'] = args.path[0]
            rule_dict['conditions'] = []
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs = [os.getcwd()], rule_filename = rule_filename, rule_args = [])
        else:
            printError('Error, you must provide either a rule configuration file or the path to the resources targeted.')
            continue

        # Process the rule
        pe = ProcessingEngine(ruleset)
        pe.run(aws_config, skip_dashboard = True)

        # Retrieve items
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key]['items']

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir, '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            except Exception:
                # 4. Fall back to printing the object name
                # Fixed: was a bare "except:" that also swallowed
                # SystemExit/KeyboardInterrupt
                rule.keys = ['name']

        # Prepare the output format
        # NOTE(review): assumes args.format_file is a non-empty list — confirm
        # the argument parser supplies a default
        (lines, template) = format_listall_output(args.format_file[0], None, args.format, rule)

        # Print the output
        printInfo(generate_listall_output(lines, resources, aws_config, template, []))
def match_security_groups_and_resources_callback(aws_config, current_config, path, current_path, resource_id, callback_args):
    """
    Record, on each security group, which resources reference it.

    Walks the security-group references of the resource at current_path and
    appends the resource id under sg['used_by'][service]['resource_type'],
    grouped by resource status when callback_args provides a 'status_path'.

    :param aws_config: Full AWS configuration dictionary
    :param current_config: (unused here)
    :param path: (unused here)
    :param current_path: Path of the resource container being visited
    :param resource_id: Id of the resource being visited
    :param callback_args: Dict with 'sg_list_attribute_name', and optionally
                          'sg_id_attribute_name', 'resource_id_path', 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path), [ resource_id ])
    resource = get_object_at(aws_config, original_resource_path)
    if not 'resource_id_path' in callback_args:
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # The real resource sits deeper than current_path: follow the extra path
        resource_path = combine_paths(copy.deepcopy(current_path), callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    if 'status_path' in callback_args:
        # Normalize status values so they can be used as dictionary keys
        status_path = combine_paths(copy.deepcopy(original_resource_path), callback_args['status_path'])
        resource_status = get_object_at(aws_config, status_path).replace('.', '_')
    else:
        resource_status = None
    # When the path does not contain 'vpcs' the VPC id must be resolved per
    # security group (via sg_map below)
    unknown_vpc_id = True if current_path[4] != 'vpcs' else False
    # Issue 89 & 91 : can instances have no security group?
    try:
        try:
            sg_attribute = get_object_at(resource, callback_args['sg_list_attribute_name'])
        except:
            # Resource has no security-group attribute: nothing to record
            return
        if type(sg_attribute) != list:
            sg_attribute = [ sg_attribute ]
        for resource_sg in sg_attribute:
            if type(resource_sg) == dict:
                # Reference is an object: extract the sg id attribute
                sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            else:
                sg_id = resource_sg
            if unknown_vpc_id:
                # presumably sg_map is a module-level map of sg id -> metadata
                # including 'vpc_id' — verify against the rest of the file
                vpc_id = sg_map[sg_id]['vpc_id']
                sg_base_path = copy.deepcopy(current_path[0:4])
                sg_base_path[1] = 'ec2'
                sg_base_path = sg_base_path + [ 'vpcs', vpc_id, 'security_groups' ]
            else:
                sg_base_path = copy.deepcopy(current_path[0:6])
                sg_base_path[1] = 'ec2'
                sg_base_path.append('security_groups')
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            # With a status, entries are grouped per status in a dict of lists;
            # without one, they go into a flat list
            manage_dictionary(sg['used_by'][service]['resource_type'], resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(sg['used_by'][service]['resource_type'][resource_type], resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(resource_id)
    except Exception as e:
        # ELBs and Lambda functions are known to lack the expected layout; stay silent
        if resource_type in ['elbs', 'functions']:
            pass
        else:
            region = current_path[3]
            vpc_id = current_path[5]
            # EC2-Classic resources have no VPC: not an error
            if vpc_id == ec2_classic:
                pass
            else:
                printError('Failed to parse %s in %s in %s' % (resource_type, vpc_id, region))
                printException(e)