def _backup_host_auth_mapping(profile):
    """Archive the host auth mapping file of ``profile``.

    When a profile is removed, its host auth mapping is kept for the
    user's reference by renaming it to
    '(DELETED PROFILE)<profile_name>_host_auth_mapping'. The time
    stamps recorded inside the mapping file identify the various forms
    and times in which the said profile existed.
    """
    mapping_name = profile + PROFILE_HOST_AUTH_MAPPING_SUFFIX
    current_path = get_config_path(mapping_name)
    # Nothing to archive if the profile never produced a mapping file.
    if not os.path.isfile(current_path):
        return
    archived_path = get_config_path('(DELETED PROFILE)' + mapping_name)
    os.rename(current_path, archived_path)
def _create_main_inventory(vault, success_hosts, success_port_map, auth_map,
                           profile):
    """Write the vault-encrypted main hosts inventory for ``profile``.

    Builds the inventory dict from the discovered hosts, their ports and
    their working auths, dumps it as YAML through the vault, and logs it.
    """
    inventory = make_inventory_dict(success_hosts, success_port_map, auth_map)
    inventory_path = utilities.get_config_path(profile + PROFILE_HOSTS_SUFIX)
    vault.dump_as_yaml_to_file(inventory, inventory_path)
    log_yaml_inventory('Main inventory', inventory)
def _do_command(self):
    """Remove one profile (--name) or every profile (--all).

    Removing a profile also deletes its generated hosts inventory and
    archives its host auth mapping file via _backup_host_auth_mapping.
    """
    if not os.path.isfile(utilities.PROFILES_PATH):
        print(_("All network profiles removed"))
        return
    if self.options.name:
        vault = get_vault(self.options.vaultfile)
        profile = self.options.name
        profiles_list = vault.load_as_json(utilities.PROFILES_PATH)
        profile_found = False
        for index, curr_profile in enumerate(profiles_list):
            if curr_profile.get('name') == profile:
                del profiles_list[index]
                # BUG FIX: interpolate after translation (was
                # _('... %s' % profile)) so the msgid stays constant for
                # the translation catalog, matching the message below.
                print(_('Profile "%s" was removed') % profile)
                profile_found = True
                break
        if not profile_found:
            print(_("No such profile: '%s'") % profile)
            sys.exit(1)
        vault.dump_as_json_to_file(profiles_list, utilities.PROFILES_PATH)
        # removes inventory associated with the profile
        profile_hosts_path = get_config_path(profile + PROFILE_HOSTS_SUFIX)
        if os.path.isfile(profile_hosts_path):
            os.remove(profile_hosts_path)
        _backup_host_auth_mapping(profile)
    # removes all inventories ever.
    elif self.options.all:
        os.remove(utilities.PROFILES_PATH)
        wildcard_hosts_path = get_config_path('*' + PROFILE_HOSTS_SUFIX)
        for hosts_file in glob.glob(wildcard_hosts_path):
            os.remove(hosts_file)
            # Recover the profile name from the inventory file name so
            # its auth mapping can be archived as well.
            base_name = os.path.basename(hosts_file)
            profile = base_name[:base_name.rfind(PROFILE_HOSTS_SUFIX)]
            _backup_host_auth_mapping(profile)
        print(_("All network profiles removed"))
def _create_hosts_auths_file(success_auth_map, profile):
    """Append a redacted record of the successful host/auth mapping.

    Each call writes a time-stamped section listing, per host, the auth
    strings (secrets redacted) that connected successfully. The file is
    opened in append mode so repeated runs accumulate a history.
    """
    mapping_path = utilities.get_config_path(
        profile + PROFILE_HOST_AUTH_MAPPING_SUFFIX)
    # Separator literals are preserved byte-for-byte from the
    # established mapping-file format.
    section_rule = ('\n-'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---'
                    '---\n')
    host_rule = '\n----------------------\n'
    footer = ('\n*******************************'
              '*********************************'
              '**************\n\n')
    parts = [time.strftime("%c") + section_rule]
    for host, auths in iteritems(success_auth_map):
        parts.append(host + host_rule)
        for auth in auths:
            parts.append(redacted_auth_string(auth))
        parts.append('\n\n')
    parts.append(footer)
    with open(mapping_path, 'a') as mapping_file:
        mapping_file.write(''.join(parts))
def _do_command(self):
    """Run connection discovery and the fact scan for a profile.

    Resolves the profile and its credentials, discovers reachable hosts
    with each credential in turn (skipped with --cache), writes the
    auth mapping and main inventory, then runs the inventory scan and
    reports the output locations.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    vault, vault_pass = get_vault_and_password(self.options.vaultfile)
    profile_found = False
    profile_auth_list = []
    profile_ranges = []
    profile_port = 22
    profile = self.options.profile
    forks = self.options.ansible_forks \
        if self.options.ansible_forks else '50'
    report_path = os.path.abspath(
        os.path.normpath(self.options.report_path))
    hosts_yml = profile + PROFILE_HOSTS_SUFIX
    hosts_yml_path = utilities.get_config_path(hosts_yml)

    # Checks if profile exists and stores information
    # about that profile for later use.
    if not os.path.isfile(utilities.PROFILES_PATH):
        print(_('No profiles exist yet.'))
        sys.exit(1)
    if not os.path.isfile(utilities.CREDENTIALS_PATH):
        print(_('No auth credentials exist yet.'))
        sys.exit(1)

    profiles_list = vault.load_as_json(utilities.PROFILES_PATH)
    for curr_profile in profiles_list:
        if self.options.profile == curr_profile.get('name'):
            profile_found = True
            profile_ranges = curr_profile.get('hosts')
            profile_auths = curr_profile.get('auth')
            profile_port = curr_profile.get('ssh_port')
            cred_list = vault.load_as_json(utilities.CREDENTIALS_PATH)
            # Resolve each auth reference to its full credential record.
            for auth in profile_auths:
                for cred in cred_list:
                    if auth.get('id') == cred.get('id'):
                        profile_auth_list.append(cred)
                        break
            break
    if not profile_found:
        print(_("Invalid profile. Create profile first"))
        sys.exit(1)

    # log data about current version of rho and system data
    log.info('Version information - rho: %s; ansible: %s; python: %s.',
             rho_version, ansible_version, sys.version)
    log.info('System information - platform: '
             '%s; machine: %s; processor: %s.',
             platform.platform(), platform.machine(), platform.processor())

    # cache is used when the profile/auth mapping has been previously
    # used and does not need to be rerun.
    if not self.options.cache:
        success_hosts = []
        unreachable_hosts = []  # fixed spelling (was 'unreachalbe_hosts')
        success_port_map = {}
        auth_map = {}
        remaining_hosts = profile_ranges
        cred_names = [cred.get('name') for cred in profile_auth_list]
        creds_str = ', '.join(cred_names)
        # Message grammar fixed ('will be perform' -> 'will be
        # performed'); interpolation moved outside _() so the msgid
        # stays constant for the translation catalog.
        log.info('Connection discovery will be performed with the following'
                 ' auth credentials: %s', creds_str)
        print(_('Connection discovery will be performed with the following'
                ' auth credentials: %s') % creds_str)
        print(_('Note: Any ssh-agent connection setup for a target host '
                'will be used as a fallback if it exists.'))
        print()
        for cred_item in profile_auth_list:
            log.info('Discovery starting with credential %s.',
                     cred_item.get('name'))
            success_hosts_, success_port_map_, \
                auth_map_, remaining_hosts_, unreachable_hosts_ = \
                host_discovery.create_ping_inventory(
                    vault, vault_pass, remaining_hosts, profile_port,
                    cred_item, forks, self.verbosity)
            success_hosts = success_hosts + success_hosts_
            remaining_hosts = remaining_hosts_
            unreachable_hosts = unreachable_hosts_
            # If credential used ssh keyfile then re-process unreachable
            # systems due to issue #576
            if cred_item.get('ssh_key_file'):
                remaining_hosts = remaining_hosts_ + unreachable_hosts_
            success_port_map.update(success_port_map_)
            auth_map.update(auth_map_)
            log.info('Discovery with credential %s completed.',
                     cred_item.get('name'))

        if not success_hosts:
            print(_('All auths are invalid for this profile'))
            sys.exit(1)

        num_success = len(success_hosts)
        num_failed = len(remaining_hosts)
        num_unreachable = len(unreachable_hosts)
        num_total = num_success + num_failed + num_unreachable
        if num_failed > 0:
            # delete=False keeps the file around so the user can
            # inspect the failed-host list after the command exits.
            with NamedTemporaryFile(mode='w', delete=False) as failed_temp:
                for failed in remaining_hosts:
                    failed_temp.write(failed + '\n')
            print(_('Failed to connect to %d systems with all auth '
                    'credentials. See the following file "%s" for a '
                    'list of the failed systems.')
                  % (num_failed, failed_temp.name))
            if self.verbosity:
                failed_hosts = ', '.join(remaining_hosts)
                print(_('Failed to connect to the following systems: %s.')
                      % failed_hosts)
            print()

        log.info('Scan will be performed against %d of %d systems.',
                 num_success, num_total)
        print(_('Scan will be performed against %d of %d systems.')
              % (num_success, num_total))
        print()
        _create_hosts_auths_file(auth_map, profile)
        inventory_scan.create_main_inventory(vault, success_hosts,
                                             success_port_map, auth_map,
                                             hosts_yml_path)
    elif not os.path.isfile(hosts_yml_path):
        # --cache requires a previously generated inventory file.
        print("Profile '" + profile + "' has not been processed. " +
              "Please run without using --cache with the profile first.")
        sys.exit(1)

    log.info('Host scan starting for profile %s.', profile)
    inventory_scan.inventory_scan(hosts_yml_path,
                                  self.facts_to_collect,
                                  report_path, vault_pass, profile,
                                  forks=forks,
                                  scan_dirs=self.options.scan_dirs,
                                  log_path=self.options.logfile,
                                  verbosity=self.verbosity)
    host_auth_mapping = \
        self.options.profile + PROFILE_HOST_AUTH_MAPPING_SUFFIX
    host_auth_mapping_path = \
        utilities.get_config_path(host_auth_mapping)
    log.info('Host scan completed for profile %s.', profile)
    print(_("Scanning has completed. The mapping has been"
            " stored in file '" + host_auth_mapping_path +
            "'. The facts have been stored in '" +
            report_path + "'"))
def inventory_scan(hosts_yml_path, facts_to_collect, report_path,
                   vault_pass, base_name, forks=None,
                   scan_dirs=None, log_path=None, verbosity=0):
    """Run an inventory scan.

    :param hosts_yml_path: path to an Ansible inventory file to scan.
        NOTE(review): this value is immediately recomputed from
        ``base_name`` below, so the passed argument is effectively
        ignored — confirm with callers before relying on it.
    :param facts_to_collect: a list of facts to collect.
    :param report_path: the path to write a report to.
    :param vault_pass: the vault password used to protect user data
    :param base_name: the base name of the output files
    :param forks: the number of Ansible forks, or None for default.
    :param scan_dirs: the directories on the remote host to scan,
        or None for default.
    :param log_path: path to log to, or None for default.
    :param verbosity: number of v's of Ansible verbosity.
    :returns: True if scan completed successfully, False if not.
    """
    hosts_yml = base_name + utilities.PROFILE_HOSTS_SUFIX
    hosts_yml_path = utilities.get_config_path(hosts_yml)
    ansible_vars = {'facts_to_collect': list(facts_to_collect),
                    'report_path': report_path,
                    'scan_dirs': ' '.join(scan_dirs or [])}
    if os.path.isfile(utilities.PLAYBOOK_DEV_PATH):
        playbook = utilities.PLAYBOOK_DEV_PATH
    elif os.path.isfile(utilities.PLAYBOOK_RPM_PATH):
        playbook = utilities.PLAYBOOK_RPM_PATH
    else:
        # BUG FIX: 'playbook' was referenced before assignment in this
        # branch (NameError at error-report time); show the RPM path
        # that was actually checked instead.
        print(_("rho scan playbook not found locally or in '%s'")
              % utilities.PLAYBOOK_RPM_PATH)
        sys.exit(1)
    cmd_string = ('ansible-playbook {playbook} '
                  '-i {inventory} -f {forks} '
                  '--ask-vault-pass '
                  '--extra-vars \'{vars}\'').format(
                      playbook=playbook,
                      inventory=hosts_yml_path,
                      forks=forks,
                      vars=json.dumps(ansible_vars))
    log_path = log_path or utilities.SCAN_LOG_PATH
    print('Running:', cmd_string)
    # Non-interactive Ansible run: skip host-key prompts, plain output.
    my_env = os.environ.copy()
    my_env["ANSIBLE_HOST_KEY_CHECKING"] = "False"
    my_env["ANSIBLE_NOCOLOR"] = "True"
    process = ansible_utils.run_with_vault(cmd_string, vault_pass,
                                           env=my_env,
                                           log_path=log_path,
                                           log_to_stdout=True,
                                           ansible_verbosity=verbosity)
    # Success means a clean exit code and no terminating signal.
    return process.exitstatus == 0 and process.signalstatus is None
def inventory_scan(hosts_yml_path, facts_to_collect, report_path,
                   vault_pass, base_name, forks=None,
                   scan_dirs=None, log_path=None, verbosity=0):
    """Run an inventory scan, one Ansible run per host group.

    :param hosts_yml_path: path to an Ansible inventory file to scan.
        NOTE(review): this value is immediately recomputed from
        ``base_name`` below, so the passed argument is effectively
        ignored — confirm with callers before relying on it.
    :param facts_to_collect: a list of facts to collect.
    :param report_path: the path to write a report to.
    :param vault_pass: the vault password used to protect user data
    :param base_name: the base name of the output files
    :param forks: the number of Ansible forks, or None for default.
    :param scan_dirs: the directories on the remote host to scan,
        or None for default.
    :param log_path: path to log to, or None for default.
    :param verbosity: number of v's of Ansible verbosity.
    :returns: None. Exits the process with status 1 when the playbook
        is missing, a scan error occurs, or no data was collected.
        (Docstring fixed: this version does not return a bool.)
    """
    hosts_yml = base_name + utilities.PROFILE_HOSTS_SUFIX
    hosts_yml_path = utilities.get_config_path(hosts_yml)
    vault = vault_module.Vault(vault_pass)
    hosts_dict = vault.load_as_yaml(hosts_yml_path)
    host_groups = hosts_by_group(hosts_dict)
    # Unique temp-file prefix for the per-group fact output files.
    variables_prefix = os.path.join(
        tempfile.gettempdir(),
        'rho-fact-temp-' + str(time.time()) + '-')
    if os.path.isfile(utilities.PLAYBOOK_DEV_PATH):
        playbook = utilities.PLAYBOOK_DEV_PATH
    elif os.path.isfile(utilities.PLAYBOOK_RPM_PATH):
        playbook = utilities.PLAYBOOK_RPM_PATH
    else:
        # BUG FIX: 'playbook' was unbound in this branch (NameError at
        # error-report time); show the RPM path actually checked.
        print(t("rho scan playbook not found locally or in '%s'")
              % utilities.PLAYBOOK_RPM_PATH)
        sys.exit(1)
    log_path = log_path or utilities.SCAN_LOG_PATH
    # Non-interactive Ansible run: skip host-key prompts, plain output.
    my_env = os.environ.copy()
    my_env["ANSIBLE_HOST_KEY_CHECKING"] = "False"
    my_env["ANSIBLE_NOCOLOR"] = "True"
    facts_out = []
    # Sum the group sizes directly instead of a manual counting loop.
    total_hosts_count = sum(len(hosts) for hosts in host_groups.values())
    utilities.log.info('Starting scan of %d systems broken into %d groups.',
                       total_hosts_count, len(host_groups.keys()))
    print('\nStarting scan of %d systems broken into %d groups.'
          % (total_hosts_count, len(host_groups.keys())))
    for group in host_groups.keys():
        variables_path = variables_prefix + group
        hosts = host_groups.get(group, [])
        ansible_vars = {'facts_to_collect': list(facts_to_collect),
                        'scan_dirs': ' '.join(scan_dirs or []),
                        'variables_path': variables_path}
        cmd_string = ('ansible-playbook {playbook} '
                      '--limit {group},localhost '
                      '-i {inventory} -f {forks} '
                      '--ask-vault-pass '
                      '--extra-vars \'{vars}\'').format(
                          group=group,
                          playbook=playbook,
                          inventory=hosts_yml_path,
                          forks=forks,
                          vars=json.dumps(ansible_vars))
        rho_host_scan_timeout = os.getenv('RHO_HOST_SCAN_TIMEOUT',
                                          DEFAULT_HOST_SCAN_TIMEOUT)
        try:
            rho_host_scan_timeout = int(rho_host_scan_timeout)
        except ValueError:
            # Fall back to the default when the env var is not numeric.
            rho_host_scan_timeout = DEFAULT_HOST_SCAN_TIMEOUT
        # The timeout scales with the number of serial batches Ansible
        # runs (group size divided by forks).
        host_scan_timeout = ((len(hosts) // int(forks)) + 1) \
            * rho_host_scan_timeout
        utilities.log.info(
            'Starting scan for group "%s" with %d systems'
            ' with timeout of %d minutes.',
            group, len(hosts), host_scan_timeout)
        print('\nStarting scan for group "%s" with %d systems'
              ' with timeout of %d minutes.\n'
              % (group, len(hosts), host_scan_timeout))
        try:
            ansible_utils.run_with_vault(
                cmd_string, vault_pass,
                env=my_env,
                log_path=log_path,
                log_to_stdout=utilities.process_host_scan,
                ansible_verbosity=verbosity,
                timeout=host_scan_timeout * 60,
                print_before_run=True)
        except ansible_utils.AnsibleProcessException as ex:
            print(t("An error has occurred during the scan. Please review" +
                    " the output to resolve the given issue: %s" % str(ex)))
            sys.exit(1)
        except ansible_utils.AnsibleTimeoutException:
            # Exception value was never used; a timed-out group is
            # skipped without aborting the remaining groups.
            utilities.log.warning(
                'Scan for group "%s" timed out. Hosts \n'
                '%s\nwill be skipped. The rest of the scan '
                'is not affected.', group, host_groups[group])
            continue
        if os.path.isfile(variables_path):
            with open(variables_path, 'r') as variables_file:
                vars_by_host = {}
                update_json = json.load(variables_file)
                for host in hosts:
                    host_facts = update_json.get(host, {})
                    vars_by_host[host] = host_facts
            os.remove(variables_path)
            utilities.log.info('Processing scan data for %d more systems.',
                               len(hosts))
            print('\nProcessing scan data for %d more systems.'
                  % (len(hosts)))
            group_facts = process_host_vars(facts_to_collect, vars_by_host)
            facts_out += group_facts
            utilities.log.info('Completed scanning %d systems.',
                               len(facts_out))
            print('Completed scanning %d systems.\n' % (len(facts_out)))
        else:
            utilities.log.error('Error collecting data for group %s.'
                                'output file %s not found.',
                                group, variables_path)
    if facts_out == []:
        print(t("An error has occurred during the scan. " +
                "No data was collected for any groups. " +
                "Please review the output to resolve the given issues"))
        sys.exit(1)
    write_fact_report(facts_to_collect, facts_out, report_path)
def _do_command(self):
    """Scan the hosts of the selected profile and collect facts.

    Discovers reachable hosts for the profile (skipped with --cache),
    writes the auth mapping and main inventory, runs the scan playbook
    via Ansible, and reports where the mapping and facts were stored.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    vault, vault_pass = get_vault_and_password(self.options.vaultfile)
    profile_found = False
    profile_auth_list = []
    profile_ranges = []
    profile_port = 22
    profile = self.options.profile
    forks = self.options.ansible_forks \
        if self.options.ansible_forks else '50'
    report_path = os.path.abspath(os.path.normpath(
        self.options.report_path))
    hosts_yml = profile + PROFILE_HOSTS_SUFIX
    hosts_yml_path = utilities.get_config_path(hosts_yml)

    # Checks if profile exists and stores information
    # about that profile for later use.
    if not os.path.isfile(utilities.PROFILES_PATH):
        print(_('No profiles exist yet.'))
        sys.exit(1)
    if not os.path.isfile(utilities.CREDENTIALS_PATH):
        print(_('No auth credentials exist yet.'))
        sys.exit(1)
    profiles_list = vault.load_as_json(utilities.PROFILES_PATH)
    for curr_profile in profiles_list:
        if self.options.profile == curr_profile.get('name'):
            profile_found = True
            profile_ranges = curr_profile.get('hosts')
            profile_auths = curr_profile.get('auth')
            profile_port = curr_profile.get('ssh_port')
            cred_list = vault.load_as_json(utilities.CREDENTIALS_PATH)
            # Resolve each auth reference to its full credential record.
            for auth in profile_auths:
                for cred in cred_list:
                    if auth.get('id') == cred.get('id'):
                        profile_auth_list.append(cred)
                        break
            break
    if not profile_found:
        print(_("Invalid profile. Create profile first"))
        sys.exit(1)

    # cache is used when the profile/auth mapping has been previously
    # used and does not need to be rerun.
    if not self.options.cache:
        success_hosts = []
        success_port_map = {}
        auth_map = {}
        remaining_hosts = profile_ranges
        for cred_item in profile_auth_list:
            success_hosts_, success_port_map_, \
                auth_map_, remaining_hosts_ = \
                _create_ping_inventory(vault, vault_pass,
                                       remaining_hosts,
                                       profile_port,
                                       cred_item, forks,
                                       self.verbosity)
            success_hosts = success_hosts + success_hosts_
            remaining_hosts = remaining_hosts_
            success_port_map.update(success_port_map_)
            auth_map.update(auth_map_)
        if not success_hosts:
            print(_('All auths are invalid for this profile'))
            sys.exit(1)
        _create_hosts_auths_file(auth_map, profile)
        _create_main_inventory(vault, success_hosts, success_port_map,
                               auth_map, profile)
    # BUG FIX: the condition was inverted ('elif os.path.isfile(...)'),
    # which rejected --cache runs exactly when the cached inventory DID
    # exist and let them proceed when it was missing.
    elif not os.path.isfile(hosts_yml_path):
        print("Profile '" + profile + "' has not been processed. " +
              "Please run without using --cache with the profile first.")
        sys.exit(1)

    # always output connection.x
    for key in utilities.CONNECTION_FACTS_TUPLE:
        if key not in self.facts_to_collect:
            self.facts_to_collect.append(key)

    scan_dirs = ' '.join(self.options.scan_dirs)
    ansible_vars = {'facts_to_collect': self.facts_to_collect,
                    'report_path': report_path,
                    'scan_dirs': scan_dirs}

    # Prefer the development playbook; fall back to the RPM install.
    playbook = utilities.PLAYBOOK_DEV_PATH
    if not os.path.isfile(playbook):
        playbook = utilities.PLAYBOOK_RPM_PATH
    if not os.path.isfile(playbook):
        print(_("rho scan playbook not found locally or in '%s'")
              % playbook)
        sys.exit(1)

    cmd_string = ('ansible-playbook {playbook} '
                  '-i {inventory} -f {forks} '
                  '--ask-vault-pass '
                  '--extra-vars \'{vars}\'').format(
                      playbook=playbook,
                      inventory=hosts_yml_path,
                      forks=forks,
                      vars=json.dumps(ansible_vars))

    # process finally runs ansible on the
    # playbook and inventories thus created.
    if self.options.logfile:
        log_path = self.options.logfile
    else:
        log_path = SCAN_LOG_PATH
    print('Running:', cmd_string)
    process = run_ansible_with_vault(cmd_string, vault_pass,
                                     log_path=log_path,
                                     log_to_stdout=True,
                                     ansible_verbosity=self.verbosity)
    if process.exitstatus == 0 and process.signalstatus is None:
        host_auth_mapping = \
            self.options.profile + PROFILE_HOST_AUTH_MAPPING_SUFFIX
        host_auth_mapping_path = \
            utilities.get_config_path(host_auth_mapping)
        print(_("Scanning has completed. The mapping has been"
                " stored in file '" + host_auth_mapping_path +
                "'. The facts have been stored in '" +
                report_path + "'"))
    else:
        print(_("An error has occurred during the scan. Please review" +
                " the output to resolve the given issue."))
        sys.exit(1)