def _load_dr_config(
    config_file: FileInstance,
) -> Tuple[ReportItemList, DrConfigFacade]:
    """
    Load the disaster-recovery config and turn it into a facade.

    config_file -- file instance wrapping the DR config

    Returns a pair: reports describing any load problems, and the parsed
    facade (an empty facade whenever loading failed).
    """
    if not config_file.raw_file.exists():
        return [reports.dr_config_does_not_exist()], DrConfigFacade.empty()
    try:
        facade = config_file.read_to_facade()
    except RawFileError as read_error:
        return [raw_file_error_report(read_error)], DrConfigFacade.empty()
    except ParserErrorException as parse_error:
        return (
            config_file.parser_exception_to_report_list(parse_error),
            DrConfigFacade.empty(),
        )
    return [], facade
def __init__(self, instance_name, booth_files_data):
    """
    Create a new BoothEnv

    string|None instance_name -- booth instance name
    dict booth_files_data -- ghost files (config_data, key_data, key_path)
    """
    has_ghost_config = "config_data" in booth_files_data
    has_ghost_key = "key_data" in booth_files_data
    # Ghost config and ghost key must be supplied together; report the
    # present one first, the missing one second.
    if has_ghost_config != has_ghost_key:
        if has_ghost_config:
            present = [file_type_codes.BOOTH_CONFIG]
            missing = [file_type_codes.BOOTH_KEY]
        else:
            present = [file_type_codes.BOOTH_KEY]
            missing = [file_type_codes.BOOTH_CONFIG]
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentNotConsistent(
                    present,
                    missing,
                )
            )
        )

    self._instance_name = instance_name or constants.DEFAULT_INSTANCE_NAME
    name_reports = config_validators.check_instance_name(self._instance_name)
    if name_reports:
        raise LibraryError(*name_reports)

    self._config_file = FileInstance.for_booth_config(
        f"{self._instance_name}.conf",
        **self._init_file_data(booth_files_data, "config_data"),
    )
    self._key_file = FileInstance.for_booth_key(
        f"{self._instance_name}.key",
        **self._init_file_data(booth_files_data, "key_data"),
    )
    # For a ghost (in-memory) key the caller supplies the path; for a live
    # key the path comes from the file's own metadata.
    self._key_path = (
        booth_files_data.get("key_path", "")
        if isinstance(self._key_file.raw_file, raw_file.GhostFile)
        else self._key_file.raw_file.metadata.path
    )
def get_corosync_conf(self) -> CorosyncConfigFacade:
    """
    Parse corosync.conf data into a config facade.

    Raises LibraryError when the data cannot be parsed and the resulting
    reports contain errors.
    """
    # TODO The architecture of working with corosync.conf needs to be
    # overhauled to match the new file framework. The code below is
    # complicated, because we read corosync.conf data at one place outside
    # of the file framework or get it from outside (from CLI) and then we
    # put them back into the framework.
    corosync_instance = FileInstance.for_corosync_conf()
    try:
        # cast: raw_to_facade returns a generic facade type; this instance
        # is the corosync.conf one, so a CorosyncConfigFacade is expected
        facade = cast(
            CorosyncConfigFacade,
            corosync_instance.raw_to_facade(
                self.get_corosync_conf_data().encode("utf-8")
            ),
        )
    except ParserErrorException as e:
        if self.report_processor.report_list(
            corosync_instance.toolbox.parser.exception_to_report_list(
                e,
                corosync_instance.toolbox.file_type_code,
                # include the file path in reports only when reading the
                # live file, not data handed in from elsewhere (e.g. CLI)
                (
                    corosync_instance.raw_file.metadata.path
                    if self.is_corosync_conf_live
                    else None
                ),
                force_code=None,
                is_forced_or_warning=False,
            )
        ).has_errors:
            raise LibraryError() from e
    # NOTE(review): if the parser reports carried no errors, execution
    # would reach this return with `facade` unbound (UnboundLocalError).
    # With is_forced_or_warning=False the reports are presumably always
    # errors, making that path unreachable — confirm.
    return facade
def get_authfile_name_and_data(booth_conf_facade):
    """
    Get booth auth filename, content and reports based on booth config facade

    pcs.lib.booth.config_facade.ConfigFacade booth_conf_facade -- booth config
    """
    report_list = []
    authfile_path = booth_conf_facade.get_authfile()
    # no authfile configured at all
    if not authfile_path:
        return None, None, report_list
    directory, file_name = os.path.split(authfile_path)
    # only keys living in the booth config dir are managed by pcs
    if file_name and directory == settings.booth_config_dir:
        key_data = FileInstance.for_booth_key(file_name).read_raw()
        return file_name, key_data, report_list
    report_list.append(
        ReportItem.warning(
            reports.messages.BoothUnsupportedFileLocation(
                authfile_path,
                settings.booth_config_dir,
                file_type_codes.BOOTH_KEY,
            )
        )
    )
    return None, None, report_list
def pull_config(env: LibraryEnvironment, node_name, instance_name=None):
    """
    Get config from specified node and save it on local system. It will
    rewrite existing files.

    env
    string node_name -- name of the node from which the config should be
        fetched
    string instance_name -- booth instance name
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    instance_name = booth_env.instance_name
    _ensure_live_env(env, booth_env)
    conf_dir = os.path.dirname(booth_env.config_path)

    env.report_processor.report(
        ReportItem.info(
            reports.messages.BoothFetchingConfigFromNode(
                node_name,
                config=instance_name,
            )
        )
    )
    com_cmd = BoothGetConfig(env.report_processor, instance_name)
    com_cmd.set_targets(
        [env.get_node_target_factory().get_target_from_hostname(node_name)]
    )
    # pylint: disable=unsubscriptable-object
    # In general, pylint is right. And it cannot know in this case code is OK.
    # It is covered by tests.
    output = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        # TODO adapt to new file transfer framework once it is written
        if (
            output["authfile"]["name"] is not None
            and output["authfile"]["data"]
        ):
            authfile_name = output["authfile"]["name"]
            # the received key file name is run through the instance-name
            # validator — presumably to reject unsafe names; confirm
            report_list = config_validators.check_instance_name(authfile_name)
            if report_list:
                raise LibraryError(*report_list)
            booth_key = FileInstance.for_booth_key(authfile_name)
            # key data travels base64-encoded over the wire
            booth_key.write_raw(
                base64.b64decode(output["authfile"]["data"].encode("utf-8")),
                can_overwrite=True,
            )
        booth_env.config.write_raw(
            output["config"]["data"].encode("utf-8"), can_overwrite=True
        )
        env.report_processor.report(
            ReportItem.info(
                reports.messages.BoothConfigAcceptedByNode(
                    name_list=[instance_name]
                )
            )
        )
    except RawFileError as e:
        # distinguish "booth config directory missing" from other file errors
        if not os.path.exists(conf_dir):
            report_processor.report(
                ReportItem.error(reports.messages.BoothPathNotExists(conf_dir))
            )
        else:
            report_processor.report(raw_file_error_report(e))
    except KeyError as e:
        # the node's response lacks an expected key
        raise LibraryError(
            ReportItem.error(reports.messages.InvalidResponseFormat(node_name))
        ) from e
    if report_processor.has_errors:
        raise LibraryError()
def _load_dr_config(
    config_file: FileInstance,
) -> Tuple[ReportItemList, DrConfigFacade]:
    """
    Read the disaster-recovery config file into a facade.

    config_file -- file instance wrapping the DR config

    Returns a pair: load/parse reports, and the facade (empty whenever
    the file is missing or could not be loaded).
    """
    if not config_file.raw_file.exists():
        return (
            [ReportItem.error(reports.messages.DrConfigDoesNotExist())],
            DrConfigFacade.empty(),
        )
    try:
        facade = cast(DrConfigFacade, config_file.read_to_facade())
    except RawFileError as file_error:
        return [raw_file_error_report(file_error)], DrConfigFacade.empty()
    except ParserErrorException as parse_error:
        return (
            config_file.parser_exception_to_report_list(parse_error),
            DrConfigFacade.empty(),
        )
    return [], facade
def get_local_corosync_conf():
    """
    Return the contents of the local corosync.conf as a string.

    Raises LibraryError when the file cannot be read.
    """
    # TODO The architecture of working with corosync.conf needs to be
    # overhauled to match the new file framework.
    instance = FileInstance.for_corosync_conf()
    try:
        raw_content = instance.read_raw()
    except RawFileError as e:
        raise LibraryError(raw_file_error_report(e)) from e
    return raw_content.decode("utf-8")
def config_destroy(
    env: LibraryEnvironment,
    instance_name=None,
    ignore_config_load_problems=False,
):
    # pylint: disable=too-many-branches
    """
    remove booth configuration files

    env
    string instance_name -- booth instance name
    bool ignore_config_load_problems -- delete as much as possible when unable
        to read booth configs for the given booth instance
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    instance_name = booth_env.instance_name
    _ensure_live_env(env, booth_env)

    # refuse to remove a config that a cluster resource refers to
    # TODO use constants in reports
    if resource.find_for_config(
        get_resources(env.get_cib()),
        booth_env.config_path,
    ):
        report_processor.report(
            ReportItem.error(
                reports.messages.BoothConfigIsUsed(
                    instance_name,
                    "in cluster resource",
                )
            )
        )
    # Only systemd is currently supported. Initd does not supports multiple
    # instances (here specified by name)
    if external.is_systemctl():
        if external.is_service_running(
            env.cmd_runner(), "booth", instance_name
        ):
            report_processor.report(
                ReportItem.error(
                    reports.messages.BoothConfigIsUsed(
                        instance_name,
                        "(running in systemd)",
                    )
                )
            )
        if external.is_service_enabled(
            env.cmd_runner(), "booth", instance_name
        ):
            report_processor.report(
                ReportItem.error(
                    reports.messages.BoothConfigIsUsed(
                        instance_name,
                        "(enabled in systemd)",
                    )
                )
            )
    if report_processor.has_errors:
        raise LibraryError()

    try:
        # default to None so a failed read still leaves authfile_path defined
        authfile_path = None
        booth_conf = booth_env.config.read_to_facade()
        authfile_path = booth_conf.get_authfile()
    except RawFileError as e:
        report_processor.report(
            raw_file_error_report(
                e,
                force_code=report_codes.FORCE_BOOTH_DESTROY,
                is_forced_or_warning=ignore_config_load_problems,
            )
        )
    except ParserErrorException as e:
        report_processor.report_list(
            booth_env.config.parser_exception_to_report_list(
                e,
                force_code=report_codes.FORCE_BOOTH_DESTROY,
                is_forced_or_warning=ignore_config_load_problems,
            )
        )
    if report_processor.has_errors:
        raise LibraryError()

    if authfile_path:
        authfile_dir, authfile_name = os.path.split(authfile_path)
        if (authfile_dir == settings.booth_config_dir) and authfile_name:
            try:
                key_file = FileInstance.for_booth_key(authfile_name)
                key_file.raw_file.remove(fail_if_file_not_found=False)
            except RawFileError as e:
                report_processor.report(
                    raw_file_error_report(
                        e,
                        force_code=report_codes.FORCE_BOOTH_DESTROY,
                        is_forced_or_warning=ignore_config_load_problems,
                    )
                )
        else:
            # keys outside the booth config dir are not managed by pcs
            report_processor.report(
                ReportItem.warning(
                    reports.messages.BoothUnsupportedFileLocation(
                        authfile_path,
                        settings.booth_config_dir,
                        file_type_codes.BOOTH_KEY,
                    )
                )
            )
    if report_processor.has_errors:
        raise LibraryError()

    try:
        booth_env.config.raw_file.remove()
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
def send_all_config_to_node( communicator, reporter, target_list, rewrite_existing=False, skip_wrong_config=False, ): """ Send all booth configs from default booth config directory and theri authfiles to specified node. communicator -- NodeCommunicator reporter -- report processor target_list list -- list of targets to which configs should be delivered rewrite_existing -- if True rewrite existing file skip_wrong_config -- if True skip local configs that are unreadable """ # TODO adapt to new file transfer framework once it is written # TODO the function is not modular enough - it raises LibraryError file_list = [] for conf_file_name in sorted(config_files.get_all_configs_file_names()): config_file = FileInstance.for_booth_config(conf_file_name) try: booth_conf_data = config_file.raw_file.read() ( authfile_name, authfile_data, authfile_report_list, ) = config_files.get_authfile_name_and_data( config_file.raw_to_facade(booth_conf_data)) reporter.report_list(authfile_report_list) file_list.append({ "name": conf_file_name, "data": booth_conf_data.decode("utf-8"), "is_authfile": False, }) if authfile_name and authfile_data: file_list.append({ "name": authfile_name, "data": base64.b64encode(authfile_data).decode("utf-8"), "is_authfile": True, }) except RawFileError as e: reporter.report( raw_file_error_report( e, force_code=report_codes.SKIP_UNREADABLE_CONFIG, is_forced_or_warning=skip_wrong_config, )) except ParserErrorException as e: reporter.report_list( config_file.parser_exception_to_report_list( e, force_code=report_codes.SKIP_UNREADABLE_CONFIG, is_forced_or_warning=skip_wrong_config, )) if reporter.has_errors: raise LibraryError() if not file_list: # no booth configs exist, nothing to be synced return reporter.report( ReportItem.info(reports.messages.BoothConfigDistributionStarted())) com_cmd = BoothSaveFiles(reporter, file_list, rewrite_existing=rewrite_existing) com_cmd.set_targets(target_list) run(communicator, com_cmd) if reporter.has_errors: raise 
LibraryError()
def _prepare_pacemaker_remote_environment(
    env,
    report_processor,
    existing_nodes_target_list,
    new_node_target,
    new_node_name,
    skip_offline_nodes,
    allow_incomplete_distribution,
    allow_fails,
):
    """
    Check the new node, distribute the pacemaker authkey and start/enable
    pacemaker_remote on it.
    """
    # find out whether the new node is reachable
    if new_node_target:
        com_cmd = GetOnlineTargets(
            report_processor,
            ignore_offline_targets=skip_offline_nodes,
        )
        com_cmd.set_targets([new_node_target])
        online_new_target_list = run_com(env.get_node_communicator(), com_cmd)
        if not online_new_target_list and not skip_offline_nodes:
            raise LibraryError()
    else:
        online_new_target_list = []

    # check new nodes
    if online_new_target_list:
        com_cmd = GetHostInfo(report_processor)
        com_cmd.set_targets(online_new_target_list)
        report_processor.report_list(
            _host_check_remote_node(
                run_com(env.get_node_communicator(), com_cmd)
            )
        )
        if report_processor.has_errors:
            raise LibraryError()
    else:
        report_processor.report_list(
            _reports_skip_new_node(new_node_name, "unreachable")
        )

    # share pacemaker authkey
    authkey_file = FileInstance.for_pacemaker_key()
    try:
        if authkey_file.raw_file.exists():
            # a key already exists: only the new node needs it
            authkey_content = authkey_file.read_raw()
            authkey_targets = online_new_target_list
        else:
            # no key yet: generate one and send it to every node
            authkey_content = generate_binary_key(
                random_bytes_count=settings.pacemaker_authkey_bytes
            )
            authkey_targets = (
                existing_nodes_target_list + online_new_target_list
            )
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
    # NOTE(review): if the RawFileError report were not an error,
    # authkey_content/authkey_targets below would be unbound — presumably
    # raw_file_error_report always yields an error here; confirm.
    if authkey_targets:
        com_cmd = DistributeFiles(
            report_processor,
            node_communication_format.pcmk_authkey_file(authkey_content),
            skip_offline_targets=skip_offline_nodes,
            allow_fails=allow_incomplete_distribution,
        )
        com_cmd.set_targets(authkey_targets)
        run_and_raise(env.get_node_communicator(), com_cmd)

    # start and enable pacemaker_remote
    if online_new_target_list:
        com_cmd = ServiceAction(
            report_processor,
            node_communication_format.create_pcmk_remote_actions(
                [
                    "start",
                    "enable",
                ]
            ),
            allow_fails=allow_fails,
        )
        com_cmd.set_targets(online_new_target_list)
        run_and_raise(env.get_node_communicator(), com_cmd)
def __init__(self) -> None:
    """Create the environment around the disaster-recovery config file."""
    self._config_file = FileInstance.for_dr_config()
def set_recovery_site(env: LibraryEnvironment, node_name: str) -> None:
    """
    Set up disaster recovery with the local cluster being the primary site

    env
    node_name -- a known host from the recovery site
    """
    # pylint: disable=too-many-locals
    # this command works with live files only
    if env.ghost_file_codes:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentRequired(env.ghost_file_codes)
            )
        )
    report_processor = env.report_processor
    dr_env = env.get_dr_env()
    if dr_env.config.raw_file.exists():
        report_processor.report(
            ReportItem.error(reports.messages.DrConfigAlreadyExist())
        )
    target_factory = env.get_node_target_factory()

    local_nodes, report_list = get_existing_nodes_names(
        env.get_corosync_conf(), error_on_missing_name=True
    )
    report_processor.report_list(report_list)

    # the recovery-site node must not be part of the local cluster
    if node_name in local_nodes:
        report_processor.report(
            ReportItem.error(reports.messages.NodeInLocalCluster(node_name))
        )

    report_list, local_targets = target_factory.get_target_list_with_reports(
        local_nodes, allow_skip=False, report_none_host_found=False
    )
    report_processor.report_list(report_list)

    report_list, remote_targets = target_factory.get_target_list_with_reports(
        [node_name], allow_skip=False, report_none_host_found=False
    )
    report_processor.report_list(report_list)

    if report_processor.has_errors:
        raise LibraryError()

    # fetch the remote cluster's corosync.conf and read its node names
    # TODO The new file framework doesn't support network communication yet.
    com_cmd = GetCorosyncConf(env.report_processor)
    com_cmd.set_targets(remote_targets)
    corosync_conf_instance = FileInstance.for_corosync_conf()
    try:
        remote_cluster_nodes, report_list = get_existing_nodes_names(
            cast(
                CorosyncConfigFacade,
                corosync_conf_instance.raw_to_facade(
                    run_and_raise(
                        env.get_node_communicator(), com_cmd
                    ).encode("utf-8")
                ),
            ),
            error_on_missing_name=True,
        )
    except ParserErrorException as e:
        report_processor.report_list(
            corosync_conf_instance.toolbox.parser.exception_to_report_list(
                e,
                file_type_codes.COROSYNC_CONF,
                None,
                force_code=None,
                is_forced_or_warning=False,
            )
        )
    # NOTE(review): after a parser error, report_list still holds the
    # reports from the last get_target_list_with_reports call and gets
    # reported again, and remote_cluster_nodes is unbound. The parser
    # reports presumably always contain an error so the raise below fires
    # first — confirm.
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    # ensure we have tokens for all nodes of remote cluster
    report_list, remote_targets = target_factory.get_target_list_with_reports(
        remote_cluster_nodes, allow_skip=False, report_none_host_found=False
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    dr_config_exporter = get_file_toolbox(
        file_type_codes.PCS_DR_CONFIG
    ).exporter
    # create dr config for remote cluster
    remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY)
    remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes)
    # send config to all node of remote cluster
    distribute_file_cmd = DistributeFilesWithoutForces(
        env.report_processor,
        node_communication_format.pcs_dr_config_file(
            dr_config_exporter.export(remote_dr_cfg.config)
        ),
    )
    distribute_file_cmd.set_targets(remote_targets)
    run_and_raise(env.get_node_communicator(), distribute_file_cmd)

    # create new dr config, with local cluster as primary site
    local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY)
    local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes)
    distribute_file_cmd = DistributeFilesWithoutForces(
        env.report_processor,
        node_communication_format.pcs_dr_config_file(
            dr_config_exporter.export(local_dr_cfg.config)
        ),
    )
    distribute_file_cmd.set_targets(local_targets)
    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
def config_destroy(
    env: LibraryEnvironment,
    instance_name: Optional[str] = None,
    ignore_config_load_problems: bool = False,
) -> None:
    # pylint: disable=too-many-branches
    """
    remove booth configuration files

    env
    instance_name -- booth instance name
    ignore_config_load_problems -- delete as much as possible when unable to
        read booth configs for the given booth instance
    """
    report_processor = env.report_processor
    booth_env = env.get_booth_env(instance_name)
    found_instance_name = booth_env.instance_name
    _ensure_live_env(env, booth_env)

    # refuse to remove a config that a cluster resource refers to
    booth_resource_list = resource.find_for_config(
        get_resources(env.get_cib()),
        booth_env.config_path,
    )
    if booth_resource_list:
        report_processor.report(
            ReportItem.error(
                reports.messages.BoothConfigIsUsed(
                    found_instance_name,
                    reports.const.BOOTH_CONFIG_USED_IN_CLUSTER_RESOURCE,
                    resource_name=str(booth_resource_list[0].get("id", "")),
                )
            )
        )
    # Only systemd is currently supported. Initd does not supports multiple
    # instances (here specified by name)
    if is_systemd(env.service_manager):
        if env.service_manager.is_running("booth", found_instance_name):
            report_processor.report(
                ReportItem.error(
                    reports.messages.BoothConfigIsUsed(
                        found_instance_name,
                        reports.const.BOOTH_CONFIG_USED_RUNNING_IN_SYSTEMD,
                    )
                )
            )
        if env.service_manager.is_enabled("booth", found_instance_name):
            report_processor.report(
                ReportItem.error(
                    reports.messages.BoothConfigIsUsed(
                        found_instance_name,
                        reports.const.BOOTH_CONFIG_USED_ENABLED_IN_SYSTEMD,
                    )
                )
            )
    if report_processor.has_errors:
        raise LibraryError()

    try:
        # default to None so a failed read still leaves authfile_path defined
        authfile_path = None
        booth_conf = booth_env.config.read_to_facade()
        authfile_path = booth_conf.get_authfile()
    except RawFileError as e:
        report_processor.report(
            raw_file_error_report(
                e,
                force_code=report_codes.FORCE,
                is_forced_or_warning=ignore_config_load_problems,
            )
        )
    except ParserErrorException as e:
        report_processor.report_list(
            booth_env.config.parser_exception_to_report_list(
                e,
                force_code=report_codes.FORCE,
                is_forced_or_warning=ignore_config_load_problems,
            )
        )
    if report_processor.has_errors:
        raise LibraryError()

    if authfile_path:
        authfile_dir, authfile_name = os.path.split(authfile_path)
        if (authfile_dir == settings.booth_config_dir) and authfile_name:
            try:
                key_file = FileInstance.for_booth_key(authfile_name)
                key_file.raw_file.remove(fail_if_file_not_found=False)
            except RawFileError as e:
                report_processor.report(
                    raw_file_error_report(
                        e,
                        force_code=report_codes.FORCE,
                        is_forced_or_warning=ignore_config_load_problems,
                    )
                )
        else:
            # keys outside the booth config dir are not managed by pcs
            report_processor.report(
                ReportItem.warning(
                    reports.messages.BoothUnsupportedFileLocation(
                        authfile_path,
                        settings.booth_config_dir,
                        file_type_codes.BOOTH_KEY,
                    )
                )
            )
    if report_processor.has_errors:
        raise LibraryError()

    try:
        booth_env.config.raw_file.remove()
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()