def _push_corosync_conf_live(
    self,
    target_list,
    corosync_conf_data,
    need_stopped_cluster,
    need_qdevice_reload,
    skip_offline_nodes,
):
    """
    Distribute a new corosync.conf to cluster nodes and reload daemons.

    target_list -- communication targets of all cluster nodes
    corosync_conf_data -- text of the corosync.conf to distribute
    need_stopped_cluster -- if True, require corosync to be stopped on all
        nodes before distributing the new config
    need_qdevice_reload -- if True, restart the qdevice client on all nodes
        after the config has been delivered
    skip_offline_nodes -- if True, unreachable nodes are not treated as a
        hard error
    """
    # The new config may be incompatible with a running cluster; in that
    # case verify corosync is offline everywhere before touching anything.
    if need_stopped_cluster:
        offline_check = CheckCorosyncOffline(
            self.report_processor, skip_offline_nodes
        )
        offline_check.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), offline_check)

    distribute = DistributeCorosyncConf(
        self.report_processor, corosync_conf_data, skip_offline_nodes
    )
    distribute.set_targets(target_list)
    run_and_raise(self.get_node_communicator(), distribute)

    # Reload the locally running corosync daemon so it picks up the new
    # config; a stopped daemon will read it on its next start anyway.
    if is_service_running(self.cmd_runner(), "corosync"):
        reload_corosync_config(self.cmd_runner())
        self.report_processor.process(reports.corosync_config_reloaded())

    if need_qdevice_reload:
        self.report_processor.process(reports.qdevice_client_reload_started())
        # Stop on all nodes first, then start on all nodes; collect errors
        # from both phases and only fail after both have been attempted.
        failures = []
        for qdevice_command in (qdevice.Stop, qdevice.Start):
            step = qdevice_command(self.report_processor, skip_offline_nodes)
            step.set_targets(target_list)
            run(self.get_node_communicator(), step)
            failures += step.error_list
        if failures:
            raise LibraryError()
def send_all_config_to_node(
    communicator, reporter, target_list,
    rewrite_existing=False, skip_wrong_config=False
):
    """
    Send all booth configs from the default booth config directory and their
    authfiles to the specified nodes.

    communicator -- NodeCommunicator
    reporter -- report processor
    target_list list -- list of targets to which configs should be delivered
    rewrite_existing -- if True, overwrite files already present on the nodes
    skip_wrong_config -- if True, skip local configs that are unreadable
    """
    report_proc = SimpleReportProcessor(reporter)
    configs = booth_conf.read_configs(reporter, skip_wrong_config)
    if not configs:
        # no booth configs exist locally, nothing to synchronize
        return
    report_proc.report(reports.booth_config_distribution_started())

    files_to_send = []
    for conf_name, conf_data in sorted(configs.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(conf_data)
            )
            files_to_send.append(
                {"name": conf_name, "data": conf_data, "is_authfile": False}
            )
            # Ship the authfile referenced by the config along with it,
            # if there is one and it is readable.
            if authfile_path:
                key_data = booth_conf.read_authfile(reporter, authfile_path)
                if key_data:
                    files_to_send.append({
                        "name": os.path.basename(authfile_path),
                        "data": base64.b64encode(key_data).decode("utf-8"),
                        "is_authfile": True,
                    })
        except LibraryError:
            # an unparsable config is skipped, the rest are still synced
            report_proc.report(
                reports.booth_skipping_config(
                    conf_name, "unable to parse config"
                )
            )

    save_cmd = BoothSaveFiles(
        report_proc, files_to_send, rewrite_existing=rewrite_existing
    )
    save_cmd.set_targets(target_list)
    run(communicator, save_cmd)

    if report_proc.has_errors:
        raise LibraryError()
def send_all_config_to_node(
    communicator, reporter, target_list,
    rewrite_existing=False, skip_wrong_config=False
):
    """
    Send all booth configs from the default booth config directory and their
    authfiles to the specified nodes.

    communicator -- NodeCommunicator
    reporter -- report processor
    target_list list -- list of targets to which configs should be delivered
    rewrite_existing -- if True, overwrite files already present on the nodes
    skip_wrong_config -- if True, skip local configs that are unreadable
    """
    easy_reporter = SimpleReportProcessor(reporter)
    all_configs = booth_conf.read_configs(reporter, skip_wrong_config)
    if not all_configs:
        # nothing to distribute
        return
    easy_reporter.report(reports.booth_config_distribution_started())

    payload = []
    for name, data in sorted(all_configs.items()):
        try:
            authfile_path = config_structure.get_authfile(
                config_parser.parse(data)
            )
            payload.append(
                dict(name=name, data=data, is_authfile=False)
            )
            # the authfile named in the config travels together with it
            if authfile_path:
                authfile_content = booth_conf.read_authfile(
                    reporter, authfile_path
                )
                if authfile_content:
                    payload.append(dict(
                        name=os.path.basename(authfile_path),
                        data=base64.b64encode(
                            authfile_content
                        ).decode("utf-8"),
                        is_authfile=True,
                    ))
        except LibraryError:
            # skip configs that cannot be parsed, keep syncing the rest
            easy_reporter.report(
                reports.booth_skipping_config(name, "unable to parse config")
            )

    save_files = BoothSaveFiles(
        easy_reporter, payload, rewrite_existing=rewrite_existing
    )
    save_files.set_targets(target_list)
    run(communicator, save_files)

    if easy_reporter.has_errors:
        raise LibraryError()
def _ensure_can_add_node_to_remote_cluster(
    env, node_addresses, warn_on_communication_exception=False
):
    """
    Check that a node is available to be added to the cluster as a remote
    node, raising (via the report processor) if it is not.

    env -- library environment
    node_addresses -- addresses of the candidate node
    warn_on_communication_exception -- if True, communication failures are
        downgraded to warnings instead of errors
    """
    collected_reports = []
    precheck = PrecheckNewNode(
        collected_reports,
        availability_checker_remote_node,
        skip_offline_targets=warn_on_communication_exception,
    )
    target = env.get_node_target_factory().get_target(node_addresses)
    precheck.add_request(target)
    run(env.get_node_communicator(), precheck)
    # process_list raises if any collected report is an error
    env.report_processor.process_list(collected_reports)
def _push_corosync_conf_live(
    self,
    target_list,
    corosync_conf_data,
    need_stopped_cluster,
    need_qdevice_reload,
    skip_offline_nodes,
):
    """
    Distribute a new corosync.conf to cluster nodes and reload daemons.

    target_list -- communication targets of all cluster nodes
    corosync_conf_data -- text of the corosync.conf to distribute
    need_stopped_cluster -- if True, require corosync to be stopped on all
        nodes before distributing the new config
    need_qdevice_reload -- if True, restart the qdevice client on all nodes
        after the config has been delivered
    skip_offline_nodes -- if True, unreachable nodes are not treated as a
        hard error
    """
    # TODO
    # * check for online nodes and run all commands on them only
    # * if those commands fail, exit with an error
    # * add support for allow_skip_offline=False
    # * use simple report procesor
    # Correct reloading is done in pcs.lib.cluster.remove_nodes for example.

    # Verify the cluster is stopped everywhere when the new config cannot
    # be applied to a running cluster.
    if need_stopped_cluster:
        offline_check = CheckCorosyncOffline(
            self.report_processor, skip_offline_nodes
        )
        offline_check.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), offline_check)

    # Deliver corosync.conf to every node
    distribute = DistributeCorosyncConf(
        self.report_processor, corosync_conf_data, skip_offline_nodes
    )
    distribute.set_targets(target_list)
    run_and_raise(self.get_node_communicator(), distribute)

    # If cluster must be stopped then we cannot reload corosync because
    # the cluster is stopped. If it is not stopped, we do not even get
    # here.
    if not need_stopped_cluster:
        reload_conf = ReloadCorosyncConf(self.report_processor)
        reload_conf.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), reload_conf)

    # Restart the qdevice client if requested: stop on all nodes first,
    # then start on all nodes, failing only after both phases have run.
    if need_qdevice_reload:
        self.report_processor.report(
            ReportItem.info(reports.messages.QdeviceClientReloadStarted())
        )
        failed = False
        for step_class in (qdevice.Stop, qdevice.Start):
            step = step_class(self.report_processor, skip_offline_nodes)
            step.set_targets(target_list)
            run(self.get_node_communicator(), step)
            failed = failed or step.has_errors
        if failed:
            raise LibraryError()
def _push_corosync_conf_live(
    self,
    target_list,
    corosync_conf_data,
    need_stopped_cluster,
    need_qdevice_reload,
    skip_offline_nodes,
):
    """
    Distribute a new corosync.conf to cluster nodes and reload daemons.

    target_list -- communication targets of all cluster nodes
    corosync_conf_data -- text of the corosync.conf to distribute
    need_stopped_cluster -- if True, require corosync to be stopped on all
        nodes before distributing the new config
    need_qdevice_reload -- if True, restart the qdevice client on all nodes
        after the config has been delivered
    skip_offline_nodes -- if True, unreachable nodes are not treated as a
        hard error
    """
    # Make sure the cluster is down everywhere when the new config is
    # incompatible with a running cluster.
    if need_stopped_cluster:
        check_cmd = CheckCorosyncOffline(
            self.report_processor, skip_offline_nodes
        )
        check_cmd.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), check_cmd)

    # Deliver corosync.conf to every node
    dist_cmd = DistributeCorosyncConf(
        self.report_processor, corosync_conf_data, skip_offline_nodes
    )
    dist_cmd.set_targets(target_list)
    run_and_raise(self.get_node_communicator(), dist_cmd)

    # If cluster must be stopped then we cannot reload corosync because
    # the cluster is stopped. If it is not stopped, we do not even get
    # here.
    if not need_stopped_cluster:
        reload_cmd = ReloadCorosyncConf(self.report_processor)
        reload_cmd.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), reload_cmd)

    # Restart the qdevice client if requested; start is attempted even
    # when stop reported errors, and the function fails afterwards.
    if need_qdevice_reload:
        self.report_processor.report(
            ReportItem.info(reports.messages.QdeviceClientReloadStarted())
        )
        stop_cmd = qdevice.Stop(self.report_processor, skip_offline_nodes)
        stop_cmd.set_targets(target_list)
        run(self.get_node_communicator(), stop_cmd)
        any_errors = stop_cmd.has_errors

        start_cmd = qdevice.Start(self.report_processor, skip_offline_nodes)
        start_cmd.set_targets(target_list)
        run(self.get_node_communicator(), start_cmd)
        any_errors = any_errors or start_cmd.has_errors

        if any_errors:
            raise LibraryError()
def _push_corosync_conf_live(
    self, target_list, corosync_conf_data, need_stopped_cluster,
    need_qdevice_reload, skip_offline_nodes
):
    """
    Distribute a new corosync.conf to cluster nodes and reload daemons.

    target_list -- communication targets of all cluster nodes
    corosync_conf_data -- text of the corosync.conf to distribute
    need_stopped_cluster -- if True, require corosync to be stopped on all
        nodes before distributing the new config
    need_qdevice_reload -- if True, restart the qdevice client on all nodes
        after the config has been delivered
    skip_offline_nodes -- if True, unreachable nodes are not treated as a
        hard error
    """
    # When the new config cannot be applied to a running cluster, verify
    # corosync is offline on all nodes first.
    if need_stopped_cluster:
        check_offline = CheckCorosyncOffline(
            self.report_processor, skip_offline_nodes
        )
        check_offline.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), check_offline)

    # Deliver corosync.conf to every node
    dist = DistributeCorosyncConf(
        self.report_processor, corosync_conf_data, skip_offline_nodes
    )
    dist.set_targets(target_list)
    run_and_raise(self.get_node_communicator(), dist)

    # If cluster must be stopped then we cannot reload corosync because
    # the cluster is stopped. If it is not stopped, we do not even get
    # here.
    if not need_stopped_cluster:
        reload_cmd = ReloadCorosyncConf(self.report_processor)
        reload_cmd.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), reload_cmd)

    # Restart the qdevice client if requested; start is attempted even
    # when stop reported errors, and the function fails afterwards.
    if need_qdevice_reload:
        self.report_processor.process(
            reports.qdevice_client_reload_started()
        )
        stop_cmd = qdevice.Stop(self.report_processor, skip_offline_nodes)
        stop_cmd.set_targets(target_list)
        run(self.get_node_communicator(), stop_cmd)

        start_cmd = qdevice.Start(self.report_processor, skip_offline_nodes)
        start_cmd.set_targets(target_list)
        run(self.get_node_communicator(), start_cmd)

        if stop_cmd.error_list or start_cmd.error_list:
            raise LibraryError()
def send_all_config_to_node(
    communicator,
    reporter,
    target_list,
    rewrite_existing=False,
    skip_wrong_config=False,
):
    """
    Send all booth configs from the default booth config directory and their
    authfiles to the specified nodes.

    communicator -- NodeCommunicator
    reporter -- report processor
    target_list list -- list of targets to which configs should be delivered
    rewrite_existing -- if True, overwrite files already present on the nodes
    skip_wrong_config -- if True, skip local configs that are unreadable
    """
    # TODO adapt to new file transfer framework once it is written
    # TODO the function is not modular enough - it raises LibraryError
    files_to_send = []
    for config_name in sorted(config_files.get_all_configs_file_names()):
        booth_file = FileInstance.for_booth_config(config_name)
        try:
            raw_config = booth_file.raw_file.read()
            (
                authfile_name,
                authfile_data,
                authfile_report_list,
            ) = config_files.get_authfile_name_and_data(
                booth_file.raw_to_facade(raw_config)
            )
            reporter.report_list(authfile_report_list)
            files_to_send.append({
                "name": config_name,
                "data": raw_config.decode("utf-8"),
                "is_authfile": False,
            })
            # the authfile referenced by the config travels together with it
            if authfile_name and authfile_data:
                files_to_send.append({
                    "name": authfile_name,
                    "data": base64.b64encode(authfile_data).decode("utf-8"),
                    "is_authfile": True,
                })
        except RawFileError as e:
            # unreadable config; an error unless the caller forced skipping
            reporter.report(
                raw_file_error_report(
                    e,
                    force_code=report_codes.SKIP_UNREADABLE_CONFIG,
                    is_forced_or_warning=skip_wrong_config,
                )
            )
        except ParserErrorException as e:
            # unparsable config; an error unless the caller forced skipping
            reporter.report_list(
                booth_file.parser_exception_to_report_list(
                    e,
                    force_code=report_codes.SKIP_UNREADABLE_CONFIG,
                    is_forced_or_warning=skip_wrong_config,
                )
            )

    if reporter.has_errors:
        raise LibraryError()

    if not files_to_send:
        # no booth configs exist, nothing to be synced
        return

    reporter.report(
        ReportItem.info(reports.messages.BoothConfigDistributionStarted())
    )
    save_cmd = BoothSaveFiles(
        reporter, files_to_send, rewrite_existing=rewrite_existing
    )
    save_cmd.set_targets(target_list)
    run(communicator, save_cmd)

    if reporter.has_errors:
        raise LibraryError()