def get_local_node_status(runner):
    """
    Return a dict describing the status of the local cluster node.

    runner -- CommandRunner used to query the cluster state

    Returns {"offline": True} when the cluster status cannot be obtained;
    otherwise a dict of node status attributes plus "offline": False.
    Raises LibraryError when the local node does not appear in the status.
    """
    status_attrs = (
        'id', 'name', 'type', 'online', 'standby', 'standby_onfail',
        'maintenance', 'pending', 'unclean', 'shutdown', 'expected_up',
        'is_dc', 'resources_running',
    )
    try:
        cluster_status = ClusterState(get_cluster_status_xml(runner))
    except CrmMonErrorException:
        # no usable cluster status => report the local node as offline
        return {"offline": True}
    node_name = get_local_node_name(runner)
    for node_status in cluster_status.node_section.nodes:
        if node_status.attrs.name != node_name:
            continue
        result = {"offline": False}
        result.update(
            (attr, getattr(node_status.attrs, attr))
            for attr in status_attrs
        )
        return result
    raise LibraryError(reports.node_not_found(node_name))
def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict):
    """
    Validate that all nodes in watchdog_dict exist and return a dictionary
    mapping each node to its watchdog.
    Raises LibraryError if any node doesn't belong to the cluster or any
    watchdog path is not a non-empty absolute path.

    node_list -- NodeAddressesList
    default_watchdog -- watchdog for nodes which are not specified in
        watchdog_dict
    watchdog_dict -- dictionary with node names as keys and value as watchdog
    """
    # every node starts with the default; explicit entries override it below
    full_dict = {node: default_watchdog for node in node_list}
    report_item_list = []
    for node_name, watchdog in watchdog_dict.items():
        # a watchdog must be given as a non-empty absolute path
        if not watchdog or not os.path.isabs(watchdog):
            report_item_list.append(reports.invalid_watchdog_path(watchdog))
            continue
        try:
            full_dict[node_list.find_by_label(node_name)] = watchdog
        except NodeNotFound:
            report_item_list.append(reports.node_not_found(node_name))
    if report_item_list:
        raise LibraryError(*report_item_list)
    return full_dict
def remove_nodes(nodes_names_to_remove, existing_nodes, quorum_device_settings):
    """
    Validate removing nodes

    iterable nodes_names_to_remove -- list of names of nodes to remove
    iterable existing_nodes -- list of all existing nodes
    tuple quorum_device_settings -- output of get_quorum_device_settings
    """
    names_to_remove = set(nodes_names_to_remove)
    existing_names = {node.name for node in existing_nodes}

    # unknown node names first, then the "would empty the cluster" check
    report_items = [
        reports.node_not_found(name)
        for name in names_to_remove - existing_names
    ]
    if not existing_names - names_to_remove:
        report_items.append(reports.cannot_remove_all_cluster_nodes())

    qdevice_model, qdevice_model_options, _, _ = quorum_device_settings
    if qdevice_model == "net":
        tie_breaker_nodeid = qdevice_model_options.get("tie_breaker")
        if tie_breaker_nodeid not in [None, "lowest", "highest"]:
            for node in existing_nodes:
                being_removed = node.name in nodes_names_to_remove
                # "4" != 4, convert ids to string to detect a match for sure
                is_tie_breaker = str(node.nodeid) == str(tie_breaker_nodeid)
                if being_removed and is_tie_breaker:
                    report_items.append(
                        reports.node_used_as_tie_breaker(
                            node.name, node.nodeid
                        )
                    )
    return report_items
def _ensure_node_exists(tree, node_name, state_nodes=None):
    """
    Make sure a node with the specified name exists in the tree.

    If the node does not exist but state_nodes contains the state of a node
    with that name, create the node in the tree. Otherwise raise
    LibraryError. Return the existing or newly created node element.

    etree tree -- node parent element
    string node_name -- node name
    iterable state_nodes -- optional list of node state objects
    """
    node_el = _get_node_by_uname(tree, node_name)
    if node_el is not None:
        return node_el
    # not in the tree yet; create it from the matching state entry, if any
    for node_state in (state_nodes or []):
        if node_state.attrs.name != node_name:
            continue
        return _create_node(
            tree,
            node_state.attrs.id,
            node_state.attrs.name,
            node_state.attrs.type,
        )
    raise LibraryError(reports.node_not_found(node_name))
def _find_resources_to_remove(
    cib,
    report_processor: ReportProcessor,
    node_type,
    node_identifier,
    allow_remove_multiple_nodes,
    find_resources
):
    """
    Find resource elements matching node_identifier and return them.

    Raises LibraryError if nothing matches, or if more than one element
    matches and removing multiple nodes was not explicitly allowed
    (forceable via FORCE_REMOVE_MULTIPLE_NODES).

    cib -- cib etree element
    report_processor -- tool for reporting
    node_type -- string describing the node type, used in error reports
    node_identifier -- string identifying the node to remove
    allow_remove_multiple_nodes -- if True, multiple matches are a warning
    find_resources -- callable(resources_section, node_identifier) -> list
    """
    resource_element_list = find_resources(get_resources(cib), node_identifier)
    if not resource_element_list:
        raise LibraryError(reports.node_not_found(node_identifier, node_type))
    if len(resource_element_list) > 1:
        # more than one match: forceable error unless the caller allowed it
        if report_processor.report(
            reports.get_problem_creator(
                report_codes.FORCE_REMOVE_MULTIPLE_NODES,
                allow_remove_multiple_nodes
            )(
                reports.multiple_result_found,
                "resource",
                [resource.attrib["id"] for resource in resource_element_list],
                node_identifier
            )
        ).has_errors:
            raise LibraryError()
    return resource_element_list
def _verify_node_name(node, existing_nodes):
    """
    Return a list of report items; non-empty when node is not a known node.

    node -- node name to check
    existing_nodes -- iterable of existing node names

    The produced error is forceable via FORCE_NODE_DOES_NOT_EXIST.
    """
    if node in existing_nodes:
        return []
    return [
        reports.node_not_found(
            node,
            forceable=report_codes.FORCE_NODE_DOES_NOT_EXIST,
        )
    ]
def test_invalid_target(self, mock_val_level, mock_val_target,
        mock_val_devices, mock_val_dupl, mock_append):
    """Target validation failure must skip the duplicity check."""
    # Make the mocked target validator report an error through the reporter;
    # assert_called_invalid then checks the expected call pattern with the
    # duplicity validator not invoked (dupl_called=False).
    mock_val_target.side_effect = (
        lambda reporter, status_nodes, target_type, target_value, force:
            reporter.append(reports.node_not_found(target_value)))
    self.assert_called_invalid(mock_val_level, mock_val_target,
        mock_val_devices, mock_val_dupl, mock_append, dupl_called=False)
def _set_instance_attrs_node_list(lib_env, attrs, node_names, wait):
    """
    Set instance attributes on each of the listed nodes.

    Raises LibraryError (with one report per unknown node) if any name in
    node_names is not a known cluster node.

    lib_env -- LibraryEnvironment
    attrs -- dict of instance attributes to set
    node_names -- iterable of node names to update
    wait -- wait timeout, handled by cib_runner_nodes
    """
    with cib_runner_nodes(lib_env, wait) as (cib, dummy_runner, state_nodes):
        # validate all names up front so nothing is written on partial input
        known_nodes = [node.attrs.name for node in state_nodes]
        report = []
        for node in node_names:
            if node not in known_nodes:
                report.append(reports.node_not_found(node))
        if report:
            raise LibraryError(*report)
        for node in node_names:
            update_node_instance_attrs(cib, node, attrs, state_nodes)
def test_invalid_target(
    self, mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
    mock_append
):
    """Target validation failure must skip the duplicity check."""
    # The mocked validator pushes a node_not_found report via reporter.add;
    # assert_called_invalid verifies the duplicity validator is not called.
    mock_val_target.side_effect = (
        lambda reporter, status_nodes, target_type, target_value, force:
        reporter.add(
            reports.node_not_found(target_value)
        )
    )
    self.assert_called_invalid(
        mock_val_level, mock_val_target, mock_val_devices, mock_val_dupl,
        mock_append, dupl_called=False
    )
def _check_node_names_in_cluster(node_list, node_name_list):
    """
    Check whether all node names from node_name_list exist in node_list.
    Returns list of ReportItem (one node_not_found per missing name)

    node_list -- NodeAddressesList
    node_name_list -- list of strings
    """
    # collect into a set so a name repeated in the input is reported once
    not_existing_node_set = set()
    for node_name in node_name_list:
        try:
            node_list.find_by_label(node_name)
        except NodeNotFound:
            not_existing_node_set.add(node_name)
    return [reports.node_not_found(node) for node in not_existing_node_set]
def test_invalid_target(self, mock_val_level, mock_val_target,
        mock_val_devices, mock_val_dupl, mock_append):
    """Target validation failure must skip the duplicity check."""
    # Here the validator returns a report list instead of using a reporter;
    # the expected NODE_NOT_FOUND error fixture is passed through so
    # assert_called_invalid can match it.
    mock_val_target.return_value = [
        reports.node_not_found(self.target_value)
    ]
    report_list = [
        fixture.error(
            report_codes.NODE_NOT_FOUND,
            node=self.target_value,
            searched_types=[],
        ),
    ]
    self.assert_called_invalid(mock_val_level, mock_val_target,
        mock_val_devices, mock_val_dupl, mock_append, dupl_called=False,
        report_list=report_list)
def _validate_target_valuewise(reporter, cluster_status_nodes, target_type,
        target_value, force_node=False, allow_force=True):
    """
    Validate that target_value names an existing cluster node when the
    target type is node; report findings through reporter.

    reporter -- report processor (collects the report item via add())
    cluster_status_nodes -- iterable of node state objects
    target_type -- type of the fencing topology target
    target_value -- target value (a node name for TARGET_TYPE_NODE)
    force_node -- if True (and forcing is allowed), missing node is only
        a warning
    allow_force -- if False, the error is not forceable
    """
    if target_type == TARGET_TYPE_NODE:
        node_found = False
        for node in cluster_status_nodes:
            if target_value == node.attrs.name:
                node_found = True
                break
        if not node_found:
            # severity and forceability depend on the force flags
            reporter.add(
                reports.node_not_found(
                    target_value,
                    ReportItemSeverity.WARNING if force_node and allow_force
                        else ReportItemSeverity.ERROR,
                    None if force_node or not allow_force
                        else report_codes.FORCE_NODE_DOES_NOT_EXIST))
def get_local_node_status(runner):
    """
    Return a dict describing the status of the local cluster node.

    runner -- CommandRunner

    Returns {"offline": True} when the cluster status cannot be obtained;
    otherwise a dict of node status attributes plus "offline": False.
    Raises LibraryError when the local node is not found in the status.
    """
    try:
        cluster_status = ClusterState(get_cluster_status_xml(runner))
    except CrmMonErrorException:
        # no usable cluster status => report the local node as offline
        return {"offline": True}
    node_name = __get_local_node_name(runner)
    for node_status in cluster_status.node_section.nodes:
        if node_status.attrs.name == node_name:
            result = {
                "offline": False,
            }
            # copy the interesting status attributes verbatim
            for attr in (
                'id', 'name', 'type', 'online', 'standby', 'standby_onfail',
                'maintenance', 'pending', 'unclean', 'shutdown',
                'expected_up', 'is_dc', 'resources_running',
            ):
                result[attr] = getattr(node_status.attrs, attr)
            return result
    raise LibraryError(reports.node_not_found(node_name))
def _validate_target_valuewise(
    cluster_status_nodes, target_type, target_value, force_node=False,
    allow_force=True
) -> ReportItemList:
    """
    Validate that target_value names an existing cluster node when the
    target type is node; return the resulting report items.

    cluster_status_nodes -- iterable of node state objects
    target_type -- type of the fencing topology target
    target_value -- target value (a node name for TARGET_TYPE_NODE)
    force_node -- if True (and forcing is allowed), missing node is only
        a warning
    allow_force -- if False, the error is not forceable
    """
    report_list: ReportItemList = []
    if target_type == TARGET_TYPE_NODE:
        node_found = False
        for node in cluster_status_nodes:
            if target_value == node.attrs.name:
                node_found = True
                break
        if not node_found:
            # severity and forceability depend on the force flags
            report_list.append(
                reports.node_not_found(
                    target_value,
                    severity=ReportItemSeverity.WARNING
                        if force_node and allow_force
                        else ReportItemSeverity.ERROR,
                    forceable=None if force_node or not allow_force
                        else report_codes.FORCE_NODE_DOES_NOT_EXIST))
    return report_list
def _validate_target_valuewise(
    reporter, cluster_status_nodes, target_type, target_value,
    force_node=False, allow_force=True
):
    """
    Validate that target_value names an existing cluster node when the
    target type is node; report findings through reporter.

    reporter -- report processor (collects the report item via add())
    cluster_status_nodes -- iterable of node state objects
    target_type -- type of the fencing topology target
    target_value -- target value (a node name for TARGET_TYPE_NODE)
    force_node -- if True (and forcing is allowed), missing node is only
        a warning
    allow_force -- if False, the error is not forceable
    """
    if target_type == TARGET_TYPE_NODE:
        node_found = False
        for node in cluster_status_nodes:
            if target_value == node.attrs.name:
                node_found = True
                break
        if not node_found:
            # severity and forceability depend on the force flags
            reporter.add(
                reports.node_not_found(
                    target_value,
                    ReportItemSeverity.WARNING if force_node and allow_force
                        else ReportItemSeverity.ERROR
                    ,
                    None if force_node or not allow_force
                        else report_codes.FORCE_NODE_DOES_NOT_EXIST
                )
            )
def __nodes_standby_unstandby(
    runner, standby=True, node_list=None, all_nodes=False
):
    """
    Put nodes into, or bring them out of, standby mode via crm_standby.

    runner -- CommandRunner
    standby -- if True set standby on, otherwise remove the standby flag
    node_list -- explicit nodes to affect; when empty and all_nodes is
        False, crm_standby is run without -N (affects the local node)
    all_nodes -- if True, affect every node known to the cluster status

    Raises LibraryError when a requested node is unknown or any
    crm_standby invocation fails.
    """
    if node_list or all_nodes:
        # TODO once we switch to editing CIB instead of running crm_standby,
        # we cannot always rely on getClusterState. If we're not editing a
        # CIB from a live cluster, there is no status.
        state = ClusterState(get_cluster_status_xml(runner)).node_section.nodes
        known_nodes = [node.attrs.name for node in state]
        if all_nodes:
            node_list = known_nodes
        elif node_list:
            # validate all requested names before running any command
            report = []
            for node in node_list:
                if node not in known_nodes:
                    report.append(reports.node_not_found(node))
            if report:
                raise LibraryError(*report)
    # TODO Edit CIB directly instead of running commands for each node; be
    # aware remote nodes might not be in the CIB yet so we need to put them
    # there.
    cmd_template = [__exec("crm_standby")]
    cmd_template.extend(["-v", "on"] if standby else ["-D"])
    cmd_list = []
    if node_list:
        for node in node_list:
            cmd_list.append(cmd_template + ["-N", node])
    else:
        cmd_list.append(cmd_template)
    # run every command and collect failures instead of stopping early
    report = []
    for cmd in cmd_list:
        stdout, stderr, retval = runner.run(cmd)
        if retval != 0:
            report.append(
                reports.common_error(join_multilines([stderr, stdout]))
            )
    if report:
        raise LibraryError(*report)
def _find_resources_to_remove(
    cib, report_processor, node_type, node_identifier,
    allow_remove_multiple_nodes, find_resources
):
    """
    Find resource elements matching node_identifier and return them.

    Raises LibraryError if nothing matches; when more than one element
    matches, a forceable report is processed (FORCE_REMOVE_MULTIPLE_NODES)
    which raises unless allow_remove_multiple_nodes is set.

    cib -- cib etree element
    report_processor -- tool for reporting
    node_type -- string describing the node type, used in error reports
    node_identifier -- string identifying the node to remove
    allow_remove_multiple_nodes -- if True, multiple matches are a warning
    find_resources -- callable(resources_section, node_identifier) -> list
    """
    resource_element_list = find_resources(get_resources(cib), node_identifier)
    if not resource_element_list:
        raise LibraryError(reports.node_not_found(node_identifier, node_type))
    if len(resource_element_list) > 1:
        # forceable error unless the caller allowed multiple matches
        report_processor.process(
            reports.get_problem_creator(
                report_codes.FORCE_REMOVE_MULTIPLE_NODES,
                allow_remove_multiple_nodes
            )(
                reports.multiple_result_found,
                "resource",
                [resource.attrib["id"] for resource in resource_element_list],
                node_identifier
            )
        )
    return resource_element_list
def enable_sbd(
    lib_env, default_watchdog, watchdog_dict, sbd_options,
    default_device_list=None, node_device_dict=None,
    allow_unknown_opts=False, ignore_offline_nodes=False,
):
    """
    Enable SBD on all nodes in cluster.

    lib_env -- LibraryEnvironment
    default_watchdog -- watchdog for nodes which are not specified in
        watchdog_dict. Uses default value from settings if None.
    watchdog_dict -- dictionary with node names as keys and watchdog path
        as value
    sbd_options -- dictionary in format: <SBD config option>: <value>
    default_device_list -- list of devices for all nodes
    node_device_dict -- dictionary with node names as keys and list of
        devices as value
    allow_unknown_opts -- if True, accept also unknown options.
    ignore_offline_nodes -- if True, omit offline nodes
    """
    corosync_conf = lib_env.get_corosync_conf()
    node_list = corosync_conf.get_nodes_names()
    target_list = lib_env.get_node_target_factory().get_target_list(
        node_list, skip_non_existing=ignore_offline_nodes,
    )

    # devices mode is active when either device argument was supplied
    using_devices = not (
        default_device_list is None and node_device_dict is None
    )
    if default_device_list is None:
        default_device_list = []
    if node_device_dict is None:
        node_device_dict = {}
    if not default_watchdog:
        default_watchdog = settings.sbd_watchdog_default
    # SBD config option names are upper-case by convention
    sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()])

    full_watchdog_dict = _get_full_target_dict(
        target_list, watchdog_dict, default_watchdog)
    full_device_dict = _get_full_target_dict(
        target_list, node_device_dict, default_device_list)

    # validate everything up front: unknown nodes, watchdogs, devices, options
    lib_env.report_processor.process_list(
        [
            reports.node_not_found(node)
            for node in (
                set(list(watchdog_dict.keys()) + list(node_device_dict.keys()))
                - set(node_list)
            )
        ]
        + _validate_watchdog_dict(full_watchdog_dict)
        + (sbd.validate_nodes_devices(full_device_dict)
            if using_devices else [])
        + _validate_sbd_options(sbd_options, allow_unknown_opts))

    com_cmd = GetOnlineTargets(
        lib_env.report_processor, ignore_offline_targets=ignore_offline_nodes,
    )
    com_cmd.set_targets(target_list)
    online_targets = run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # check if SBD can be enabled
    com_cmd = CheckSbd(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            full_watchdog_dict[target.label],
            full_device_dict[target.label] if using_devices else [],
        )
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable ATB if needed
    if not using_devices:
        if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
            lib_env.report_processor.process(
                reports.corosync_quorum_atb_will_be_enabled_due_to_sbd())
            corosync_conf.set_quorum_options({"auto_tie_breaker": "1"})
            # NOTE(review): push appears nested under the ATB check in the
            # original one-line source — confirm intended indentation
            lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)

    # distribute SBD configuration
    config = sbd.get_default_sbd_config()
    config.update(sbd_options)
    com_cmd = SetSbdConfig(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            sbd.create_sbd_config(config, target.label,
                full_watchdog_dict[target.label],
                full_device_dict[target.label]))
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # remove cluster prop 'stonith_watchdog_timeout'
    com_cmd = RemoveStonithWatchdogTimeout(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable SBD service on all nodes
    com_cmd = EnableSbdService(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    lib_env.report_processor.process(
        reports.cluster_restart_required_to_apply_changes())
def enable_sbd(
    lib_env, default_watchdog, watchdog_dict, sbd_options,
    default_device_list=None, node_device_dict=None,
    allow_unknown_opts=False, ignore_offline_nodes=False,
    no_watchdog_validation=False, allow_invalid_option_values=False,
):
    # pylint: disable=too-many-arguments, too-many-locals
    """
    Enable SBD on all nodes in cluster.

    lib_env -- LibraryEnvironment
    default_watchdog -- watchdog for nodes which are not specified in
        watchdog_dict. Uses default value from settings if None.
    watchdog_dict -- dictionary with node names as keys and watchdog path
        as value
    sbd_options -- dictionary in format: <SBD config option>: <value>
    default_device_list -- list of devices for all nodes
    node_device_dict -- dictionary with node names as keys and list of
        devices as value
    allow_unknown_opts -- if True, accept also unknown options.
    ignore_offline_nodes -- if True, omit offline nodes
    no_watchdog_validation -- if True, do not validate existence of a
        watchdog on the nodes
    allow_invalid_option_values -- if True, invalid values of some options
        will be treated as warning instead of errors
    """
    # devices mode is active when either device argument was supplied
    using_devices = not (
        default_device_list is None and node_device_dict is None
    )
    if default_device_list is None:
        default_device_list = []
    if node_device_dict is None:
        node_device_dict = {}
    if not default_watchdog:
        default_watchdog = settings.sbd_watchdog_default
    # SBD config option names are upper-case by convention
    sbd_options = {opt.upper(): val for opt, val in sbd_options.items()}

    corosync_conf = lib_env.get_corosync_conf()
    node_list, get_nodes_report_list = get_existing_nodes_names(corosync_conf)
    if not node_list:
        get_nodes_report_list.append(reports.corosync_config_no_nodes_defined())
    target_list = lib_env.get_node_target_factory().get_target_list(
        node_list, skip_non_existing=ignore_offline_nodes,
    )

    full_watchdog_dict = _get_full_target_dict(
        target_list, watchdog_dict, default_watchdog
    )
    full_device_dict = _get_full_target_dict(
        target_list, node_device_dict, default_device_list
    )

    # validate everything up front: unknown nodes, watchdogs, devices, options
    lib_env.report_processor.process_list(
        get_nodes_report_list
        +
        [
            reports.node_not_found(node)
            for node in (
                set(list(watchdog_dict.keys()) + list(node_device_dict.keys()))
                -
                set(node_list)
            )
        ]
        +
        _validate_watchdog_dict(full_watchdog_dict)
        +
        (sbd.validate_nodes_devices(full_device_dict) if using_devices else [])
        +
        _validate_sbd_options(
            sbd_options, allow_unknown_opts, allow_invalid_option_values
        )
    )

    com_cmd = GetOnlineTargets(
        lib_env.report_processor, ignore_offline_targets=ignore_offline_nodes,
    )
    com_cmd.set_targets(target_list)
    online_targets = run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # check if SBD can be enabled
    if no_watchdog_validation:
        lib_env.report_processor.report(
            reports.sbd_watchdog_validation_inactive()
        )
    com_cmd = CheckSbd(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            (
                # Do not send watchdog if validation is turned off. Listing of
                # available watchdogs in pcsd may restart the machine in some
                # corner cases.
                "" if no_watchdog_validation
                else full_watchdog_dict[target.label]
            ),
            full_device_dict[target.label] if using_devices else [],
        )
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable ATB if needed
    if not using_devices:
        if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
            lib_env.report_processor.process(
                reports.corosync_quorum_atb_will_be_enabled_due_to_sbd()
            )
            corosync_conf.set_quorum_options({"auto_tie_breaker": "1"})
            # NOTE(review): push appears nested under the ATB check in the
            # original one-line source — confirm intended indentation
            lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)

    # distribute SBD configuration
    config = sbd.get_default_sbd_config()
    config.update(sbd_options)
    com_cmd = SetSbdConfig(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            sbd.create_sbd_config(
                config,
                target.label,
                full_watchdog_dict[target.label],
                full_device_dict[target.label]
            )
        )
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # remove cluster prop 'stonith_watchdog_timeout'
    com_cmd = RemoveStonithWatchdogTimeout(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable SBD service on all nodes
    com_cmd = EnableSbdService(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    lib_env.report_processor.process(
        reports.cluster_restart_required_to_apply_changes()
    )
def location_add(lib, argv, modifiers):
    """
    Add (or replace) a location constraint:
    argv = [constraint_id, resource, node, score, option=value...]

    Options:
      * --force - allow unknown options, allow constraint for any resource
        type, skip node-existence validation
      * -f - CIB file
    """
    del lib
    modifiers.ensure_only_supported("--force", "-f")
    if len(argv) < 4:
        raise CmdLineInputError()
    constraint_id = argv.pop(0)
    rsc_type, rsc_value = parse_args.parse_typed_arg(
        argv.pop(0),
        [RESOURCE_TYPE_RESOURCE, RESOURCE_TYPE_REGEXP],
        RESOURCE_TYPE_RESOURCE)
    node = argv.pop(0)
    score = argv.pop(0)
    options = []
    # For now we only allow setting resource-discovery
    if argv:
        for arg in argv:
            if '=' in arg:
                options.append(arg.split('=', 1))
            else:
                raise CmdLineInputError(f"bad option '{arg}'")
            if (options[-1][0] != "resource-discovery"
                    and not modifiers.get("--force")):
                utils.err("bad option '%s', use --force to override"
                    % options[-1][0])

    id_valid, id_error = utils.validate_xml_id(constraint_id, 'constraint id')
    if not id_valid:
        utils.err(id_error)

    if not utils.is_score(score):
        utils.err("invalid score '%s', use integer or INFINITY or -INFINITY"
            % score)

    # some features need a newer CIB schema; upgrade on demand
    required_version = None
    if [x for x in options if x[0] == "resource-discovery"]:
        required_version = 2, 2, 0
    if rsc_type == RESOURCE_TYPE_REGEXP:
        required_version = 2, 6, 0
    if required_version:
        dom = utils.cluster_upgrade_to_version(required_version)
    else:
        dom = utils.get_cib_dom()

    if rsc_type == RESOURCE_TYPE_RESOURCE:
        rsc_valid, rsc_error, dummy_correct_id = (
            utils.validate_constraint_resource(dom, rsc_value))
        if not rsc_valid:
            utils.err(rsc_error)

    # Verify that specified node exists in the cluster
    if not (modifiers.is_specified("-f") or modifiers.get("--force")):
        lib_env = utils.get_lib_env()
        existing_nodes, report_list = get_existing_nodes_names(
            corosync_conf=lib_env.get_corosync_conf(),
            cib=lib_env.get_cib(),
        )
        if node not in existing_nodes:
            report_list.append(
                reports.node_not_found(
                    node, forceable=report_codes.FORCE_NODE_DOES_NOT_EXIST))
        if report_list:
            process_library_reports(report_list)
    else:
        warn(LOCATION_NODE_VALIDATION_SKIP_MSG)

    # Verify current constraint doesn't already exist
    # If it does we replace it with the new constraint
    dummy_dom, constraintsElement = getCurrentConstraints(dom)
    elementsToRemove = []
    # If the id matches, or the rsc & node match, then we replace/remove
    for rsc_loc in constraintsElement.getElementsByTagName('rsc_location'):
        # pylint: disable=too-many-boolean-expressions
        if (rsc_loc.getAttribute("id") == constraint_id
                or (rsc_loc.getAttribute("node") == node
                    and ((RESOURCE_TYPE_RESOURCE == rsc_type
                            and rsc_loc.getAttribute("rsc") == rsc_value)
                        or (RESOURCE_TYPE_REGEXP == rsc_type
                            and rsc_loc.getAttribute("rsc-pattern")
                                == rsc_value)))):
            elementsToRemove.append(rsc_loc)
    for etr in elementsToRemove:
        constraintsElement.removeChild(etr)

    element = dom.createElement("rsc_location")
    element.setAttribute("id", constraint_id)
    if rsc_type == RESOURCE_TYPE_RESOURCE:
        element.setAttribute("rsc", rsc_value)
    elif rsc_type == RESOURCE_TYPE_REGEXP:
        element.setAttribute("rsc-pattern", rsc_value)
    element.setAttribute("node", node)
    element.setAttribute("score", score)
    for option in options:
        element.setAttribute(option[0], option[1])
    constraintsElement.appendChild(element)
    utils.replace_cib_configuration(dom)
def enable_sbd(
    lib_env, default_watchdog, watchdog_dict, sbd_options,
    default_device_list=None, node_device_dict=None,
    allow_unknown_opts=False, ignore_offline_nodes=False,
    no_watchdog_validation=False, allow_invalid_option_values=False,
):
    # pylint: disable=too-many-arguments, too-many-locals
    """
    Enable SBD on all nodes in cluster.

    lib_env -- LibraryEnvironment
    default_watchdog -- watchdog for nodes which are not specified in
        watchdog_dict. Uses default value from settings if None.
    watchdog_dict -- dictionary with node names as keys and watchdog path
        as value
    sbd_options -- dictionary in format: <SBD config option>: <value>
    default_device_list -- list of devices for all nodes
    node_device_dict -- dictionary with node names as keys and list of
        devices as value
    allow_unknown_opts -- if True, accept also unknown options.
    ignore_offline_nodes -- if True, omit offline nodes
    no_watchdog_validation -- if True, do not validate existence of a
        watchdog on the nodes
    allow_invalid_option_values -- if True, invalid values of some options
        will be treated as warning instead of errors
    """
    # devices mode is active when either device argument was supplied
    using_devices = not (default_device_list is None
        and node_device_dict is None)
    if default_device_list is None:
        default_device_list = []
    if node_device_dict is None:
        node_device_dict = {}
    if not default_watchdog:
        default_watchdog = settings.sbd_watchdog_default
    # SBD config option names are upper-case by convention
    sbd_options = {opt.upper(): val for opt, val in sbd_options.items()}

    corosync_conf = lib_env.get_corosync_conf()
    node_list, get_nodes_report_list = get_existing_nodes_names(corosync_conf)
    if not node_list:
        get_nodes_report_list.append(
            reports.corosync_config_no_nodes_defined())
    target_list = lib_env.get_node_target_factory().get_target_list(
        node_list, skip_non_existing=ignore_offline_nodes,
    )

    full_watchdog_dict = _get_full_target_dict(
        target_list, watchdog_dict, default_watchdog)
    full_device_dict = _get_full_target_dict(
        target_list, node_device_dict, default_device_list)

    # validate everything up front: unknown nodes, watchdogs, devices, options
    if lib_env.report_processor.report_list(
        get_nodes_report_list
        +
        [
            reports.node_not_found(node)
            for node in (
                set(list(watchdog_dict.keys()) + list(node_device_dict.keys()))
                - set(node_list)
            )
        ]
        +
        _validate_watchdog_dict(full_watchdog_dict)
        +
        (sbd.validate_nodes_devices(full_device_dict) if using_devices
            else []
        )
        +
        _validate_sbd_options(sbd_options, allow_unknown_opts,
            allow_invalid_option_values)).has_errors:
        raise LibraryError()

    com_cmd = GetOnlineTargets(
        lib_env.report_processor, ignore_offline_targets=ignore_offline_nodes,
    )
    com_cmd.set_targets(target_list)
    online_targets = run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # check if SBD can be enabled
    if no_watchdog_validation:
        lib_env.report_processor.report(
            reports.sbd_watchdog_validation_inactive())
    com_cmd = CheckSbd(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            (
                # Do not send watchdog if validation is turned off. Listing of
                # available watchdogs in pcsd may restart the machine in some
                # corner cases.
                "" if no_watchdog_validation
                else full_watchdog_dict[target.label]),
            full_device_dict[target.label] if using_devices else [],
        )
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable ATB if needed
    if not using_devices:
        if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
            lib_env.report_processor.report(
                reports.corosync_quorum_atb_will_be_enabled_due_to_sbd())
            corosync_conf.set_quorum_options({"auto_tie_breaker": "1"})
            # NOTE(review): push appears nested under the ATB check in the
            # original one-line source — confirm intended indentation
            lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)

    # distribute SBD configuration
    config = sbd.get_default_sbd_config()
    config.update(sbd_options)
    com_cmd = SetSbdConfig(lib_env.report_processor)
    for target in online_targets:
        com_cmd.add_request(
            target,
            sbd.create_sbd_config(config, target.label,
                full_watchdog_dict[target.label],
                full_device_dict[target.label]))
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # remove cluster prop 'stonith_watchdog_timeout'
    com_cmd = RemoveStonithWatchdogTimeout(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    # enable SBD service on all nodes
    com_cmd = EnableSbdService(lib_env.report_processor)
    com_cmd.set_targets(online_targets)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    lib_env.report_processor.report(
        reports.cluster_restart_required_to_apply_changes())