def add_resource(bundle_element, primitive_element):
    """
    Put an existing resource into an existing bundle.

    etree bundle_element -- the bundle that should receive the resource
    etree primitive_element -- the resource to be added to the bundle
    """
    # TODO possibly split to 'validate' and 'do' functions
    # a bundle may currently contain at most one primitive resource
    existing_primitive = bundle_element.find(TAG_PRIMITIVE)
    if existing_primitive is not None:
        raise LibraryError(
            ReportItem.error(
                reports.messages.ResourceBundleAlreadyContainsAResource(
                    bundle_element.get("id"),
                    existing_primitive.get("id"),
                )
            )
        )
    bundle_element.append(primitive_element)
def remove(env: LibraryEnvironment, tag_list: Iterable[str]) -> None:
    """
    Remove the specified tags from the CIB.

    env -- provides everything needed for communication with externals
    tag_list -- ids of the tags to be removed
    """
    with cib_tags_section(env) as tags_section:
        validation_reports = tag.validate_remove_tag(
            get_constraints(get_root(tags_section)),
            tag_list,
        )
        env.report_processor.report_list(validation_reports)
        tag_elements, find_reports = tag.find_tag_elements_by_ids(
            tags_section,
            tag_list,
        )
        # has_errors covers the validation reports submitted above as well
        if env.report_processor.report_list(find_reports).has_errors:
            raise LibraryError()
        tag.remove_tag(tag_elements)
def add_recipient(
    lib_env: LibraryEnvironment,
    alert_id,
    recipient_value,
    instance_attribute_dict,
    meta_attribute_dict,
    recipient_id=None,
    description=None,
    allow_same_value=False,
):
    """
    Add a new recipient to the alert with id alert_id.

    lib_env -- LibraryEnvironment
    alert_id -- id of the alert the new recipient should be added to
    recipient_value -- value of the new recipient
    instance_attribute_dict -- instance attributes to set on the recipient
    meta_attribute_dict -- meta attributes to set on the recipient
    recipient_id -- id of the new recipient, generated when None
    description -- description of the recipient
    allow_same_value -- when True, a unique recipient value is not required
    """
    if not recipient_value:
        raise LibraryError(
            ReportItem.error(
                reports.messages.RequiredOptionsAreMissing(["value"])
            )
        )

    cib = lib_env.get_cib()
    id_provider = IdProvider(cib)
    recipient = alert.add_recipient(
        lib_env.report_processor,
        cib,
        alert_id,
        recipient_value,
        recipient_id=recipient_id,
        description=description,
        allow_same_value=allow_same_value,
    )
    arrange_first_instance_attributes(
        recipient, instance_attribute_dict, id_provider
    )
    arrange_first_meta_attributes(
        recipient, meta_attribute_dict, id_provider
    )
    lib_env.push_cib()
def _upgrade_cib(runner):
    """
    Upgrade the CIB to the latest schema available locally or clusterwise.

    CommandRunner runner
    """
    stdout, stderr, retval = runner.run(
        [__exec("cibadmin"), "--upgrade", "--force"]
    )
    # cibadmin exits with 0 when the CIB already uses the latest available
    # schema. That is fine; the required version is not known here anyway,
    # the caller knows it and is responsible for dealing with it.
    if retval == 0:
        return
    raise LibraryError(
        ReportItem.error(
            reports.messages.CibUpgradeFailed(
                join_multilines([stderr, stdout])
            )
        )
    )
def initialize_block_devices(
    report_processor: reports.ReportProcessor,
    cmd_runner,
    device_list,
    option_dict,
):
    """
    Initialize the given devices with the options specified in option_dict.
    Raise LibraryError when the initialization fails.

    report_processor -- report processor
    cmd_runner -- CommandRunner
    device_list -- list of device paths (strings)
    option_dict -- options and their values
    """
    report_processor.report(
        reports.ReportItem.info(
            reports.messages.SbdDeviceInitializationStarted(device_list)
        )
    )

    command = [settings.sbd_binary]
    for device_path in device_list:
        command.extend(["-d", device_path])
    # sorted so the resulting command line is deterministic
    for option_name, option_value in sorted(option_dict.items()):
        command.extend(
            [
                DEVICE_INITIALIZATION_OPTIONS_MAPPING[option_name],
                str(option_value),
            ]
        )
    command.append("create")

    dummy_std_out, std_err, return_code = cmd_runner.run(command)
    if return_code != 0:
        raise LibraryError(
            reports.ReportItem.error(
                reports.messages.SbdDeviceInitializationError(
                    device_list, std_err
                )
            )
        )
    report_processor.report(
        reports.ReportItem.info(
            reports.messages.SbdDeviceInitializationSuccess(device_list)
        )
    )
def prepare_options_with_set(cib, options, resource_set_list):
    """
    Process and validate options of a ticket constraint with resource sets;
    return the validated options.
    Raise LibraryError when validation fails.
    """
    options = constraint.prepare_options(
        tuple(ATTRIB.keys()),
        options,
        create_id_fn=partial(
            constraint.create_id, cib, "ticket", resource_set_list
        ),
        validate_id=partial(tools.check_new_id_applicable, cib, DESCRIPTION),
    )
    report_list = _validate_options_common(options)
    # a missing or blank ticket name is an error
    if not options.get("ticket", "").strip():
        report_list.append(
            ReportItem.error(
                reports.messages.RequiredOptionsAreMissing(["ticket"])
            )
        )
    if report_list:
        raise LibraryError(*report_list)
    return options
def _find_resources_to_remove(
    cib,
    report_processor,
    node_type,
    node_identifier,
    allow_remove_multiple_nodes,
    find_resources,
):
    """
    Find the resources representing the given node; raise LibraryError when
    no resource is found, report (forcible) when more than one is found.
    """
    resource_element_list = find_resources(
        get_resources(cib), node_identifier
    )

    if not resource_element_list:
        raise LibraryError(reports.node_not_found(node_identifier, node_type))

    if len(resource_element_list) > 1:
        report_processor.process(
            reports.get_problem_creator(
                report_codes.FORCE_REMOVE_MULTIPLE_NODES,
                allow_remove_multiple_nodes,
            )(
                reports.multiple_result_found,
                "resource",
                [
                    found_resource.attrib["id"]
                    for found_resource in resource_element_list
                ],
                node_identifier,
            )
        )

    return resource_element_list
def test_destroy_failed(
    self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
):
    # When the final destroy step fails, the service is already stopped and
    # disabled and the destroy error is propagated to the caller.
    mock_status.return_value = ""
    mock_net_destroy.side_effect = LibraryError("mock_report_item")

    self.assertRaises(
        LibraryError,
        lambda: lib.qdevice_destroy(self.lib_env, "net"),
    )

    mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
    mock_net_disable.assert_called_once_with(
        "mock_runner", "corosync-qnetd"
    )
    mock_net_destroy.assert_called_once_with()

    expected_report_list = [
        (
            severity.INFO,
            report_codes.SERVICE_STOP_STARTED,
            {"service": "quorum device"},
        ),
        (
            severity.INFO,
            report_codes.SERVICE_STOP_SUCCESS,
            {"service": "quorum device"},
        ),
        (
            severity.INFO,
            report_codes.SERVICE_DISABLE_SUCCESS,
            {"service": "quorum device"},
        ),
    ]
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list, expected_report_list
    )
def set_stonith_watchdog_timeout_to_zero_on_all_nodes(
    node_communicator, node_list
):
    """
    Set the cluster property 'stonith-watchdog-timeout' to '0' on all nodes
    in node_list, even if the cluster is not currently running on them
    (by editing the CIB file directly).
    Raise LibraryError with all collected ReportItems in case of any failure.

    node_communicator -- NodeCommunicator
    node_list -- NodeAddressesList
    """
    failure_reports = []
    for node in node_list:
        try:
            set_stonith_watchdog_timeout_to_zero(node_communicator, node)
        except NodeCommunicationException as e:
            failure_reports.append(
                node_communicator_exception_to_report_item(e)
            )
    if failure_reports:
        raise LibraryError(*failure_reports)
def stonith_level_clear_cmd(lib, argv, modifiers):
    """
    Remove all fencing levels, or the levels matching the given parameter.
    """
    if len(argv) > 1:
        raise CmdLineInputError()

    if not argv:
        lib.fencing_topology.remove_all_levels()
        return

    target_type, target_value = stonith_level_parse_node(argv[0])
    # backward compatibility mode
    # Command parameters are: node, stonith-list
    # Both the node and the stonith list are optional. If the node is ommited
    # and the stonith list is present, there is no way to figure it out, since
    # there is no specification of what the parameter is. Hence the pre-lib
    # code tried both. It deleted all levels having the first parameter as
    # either a node or a device list. Since it was only possible to specify
    # node as a target back then, this is enabled only in that case.
    removal_attempts = [(target_type, target_value, None)]
    if target_type == TARGET_TYPE_NODE:
        removal_attempts.append((None, None, argv[0].split(",")))

    report_item_list = []
    for attempt_type, attempt_value, attempt_devices in removal_attempts:
        try:
            lib.fencing_topology.remove_levels_by_params(
                None,
                attempt_type,
                attempt_value,
                attempt_devices,
                # pre-lib code didn't return any error when no level was found
                ignore_if_missing=True,
            )
        except LibraryError as e:
            report_item_list.extend(e.args)
    if report_item_list:
        raise LibraryError(*report_item_list)
def config_text(env, name, node_name=None):
    """
    Get the booth configuration in raw format.

    string name -- name of the booth instance whose config should be returned
    string node_name -- get the config from the specified node, or from the
        local host when None
    """
    if node_name is None:
        # TODO add name support
        return env.booth.get_config_content()

    com_cmd = BoothGetConfig(env.report_processor, name)
    com_cmd.set_targets(
        [env.get_node_target_factory().get_target_from_hostname(node_name)]
    )
    remote_data = run_and_raise(env.get_node_communicator(), com_cmd)[0][1]
    try:
        return remote_data["config"]["data"]
    except KeyError:
        raise LibraryError(reports.invalid_response_format(node_name))
def create_in_cluster(env, ip, allow_absent_resource_agent=False):
    """
    Create a group with an ip resource and a booth resource.

    LibraryEnvironment env -- provides all for communication with externals
    string ip -- floating ip used by the booth instance
    bool allow_absent_resource_agent -- when True, create the booth resource
        even if its agent is not installed
    """
    resources_section = get_resources(env.get_cib())
    id_provider = IdProvider(resources_section)
    name = env.booth.name
    booth_config_file_path = get_config_file_name(name)

    if resource.find_for_config(resources_section, booth_config_file_path):
        raise LibraryError(booth_reports.booth_already_in_cib(name))

    create_id = partial(resource.create_resource_id, resources_section, name)
    get_agent = partial(
        find_valid_resource_agent_by_name,
        env.report_processor,
        env.cmd_runner(),
        allowed_absent=allow_absent_resource_agent,
    )
    create_primitive = partial(
        primitive.create,
        env.report_processor,
        resources_section,
        id_provider,
    )
    booth_group = group.provide_group(resources_section, create_id("group"))

    group.place_resource(
        booth_group,
        create_primitive(
            create_id("ip"),
            get_agent("ocf:heartbeat:IPaddr2"),
            instance_attributes={"ip": ip},
        ),
    )
    group.place_resource(
        booth_group,
        create_primitive(
            create_id("service"),
            get_agent("ocf:pacemaker:booth-site"),
            instance_attributes={"config": booth_config_file_path},
        ),
    )
    env.push_cib()
def qdevice_net_sign_certificate_request(
    lib_env, certificate_request, cluster_name
):
    """
    Sign a node certificate request by the qnetd CA.

    string certificate_request -- base64 encoded certificate request
    string cluster_name -- name of the cluster to which qdevice is being added
    """
    _ensure_not_cman(lib_env)
    try:
        certificate_request_data = base64.b64decode(certificate_request)
    except (TypeError, binascii.Error):
        raise LibraryError(
            reports.invalid_option_value(
                "qnetd certificate request",
                certificate_request,
                ["base64 encoded certificate"],
            )
        )
    signed_certificate = qdevice_net.qdevice_sign_certificate_request(
        lib_env.cmd_runner(),
        certificate_request_data,
        cluster_name,
    )
    return base64.b64encode(signed_certificate)
def place_resource(
    group_element,
    primitive_element,
    adjacent_resource_id=None,
    put_after_adjacent=False,
):
    """
    Put a resource into a group. Also usable for changing the position of a
    resource, because the primitive element is replanted from wherever it is
    (including the group itself) to a concrete place inside the group.

    etree.Element group_element -- the group to put primitive_element into
    etree.Element primitive_element -- the element being placed
    string adjacent_resource_id -- id of an existing resource in the group;
        when specified, primitive_element is put beside it
    bool put_after_adjacent -- where to put primitive_element:
        before adjacent_resource_id when put_after_adjacent=False,
        after adjacent_resource_id when put_after_adjacent=True;
        only makes sense when adjacent_resource_id is specified
    """
    if primitive_element.attrib["id"] == adjacent_resource_id:
        raise LibraryError(
            ReportItem.error(
                reports.messages.CannotGroupResourceNextToItself(
                    adjacent_resource_id
                )
            )
        )

    if not adjacent_resource_id:
        group_element.append(primitive_element)
        return

    adjacent_resource = find_element_by_tag_and_id(
        "primitive",
        group_element,
        adjacent_resource_id,
    )
    # placing after the last element is just an append
    if put_after_adjacent and adjacent_resource.getnext() is None:
        group_element.append(primitive_element)
        return

    reference_element = (
        adjacent_resource.getnext()
        if put_after_adjacent
        else adjacent_resource
    )
    group_element.insert(
        group_element.index(reference_element), primitive_element
    )
def update_quorum_device(
    self, model_options, generic_options, heuristics_options
):
    """
    Update the existing quorum device configuration.

    dict model_options -- model specific options
    dict generic_options -- generic quorum device options
    dict heuristics_options -- heuristics options
    """
    if not self.has_quorum_device():
        raise LibraryError(
            ReportItem.error(reports.messages.QdeviceNotDefined())
        )
    model = self.get_quorum_device_model()

    # collect the sections holding the new configuration
    device_sections = []
    model_sections = []
    heuristics_sections = []
    for quorum_section in self.config.get_sections("quorum"):
        for device_section in quorum_section.get_sections("device"):
            device_sections.append(device_section)
            model_sections.extend(device_section.get_sections(model))
            heuristics_sections.extend(
                device_section.get_sections("heuristics")
            )
    # Device sections are known to exist, otherwise the function would have
    # exited at the has_quorum_device check above. Create any missing model
    # or heuristics section in the last device section.
    if not model_sections:
        new_model = config_parser.Section(model)
        device_sections[-1].add_section(new_model)
        model_sections.append(new_model)
    if not heuristics_sections:
        new_heuristics = config_parser.Section("heuristics")
        device_sections[-1].add_section(new_heuristics)
        heuristics_sections.append(new_heuristics)

    self.__set_section_options(device_sections, generic_options)
    self.__set_section_options(model_sections, model_options)
    self.__set_section_options(heuristics_sections, heuristics_options)

    self.__update_qdevice_votes()
    self.__update_two_node()
    self.__remove_empty_sections(self.config)
    self._need_qdevice_reload = True
def set_expected_votes_live(lib_env, expected_votes):
    """
    Set expected votes in a live cluster to the specified value.

    numeric expected_votes -- desired value of expected votes
    """
    try:
        votes_int = int(expected_votes)
    except ValueError:
        # a non-numeric value is reported below the same way as a value < 1
        votes_int = 0
    if votes_int < 1:
        raise LibraryError(
            ReportItem.error(
                reports.messages.InvalidOptionValue(
                    "expected votes", expected_votes, "positive integer"
                )
            )
        )

    corosync_live.set_expected_votes(lib_env.cmd_runner(), votes_int)
def unassign_role(target_el, role_id, autodelete_target=False):
    """
    Unassign the role with role_id from the specified target/group element.
    Raise LibraryError if the role is not assigned to the target/group.

    target_el -- etree element of the target/group the role is removed from
    role_id -- id of the role
    autodelete_target -- when True, remove target_el if no role is left on it
    """
    assigned_role = target_el.find("./role[@id='{0}']".format(role_id))
    if assigned_role is None:
        raise LibraryError(
            ReportItem.error(
                reports.messages.CibAclRoleIsNotAssignedToTarget(
                    role_id, target_el.get("id")
                )
            )
        )
    target_el.remove(assigned_role)
    if autodelete_target and target_el.find("./role") is None:
        target_el.getparent().remove(target_el)
def test_file_error(self, mock_config):
    # an error raised while reading the sbd config is propagated as a report
    node = "node"
    reason = "reason"
    mock_config.side_effect = LibraryError(
        ReportItem.error(
            reports.messages.UnableToGetSbdConfig(node, reason)
        )
    )
    expected_report = (
        Severities.ERROR,
        reports.codes.UNABLE_TO_GET_SBD_CONFIG,
        {"node": node, "reason": reason},
    )
    assert_raise_library_error(
        lambda: cmd_sbd.get_local_sbd_config(self.mock_env),
        expected_report,
    )
def _service_start(lib_env: LibraryEnvironment, func):
    """
    Start the quorum device service using func, reporting progress.
    Raise LibraryError when the service fails to start.
    """
    lib_env.report_processor.report(
        ReportItem.info(
            reports.messages.ServiceActionStarted(
                reports.const.SERVICE_ACTION_START, "quorum device"
            )
        )
    )
    try:
        func(lib_env.cmd_runner())
    except external.StartServiceError as e:
        raise LibraryError(
            ReportItem.error(
                reports.messages.ServiceActionFailed(
                    reports.const.SERVICE_ACTION_START, e.service, e.message
                )
            )
        ) from e
    lib_env.report_processor.report(
        ReportItem.info(
            reports.messages.ServiceActionSucceeded(
                reports.const.SERVICE_ACTION_START,
                "quorum device",
            )
        )
    )
def get_local_node_status(runner):
    """
    Return the status of the local node as a dict, or {"offline": True} when
    the cluster status cannot be obtained.
    Raise LibraryError when the local node is not in the cluster status.
    """
    try:
        cluster_status = ClusterState(get_cluster_status_xml(runner))
    except CrmMonErrorException:
        return {"offline": True}

    node_name = get_local_node_name(runner)
    status_attrs = (
        "id",
        "name",
        "type",
        "online",
        "standby",
        "standby_onfail",
        "maintenance",
        "pending",
        "unclean",
        "shutdown",
        "expected_up",
        "is_dc",
        "resources_running",
    )
    for node_status in cluster_status.node_section.nodes:
        if node_status.attrs.name != node_name:
            continue
        result = {"offline": False}
        for attr in status_attrs:
            result[attr] = getattr(node_status.attrs, attr)
        return result
    raise LibraryError(reports.node_not_found(node_name))
def update(
    env: LibraryEnvironment,
    tag_id: str,
    idref_add: Sequence[str],
    idref_remove: Sequence[str],
    adjacent_idref: Optional[str] = None,
    put_after_adjacent: bool = False,
) -> None:
    """
    Update the specified tag by the given id references.

    env -- provides all for communication with externals
    tag_id -- id of an existing tag to be updated
    idref_add -- reference ids to be added
    idref_remove -- reference ids to be removed
    adjacent_idref -- id of the element next to which the added elements will
        be put
    put_after_adjacent -- put elements after (True) or before (False) the
        adjacent element
    """
    with cib_tags_section(env) as tags_section:
        validator = tag.ValidateTagUpdateByIds(
            tag_id,
            idref_add,
            idref_remove,
            adjacent_idref,
        )
        validation_reports = validator.validate(
            get_resources(get_root(tags_section)),
            tags_section,
        )
        if env.report_processor.report_list(validation_reports).has_errors:
            raise LibraryError()
        tag_element = validator.tag_element()
        # the element exists when validation passed; the check is for mypy
        if tag_element is not None:
            tag.add_obj_ref(
                tag_element,
                validator.add_obj_ref_element_list(),
                validator.adjacent_obj_ref_element(),
                put_after_adjacent,
            )
            tag.remove_obj_ref(validator.remove_obj_ref_element_list())
def _get_target_or_group(cib, target_or_group_id):
    """
    Return the acl_target or acl_group element with id target_or_group_id.

    A target element has a bigger priority, so if there are both a target and
    a group with the same id, only the target element will be affected by
    this function.
    Raise LibraryError if there is no target or group element with the
    specified id.

    cib -- cib etree node
    target_or_group_id -- id of the target/group element to be returned
    """
    try:
        return acl.find_target(cib, target_or_group_id)
    except acl.AclTargetNotFound:
        try:
            return acl.find_group(cib, target_or_group_id)
        except acl.AclGroupNotFound as e:
            # chain the cause explicitly, consistently with the rest of the
            # library (see the `from None` / `from e` usage elsewhere)
            raise LibraryError(
                reports.id_not_found(target_or_group_id, "user/group")
            ) from e
def simulate_cib(runner, cib):
    """
    Run crm_simulate to get the effects the cib would have on the live cluster

    CommandRunner runner -- runner
    etree cib -- cib tree to simulate
    """
    cib_xml = etree_to_str(cib)
    try:
        # parsing of the returned XML must stay inside the try block so its
        # errors are converted to a CibSimulateError report as well
        plaintext_result, transitions_xml, new_cib_xml = simulate_cib_xml(
            runner, cib_xml
        )
        return (
            plaintext_result.strip(),
            xml_fromstring(transitions_xml),
            xml_fromstring(new_cib_xml),
        )
    except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
        raise LibraryError(
            ReportItem.error(reports.messages.CibSimulateError(str(e)))
        )
def start_booth(env):
    """
    Start the specified instance of the booth service. Currently supported
    only on systemd systems. On non-systemd systems it can be run like this:
    BOOTH_CONF_FILE=<booth-file-path> /etc/initd/booth-arbitrator

    env -- LibraryEnvironment
    """
    external.ensure_is_systemd()
    name = env.booth.name
    try:
        external.start_service(env.cmd_runner(), "booth", name)
    except external.StartServiceError as e:
        raise LibraryError(
            reports.service_start_error("booth", e.message, instance=name)
        )
    env.report_processor.process(
        reports.service_start_success("booth", instance=name)
    )
def check_is_without_duplication(
    report_processor: ReportProcessor,
    constraint_section: _Element,
    element: _Element,
    are_duplicate: Callable[[_Element, _Element], bool],
    export_element: Callable[[_Element], Dict[str, Any]],
    duplication_allowed: bool = False,
) -> None:
    """
    Check whether element duplicates another constraint with the same tag in
    constraint_section; report duplicates and raise LibraryError unless the
    duplication is allowed (forced).
    """
    # The xpath method has a complicated return value, but we know our
    # xpath expression returns only elements.
    candidates = cast(
        List[_Element],
        constraint_section.xpath(
            ".//*[local-name()=$tag_name]", tag_name=element.tag
        ),
    )
    duplicate_element_list = [
        candidate
        for candidate in candidates
        if candidate is not element and are_duplicate(element, candidate)
    ]
    if not duplicate_element_list:
        return

    report_items = [
        ReportItem.info(
            reports.messages.DuplicateConstraintsList(
                element.tag,
                [
                    export_element(duplicate_element)
                    for duplicate_element in duplicate_element_list
                ],
            )
        ),
        ReportItem(
            severity=reports.item.get_severity(
                reports.codes.FORCE,
                duplication_allowed,
            ),
            message=reports.messages.DuplicateConstraintsExist(
                [
                    str(duplicate.attrib["id"])
                    for duplicate in duplicate_element_list
                ]
            ),
        ),
    ]
    if report_processor.report_list(report_items).has_errors:
        raise LibraryError()
def _push_corosync_conf_live(
    self,
    target_list,
    corosync_conf_data,
    need_stopped_cluster,
    need_qdevice_reload,
    skip_offline_nodes,
):
    """
    Distribute corosync.conf to the given targets and reload corosync and
    qdevice as needed.
    """
    # Check that the cluster is stopped when the change requires it
    if need_stopped_cluster:
        com_cmd = CheckCorosyncOffline(
            self.report_processor, skip_offline_nodes
        )
        com_cmd.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), com_cmd)

    # Distribute corosync.conf
    com_cmd = DistributeCorosyncConf(
        self.report_processor, corosync_conf_data, skip_offline_nodes
    )
    com_cmd.set_targets(target_list)
    run_and_raise(self.get_node_communicator(), com_cmd)

    # Reload corosync
    if not need_stopped_cluster:
        # If the cluster must be stopped then we cannot reload corosync
        # because the cluster is stopped. If it is not stopped, we do not
        # even get here.
        com_cmd = ReloadCorosyncConf(self.report_processor)
        com_cmd.set_targets(target_list)
        run_and_raise(self.get_node_communicator(), com_cmd)

    # Reload qdevice if needed
    if need_qdevice_reload:
        self.report_processor.report(
            ReportItem.info(reports.messages.QdeviceClientReloadStarted())
        )
        com_cmd = qdevice.Stop(self.report_processor, skip_offline_nodes)
        com_cmd.set_targets(target_list)
        run(self.get_node_communicator(), com_cmd)
        stop_failed = com_cmd.has_errors

        com_cmd = qdevice.Start(self.report_processor, skip_offline_nodes)
        com_cmd.set_targets(target_list)
        run(self.get_node_communicator(), com_cmd)
        if stop_failed or com_cmd.has_errors:
            raise LibraryError()
def get_local_devices_info(lib_env, dump=False):
    """
    Return a list of local devices info in the format:
    {
        "device": <device_path>,
        "list": <output of 'sbd list' command>,
        "dump": <output of 'sbd dump' command> if dump is True, None otherwise
    }
    If sbd is not enabled, an empty list will be returned.

    lib_env -- LibraryEnvironment
    dump -- when True, also include the output of the 'sbd dump' command
    """
    if not sbd.is_sbd_enabled(lib_env.service_manager):
        return []

    report_item_list = []
    output = []
    for device in sbd.get_local_sbd_device_list():
        device_info = {
            "device": device,
            "list": None,
            "dump": None,
        }
        try:
            device_info["list"] = sbd.get_device_messages_info(
                lib_env.cmd_runner(), device
            )
            if dump:
                device_info["dump"] = sbd.get_device_sbd_header_dump(
                    lib_env.cmd_runner(), device
                )
        except LibraryError as e:
            report_item_list.extend(e.args)
        output.append(device_info)

    # failures of individual devices are not fatal here, downgrade them
    for report_item in report_item_list:
        report_item.severity = reports.item.ReportItemSeverity.warning()
    if lib_env.report_processor.report_list(report_item_list).has_errors:
        raise LibraryError()

    return output
def update_recipient(
    lib_env: LibraryEnvironment,
    recipient_id,
    instance_attribute_dict,
    meta_attribute_dict,
    recipient_value=None,
    description=None,
    allow_same_value=False,
):
    """
    Update an existing recipient.

    lib_env -- LibraryEnvironment
    recipient_id -- id of the recipient to be updated
    instance_attribute_dict -- instance attributes to update
    meta_attribute_dict -- meta attributes to update
    recipient_value -- new recipient value; when None the old value is kept
    description -- new description; an empty string deletes the old
        description, None keeps the old value
    allow_same_value -- when True, a unique recipient value is not required
    """
    # an empty (falsy) value is invalid; None means "do not change"
    if recipient_value is not None and not recipient_value:
        raise LibraryError(
            ReportItem.error(
                reports.messages.CibAlertRecipientValueInvalid(
                    recipient_value
                )
            )
        )
    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
    id_provider = IdProvider(cib)
    recipient = alert.update_recipient(
        lib_env.report_processor,
        cib,
        recipient_id,
        recipient_value=recipient_value,
        description=description,
        allow_same_value=allow_same_value,
    )
    arrange_first_instance_attributes(
        recipient, instance_attribute_dict, id_provider
    )
    arrange_first_meta_attributes(
        recipient, meta_attribute_dict, id_provider
    )
    lib_env.push_cib()
def find_valid_resource_id(
    report_processor, cib, can_repair_to_clone, in_clone_allowed, id
):
    """
    Return the id of a resource usable in a constraint; raise LibraryError
    when the resource lives inside a clone and that is not allowed.
    """
    parent_tags = resource.clone.ALL_TAGS + [resource.bundle.TAG]
    resource_element = find_element_by_tag_and_id(
        parent_tags + [resource.primitive.TAG, resource.group.TAG],
        cib,
        id,
    )
    if resource_element.tag in parent_tags:
        return resource_element.attrib["id"]

    clone_element = find_parent(resource_element, parent_tags)
    if clone_element is None:
        return resource_element.attrib["id"]

    if can_repair_to_clone:
        # this is workaround for web ui, console should not use it, so we do
        # not warn about it
        return clone_element.attrib["id"]

    if in_clone_allowed:
        report_processor.process(
            reports.resource_for_constraint_is_multiinstance(
                resource_element.attrib["id"],
                clone_element.tag,
                clone_element.attrib["id"],
                ReportItemSeverity.WARNING,
            )
        )
        return resource_element.attrib["id"]

    raise LibraryError(
        reports.resource_for_constraint_is_multiinstance(
            resource_element.attrib["id"],
            clone_element.tag,
            clone_element.attrib["id"],
            ReportItemSeverity.ERROR,
            # repair to clone is workaround for web ui, so we put only
            # information about one forceable possibility
            forceable=report_codes.FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE,
        )
    )
def disable_sbd(lib_env, ignore_offline_nodes=False):
    """
    Disable SBD on all nodes in the cluster.

    lib_env -- LibraryEnvironment
    ignore_offline_nodes -- when True, omit offline nodes
    """
    node_list, get_nodes_report_list = get_existing_nodes_names(
        lib_env.get_corosync_conf()
    )
    if not node_list:
        get_nodes_report_list.append(
            ReportItem.error(reports.messages.CorosyncConfigNoNodesDefined())
        )
    if lib_env.report_processor.report_list(get_nodes_report_list).has_errors:
        raise LibraryError()

    com_cmd = GetOnlineTargets(
        lib_env.report_processor,
        ignore_offline_targets=ignore_offline_nodes,
    )
    com_cmd.set_targets(
        lib_env.get_node_target_factory().get_target_list(
            node_list,
            skip_non_existing=ignore_offline_nodes,
        )
    )
    online_nodes = run_and_raise(lib_env.get_node_communicator(), com_cmd)

    com_cmd = SetStonithWatchdogTimeoutToZero(lib_env.report_processor)
    com_cmd.set_targets(online_nodes)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    com_cmd = DisableSbdService(lib_env.report_processor)
    com_cmd.set_targets(online_nodes)
    run_and_raise(lib_env.get_node_communicator(), com_cmd)

    lib_env.report_processor.report(
        ReportItem.warning(
            reports.messages.ClusterRestartRequiredToApplyChanges()
        )
    )