def get_node_key_map_for_mpath(
    stonith_el: _Element, node_labels: Iterable[str]
) -> Dict[str, str]:
    """
    Parse the 'pcmk_host_map' instance attribute of a mpath stonith resource
    into a node-label -> reservation-key mapping.

    Raise LibraryError if the attribute is missing/empty or if any node in
    node_labels has no key in the map.

    stonith_el -- stonith resource element holding the instance attributes
    node_labels -- node names which all must have a key in the host map
    """

    # PEP 8 (E731): use a def instead of a lambda bound to a name, so
    # tracebacks show a useful function name.
    def _missing_keys_error(
        host_map: Optional[str], missing_nodes: Iterable[str]
    ) -> LibraryError:
        return LibraryError(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateMissingMpathKeys(
                    host_map, sorted(missing_nodes)
                )
            )
        )

    pcmk_host_map_value = get_value(
        INSTANCE_ATTRIBUTES_TAG, stonith_el, "pcmk_host_map"
    )
    missing_nodes = set(node_labels)
    if not pcmk_host_map_value:
        # no map at all: every requested node is missing
        raise _missing_keys_error(pcmk_host_map_value, missing_nodes)
    node_key_map = {}
    # pcmk_host_map format: "node1=key1;node2:key2 ..." -- '=' or ':' pairs
    # separated by ';', ' ' or tab
    pattern = re.compile(
        r"(?P<node>[^=:; \t]+)[=:](?P<key>[^=:; \t]+)[; \t]?"
    )
    # finditer yields only real matches, so no truthiness check is needed
    for match in pattern.finditer(pcmk_host_map_value):
        group_dict = match.groupdict()
        node_key_map[group_dict["node"]] = group_dict["key"]
    missing_nodes -= set(node_key_map.keys())
    if missing_nodes:
        raise _missing_keys_error(pcmk_host_map_value, missing_nodes)
    return node_key_map
def _update_scsi_devices_get_element_and_devices(
    runner: CommandRunner,
    report_processor: ReportProcessor,
    cib: _Element,
    stonith_id: str,
) -> Tuple[_Element, List[str]]:
    """
    Do checks and return stonith element and list of current scsi devices.
    Raise LibraryError if checks fail.

    runner -- command runner instance
    report_processor -- tool for warning/info/error reporting
    cib -- cib element
    stonith_id -- id of stonith resource
    """
    # restartless update relies on pacemaker exposing resource digests
    if not is_getting_resource_digest_supported(runner):
        raise LibraryError(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported()
            )
        )
    stonith_el, report_list = resource.stonith.validate_stonith_restartless_update(
        cib, stonith_id
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # for mypy, this should not happen because exception would be raised
    if stonith_el is None:
        raise AssertionError("stonith element is None")
    current_device_list = get_value(
        INSTANCE_ATTRIBUTES_TAG, stonith_el, "devices"
    )
    if current_device_list is None:
        raise AssertionError("current_device_list is None")
    return stonith_el, current_device_list.split(",")
def _validate_unique_instance_attributes(
    resource_agent: ResourceAgentMetadata,
    instance_attributes: Mapping[str, str],
    resources_section: _Element,
    resource_id: Optional[str] = None,
    force: bool = False,
) -> reports.ReportItemList:
    """
    Report resources of the same agent whose unique attribute groups collide
    with the values being configured.

    resource_agent -- metadata of the agent being configured
    instance_attributes -- new attribute values to check for uniqueness
    resources_section -- resources element to search for conflicts
    resource_id -- id of the edited resource, excluded from the comparison
    force -- downgrade the report severity when True
    """
    unique_groups = resource_agent.unique_parameter_groups
    if not unique_groups:
        return []
    report_list = []
    candidates = _find_primitives_by_agent(
        resources_section, resource_agent.name
    )
    severity = reports.item.get_severity(reports.codes.FORCE, force)
    for group_name, group_attrs in unique_groups.items():
        new_values = {
            attr: instance_attributes.get(attr, "") for attr in group_attrs
        }
        # a group with all values empty cannot conflict
        if not any(new_values.values()):
            continue
        conflicting: Set[str] = set()
        for other in candidates:
            if other.attrib["id"] == resource_id:
                continue
            other_values = {
                attr: get_value(INSTANCE_ATTRIBUTES_TAG, other, attr, "")
                for attr in group_attrs
            }
            if other_values == new_values:
                conflicting.add(str(other.attrib["id"]))
        if not conflicting:
            continue
        if len(new_values) == 1:
            # single-attribute group gets a dedicated, simpler report
            message: reports.item.ReportItemMessage = (
                reports.messages.ResourceInstanceAttrValueNotUnique(
                    *new_values.popitem(),
                    resource_agent.name.full_name,
                    sorted(conflicting),
                )
            )
        else:
            message = (
                reports.messages.ResourceInstanceAttrGroupValueNotUnique(
                    group_name,
                    new_values,
                    resource_agent.name.full_name,
                    sorted(conflicting),
                )
            )
        report_list.append(reports.ReportItem(severity, message))
    return report_list
def is_promotable_clone(resource_el):
    """
    Return True if resource_el is a promotable clone, False on clone and master
    """
    if not is_clone(resource_el):
        return False
    promotable = nvpair.get_value(
        nvpair.META_ATTRIBUTES_TAG,
        resource_el,
        "promotable",
        default="false",
    )
    return is_true(promotable)
def is_promotable_clone(resource_el):
    """
    Return True if resource_el is a promotable clone, False on clone and master
    """
    promotable_value = nvpair.get_value(
        nvpair.META_ATTRIBUTES_TAG,
        resource_el,
        "promotable",
        default="false",
    )
    return is_clone(resource_el) and is_true(promotable_value)
def validate_stonith_restartless_update(
    cib: _Element,
    stonith_id: str,
) -> Tuple[Optional[_Element], ReportItemList]:
    """
    Validate that stonith device exists and its type is supported for
    restartless update of scsi devices and has defined option 'devices'.

    cib -- cib element
    stonith_id -- id of a stonith resource
    """
    stonith_el, report_list = common.find_one_resource(
        cib, stonith_id, resource_tags=[TAG_RESOURCE_PRIMITIVE]
    )
    if stonith_el is None:
        return stonith_el, report_list

    stonith_type = stonith_el.get("type", "")
    is_supported_agent = (
        stonith_el.get("class", "") == "stonith"
        and stonith_el.get("provider", "") == ""
        and stonith_type in SUPPORTED_RESOURCE_TYPES_FOR_RESTARTLESS_UPDATE
    )
    if not is_supported_agent:
        report_list.append(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateUnsupportedAgent(
                    stonith_id,
                    stonith_type,
                    SUPPORTED_RESOURCE_TYPES_FOR_RESTARTLESS_UPDATE,
                )
            )
        )
        return stonith_el, report_list

    # the update works by editing the 'devices' option, so it must exist
    if not get_value(INSTANCE_ATTRIBUTES_TAG, stonith_el, "devices"):
        report_list.append(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateUnableToPerform(
                    f"no devices option configured for stonith device "
                    f"'{stonith_id}'"
                )
            )
        )
    return stonith_el, report_list
def validate_unique_instance_attributes(
    resource_agent, instance_attributes, resources_section,
    resource_id=None, force=False
):
    """
    Report resources of the same agent that already use the same value for an
    agent parameter marked as unique.

    resource_agent -- agent metadata providing get_parameters()/get_name()
    instance_attributes -- dict of new attribute values being configured
    resources_section -- resources element to search for conflicts
    resource_id -- id of the edited resource, excluded from the comparison
    force -- make the reports forcible
    """
    report_list = []
    report_creator = reports.get_problem_creator(
        report_codes.FORCE_OPTIONS, force
    )
    ra_unique_attributes = [
        param["name"]
        for param in resource_agent.get_parameters()
        if param["unique"]
    ]
    same_agent_resources = find_primitives_by_agent(
        resources_section, resource_agent
    )
    for attr in ra_unique_attributes:
        if attr not in instance_attributes:
            continue
        conflicting_resources = {
            primitive.get("id")
            for primitive in same_agent_resources
            if (
                primitive.get("id") != resource_id
                and instance_attributes[attr] == get_value(
                    "instance_attributes", primitive, attr
                )
            )
        }
        if conflicting_resources:
            report_list.append(
                report_creator(
                    reports.resource_instance_attr_value_not_unique,
                    attr,
                    instance_attributes[attr],
                    resource_agent.get_name(),
                    # sort for deterministic report content, consistent with
                    # the other validate_unique_instance_attributes variants
                    sorted(conflicting_resources),
                )
            )
    return report_list
def validate_unique_instance_attributes(
    resource_agent,
    instance_attributes,
    resources_section,
    resource_id=None,
    force=False,
):
    """
    Report resources of the same agent that already use the same value for an
    agent parameter marked as unique.

    resource_agent -- agent metadata providing get_parameters()/get_name()
    instance_attributes -- dict of new attribute values being configured
    resources_section -- resources element to search for conflicts
    resource_id -- id of the edited resource, excluded from the comparison
    force -- downgrade the report severity when True
    """
    report_list = []
    unique_attrs = [
        param["name"]
        for param in resource_agent.get_parameters()
        if param["unique"]
    ]
    candidates = find_primitives_by_agent(resources_section, resource_agent)
    severity = reports.item.get_severity(report_codes.FORCE_OPTIONS, force)
    for attr in unique_attrs:
        if attr not in instance_attributes:
            continue
        new_value = instance_attributes[attr]
        conflicting = set()
        for primitive in candidates:
            if primitive.get("id") == resource_id:
                continue
            if new_value == get_value("instance_attributes", primitive, attr):
                conflicting.add(primitive.get("id"))
        if conflicting:
            report_list.append(
                ReportItem(
                    severity=severity,
                    message=reports.messages.ResourceInstanceAttrValueNotUnique(
                        attr,
                        new_value,
                        resource_agent.get_name(),
                        sorted(conflicting),
                    ),
                )
            )
    return report_list
def assert_find_value(self, tag_name, name, value, xml, default=None):
    """Assert that get_value() on the parsed xml yields the expected value."""
    parsed = etree.fromstring(xml)
    found = nvpair.get_value(tag_name, parsed, name, default)
    self.assertEqual(value, found)
def full_cluster_status_plaintext(
    env: LibraryEnvironment,
    hide_inactive_resources: bool = False,
    verbose: bool = False,
) -> str:
    """
    Return full cluster status as plaintext

    env -- LibraryEnvironment
    hide_inactive_resources -- if True, do not display non-running resources
    verbose -- if True, display more info
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements

    # validation
    # CIB and corosync.conf must both be live or both be files; a mix would
    # produce an inconsistent status
    if not env.is_cib_live and env.is_corosync_conf_live:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentNotConsistent(
                    [file_type_codes.CIB],
                    [file_type_codes.COROSYNC_CONF],
                )
            )
        )
    if env.is_cib_live and not env.is_corosync_conf_live:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentNotConsistent(
                    [file_type_codes.COROSYNC_CONF],
                    [file_type_codes.CIB],
                )
            )
        )

    # initialization
    runner = env.cmd_runner()
    report_processor = env.report_processor
    live = env.is_cib_live and env.is_corosync_conf_live
    is_sbd_running = False

    # load status, cib, corosync.conf
    status_text, warning_list = get_cluster_status_text(
        runner, hide_inactive_resources, verbose
    )
    corosync_conf = None
    # If we are live on a remote node, we have no corosync.conf.
    # TODO Use the new file framework so the path is not exposed.
    if not live or os.path.exists(settings.corosync_conf_file):
        corosync_conf = env.get_corosync_conf()
    cib = env.get_cib()
    if verbose:
        # ticket_status_* are only bound when verbose; they are read below
        # under the same condition
        (
            ticket_status_text,
            ticket_status_stderr,
            ticket_status_retval,
        ) = get_ticket_status_text(runner)
    # get extra info if live
    if live:
        try:
            is_sbd_running = is_service_running(runner, get_sbd_service_name())
        except LibraryError:
            # best effort: sbd state stays False if it cannot be determined
            pass
        local_services_status = _get_local_services_status(runner)
        if verbose and corosync_conf:
            node_name_list, node_names_report_list = get_existing_nodes_names(
                corosync_conf
            )
            report_processor.report_list(node_names_report_list)
            node_reachability = _get_node_reachability(
                env.get_node_target_factory(),
                env.get_node_communicator(),
                report_processor,
                node_name_list,
            )

    # check stonith configuration
    warning_list = list(warning_list)
    warning_list.extend(_stonith_warnings(cib, is_sbd_running))

    # put it all together
    if report_processor.has_errors:
        raise LibraryError()
    # prefer the cluster name from corosync.conf, fall back to the CIB
    # cluster-name property
    cluster_name = (
        corosync_conf.get_cluster_name()
        if corosync_conf
        else nvpair.get_value(
            "cluster_property_set", get_crm_config(cib), "cluster-name", ""
        )
    )
    parts = []
    parts.append(f"Cluster name: {cluster_name}")
    if warning_list:
        parts.extend(["", "WARNINGS:"] + warning_list + [""])
    parts.append(status_text)
    if verbose:
        parts.extend(["", "Tickets:"])
        if ticket_status_retval != 0:
            ticket_warning_parts = [
                "WARNING: Unable to get information about tickets"
            ]
            if ticket_status_stderr:
                ticket_warning_parts.extend(
                    indent(ticket_status_stderr.splitlines())
                )
            parts.extend(indent(ticket_warning_parts))
        else:
            parts.extend(indent(ticket_status_text.splitlines()))
    if live:
        # node_name_list / node_reachability / local_services_status were
        # bound above under the same live/verbose conditions
        if verbose and corosync_conf:
            parts.extend(["", "PCSD Status:"])
            parts.extend(
                indent(
                    _format_node_reachability(node_name_list, node_reachability)
                )
            )
        parts.extend(["", "Daemon Status:"])
        parts.extend(
            indent(_format_local_services_status(local_services_status))
        )
    return "\n".join(parts)
def assert_find_value(self, tag_name, name, value, xml, default=None):
    """Assert that get_value() on the parsed xml yields the expected value."""
    self.assertEqual(
        value,
        nvpair.get_value(tag_name, etree.fromstring(xml), name, default),
    )