def _process_response(self, response):
    """
    Turn one node's "is corosync running?" HTTP response into report items.

    response -- communicator response received from a single node
    """
    report_item = self._get_response_report(response)
    node_label = response.request.target.label
    if report_item is not None:
        # The request itself failed: report the communication error together
        # with a "could not check corosync" report at the configured severity.
        self._report_list([
            report_item,
            ReportItem(
                severity=ReportItemSeverity(
                    self._failure_severity,
                    self._failure_forceable,
                ),
                message=reports.messages.CorosyncNotRunningCheckNodeError(
                    node_label,
                ),
            ),
        ])
        return
    try:
        status = response.data
        if not json.loads(status)["corosync"]:
            report_item = ReportItem.info(
                reports.messages.CorosyncNotRunningOnNode(node_label),
            )
        else:
            report_item = ReportItem.error(
                reports.messages.CorosyncRunningOnNode(node_label),
            )
    except (KeyError, TypeError, json.JSONDecodeError):
        # TypeError added: a JSON payload whose top level is not a dict
        # (e.g. null or a list) raises TypeError on the ["corosync"]
        # subscript; treat it as an unparsable answer, consistently with
        # the other response handlers which catch KeyError/TypeError/
        # ValueError.
        report_item = ReportItem(
            severity=ReportItemSeverity(
                self._failure_severity,
                self._failure_forceable,
            ),
            message=reports.messages.CorosyncNotRunningCheckNodeError(
                node_label,
            ),
        )
    self._report(report_item)
def _validate_devices(resources_el: _Element, devices, force_device=False, allow_force=True) -> ReportItemList:
    """
    Validate a list of stonith device ids against the CIB resources section.

    resources_el -- resources section of the CIB
    devices -- stonith device ids to validate
    force_device -- if True (and forcing is allowed), report missing devices
        as warnings instead of errors
    allow_force -- if False, missing devices can never be forced
    """
    report_list: ReportItemList = []
    if not devices:
        report_list.append(
            ReportItem.error(
                reports.messages.RequiredOptionsAreMissing(["stonith devices"])
            )
        )
    invalid_devices = []
    for device_id in devices:
        id_reports: ReportItemList = []
        validate_id(device_id, description="device id", reporter=id_reports)
        report_list.extend(id_reports)
        if has_errors(id_reports):
            continue
        # TODO use the new finding function
        if not is_stonith_resource(resources_el, device_id):
            invalid_devices.append(device_id)
    if invalid_devices:
        # warning only when the user forced it and forcing is allowed;
        # the force code is offered only when forcing is still possible
        forced = force_device and allow_force
        forcible = allow_force and not force_device
        report_list.append(
            ReportItem(
                severity=ReportItemSeverity(
                    level=(
                        ReportItemSeverity.WARNING
                        if forced
                        else ReportItemSeverity.ERROR
                    ),
                    force_code=(report_codes.FORCE if forcible else None),
                ),
                message=reports.messages.StonithResourcesDoNotExist(
                    invalid_devices
                ),
            )
        )
    return report_list
def _find_resource_elements_for_operation(
    report_processor: ReportProcessor,
    resources_section,
    booth_env,
    allow_multiple,
):
    """
    Find booth resources in the CIB which use the given booth config.

    Raise LibraryError when no resource is found or, unless allowed,
    when more than one is found. Return the found elements.
    """
    found_elements = resource.find_for_config(
        resources_section,
        booth_env.config_path,
    )
    if not found_elements:
        report_processor.report(
            ReportItem.error(
                reports.messages.BoothNotExistsInCib(booth_env.instance_name)
            )
        )
    elif len(found_elements) > 1:
        # more than one booth resource for the same config is suspicious
        # but may be forced
        report_processor.report(
            ReportItem(
                severity=get_severity(
                    report_codes.FORCE_BOOTH_REMOVE_FROM_CIB,
                    allow_multiple,
                ),
                message=reports.messages.BoothMultipleTimesInCib(
                    booth_env.instance_name,
                ),
            )
        )
    if report_processor.has_errors:
        raise LibraryError()
    return found_elements
def get_target_list_with_reports(
    self,
    host_name_list,
    skip_non_existing=False,
    allow_skip=True,
    report_none_host_found=True,
):
    """
    Resolve host names to request targets, reporting unknown hosts.

    Return a (report_list, target_list) tuple.
    """
    target_list = []
    missing_names = []
    for name in host_name_list:
        try:
            target_list.append(self.get_target(name))
        except HostNotFound:
            missing_names.append(name)

    report_list = []
    if missing_names:
        force_code = (
            reports.codes.SKIP_OFFLINE_NODES if allow_skip else None
        )
        report_list.append(
            ReportItem(
                severity=reports.item.get_severity(
                    force_code,
                    skip_non_existing,
                ),
                message=reports.messages.HostNotFound(
                    sorted(missing_names),
                ),
            )
        )
    if report_none_host_found and host_name_list and not target_list:
        # we want to create this report only if there was at least one
        # required address specified
        report_list.append(
            ReportItem.error(reports.messages.NoneHostFound())
        )
    return report_list, target_list
def _check_if_atb_can_be_disabled(
    service_manager: ServiceManagerInterface,
    report_processor: ReportProcessor,
    corosync_conf: CorosyncConfFacade,
    was_enabled: bool,
    force: bool = False,
) -> None:
    """
    Check whether auto_tie_breaker can be changed without affecting SBD.

    Raises LibraryError if the change of ATB will affect SBD functionality.

    service_manager --
    report_processor -- report processor
    corosync_conf -- corosync conf facade
    was_enabled -- True if ATB was enabled, False otherwise
    force -- force change
    """
    # the cheap config checks short-circuit before the SBD inspection
    atb_being_disabled = (
        was_enabled and not corosync_conf.is_enabled_auto_tie_breaker()
    )
    if atb_being_disabled and sbd.is_auto_tie_breaker_needed(
        service_manager, corosync_conf
    ):
        report_processor.report(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    force,
                ),
                message=(
                    reports.messages.CorosyncQuorumAtbCannotBeDisabledDueToSbd()
                ),
            )
        )
    if report_processor.has_errors:
        raise LibraryError()
def _failure_report(self, target_label, action, reason, severity, forceable):
    """Build a report item describing a failed file removal on a node."""
    item_severity = reports.item.ReportItemSeverity(severity, forceable)
    return ReportItem(
        severity=item_severity,
        message=reports.messages.FileRemoveFromNodeError(
            target_label,
            action,
            reason,
        ),
    )
def _validate_target_valuewise(
    cluster_status_nodes,
    target_type,
    target_value,
    force_node=False,
    allow_force=True,
) -> ReportItemList:
    """
    For a node-type fencing target, check the node exists in cluster status.

    Return a report item list (empty for non-node target types).
    """
    report_list: ReportItemList = []
    if target_type != TARGET_TYPE_NODE:
        return report_list
    node_exists = any(
        node.attrs.name == target_value for node in cluster_status_nodes
    )
    if not node_exists:
        downgraded = force_node and allow_force
        report_list.append(
            ReportItem(
                severity=ReportItemSeverity(
                    level=(
                        ReportItemSeverity.WARNING
                        if downgraded
                        else ReportItemSeverity.ERROR
                    ),
                    force_code=(
                        report_codes.FORCE_NODE_DOES_NOT_EXIST
                        if allow_force and not force_node
                        else None
                    ),
                ),
                message=reports.messages.NodeNotFound(target_value),
            )
        )
    return report_list
def resource_refresh(
    runner: CommandRunner,
    resource: Optional[str] = None,
    node: Optional[str] = None,
    strict: bool = False,
    force: bool = False,
):
    """
    Run crm_resource --refresh, optionally limited to a resource and/or node.

    runner -- command runner
    resource -- limit the refresh to this resource id
    node -- limit the refresh to this node
    strict -- pass --force to crm_resource
    force -- skip the "too many operations" safety check

    Raise LibraryError when the refresh would be too time consuming (unless
    forced) or when crm_resource fails. Return its combined output.
    """
    if not (force or node or resource):
        # a cluster-wide refresh touches every resource on every node;
        # refuse (unless forced) when that would be too many operations
        summary = ClusterState(get_cluster_status_dom(runner)).summary
        operations = summary.nodes.attrs.count * summary.resources.attrs.count
        if operations > __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD:
            raise LibraryError(
                ReportItem(
                    reports.item.ReportItemSeverity.error(reports.codes.FORCE),
                    reports.messages.ResourceRefreshTooTimeConsuming(
                        __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD
                    ),
                )
            )

    cmd = [__exec("crm_resource"), "--refresh"]
    for option, value in (("--resource", resource), ("--node", node)):
        if value:
            cmd.extend([option, value])
    if strict:
        cmd.extend(["--force"])

    stdout, stderr, retval = runner.run(cmd)
    if retval != 0:
        raise LibraryError(
            ReportItem.error(
                reports.messages.ResourceRefreshError(
                    join_multilines([stderr, stdout]),
                    resource,
                    node,
                )
            )
        )
    # useful output (what has been done) goes to stderr
    return join_multilines([stdout, stderr])
def _failure_report(self, target_label, action, reason, severity, forceable):
    """Build a report item describing a failed service command on a node."""
    item_severity = reports.item.ReportItemSeverity(severity, forceable)
    return ReportItem(
        severity=item_severity,
        message=reports.messages.ServiceCommandOnNodeError(
            target_label,
            action,
            reason,
        ),
    )
def config_setup(
    env: LibraryEnvironment,
    site_list,
    arbitrator_list,
    instance_name=None,
    overwrite_existing=False,
):
    """
    Create booth configuration

    env
    list site_list -- site addresses of multisite
    list arbitrator_list -- arbitrator addresses of multisite
    string instance_name -- booth instance name
    bool overwrite_existing -- allow overwriting existing files
    """
    instance_name = instance_name or constants.DEFAULT_INSTANCE_NAME
    report_processor = env.report_processor

    # validate the instance name and the site / arbitrator lists first
    report_processor.report_list(
        config_validators.check_instance_name(instance_name)
    )
    report_processor.report_list(
        config_validators.create(site_list, arbitrator_list)
    )
    if report_processor.has_errors:
        raise LibraryError()

    booth_env = env.get_booth_env(instance_name)
    booth_conf = booth_env.create_facade(site_list, arbitrator_list)
    booth_conf.set_authfile(booth_env.key_path)

    try:
        booth_env.key.write_raw(
            tools.generate_binary_key(
                random_bytes_count=settings.booth_authkey_bytes
            ),
            can_overwrite=overwrite_existing,
        )
        booth_env.config.write_facade(
            booth_conf,
            can_overwrite=overwrite_existing,
        )
    except FileAlreadyExists as e:
        # forcible when the caller asked for overwriting
        report_processor.report(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    overwrite_existing,
                ),
                message=reports.messages.FileAlreadyExists(
                    e.metadata.file_type_code,
                    e.metadata.path,
                ),
            )
        )
    except RawFileError as e:
        report_processor.report(raw_file_error_report(e))
    if report_processor.has_errors:
        raise LibraryError()
def exception_to_report_list(
    exception: JsonParserException,
    file_type_code: code.FileTypeCode,
    file_path: str,
    force_code: reports.types.ForceCode,
    is_forced_or_warning: bool,
) -> reports.ReportItemList:
    """
    Translate a JSON parser exception into a report item list.

    Re-raise the exception when it cannot be translated.
    """
    if isinstance(exception, JsonParserException) and isinstance(
        exception.json_exception, json.JSONDecodeError
    ):
        json_error = exception.json_exception
        return [
            ReportItem(
                severity=reports.item.get_severity(
                    force_code,
                    is_forced_or_warning,
                ),
                message=reports.messages.ParseErrorJsonFile(
                    file_type_code,
                    json_error.lineno,
                    json_error.colno,
                    json_error.pos,
                    json_error.msg,
                    str(json_error),
                    file_path=file_path,
                ),
            )
        ]
    raise exception
def ensure_recipient_value_is_unique(
    reporter: ReportProcessor,
    alert,
    recipient_value,
    recipient_id="",
    allow_duplicity=False,
):
    """
    Ensure that recipient_value is unique within the given alert.

    reporter -- report processor
    alert -- alert element
    recipient_value -- recipient value
    recipient_id -- id of the recipient the value belongs to
    allow_duplicity -- if True, only a warning is shown when the value
        already exists
    """
    # NOTE(review): a value containing a single quote would break this xpath
    # expression — presumably validated upstream; verify.
    duplicate_recipients = alert.xpath(
        "./recipient[@value='{value}' and @id!='{id}']".format(
            value=recipient_value, id=recipient_id))
    if duplicate_recipients:
        reporter.report(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    allow_duplicity,
                ),
                message=reports.messages.CibAlertRecipientAlreadyExists(
                    alert.get("id", None),
                    recipient_value,
                ),
            )
        )
    if reporter.has_errors:
        raise LibraryError()
def __get_nodes_names(
    corosync_nodes: Iterable[CorosyncNode],
    remote_and_guest_nodes: Iterable[PacemakerNode],
    error_on_missing_name: bool = False,
) -> Tuple[List[str], ReportItemList]:
    """
    Collect node names from corosync nodes and remote / guest nodes.

    Return the combined name list together with reports about corosync
    nodes which are missing a name.
    """
    report_list: ReportItemList = []
    corosync_names: List[str] = []
    nameless_count = 0
    # single pass so a generator input is not consumed twice
    for corosync_node in corosync_nodes:
        if corosync_node.name:
            corosync_names.append(corosync_node.name)
        else:
            nameless_count += 1
    # Just a generic report for now. It may be expanded to provide more info
    # about each node missing a name later if needed.
    if nameless_count:
        severity = (
            ReportItemSeverity.error()
            if error_on_missing_name
            else ReportItemSeverity.warning()
        )
        report_list.append(
            ReportItem(
                severity=severity,
                message=reports.messages.CorosyncConfigMissingNamesOfNodes(
                    fatal=error_on_missing_name,
                ),
            )
        )
    all_names = corosync_names + [
        node.name for node in remote_and_guest_nodes
    ]
    return all_names, report_list
def ensure_resource_state(expected_running, cluster_state, resource_id):
    """
    Build a report item saying whether the resource state matches expectation.

    The report is INFO when the observed state matches expected_running,
    ERROR otherwise.
    """
    roles_with_nodes = _get_primitive_roles_with_nodes(
        _get_primitives_for_state_check(
            cluster_state, resource_id, expected_running
        )
    )
    if roles_with_nodes:
        # resource is running somewhere
        level = (
            reports.ReportItemSeverity.INFO
            if expected_running
            else reports.ReportItemSeverity.ERROR
        )
        return ReportItem(
            reports.item.ReportItemSeverity(level),
            reports.messages.ResourceRunningOnNodes(
                resource_id, roles_with_nodes
            ),
        )
    # resource is not running anywhere
    level = (
        reports.ReportItemSeverity.INFO
        if not expected_running
        else reports.ReportItemSeverity.ERROR
    )
    return ReportItem(
        reports.item.ReportItemSeverity(level),
        reports.messages.ResourceDoesNotRun(resource_id),
    )
def _process_response(self, response):
    """
    Process one node's answer to a booth config distribution request.

    response -- communicator response received from a single node
    """
    communication_report = self._get_response_report(response)
    if communication_report is not None:
        self._report(communication_report)
        return
    target = response.request.target
    try:
        node_answer = json.loads(response.data)
        self._report(
            ReportItem.info(
                reports.messages.BoothConfigAcceptedByNode(
                    node=target.label,
                    name_list=sorted(node_answer["saved"]),
                )
            )
        )
        for existing_file in list(node_answer["existing"]):
            # files which already exist on the node; forcible overwrite
            self._report(
                ReportItem(
                    severity=reports.item.get_severity(
                        reports.codes.FORCE_FILE_OVERWRITE,
                        self._rewrite_existing,
                    ),
                    message=reports.messages.FileAlreadyExists(
                        # TODO specify file type; this will be overhauled
                        # to a generic file transport framework anyway
                        "",
                        existing_file,
                        node=target.label,
                    ),
                )
            )
        for failed_file, failure_reason in dict(node_answer["failed"]).items():
            self._report(
                ReportItem.error(
                    reports.messages.BoothConfigDistributionNodeError(
                        target.label,
                        failure_reason,
                        failed_file,
                    )
                )
            )
    except (KeyError, TypeError, ValueError):
        # the node sent an answer we cannot make sense of
        self._report(
            ReportItem.error(
                reports.messages.InvalidResponseFormat(target.label)
            )
        )
def _validate_network_options_update(
    bundle_el, network_el, options, force_options
):
    """
    Validate an update of a bundle's network options.

    Return a report item list combining the accessibility check with the
    per-option validators.
    """
    report_list = []
    inner_primitive = get_inner_resource(bundle_el)
    remote_becomes_inaccessible = (
        inner_primitive is not None
        and not _is_pcmk_remote_accessible_after_update(network_el, options)
    )
    if remote_becomes_inaccessible:
        report_list.append(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    force_options,
                ),
                message=reports.messages.ResourceInBundleNotAccessible(
                    bundle_el.get("id"),
                    inner_primitive.get("id"),
                ),
            )
        )

    severity = reports.item.get_severity(reports.codes.FORCE, force_options)
    validators_optional_options = [
        # TODO add validators for other keys (ip-range-start - IPv4)
        validate.ValuePortNumber("control-port"),
        # Leaving a possibility to force this validation for the case
        # pacemaker starts supporting IPv6 or other format of the netmask.
        ValueHostNetmask("host-netmask", severity=severity),
    ]
    for validator in validators_optional_options:
        validator.empty_string_valid = True
    names_validator = validate.NamesIn(
        # allow to remove options even if they are not allowed
        NETWORK_OPTIONS | _options_to_remove(options),
        option_type="network",
        severity=severity,
    )
    all_validators = [names_validator] + validators_optional_options
    return report_list + validate.ValidatorAll(all_validators).validate(options)
def check_is_without_duplication(
    report_processor: ReportProcessor,
    constraint_section: _Element,
    element: _Element,
    are_duplicate: Callable[[_Element, _Element], bool],
    export_element: Callable[[_Element], Dict[str, Any]],
    duplication_allowed: bool = False,
) -> None:
    """
    Report constraints which duplicate the given element; raise on errors.

    are_duplicate -- decides whether two elements duplicate each other
    export_element -- turns an element into a dict for reporting
    duplication_allowed -- downgrade the duplication error to a warning
    """
    # The xpath method has a complicated return value, but we know our
    # xpath expression returns only elements.
    candidates = cast(
        List[_Element],
        constraint_section.xpath(
            ".//*[local-name()=$tag_name]", tag_name=element.tag
        ),
    )
    duplicates = [
        candidate
        for candidate in candidates
        if (element is not candidate and are_duplicate(element, candidate))
    ]
    if not duplicates:
        return
    listing_report = ReportItem.info(
        reports.messages.DuplicateConstraintsList(
            element.tag,
            [export_element(duplicate) for duplicate in duplicates],
        )
    )
    forcible_report = ReportItem(
        severity=reports.item.get_severity(
            reports.codes.FORCE,
            duplication_allowed,
        ),
        message=reports.messages.DuplicateConstraintsExist(
            [str(duplicate.attrib["id"]) for duplicate in duplicates]
        ),
    )
    if report_processor.report_list(
        [listing_report, forcible_report]
    ).has_errors:
        raise LibraryError()
def _process_response(self, response):
    """
    Report the outcome of pushing corosync config to a single node.
    """
    report_item = self._get_response_report(response)
    node_label = response.request.target.label
    if report_item is None:
        # the node accepted the config
        self._report(
            ReportItem.info(
                reports.messages.CorosyncConfigAcceptedByNode(node_label)
            )
        )
        return
    # report the communication error together with a distribution error
    self._report_list([
        report_item,
        ReportItem(
            severity=ReportItemSeverity(
                self._failure_severity,
                self._failure_forceable,
            ),
            message=reports.messages.CorosyncConfigDistributionNodeError(
                node_label,
            ),
        ),
    ])
def _check_qdevice_not_used(reporter: ReportProcessor, runner, model, force=False):
    """
    Report when the qdevice is still used by some clusters; raise on errors.

    reporter -- report processor
    runner -- command runner
    model -- qdevice model, checked by _check_model
    force -- downgrade the "qdevice used" error to a warning
    """
    _check_model(model)
    connected_clusters: List[str] = []
    if model == "net":
        try:
            status_text = qdevice_net.qdevice_status_cluster_text(runner)
            connected_clusters = qdevice_net.qdevice_connected_clusters(
                status_text
            )
        except qdevice_net.QnetdNotRunningException:
            # qnetd not running means no cluster can be connected to it
            pass
    if connected_clusters:
        reporter.report(
            ReportItem(
                severity=get_severity(report_codes.FORCE_QDEVICE_USED, force),
                message=reports.messages.QdeviceUsedByClusters(
                    connected_clusters,
                ),
            )
        )
    if reporter.has_errors:
        raise LibraryError()
def validate_unique_instance_attributes(
    resource_agent,
    instance_attributes,
    resources_section,
    resource_id=None,
    force=False,
):
    """
    Check that attributes the agent declares unique do not collide with
    other resources of the same agent.

    resource_agent -- agent metadata of the resource being created / updated
    instance_attributes -- instance attributes of the resource
    resources_section -- resources section of the CIB to search for conflicts
    resource_id -- id of the updated resource, excluded from the search
    force -- downgrade conflicts from errors to warnings

    Return a list of report items, one per conflicting attribute.
    """
    # only unique attributes actually present in the input can conflict
    attrs_to_check = [
        param["name"]
        for param in resource_agent.get_parameters()
        if param["unique"] and param["name"] in instance_attributes
    ]
    if not attrs_to_check:
        # nothing to check -- skip scanning the CIB for same-agent resources
        return []
    report_list = []
    same_agent_resources = find_primitives_by_agent(
        resources_section, resource_agent
    )
    for attr in attrs_to_check:
        conflicting_resources = {
            primitive.get("id")
            for primitive in same_agent_resources
            if (
                primitive.get("id") != resource_id
                and instance_attributes[attr]
                == get_value("instance_attributes", primitive, attr)
            )
        }
        if conflicting_resources:
            report_list.append(
                ReportItem(
                    severity=reports.item.get_severity(
                        report_codes.FORCE_OPTIONS,
                        force,
                    ),
                    message=(
                        reports.messages.ResourceInstanceAttrValueNotUnique(
                            attr,
                            instance_attributes[attr],
                            resource_agent.get_name(),
                            sorted(conflicting_resources),
                        )
                    ),
                )
            )
    return report_list
def check_is_without_duplication(
    report_processor: ReportProcessor,
    constraint_section: _Element,
    element: _Element,
    are_duplicate: Callable[[_Element, _Element], bool],
    export_element: Callable[[_Element], Dict[str, Any]],
    # "alowed" typo kept: renaming would break keyword-argument callers
    duplication_alowed: bool = False,
) -> None:
    """
    Report constraints which duplicate the given element; raise on errors.

    are_duplicate -- decides whether two elements duplicate each other
    export_element -- turns an element into a dict for reporting
    duplication_alowed -- downgrade the duplication error to a warning
    """
    duplicates = [
        candidate
        for candidate in constraint_section.findall(".//" + element.tag)
        if (element is not candidate and are_duplicate(element, candidate))
    ]
    if not duplicates:
        return
    listing_report = ReportItem.info(
        reports.messages.DuplicateConstraintsList(
            element.tag,
            [export_element(duplicate) for duplicate in duplicates],
        )
    )
    forcible_report = ReportItem(
        severity=reports.item.get_severity(
            reports.codes.FORCE,
            duplication_alowed,
        ),
        message=reports.messages.DuplicateConstraintsExist(
            [str(duplicate.attrib["id"]) for duplicate in duplicates]
        ),
    )
    if report_processor.report_list(
        [listing_report, forcible_report]
    ).has_errors:
        raise LibraryError()
def check_is_without_duplication(
    report_processor: ReportProcessor,
    constraint_section,
    element,
    are_duplicate,
    export_element,
    # "alowed" typo kept: renaming would break keyword-argument callers
    duplication_alowed=False,
):
    """
    Report constraints which duplicate the given element; raise on errors.

    are_duplicate -- decides whether two elements duplicate each other
    export_element -- turns an element into a dict for reporting
    duplication_alowed -- downgrade the duplication error to a warning
    """
    duplicates = [
        candidate
        for candidate in constraint_section.findall(".//" + element.tag)
        if (element is not candidate and are_duplicate(element, candidate))
    ]
    if not duplicates:
        return
    listing_report = ReportItem.info(
        reports.messages.DuplicateConstraintsList(
            element.tag,
            [export_element(duplicate) for duplicate in duplicates],
        )
    )
    forcible_report = ReportItem(
        severity=reports.item.get_severity(
            reports.codes.FORCE_CONSTRAINT_DUPLICATE,
            duplication_alowed,
        ),
        message=reports.messages.DuplicateConstraintsExist(
            [duplicate.get("id") for duplicate in duplicates]
        ),
    )
    if report_processor.report_list(
        [listing_report, forcible_report]
    ).has_errors:
        raise LibraryError()
def response_to_report_item(
    response,
    severity=ReportItemSeverity.ERROR,
    forceable=None,
    report_pcsd_too_old_on_404=False,
):
    """
    Return a report item which corresponds to the response if it was not
    successful. Otherwise return None.

    Response response -- response from which a report item should be created
    ReportItemSeverity severity -- severity of the report item
    string forceable -- force code
    bool report_pcsd_too_old_on_404 -- if False, report unsupported command
    """
    response_code = response.response_code
    report_item = None
    reason = None
    # a 404 from a connected node may mean the remote pcsd is too old to know
    # the command; this check must run before the generic 404 handling below
    if (report_pcsd_too_old_on_404
            and response.was_connected
            and response_code == 404):
        return ReportItem.error(
            reports.messages.PcsdVersionTooOld(response.request.host_label))
    if response.was_connected:
        if response_code == 400:
            # old pcsd protocol: error messages are commonly passed in plain
            # text in response body with HTTP code 400
            # we need to be backward compatible with that
            report_item = reports.messages.NodeCommunicationCommandUnsuccessful
            reason = response.data.rstrip()
        elif response_code == 401:
            report_item = reports.messages.NodeCommunicationErrorNotAuthorized
            reason = "HTTP error: {0}".format(response_code)
        elif response_code == 403:
            report_item = (
                reports.messages.NodeCommunicationErrorPermissionDenied)
            reason = "HTTP error: {0}".format(response_code)
        elif response_code == 404:
            report_item = (
                reports.messages.NodeCommunicationErrorUnsupportedCommand)
            reason = "HTTP error: {0}".format(response_code)
        elif response_code >= 400:
            # any other client or server error
            report_item = reports.messages.NodeCommunicationError
            reason = "HTTP error: {0}".format(response_code)
    else:
        # the connection itself failed; distinguish timeouts from other
        # connection problems
        if response.errno in [
            pycurl.E_OPERATION_TIMEDOUT,
            pycurl.E_OPERATION_TIMEOUTED,
        ]:
            report_item = reports.messages.NodeCommunicationErrorTimedOut
            reason = response.error_msg
        else:
            report_item = reports.messages.NodeCommunicationErrorUnableToConnect
            reason = response.error_msg
    if not report_item:
        # connected and got a success status code -- nothing to report
        return None
    return ReportItem(
        severity=ReportItemSeverity(severity, forceable),
        message=report_item(
            response.request.host_label,
            response.request.action,
            reason,
        ),
    )