def expand_tag(
    some_or_tag_el: _Element, only_expand_types: Iterable[str] = None
) -> List[_Element]:
    """
    Substitute a tag element with elements which the tag refers to.

    some_or_tag_el -- an already expanded element or a tag element to expand
    only_expand_types -- if specified, return only elements of these types
    """
    if some_or_tag_el.tag != TAG_TAG:
        return [some_or_tag_el]
    conf_section = find_parent(some_or_tag_el, "configuration")
    if conf_section is None:
        return []
    expanded_elements = []
    for element_id in [
        str(obj_ref.get("id", ""))
        for obj_ref in some_or_tag_el.iterfind(TAG_OBJREF)
    ]:
        if only_expand_types:
            searcher = ElementSearcher(
                only_expand_types, element_id, conf_section
            )
            if searcher.element_found():
                expanded_elements.append(searcher.get_element())
        else:
            expanded_elements.extend(
                get_configuration_elements_by_id(conf_section, element_id)
            )
    return expanded_elements
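A minimal usage sketch (not part of the original source): build a small CIB-like tree with lxml and expand a tag element into the primitives it references. The ids and the constant values ("tag", "obj_ref") below are assumptions for illustration, and the sketch presumes expand_tag and the helpers it uses (ElementSearcher, find_parent, get_configuration_elements_by_id, TAG_TAG, TAG_OBJREF) are importable from their module.

from lxml import etree

# Hypothetical CIB fragment: two primitives referenced by one tag.
cib = etree.fromstring(
    "<cib><configuration>"
    "<resources><primitive id='r1'/><primitive id='r2'/></resources>"
    "<tags><tag id='t1'><obj_ref id='r1'/><obj_ref id='r2'/></tag></tags>"
    "</configuration></cib>"
)
tag_el = cib.find(".//tag")
# only_expand_types limits which kinds of referenced elements are returned.
for el in expand_tag(tag_el, only_expand_types=["primitive"]):
    print(el.get("id"))  # expected: r1, r2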
def find_resources_and_report(
    context_element,
    resource_ids,
    report_list,
    additional_search=None,
    resource_tags=None,
):
    """
    Find a list of resources, report errors

    etree context_element -- an element to be searched in
    iterable resource_ids -- ids of elements to find
    list report_list -- report items will be put in here
    function additional_search -- None or a func to find resources
    iterable resource_tags -- types of resources to look for, default all types
    """
    if not additional_search:
        additional_search = lambda x: [x]
    if resource_tags is None:
        resource_tags = TAG_CLONE_ALL + [TAG_GROUP, TAG_PRIMITIVE, TAG_BUNDLE]

    resource_el_list = []
    for res_id in resource_ids:
        searcher = ElementSearcher(resource_tags, res_id, context_element)
        if searcher.element_found():
            resource_el_list.extend(additional_search(searcher.get_element()))
        else:
            report_list.extend(searcher.get_errors())
    return resource_el_list
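A hedged sketch of the additional_search hook: the callback receives each found element and returns the list of elements to actually collect, for example unwrapping a clone into the primitives inside it, while ids that cannot be found still produce report items. The ids and the resources_section variable below are illustrative assumptions.

def primitives_inside(resource_el):
    # Map a wrapper (e.g. a clone) to the primitives it contains,
    # or keep the element itself if it wraps nothing.
    inner = resource_el.findall(".//primitive")
    return inner if inner else [resource_el]

report_items = []
found = find_resources_and_report(
    resources_section,  # assumed: the <resources> element of a parsed CIB
    ["my-clone", "my-primitive"],
    report_items,
    additional_search=primitives_inside,
)
# found -> primitives (possibly unwrapped from the clone)
# report_items -> errors for any id that was not found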
def find_tag_elements_by_ids(
    tags_section: Element,
    tag_id_list: Iterable[str],
) -> Tuple[List[Element], ReportItemList]:
    """
    Try to find tag elements by ids and return them with non-empty report
    list in case of errors.

    tags_section -- the tags element to search in
    tag_id_list -- list of tag identifiers
    """
    element_list = []
    report_list: ReportItemList = []
    for tag_id in tag_id_list:
        searcher = ElementSearcher(TAG_TAG, tag_id, tags_section)
        if searcher.element_found():
            element_list.append(searcher.get_element())
        else:
            report_list.extend(searcher.get_errors())
    return element_list, report_list
def find_resources(
    context_element: _Element,
    resource_ids: Iterable[str],
    resource_tags: Optional[Iterable[str]] = None,
) -> Tuple[List[_Element], ReportItemList]:
    """
    Find a list of resources

    context_element -- an element to be searched in
    resource_ids -- ids of elements to find
    resource_tags -- types of resources to look for, default all types
    """
    report_list: ReportItemList = []
    if resource_tags is None:
        resource_tags = ALL_RESOURCE_XML_TAGS
    resource_el_list = []
    for res_id in resource_ids:
        searcher = ElementSearcher(resource_tags, res_id, context_element)
        if searcher.element_found():
            resource_el_list.append(searcher.get_element())
        else:
            report_list.extend(searcher.get_errors())
    return resource_el_list, report_list
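A short sketch of calling the typed variant (the cib variable and ids are assumptions): nothing is raised for a bad id; the function returns whatever it could find together with report items describing what it could not, so a caller can surface all problems at once.

resources_section = cib.find(".//resources")  # assumed: cib is a parsed CIB _Element
found, report_items = find_resources(resources_section, ["dummy1", "no-such-id"])
for el in found:
    print(el.tag, el.get("id"))
if report_items:
    pass  # e.g. hand them to a report processor instead of raising here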
def find_nvsets_by_ids(
    parent_element: _Element, id_list: Iterable[str]
) -> Tuple[List[_Element], ReportItemList]:
    """
    Find nvset elements by their IDs and return them with non-empty report
    list in case of errors.

    parent_element -- an element to look for nvsets in
    id_list -- nvset IDs to be looked for
    """
    element_list = []
    report_list: ReportItemList = []
    for nvset_id in id_list:
        searcher = ElementSearcher(
            _tag_to_type.keys(),
            nvset_id,
            parent_element,
            element_type_desc="options set",
        )
        if searcher.element_found():
            element_list.append(searcher.get_element())
        else:
            report_list.extend(searcher.get_errors())
    return element_list, report_list
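Similarly for nvsets, a hedged sketch (the ids are illustrative): the search is restricted to the nvset tags known to this module, and element_type_desc="options set" shapes how the element type is described in any error reports.

primitive_el = cib.find(".//primitive[@id='dummy1']")  # assumed id
nvsets, report_items = find_nvsets_by_ids(
    primitive_el,
    ["dummy1-meta_attributes", "dummy1-instance_attributes"],
)
for nvset in nvsets:
    print(nvset.tag, nvset.get("id"))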
def node_add_guest(
    env: LibraryEnvironment,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait: WaitType = False,
):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)
    report_processor = env.report_processor
    cib = env.get_cib()
    id_provider = IdProvider(cib)

    corosync_conf: Optional[CorosyncConfigFacade]
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB,
                )
            )
        )
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            if new_target:
                new_addr = new_target.first_addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source
                    )
                )
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                new_addr = known_hosts[0].dest.addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source
                    )
                )
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib, existing_nodes_names, existing_nodes_addrs, node_name, options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, resource_id)
def validate(self, resources_section, id_provider):
    """
    Run the validation and return a report item list

    etree.Element resources_section -- resources section of a cib
    IdProvider id_provider -- elements' ids generator and uniqueness checker
    """
    report_list = []

    # Check that group_id either matches an existing group element or is
    # not occupied by any other element.
    group_missing_id_valid = False
    group_searcher = ElementSearcher(
        group.TAG, self._group_id, resources_section
    )
    if group_searcher.element_found():
        self._group_element = group_searcher.get_element()
    elif group_searcher.validate_book_id(
        id_provider, id_description="group name"
    ):
        group_missing_id_valid = True
    else:
        report_list.extend(group_searcher.get_errors())

    # Get resource elements to move to the group.
    # Get all types of resources, so that validation can later tell for
    # example: 'C' is a clone, clones cannot be put into a group. If we
    # only searched for primitives here, we would get 'C' is not a
    # resource, which is not that informative.
    self._resource_element_list = common.find_resources_and_report(
        resources_section, self._resource_id_list, report_list
    )

    # Get an adjacent resource element.
    if self._adjacent_resource_id is not None:
        # If the group already exists, check the adjacent resource is in it.
        if self._group_element is not None:
            adjacent_searcher = ElementSearcher(
                primitive.TAG,
                self._adjacent_resource_id,
                self._group_element,
            )
            if adjacent_searcher.element_found():
                self._adjacent_resource_element = (
                    adjacent_searcher.get_element()
                )
            else:
                # pylint: disable=line-too-long
                report_list.append(
                    ReportItem.error(
                        reports.messages.CannotGroupResourceAdjacentResourceNotInGroup(
                            self._adjacent_resource_id,
                            self._group_id,
                        )
                    )
                )
        # The group will be created so there is no adjacent resource in it.
        elif group_missing_id_valid:
            # pylint: disable=line-too-long
            report_list.append(
                ReportItem.error(
                    reports.messages.CannotGroupResourceAdjacentResourceForNewGroup(
                        self._adjacent_resource_id,
                        self._group_id,
                    )
                )
            )
        # else: The group_id belongs to a non-group element, checking the
        # adjacent_resource is pointless.

    report_list.extend(
        self._validate_elements(
            bad_or_missing_group_specified=(self._group_element is None),
            bad_resources_specified=(
                self._resource_id_list and not self._resource_element_list
            ),
            bad_adjacent_specified=(
                self._adjacent_resource_id
                and self._adjacent_resource_element is None
            ),
        )
    )
    return report_list
def validate(self, resources_section, id_provider):
    """
    Run the validation and return a report item list

    etree.Element resources_section -- resources section of a cib
    IdProvider id_provider -- elements' ids generator and uniqueness checker
    """
    report_list = []

    # Check that group_id either matches an existing group element or is
    # not occupied by any other element.
    group_missing_id_valid = False
    group_searcher = ElementSearcher(
        group.TAG, self._group_id, resources_section
    )
    if group_searcher.element_found():
        self._group_element = group_searcher.get_element()
    elif group_searcher.validate_book_id(
        id_provider, id_description="group name"
    ):
        group_missing_id_valid = True
    else:
        report_list.extend(group_searcher.get_errors())

    # Get resource elements to move to the group.
    # Get all types of resources, so that validation can later tell for
    # example: 'C' is a clone, clones cannot be put into a group. If we
    # only searched for primitives here, we would get 'C' is not a
    # resource, which is not that informative.
    self._resource_element_list = common.find_resources_and_report(
        resources_section, self._resource_id_list, report_list
    )

    # Get an adjacent resource element.
    if self._adjacent_resource_id is not None:
        # If the group already exists, check the adjacent resource is in it.
        if self._group_element is not None:
            adjacent_searcher = ElementSearcher(
                primitive.TAG,
                self._adjacent_resource_id,
                self._group_element,
            )
            if adjacent_searcher.element_found():
                self._adjacent_resource_element = (
                    adjacent_searcher.get_element()
                )
            else:
                report_list.append(
                    reports
                    .cannot_group_resource_adjacent_resource_not_in_group(
                        self._adjacent_resource_id,
                        self._group_id,
                    )
                )
        # The group will be created so there is no adjacent resource in it.
        elif group_missing_id_valid:
            report_list.append(
                reports
                .cannot_group_resource_adjacent_resource_for_new_group(
                    self._adjacent_resource_id,
                    self._group_id,
                )
            )
        # else: The group_id belongs to a non-group element, checking the
        # adjacent_resource is pointless.

    report_list.extend(
        self._validate_elements(
            bad_or_missing_group_specified=(
                self._group_element is None
            ),
            bad_resources_specified=(
                self._resource_id_list and not self._resource_element_list
            ),
            bad_adjacent_specified=(
                self._adjacent_resource_id
                and self._adjacent_resource_element is None
            ),
        )
    )
    return report_list
def node_add_guest(
    env,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib, existing_nodes_names, existing_nodes_addrs, node_name, options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)
def node_add_guest(
    env,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs = get_existing_nodes_names_addrs(
        corosync_conf, cib
    )

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib, existing_nodes_names, existing_nodes_addrs, node_name, options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)