def test_ids_are_not_resources(self):
    """Each non-resource id produces an 'unexpected element' report."""
    report_list = lib._validate_reference_ids_are_resources(
        get_resources(self.test_tree),
        self.nonresource_ids,
    )
    expected = fixture_unexpected_element_reports(self.nonresource_ids)
    assert_report_item_list_equal(report_list, expected)
def add_level(
    lib_env, level, target_type, target_value, devices,
    force_device=False, force_node=False
):
    """
    Validate and add a new fencing level

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the new fencing level
    constant target_type -- the new fencing level target value type
    mixed target_value -- the new fencing level target value
    Iterable devices -- list of stonith devices for the new fencing level
    bool force_device -- continue even if a stonith device does not exist
    bool force_node -- continue even if a node (target) does not exist
    """
    # Regexp and attribute targets require newer CIB schemas, so ask for an
    # upgrade to the matching schema version when loading the CIB.
    version_check = None
    if target_type == TARGET_TYPE_REGEXP:
        version_check = (2, 3, 0)
    elif target_type == TARGET_TYPE_ATTRIBUTE:
        version_check = (2, 4, 0)

    cib = lib_env.get_cib(version_check)
    cib_fencing_topology.add_level(
        lib_env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        level,
        target_type,
        target_value,
        devices,
        # current cluster nodes are needed to validate the target
        ClusterState(get_cluster_status_xml(
            lib_env.cmd_runner())).node_section.nodes,
        force_device,
        force_node
    )
    # send() presumably raises when error reports were collected -- TODO confirm
    lib_env.report_processor.send()
    lib_env.push_cib()
def test_no_ids_empty_list(self):
    """Validating an empty id list yields no reports."""
    reports_found = lib._validate_reference_ids_are_resources(
        get_resources(self.test_tree),
        [],
    )
    assert_report_item_list_equal(reports_found, [])
def _ticket_operation(
    operation, env: LibraryEnvironment, ticket_name, site_ip, instance_name
):
    """
    Run the given booth ticket operation via the booth binary.

    string operation -- booth subcommand to execute
    env -- provides all for communication with externals
    string ticket_name -- name of the ticket to operate on
    string site_ip -- local site ip; autodetected from the cib when falsy
    string instance_name -- booth instance name
    """
    booth_env = env.get_booth_env(instance_name)
    # this command works with live booth files / cib only
    _ensure_live_env(env, booth_env)
    if not site_ip:
        # Autodetect the local site ip from the booth resource bound to this
        # instance's config file; exactly one match is required.
        site_ip_list = resource.find_bound_ip(
            get_resources(env.get_cib()), booth_env.config_path
        )
        if len(site_ip_list) != 1:
            raise LibraryError(
                ReportItem.error(
                    reports.messages.BoothCannotDetermineLocalSiteIp()
                )
            )
        site_ip = site_ip_list[0]

    stdout, stderr, return_code = env.cmd_runner().run(
        [settings.booth_binary, operation, "-s", site_ip, ticket_name]
    )
    if return_code != 0:
        raise LibraryError(
            ReportItem.error(
                reports.messages.BoothTicketOperationFailed(
                    operation,
                    join_multilines([stderr, stdout]),
                    site_ip,
                    ticket_name,
                )
            )
        )
def node_add_guest(
    env, node_name, resource_id, options,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False, wait=False,
):
    """
    setup resource (resource_id) as guest node and setup node as guest

    LibraryEnvironment env provides all for communication with externals
    string node_name -- name of the node the resource will run as a guest on
    string resource_id -- specifies resource that should be guest node
    dict options could contain keys remote-node, remote-port, remote-addr,
        remote-connect-timeout
    bool allow_incomplete_distribution -- is a flag for allowing successfully
        finish this command even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- is a flag for allowing
        successfully finish this command even if starting/enabling
        pacemaker_remote did not succeed
    mixed wait is flag for controlling waiting for pacemaker idle mechanism
    """
    _ensure_consistently_live_env(env)
    env.ensure_wait_satisfiable(wait)

    cib = env.get_cib()
    current_nodes = get_nodes(env.get_corosync_conf(), cib)

    report_list = guest_node.validate_set_as_guest(
        cib, current_nodes, node_name, options
    )
    try:
        resource_element = find_element_by_tag_and_id(
            primitive.TAG, get_resources(cib), resource_id
        )
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    except LibraryError as e:
        # keep the lookup error with the other validation reports
        report_list.extend(e.args)

    # process_list presumably raises when report_list contains errors, so
    # resource_element is bound when execution continues -- TODO confirm
    env.report_processor.process_list(report_list)

    guest_node.set_as_guest(
        resource_element,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    _prepare_pacemaker_remote_environment(
        env,
        current_nodes,
        guest_node.get_host_from_options(node_name, options),
        allow_incomplete_distribution,
        allow_pacemaker_remote_service_fail,
    )

    env.push_cib(cib, wait)
    if wait:
        _ensure_resource_running(env, resource_id)
def run(
    self, env, resource_id, node=None, master=False, lifetime=None,
    wait=False
):
    """
    Validate, run and report the action implemented by self._run_action.

    LibraryEnvironment env -- provides all for communication with externals
    string resource_id -- id of the resource the action operates on
    string node -- node to limit the action to, if any
    bool master -- limit the action to the Master role
    mixed lifetime -- passed through to the action
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    """
    # validate
    env.ensure_wait_satisfiable(wait)  # raises on error
    report_list = []
    resource_el = resource.common.find_one_resource_and_report(
        get_resources(env.get_cib()),
        resource_id,
        report_list,
    )
    if resource_el is not None:
        report_list.extend(self._validate(resource_el, master))
    env.report_processor.process_list(report_list)  # raises on error

    # get current status for wait processing
    if wait is not False:
        resource_running_on_before = get_resource_state(
            env.get_cluster_state(), resource_id
        )

    # run the action
    stdout, stderr, retval = self._run_action(
        env.cmd_runner(), resource_id, node=node, master=master,
        lifetime=lifetime
    )
    if retval != 0:
        if (
            f"Resource '{resource_id}' not moved: active in 0 locations"
            in stderr
        ):
            # the resource is not running anywhere -- report that
            # specifically instead of a generic pacemaker error
            raise LibraryError(
                self._report_action_stopped_resource(resource_id)
            )
        raise LibraryError(
            self._report_action_pcmk_error(resource_id, stdout, stderr)
        )
    env.report_processor.process(
        self._report_action_pcmk_success(resource_id, stdout, stderr)
    )

    # process wait
    if wait is not False:
        wait_for_idle(env.cmd_runner(), env.get_wait_timeout(wait))
        resource_running_on_after = get_resource_state(
            env.get_cluster_state(), resource_id
        )
        env.report_processor.process(
            self._report_wait_result(
                resource_id,
                node,
                resource_running_on_before,
                resource_running_on_after,
            )
        )
def verify(env, verbose=False):
    """
    Verify the cluster cib with crm_verify and run extra checks on it.

    LibraryEnvironment env -- provides all for communication with externals
    bool verbose -- passed through to the crm_verify call
    """
    runner = env.cmd_runner()
    dummy_stdout, verify_stderr, verify_returncode = verify_cmd(
        runner,
        verbose=verbose,
    )

    # 1) Do not even try to think about upgrading!
    # 2) We do not need cib management in env (no need for push...).
    # So env.get_cib is not the best choice here (there were considerations to
    # upgrade cib at all times inside env.get_cib). Go to a lower level here.
    if verify_returncode != 0:
        env.report_processor.append(reports.invalid_cib_content(verify_stderr))

        # Cib is sometimes loadable even if `crm_verify` fails (e.g. when
        # fencing topology is invalid). On the other hand a cib with id
        # duplication is not loadable.
        # We try extra checks when the cib is possible to load.
        cib_xml, dummy_stderr, returncode = get_cib_xml_cmd_results(runner)
        if returncode != 0:
            # can raise; raise LibraryError is better but in this case we
            # prefer to be consistent with raising below
            env.report_processor.send()
    else:
        cib_xml = get_cib_xml(runner)

    cib = get_cib(cib_xml)
    fencing_topology.verify(
        env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        ClusterState(get_cluster_status_xml(runner)).node_section.nodes
    )
    # can raise
    env.report_processor.send()
def _find_resources_to_remove(
    cib,
    report_processor: ReportProcessor,
    node_type,
    node_identifier,
    allow_remove_multiple_nodes,
    find_resources,
):
    """
    Find resource elements matching node_identifier, or raise LibraryError.

    cib -- cib tree to search
    report_processor -- tool for error/warning reporting
    string node_type -- node type label used in the not-found report
    string node_identifier -- identifier the resources are looked up by
    bool allow_remove_multiple_nodes -- make a multiple-results finding
        a warning instead of an error (forcible)
    callable find_resources -- searches the resources section and returns
        a list of matching elements
    """
    resource_element_list = find_resources(get_resources(cib), node_identifier)
    if not resource_element_list:
        raise LibraryError(
            ReportItem.error(
                reports.messages.NodeNotFound(node_identifier, [node_type])))
    if len(resource_element_list) > 1:
        # More than one match is suspicious; stop unless the caller forced
        # removing multiple nodes (then it is reported as a warning only).
        if report_processor.report(
            ReportItem(
                severity=reports.item.get_severity(
                    reports.codes.FORCE,
                    allow_remove_multiple_nodes,
                ),
                message=reports.messages.MultipleResultsFound(
                    "resource",
                    [
                        resource.attrib["id"]
                        for resource in resource_element_list
                    ],
                    node_identifier,
                ),
            )).has_errors:
            raise LibraryError()
    return resource_element_list
def _ticket_operation(operation, env, ticket, site_ip):
    """
    Run the given booth ticket operation via the booth binary.

    string operation -- booth subcommand to execute
    LibraryEnvironment env -- provides all for communication with externals
    string ticket -- name of the ticket to operate on
    string site_ip -- local site ip; autodetected from the cib when falsy
    """
    # the cib is read below, so a live environment is required
    if not env.is_cib_live:
        raise LibraryError(reports.live_environment_required(["CIB"]))
    if not site_ip:
        # Autodetect the local site ip from the booth resource bound to this
        # instance's config file; exactly one match is required.
        site_ip_list = resource.find_bound_ip(
            get_resources(env.get_cib()),
            get_config_file_name(env.booth.name)
        )
        if len(site_ip_list) != 1:
            raise LibraryError(
                booth_reports.booth_cannot_determine_local_site_ip()
            )
        site_ip = site_ip_list[0]

    stdout, stderr, return_code = env.cmd_runner().run([
        settings.booth_binary, operation, "-s", site_ip, ticket
    ])
    if return_code != 0:
        raise LibraryError(
            booth_reports.booth_ticket_operation_failed(
                operation,
                join_multilines([stderr, stdout]),
                site_ip,
                ticket
            )
        )
def _stonith_warnings(cib: Element, is_sbd_running: bool) -> List[str]:
    """Return warning strings describing stonith misconfiguration in the cib."""
    warnings = []
    stonith_enabled = stonith.is_stonith_enabled(get_crm_config(cib))
    all_devices, action_devices, method_cycle_devices = (
        stonith.get_misconfigured_resources(get_resources(cib))
    )

    if stonith_enabled and not all_devices and not is_sbd_running:
        warnings.append(
            "No stonith devices and stonith-enabled is not false"
        )

    if action_devices:
        device_ids = format_list(
            [device.get("id", "") for device in action_devices]
        )
        warnings.append(
            (
                "Following stonith devices have the 'action' option set, "
                "it is recommended to set {0} instead: {1}"
            ).format(format_list(STONITH_ACTION_REPLACED_BY), device_ids)
        )

    if method_cycle_devices:
        device_ids = format_list(
            [device.get("id", "") for device in method_cycle_devices]
        )
        warnings.append(
            "Following stonith devices have the 'method' option set "
            "to 'cycle' which is potentially dangerous, please consider using "
            "'onoff': {0}".format(device_ids)
        )

    return warnings
def _validate(self, tag, to_add, to_remove, adjacent=None):
    """Build a tag-update validator and run it against the test cib."""
    validator = lib.ValidateTagUpdateByIds(
        tag,
        to_add,
        to_remove,
        adjacent_idref=adjacent,
    )
    return validator.validate(
        get_resources(self.test_cib), get_tags(self.test_cib)
    )
def test_validation_failure_all_kinds_reports(self):
    """All kinds of validation failures are reported in one pass."""
    tag_id = "#invalid-tag-id"
    idref_list = 2 * self.nonresource_ids
    idref_list.append(tag_id)
    expected_reports = [
        fixture.report_invalid_id(tag_id, "#", id_description="id"),
        fixture.error(reports.codes.TAG_CANNOT_CONTAIN_ITSELF),
        *fixture_unexpected_element_reports(2 * self.nonresource_ids[:1]),
        *fixture_id_not_found_reports(2 * self.nonresource_ids[1:]),
        *fixture_id_not_found_reports([tag_id]),
        fixture.error(
            reports.codes.TAG_ADD_REMOVE_IDS_DUPLICATION,
            duplicate_ids_list=self.nonresource_ids,
            add_or_not_remove=True,
        ),
    ]
    report_list = lib.validate_create_tag(
        get_resources(self.test_tree),
        tag_id,
        idref_list,
        self.id_provider,
    )
    assert_report_item_list_equal(report_list, expected_reports)
def _find_resources_to_remove(
    cib,
    report_processor: ReportProcessor,
    node_type,
    node_identifier,
    allow_remove_multiple_nodes,
    find_resources
):
    """
    Find resource elements matching node_identifier, or raise LibraryError.

    cib -- cib tree to search
    report_processor -- tool for error/warning reporting
    string node_type -- node type label used in the not-found report
    string node_identifier -- identifier the resources are looked up by
    bool allow_remove_multiple_nodes -- make a multiple-results finding
        forcible instead of an error
    callable find_resources -- searches the resources section and returns
        a list of matching elements
    """
    resource_element_list = find_resources(get_resources(cib), node_identifier)
    if not resource_element_list:
        raise LibraryError(reports.node_not_found(node_identifier, node_type))
    if len(resource_element_list) > 1:
        # More than one match is suspicious; stop unless the caller forced
        # removing multiple nodes.
        if report_processor.report(
            reports.get_problem_creator(
                report_codes.FORCE_REMOVE_MULTIPLE_NODES,
                allow_remove_multiple_nodes
            )(
                reports.multiple_result_found,
                "resource",
                [resource.attrib["id"] for resource in resource_element_list],
                node_identifier
            )
        ).has_errors:
            raise LibraryError()
    return resource_element_list
def ticket_operation(operation, env, name, ticket, site_ip):
    """Run a booth ticket operation, autodetecting the local site ip if needed."""
    if not site_ip:
        found_ips = resource.find_bound_ip(
            get_resources(env.get_cib()),
            get_config_file_name(name)
        )
        if len(found_ips) != 1:
            raise LibraryError(
                booth_reports.booth_cannot_determine_local_site_ip()
            )
        site_ip = found_ips[0]

    command = [settings.booth_binary, operation, "-s", site_ip, ticket]
    command_output, return_code = env.cmd_runner().run(command)

    if return_code != 0:
        raise LibraryError(
            booth_reports.booth_ticket_operation_failed(
                operation,
                command_output,
                site_ip,
                ticket
            )
        )
def test_multiple_id_does_not_exist(self):
    """Each nonexistent id yields an 'id not found' report."""
    report_list = lib._validate_reference_ids_are_resources(
        get_resources(self.test_tree),
        self.nonexistent_ids,
    )
    assert_report_item_list_equal(
        report_list,
        fixture_id_not_found_reports(self.nonexistent_ids),
    )
def remove_from_cluster( env: LibraryEnvironment, resource_remove, instance_name=None, allow_remove_multiple=False, ): """ Remove group with ip resource and booth resource env -- provides all for communication with externals function resource_remove -- provisional hack til resources are moved to lib string instance_name -- booth instance name bool allow_remove_multiple -- remove all resources if more than one found """ # TODO resource_remove is provisional hack til resources are moved to lib report_processor = env.report_processor booth_env = env.get_booth_env(instance_name) # This command does not work with booth config files at all, let's reject # them then. _ensure_live_booth_env(booth_env) resource.get_remover(resource_remove)( _find_resource_elements_for_operation( report_processor, get_resources(env.get_cib()), booth_env, allow_remove_multiple, ) )
def restart( env: LibraryEnvironment, resource_restart, instance_name=None, allow_multiple=False, ): """ Restart group with ip resource and booth resource env -- provides all for communication with externals function resource_restart -- provisional hack til resources are moved to lib string instance_name -- booth instance name bool allow_remove_multiple -- remove all resources if more than one found """ # TODO resource_remove is provisional hack til resources are moved to lib report_processor = env.report_processor booth_env = env.get_booth_env(instance_name) _ensure_live_env(env, booth_env) for booth_element in _find_resource_elements_for_operation( report_processor, get_resources(env.get_cib()), booth_env, allow_multiple, ): resource_restart([booth_element.attrib["id"]])
def unmove_unban( env, resource_id, node=None, master=False, expired=False, wait=False ): """ Remove all constraints created by move and ban LibraryEnvironment env string resource_id -- id of a resource to be unmoved/unbanned string node -- node to limit unmoving/unbanning to, all nodes if None bool master -- only remove constraints for Master role bool expired -- only remove constrains which have already expired mixed wait -- flag for controlling waiting for pacemaker idle mechanism """ # validate env.ensure_wait_satisfiable(wait) # raises on error report_list = [] resource_el = resource.common.find_one_resource_and_report( get_resources(env.get_cib()), resource_id, report_list, ) if resource_el is not None: report_list.extend( resource.common.validate_unmove_unban(resource_el, master) ) if ( expired and not has_resource_unmove_unban_expired_support(env.cmd_runner()) ): report_list.append( reports.resource_unmove_unban_pcmk_expired_not_supported() ) env.report_processor.process_list(report_list) # raises on error # run the action stdout, stderr, retval = resource_unmove_unban( env.cmd_runner(), resource_id, node=node, master=master, expired=expired ) if retval != 0: raise LibraryError( reports.resource_unmove_unban_pcmk_error( resource_id, stdout, stderr ) ) env.report_processor.process( reports.resource_unmove_unban_pcmk_success(resource_id, stdout, stderr) ) # process wait if wait is not False: wait_for_idle(env.cmd_runner(), env.get_wait_timeout(wait)) env.report_processor.process( info_resource_state(env.get_cluster_state(), resource_id) )
def test_raise_if_missing(self):
    """get_resources raises when the resources section has been removed."""
    for section in self.cib.tree.findall(".//configuration/resources"):
        section.getparent().remove(section)
    assert_raise_library_error(
        lambda: lib.get_resources(self.cib.tree),
        (
            severities.ERROR,
            report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
            {"section": "configuration/resources"},
        ),
    )
def test_validation_success(self):
    """A valid tag id with resource ids produces no reports."""
    report_list = lib.validate_create_tag(
        get_resources(self.test_tree),
        "new-tag",
        self.resource_ids,
        self.id_provider,
    )
    assert_report_item_list_equal(report_list, [])
def test_mixed_ids(self):
    """Mixed ids yield the union of the per-category reports."""
    all_ids = (
        self.resource_ids + self.nonresource_ids + self.nonexistent_ids
    )
    expected = (
        fixture_unexpected_element_reports(self.nonresource_ids[:1])
        + fixture_id_not_found_reports(self.nonresource_ids[1:])
        + fixture_id_not_found_reports(self.nonexistent_ids)
    )
    assert_report_item_list_equal(
        lib._validate_reference_ids_are_resources(
            get_resources(self.test_tree), all_ids
        ),
        expected,
    )
def resource_environment(env, resource_id, wait, disabled_after_wait):
    """
    Generator yielding the resources section of the cib; on resumption push
    the cib and optionally wait and check the resulting resource state.
    Presumably wrapped by contextlib.contextmanager at the definition site
    -- not visible here, TODO confirm.

    LibraryEnvironment env -- provides all for communication with externals
    string resource_id -- id of the resource whose state is checked after wait
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    bool disabled_after_wait -- expect the resource to be stopped after wait
    """
    env.ensure_wait_satisfiable(wait)
    cib = env.get_cib()
    yield get_resources(cib)
    env.push_cib(cib, wait)
    if wait is not False:
        ensure_resource_state(
            not disabled_after_wait,
            env.report_processor,
            env.get_cluster_state(),
            resource_id
        )
def run(
    self, env, resource_id, node=None, master=False, lifetime=None,
    wait=False
):
    """
    Validate, run and report the action implemented by self._run_action.

    LibraryEnvironment env -- provides all for communication with externals
    string resource_id -- id of the resource the action operates on
    string node -- node to limit the action to, if any
    bool master -- limit the action to the Master role
    mixed lifetime -- passed through to the action
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    """
    # validate
    env.ensure_wait_satisfiable(wait)  # raises on error
    report_list = []
    resource_el = resource.common.find_one_resource_and_report(
        get_resources(env.get_cib()),
        resource_id,
        report_list,
    )
    if resource_el is not None:
        report_list.extend(self._validate(resource_el, master))
    env.report_processor.process_list(report_list)  # raises on error

    # get current status for wait processing
    if wait is not False:
        resource_running_on_before = get_resource_state(
            env.get_cluster_state(), resource_id)

    # run the action
    stdout, stderr, retval = self._run_action(
        env.cmd_runner(), resource_id,
        node=node, master=master, lifetime=lifetime)
    if retval != 0:
        if (
            f"Resource '{resource_id}' not moved: active in 0 locations"
            in stderr
        ):
            # the resource is not running anywhere -- report that
            # specifically instead of a generic pacemaker error
            raise LibraryError(
                self._report_action_stopped_resource(resource_id))
        raise LibraryError(
            self._report_action_pcmk_error(resource_id, stdout, stderr))
    env.report_processor.process(
        self._report_action_pcmk_success(resource_id, stdout, stderr))

    # process wait
    if wait is not False:
        wait_for_idle(env.cmd_runner(), env.get_wait_timeout(wait))
        resource_running_on_after = get_resource_state(
            env.get_cluster_state(), resource_id)
        env.report_processor.process(
            self._report_wait_result(
                resource_id,
                node,
                resource_running_on_before,
                resource_running_on_after,
            ))
def unmove_unban(env, resource_id, node=None, master=False, expired=False, wait=False): """ Remove all constraints created by move and ban LibraryEnvironment env string resource_id -- id of a resource to be unmoved/unbanned string node -- node to limit unmoving/unbanning to, all nodes if None bool master -- only remove constraints for Master role bool expired -- only remove constrains which have already expired mixed wait -- flag for controlling waiting for pacemaker idle mechanism """ # validate env.ensure_wait_satisfiable(wait) # raises on error report_list = [] resource_el = resource.common.find_one_resource_and_report( get_resources(env.get_cib()), resource_id, report_list, ) if resource_el is not None: report_list.extend( resource.common.validate_unmove_unban(resource_el, master)) if (expired and not has_resource_unmove_unban_expired_support(env.cmd_runner())): report_list.append( reports.resource_unmove_unban_pcmk_expired_not_supported()) env.report_processor.process_list(report_list) # raises on error # run the action stdout, stderr, retval = resource_unmove_unban(env.cmd_runner(), resource_id, node=node, master=master, expired=expired) if retval != 0: raise LibraryError( reports.resource_unmove_unban_pcmk_error(resource_id, stdout, stderr)) env.report_processor.process( reports.resource_unmove_unban_pcmk_success(resource_id, stdout, stderr)) # process wait if wait is not False: wait_for_idle(env.cmd_runner(), env.get_wait_timeout(wait)) env.report_processor.process( info_resource_state(env.get_cluster_state(), resource_id))
def config_destroy(env, ignore_config_load_problems=False):
    """
    Remove the booth config of this instance, and its key file when possible.

    LibraryEnvironment env -- provides all for communication with externals
    bool ignore_config_load_problems -- remove the config (with a warning)
        even if the key file cannot be identified from it
    """
    env.booth.command_expect_live_env()
    if not env.is_cib_live:
        raise LibraryError(reports.live_environment_required(["CIB"]))

    name = env.booth.name
    config_is_used = partial(booth_reports.booth_config_is_used, name)
    report_list = []

    # refuse to destroy a config which is referenced by a cluster resource
    if resource.find_for_config(
        get_resources(env.get_cib()),
        get_config_file_name(name),
    ):
        report_list.append(config_is_used("in cluster resource"))

    # Only systemd is currently supported. Initd does not support multiple
    # instances (here specified by name)
    if external.is_systemctl():
        if external.is_service_running(env.cmd_runner(), "booth", name):
            report_list.append(config_is_used("(running in systemd)"))
        if external.is_service_enabled(env.cmd_runner(), "booth", name):
            report_list.append(config_is_used("(enabled in systemd)"))
    if report_list:
        raise LibraryError(*report_list)

    authfile_path = None
    try:
        authfile_path = config_structure.get_authfile(
            parse(env.booth.get_config_content())
        )
    except LibraryError:
        if not ignore_config_load_problems:
            raise LibraryError(booth_reports.booth_cannot_identify_keyfile())
        # if content not received, not valid,... still remove config needed
        env.report_processor.process(
            booth_reports.booth_cannot_identify_keyfile(
                severity=ReportItemSeverity.WARNING
            )
        )

    # remove the key only when it lives in the booth config directory
    if (
        authfile_path
        and
        os.path.dirname(authfile_path) == settings.booth_config_dir
    ):
        env.booth.set_key_path(authfile_path)
        env.booth.remove_key()
    env.booth.remove_config()
def test_raise_if_missing(self):
    """Removing the resources section makes get_resources raise."""
    resources_xpath = ".//configuration/resources"
    for section in self.cib.tree.findall(resources_xpath):
        section.getparent().remove(section)
    expected_error = (
        severities.ERROR,
        report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
        {
            "section": "configuration/resources",
        },
    )
    assert_raise_library_error(
        lambda: lib.get_resources(self.cib.tree),
        expected_error,
    )
def verify(lib_env):
    """
    Check if all cluster nodes and stonith devices used in fencing levels exist

    LibraryEnvironment lib_env -- environment
    """
    cib = lib_env.get_cib()
    cib_fencing_topology.verify(
        lib_env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        # current cluster nodes are needed to check level targets
        ClusterState(get_cluster_status_xml(
            lib_env.cmd_runner())).node_section.nodes)
    # send() presumably raises when error reports were collected -- TODO confirm
    lib_env.report_processor.send()
def create_in_cluster(env, ip, allow_absent_resource_agent=False):
    """
    Create group with ip resource and booth resource

    LibraryEnvironment env -- provides all for communication with externals
    string ip -- determines float ip for the operation of the booth
    bool allow_absent_resource_agent -- flag allowing to create booth resource
        even if its agent is not installed
    """
    resources_section = get_resources(env.get_cib())
    id_provider = IdProvider(resources_section)
    name = env.booth.name
    booth_config_file_path = get_config_file_name(name)

    # refuse to create a second booth resource for the same config file
    if resource.find_for_config(resources_section, booth_config_file_path):
        raise LibraryError(booth_reports.booth_already_in_cib(name))

    # bind helpers to this cib/name so the creation below reads declaratively
    create_id = partial(
        resource.create_resource_id,
        resources_section,
        name
    )
    get_agent = partial(
        find_valid_resource_agent_by_name,
        env.report_processor,
        env.cmd_runner(),
        allowed_absent=allow_absent_resource_agent
    )
    create_primitive = partial(
        primitive.create,
        env.report_processor,
        resources_section,
        id_provider
    )
    into_booth_group = partial(
        group.place_resource,
        group.provide_group(resources_section, create_id("group")),
    )

    # the group holds the floating ip and the booth site daemon resources
    into_booth_group(create_primitive(
        create_id("ip"),
        get_agent("ocf:heartbeat:IPaddr2"),
        instance_attributes={"ip": ip},
    ))
    into_booth_group(create_primitive(
        create_id("service"),
        get_agent("ocf:pacemaker:booth-site"),
        instance_attributes={"config": booth_config_file_path},
    ))

    env.push_cib()
def resource_environment(
    env, wait=False, wait_for_resource_ids=None,
    resource_state_reporter=info_resource_state, required_cib_version=None
):
    """
    Generator yielding the resources section of the cib; on resumption push
    the cib and optionally wait and report the state of given resources.
    Presumably wrapped by contextlib.contextmanager at the definition site
    -- not visible here, TODO confirm.

    LibraryEnvironment env -- provides all for communication with externals
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    iterable wait_for_resource_ids -- ids of resources to report on after wait
    callable resource_state_reporter -- builds a report from state and an id
    mixed required_cib_version -- passed to env.get_cib
    """
    env.ensure_wait_satisfiable(wait)
    yield get_resources(env.get_cib(required_cib_version))
    env.push_cib(wait=wait)
    if wait is not False and wait_for_resource_ids:
        state = env.get_cluster_state()
        env.report_processor.process_list([
            resource_state_reporter(state, res_id)
            for res_id in wait_for_resource_ids
        ])
def create_in_cluster(env, name, ip, resource_create, resource_remove):
    """
    Create group with ip resource and booth resource

    LibraryEnvironment env -- provides all for communication with externals
    string name -- booth instance name
    string ip -- determines float ip for the operation of the booth
    function resource_create, resource_remove -- provisional hacks until
        resources are moved to lib
    """
    # TODO resource_create is provisional hack until resources are moved to
    # lib
    resources_section = get_resources(env.get_cib())

    booth_config_file_path = get_config_file_name(name)
    # refuse to create a second booth resource for the same config file
    if resource.find_for_config(resources_section, booth_config_file_path):
        raise LibraryError(booth_reports.booth_already_in_cib(name))

    resource.get_creator(resource_create, resource_remove)(
        ip,
        booth_config_file_path,
        create_id=partial(
            resource.create_resource_id,
            resources_section,
            name
        )
    )
def verify(lib_env: LibraryEnvironment):
    """
    Check if all cluster nodes and stonith devices used in fencing levels exist

    LibraryEnvironment lib_env -- environment
    """
    cib = lib_env.get_cib()
    lib_env.report_processor.report_list(
        cib_fencing_topology.verify(
            get_fencing_topology(cib),
            get_resources(cib),
            # current cluster nodes are needed to check level targets
            ClusterState(lib_env.get_cluster_state()).node_section.nodes,
        ))
    if lib_env.report_processor.has_errors:
        raise LibraryError()
def resource_environment(
    env, wait=False, wait_for_resource_ids=None, disabled_after_wait=False,
    required_cib_version=None
):
    """
    Generator yielding the resources section of the cib; on resumption push
    the cib and optionally wait and check the state of given resources.
    Presumably wrapped by contextlib.contextmanager at the definition site
    -- not visible here, TODO confirm.

    LibraryEnvironment env -- provides all for communication with externals
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    iterable wait_for_resource_ids -- ids of resources to check after wait
    bool disabled_after_wait -- expect the resources to be stopped after wait
    mixed required_cib_version -- passed to env.get_cib
    """
    env.ensure_wait_satisfiable(wait)
    cib = env.get_cib(required_cib_version)
    yield get_resources(cib)
    env.push_cib(cib, wait)
    if wait is not False and wait_for_resource_ids:
        state = env.get_cluster_state()
        env.report_processor.process_list([
            ensure_resource_state(not disabled_after_wait, state, res_id)
            for res_id in wait_for_resource_ids
        ])
def disable_simulate(env, resource_ids):
    """
    Simulate disallowing specified resource to be started by the cluster

    LibraryEnvironment env
    strings resource_ids -- ids of the resources to be disabled
    """
    # the simulation runs against the live cluster tooling
    if not env.is_cib_live:
        raise LibraryError(
            reports.live_environment_required([file_type_codes.CIB]))

    resources_section = get_resources(env.get_cib())
    # edits the in-memory cib only; nothing is pushed to the cluster
    _disable_validate_and_edit_cib(env, resources_section, resource_ids)
    plaintext_status, dummy_transitions, dummy_cib = simulate_cib(
        env.cmd_runner(), get_root(resources_section))
    return plaintext_status
def verify(lib_env):
    """
    Check if all cluster nodes and stonith devices used in fencing levels exist

    LibraryEnvironment lib_env -- environment
    """
    cib = lib_env.get_cib()
    cib_fencing_topology.verify(
        lib_env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        # current cluster nodes are needed to check level targets
        ClusterState(
            get_cluster_status_xml(lib_env.cmd_runner())
        ).node_section.nodes
    )
    # send() presumably raises when error reports were collected -- TODO confirm
    lib_env.report_processor.send()
def resource_environment(
    env,
    wait=False,
    wait_for_resource_ids=None,
    resource_state_reporter=info_resource_state,
    required_cib_version=None
):
    """
    Generator yielding the resources section of the cib; on resumption push
    the cib and optionally wait and report the state of given resources.
    Presumably wrapped by contextlib.contextmanager at the definition site
    -- not visible here, TODO confirm.

    LibraryEnvironment env -- provides all for communication with externals
    mixed wait -- flag for controlling waiting for pacemaker idle mechanism
    iterable wait_for_resource_ids -- ids of resources to report on after wait
    callable resource_state_reporter -- builds a report from state and an id
    mixed required_cib_version -- passed to env.get_cib
    """
    env.ensure_wait_satisfiable(wait)
    yield get_resources(env.get_cib(required_cib_version))
    env.push_cib(wait=wait)
    if wait is not False and wait_for_resource_ids:
        state = env.get_cluster_state()
        env.report_processor.process_list([
            resource_state_reporter(state, res_id)
            for res_id in wait_for_resource_ids
        ])
def create_in_cluster(env, name, ip, resource_create):
    """
    Create group with ip resource and booth resource

    LibraryEnvironment env -- provides all for communication with externals
    string name -- booth instance name
    string ip -- determines float ip for the operation of the booth
    function resource_create -- provisional hack until resources are moved
        to lib
    """
    # TODO resource_create is provisional hack until resources are moved to
    # lib
    resources_section = get_resources(env.get_cib())

    booth_config_file_path = get_config_file_name(name)
    # refuse to create a second booth resource for the same config file
    if resource.find_for_config(resources_section, booth_config_file_path):
        raise LibraryError(booth_reports.booth_already_in_cib(name))

    resource.get_creator(resource_create)(
        ip,
        booth_config_file_path,
        create_id=partial(
            resource.create_resource_id,
            resources_section,
            name
        )
    )
def update(
    env: LibraryEnvironment,
    tag_id: str,
    idref_add: Sequence[str],
    idref_remove: Sequence[str],
    adjacent_idref: Optional[str] = None,
    put_after_adjacent: bool = False,
) -> None:
    """
    Update specified tag by given id references.

    env -- provides all for communication with externals
    tag_id -- id of an existing tag to be updated
    idref_add -- reference ids to be added
    idref_remove -- reference ids to be removed
    adjacent_idref -- id of the element next to which the added elements will
        be put
    put_after_adjacent -- put elements after (True) or before (False) the
        adjacent element
    """
    with cib_tags_section(env) as tags_section:
        validator = tag.ValidateTagUpdateByIds(
            tag_id,
            idref_add,
            idref_remove,
            adjacent_idref,
        )
        if env.report_processor.report_list(
            validator.validate(
                get_resources(get_root(tags_section)),
                tags_section,
            )).has_errors:
            raise LibraryError()
        # check for mypy -- tag_element() is typed Optional even though
        # validation passed above
        tag_element = validator.tag_element()
        if tag_element is not None:
            # add first, then remove, both driven by the validator's findings
            tag.add_obj_ref(
                tag_element,
                validator.add_obj_ref_element_list(),
                validator.adjacent_obj_ref_element(),
                put_after_adjacent,
            )
            tag.remove_obj_ref(validator.remove_obj_ref_element_list())
def _find_resource_elements_for_operation(env, name, allow_multiple):
    """
    Find booth resource elements bound to the named config, or raise.

    LibraryEnvironment env -- provides all for communication with externals
    string name -- booth instance name
    bool allow_multiple -- continue (with a warning) when more than one
        matching resource is found
    """
    booth_element_list = resource.find_for_config(
        get_resources(env.get_cib()),
        get_config_file_name(name),
    )
    if not booth_element_list:
        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
    if len(booth_element_list) > 1:
        if not allow_multiple:
            raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
        # forced: demote the duplicate finding to a warning
        env.report_processor.process(
            booth_reports.booth_multiple_times_in_cib(
                name,
                severity=ReportItemSeverity.WARNING,
            ))
    return booth_element_list
def remove_from_cluster(env, name, resource_remove):
    """
    Remove the booth resources bound to the named config from the cluster.

    LibraryEnvironment env -- provides all for communication with externals
    string name -- booth instance name
    function resource_remove -- provisional hack until resources are moved
        to lib
    """
    # TODO resource_remove is provisional hack until resources are moved to
    # lib
    try:
        num_of_removed_booth_resources = resource.get_remover(resource_remove)(
            get_resources(env.get_cib()),
            get_config_file_name(name),
        )
        if num_of_removed_booth_resources > 1:
            # more than one resource was removed -- only warn about it
            env.report_processor.process(
                booth_reports.booth_multiple_times_in_cib(
                    name,
                    severity=ReportItemSeverity.WARNING,
                )
            )
    except resource.BoothNotFoundInCib:
        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
    except resource.BoothMultipleOccurenceFoundInCib:
        raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
def _find_resource_elements_for_operation(env, name, allow_multiple):
    """
    Find booth resource elements bound to the named config, or raise.

    LibraryEnvironment env -- provides all for communication with externals
    string name -- booth instance name
    bool allow_multiple -- continue (with a warning) when more than one
        matching resource is found
    """
    booth_element_list = resource.find_for_config(
        get_resources(env.get_cib()),
        get_config_file_name(name),
    )
    if not booth_element_list:
        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
    if len(booth_element_list) > 1:
        if not allow_multiple:
            raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
        # forced: demote the duplicate finding to a warning
        env.report_processor.process(
            booth_reports.booth_multiple_times_in_cib(
                name,
                severity=ReportItemSeverity.WARNING,
            )
        )
    return booth_element_list
def add_level(
    lib_env, level, target_type, target_value, devices,
    force_device=False, force_node=False
):
    """
    Validate and add a new fencing level

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the new fencing level
    constant target_type -- the new fencing level target value type
    mixed target_value -- the new fencing level target value
    Iterable devices -- list of stonith devices for the new fencing level
    bool force_device -- continue even if a stonith device does not exist
    bool force_node -- continue even if a node (target) does not exist
    """
    # Regexp and attribute targets require newer CIB schemas, so ask for an
    # upgrade to the matching schema version when loading the CIB.
    version_check = None
    if target_type == TARGET_TYPE_REGEXP:
        version_check = Version(2, 3, 0)
    elif target_type == TARGET_TYPE_ATTRIBUTE:
        version_check = Version(2, 4, 0)

    cib = lib_env.get_cib(version_check)
    cib_fencing_topology.add_level(
        lib_env.report_processor,
        get_fencing_topology(cib),
        get_resources(cib),
        level,
        target_type,
        target_value,
        devices,
        # current cluster nodes are needed to validate the target
        ClusterState(
            get_cluster_status_xml(lib_env.cmd_runner())
        ).node_section.nodes,
        force_device,
        force_node
    )
    # send() presumably raises when error reports were collected -- TODO confirm
    lib_env.report_processor.send()
    lib_env.push_cib()
def _find_resources_to_remove(
    cib,
    report_processor,
    node_type,
    node_identifier,
    allow_remove_multiple_nodes,
    find_resources
):
    """
    Find resource elements matching node_identifier, or raise LibraryError.

    cib -- cib tree to search
    report_processor -- tool for error/warning reporting
    string node_type -- node type label used in the not-found report
    string node_identifier -- identifier the resources are looked up by
    bool allow_remove_multiple_nodes -- make a multiple-results finding
        forcible instead of an error
    callable find_resources -- searches the resources section and returns
        a list of matching elements
    """
    resource_element_list = find_resources(get_resources(cib), node_identifier)
    if not resource_element_list:
        raise LibraryError(reports.node_not_found(node_identifier, node_type))
    if len(resource_element_list) > 1:
        # process() presumably raises when the report is an error (i.e. the
        # force flag was not set) -- TODO confirm
        report_processor.process(
            reports.get_problem_creator(
                report_codes.FORCE_REMOVE_MULTIPLE_NODES,
                allow_remove_multiple_nodes
            )(
                reports.multiple_result_found,
                "resource",
                [resource.attrib["id"] for resource in resource_element_list],
                node_identifier
            )
        )
    return resource_element_list
def node_add_remote(
    env, node_name, node_addr, operations, meta_attributes,
    instance_attributes,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    allow_invalid_operation=False,
    allow_invalid_instance_attributes=False,
    use_default_operations=True,
    wait=False,
):
    # pylint: disable=too-many-arguments, too-many-branches, too-many-locals
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- the name of the new node
    mixed node_addr -- the address of the new node or None for default
    list of dict operations -- attributes for each entered operation
    dict meta_attributes -- attributes for primitive/meta_attributes
    dict instance_attributes -- attributes for primitive/instance_attributes
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    bool allow_invalid_operation -- if True, allow to use operations that
        are not listed in a resource agent metadata
    bool allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    bool use_default_operations -- if True, add operations specified in
        a resource agent metadata to the resource
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    # collect reports and raise only once, at the end of validation
    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        # no live cluster -> corosync-based checks cannot be performed
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    resource_agent = remote_node.get_agent(
        env.report_processor,
        env.cmd_runner()
    )

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names,
            [node_name], skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            node_addr = new_target.first_addr if new_target else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            node_addr = known_hosts[0].dest.addr if known_hosts else node_name
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, node_addr)
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent,
        node_name,
        node_addr,
        instance_attributes
    )
    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report in report_list + list(e.args):
            if report.code not in (
                report_codes.ID_ALREADY_EXISTS,
                report_codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                # not a duplicate-id report, keep it as is
                unified_report_list.append(report)
            elif (
                "id" in report.info
                and report.info["id"] not in already_exists
            ):
                # keep only the first report about each conflicting id
                unified_report_list.append(report)
                already_exists.append(report.info["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        # not a live cluster: distribution / service setup is skipped
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
def node_add_guest(
    env, node_name, resource_id, options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait=False,
):
    # pylint: disable=too-many-locals
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    env.ensure_wait_satisfiable(wait)
    # collect reports and raise only once, at the end of validation
    report_processor = SimpleReportProcessor(env.report_processor)
    target_factory = env.get_node_target_factory()
    cib = env.get_cib()
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        # no live cluster -> corosync-based checks cannot be performed
        corosync_conf = None
        report_processor.report(
            reports.corosync_node_conflict_check_skipped("not_live_cib")
        )
    existing_nodes_names, existing_nodes_addrs, report_list = (
        get_existing_nodes_names_addrs(corosync_conf, cib)
    )
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory, report_processor, existing_nodes_names,
            [node_name], skip_offline_nodes
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            new_addr = new_target.first_addr if new_target else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            new_addr = known_hosts[0].dest.addr if known_hosts else node_name
            options["remote-addr"] = new_addr
            report_processor.report(
                reports.using_known_host_address_for_host(node_name, new_addr)
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib, existing_nodes_names, existing_nodes_addrs, node_name, options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        # resource_element stays unset, but get_errors() guarantees an error
        # report, so has_errors below raises before resource_element is used
        report_list.extend(searcher.get_errors())
    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        # not a live cluster: distribution / service setup is skipped
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait=wait)
    if wait:
        _ensure_resource_running(env, resource_id)
def test_success_if_exists(self):
    """get_resources returns the existing resources section element."""
    resources_section = lib.get_resources(self.cib.tree)
    self.assertEqual("resources", resources_section.tag)