def test_fail_all_nodes_unknown(self):
    """With no known nodes, sync raises and reports HOST_NOT_FOUND + NONE_HOST_FOUND."""
    (self.config
        .env.set_known_nodes([])
        # mock reading the pcsd SSL cert and key files
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: pcsd.synchronize_ssl_certificate(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.error(
                report_codes.HOST_NOT_FOUND,
                force_code=report_codes.SKIP_OFFLINE_NODES,
                host_list=self.node_names
            ),
            fixture.error(report_codes.NONE_HOST_FOUND),
        ]
    )
def test_return_error_on_not_allowed_and_banned_names_forceable(self):
    """Extra names are reported as forceable errors; banned names are not."""
    force = "force_code"
    report_list = validate.names_in(
        ["a", "b"],
        ["x", "a", "z", "c", "d"],
        banned_name_list=["x", "y", "z"],
        code_to_allow_extra_names=force,
    )
    expected_reports = [
        # unknown-but-not-banned names carry the force code
        fixture.error(
            report_codes.INVALID_OPTIONS,
            option_names=["c", "d"],
            allowed=["a", "b"],
            option_type="option",
            allowed_patterns=[],
            force_code=force,
        ),
        # banned names are never forceable
        fixture.error(
            report_codes.INVALID_OPTIONS,
            option_names=["x", "z"],
            allowed=["a", "b"],
            option_type="option",
            allowed_patterns=[],
        ),
    ]
    assert_report_item_list_equal(report_list, expected_reports)
def test_no_wrap(self):
    """Without wrapping, empty option values are reported by every validator."""
    validators = validate.wrap_with_empty_or_valid(
        self.validators,
        wrap=False
    )
    validators.append(validate.value_port_number("c"))
    assert_report_item_list_equal(
        validate.run_collection_of_option_validators(
            {"a": "", "b": "", "c": ""},
            validators
        ),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="a",
                option_value="",
                allowed_values=["x", "y", "z"],
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="b",
                option_value="",
                allowed_values="0..9",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="c",
                option_value="",
                allowed_values="a port number (1-65535)",
            ),
        ]
    )
def test_remove_more_than_defined(self):
    """Removing more links than exist reports a count error plus a missing-link error."""
    assert_report_item_list_equal(
        config_validators.remove_links(
            self.existing + ["2"], self.existing, "knet"
        ),
        [
            fixture.error(
                report_codes
                .COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS
                ,
                # We try to remove more links than defined yet only defined
                # links are counted here - nonexistent links cannot be
                # defined so they are not included in the count
                links_change_count=len(self.existing),
                # the point of the test is to not get negative number here
                links_new_count=0,
                links_limit_count=1,
                add_or_not_remove=False,
            ),
            fixture.error(
                report_codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE,
                link_list=["2"],
                existing_link_list=["0", "1", "3", "10", "11"],
            )
        ]
    )
def test_invalid_and_unsupported_options(self):
    """Unsupported SBD options are hard errors; unknown ones are forceable."""
    config = {
        "SBD_DELAY_START": "yes",
        "SBD_WATCHDOG_TIMEOUT": "5",
        "SBD_STARTMODE": "clean",
        "SBD_WATCHDOG_DEV": "/dev/watchdog",
        "SBD_UNKNOWN": "",
        "SBD_OPTS": " "
    }
    assert_report_item_list_equal(
        cmd_sbd._validate_sbd_options(config),
        [
            # known-but-unsupported options: not forceable
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=sorted(["SBD_WATCHDOG_DEV", "SBD_OPTS"]),
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
            ),
            # unknown option: forceable
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["SBD_UNKNOWN"],
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
                force_code=report_codes.FORCE_OPTIONS,
            ),
        ]
    )
def test_validate_values(self):
    """Invalid guest-node option values produce INVALID_OPTION_VALUE reports."""
    (self.config
        .local.load_cib()
        .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(
            self.env_assist.get_env(),
            options={
                "remote-addr": "*addr",
                "remote-port": "abc",
                "remote-connect-timeout": "def",
            }
        ),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="remote-connect-timeout",
                option_value="def",
                allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)"
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="remote-port",
                option_value="abc",
                allowed_values="a port number (1-65535)"
            )
        ]
    )
def test_validate_values(self):
    """Same value validation on a non-live CIB; conflict check is skipped."""
    (self.config
        .local.load_cib()
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(
            self.env_assist.get_env(),
            options={
                "remote-addr": "*addr",
                "remote-port": "abc",
                "remote-connect-timeout": "def",
            }
        ),
        []
    )
    self.env_assist.assert_reports([
        # no corosync conf loaded -> node name conflict check is skipped
        fixture.info(
            report_codes.COROSYNC_NODE_CONFLICT_CHECK_SKIPPED,
            reason_type="not_live_cib",
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="remote-connect-timeout",
            option_value="def",
            allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)"
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="remote-port",
            option_value="abc",
            allowed_values="a port number (1-65535)"
        )
    ])
def test_nodename_not_unique(self):
    """Duplicate node names are reported once; empty names are reported separately."""
    assert_report_item_list_equal(
        config_validators.add_nodes(
            [
                {"name": "node3", "addrs": ["addr03"]},
                {"name": "node3", "addrs": ["addr04"]},
                # invalid nodes are not reported as duplicate
                {"name": "", "addrs": ["addr05"]},
                {"name": "", "addrs": ["addr06"]},
            ],
            self.fixture_coronodes_1_link,
            []
        ),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="",
                option_name="node 3 name",
                allowed_values="a non-empty string"
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="",
                option_name="node 4 name",
                allowed_values="a non-empty string"
            ),
            fixture.error(
                report_codes.NODE_NAMES_DUPLICATION,
                name_list=["node3"]
            )
        ]
    )
def test_node_addrs_count_mismatch(self):
    """Nodes whose address count differs from existing links are reported."""
    assert_report_item_list_equal(
        config_validators.add_nodes(
            [
                {"name": "node3", "addrs": ["addr03"]},
                # node4 matches the expected 2 addresses -> no report
                {"name": "node4", "addrs": ["addr04", "addr14"]},
                {"name": "node5", "addrs": ["addr05", "addr15", "addr16"]},
            ],
            self.fixture_coronodes_2_links,
            []
        ),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=1,
                min_count=2,
                max_count=2,
                node_name="node3",
                node_index=1
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=3,
                min_count=2,
                max_count=2,
                node_name="node5",
                node_index=3
            ),
        ]
    )
def test_node_addrs_unresolvable_forced(self):
    """With force, unresolvable addresses are warnings; other errors remain errors."""
    assert_report_item_list_equal(
        config_validators.add_nodes(
            [
                # Duplicated addresses reported only once but they trigger
                # a duplicate addresses report.
                {"name": "node3", "addrs": ["addr03", "addrX2"]},
                {"name": "node4", "addrs": ["addrX2", "addr14"]},
                # Extra address reported as well, it triggers its own report
                # about being an extra address.
                {"name": "node5", "addrs": ["addr05", "addrX1", "addrX3"]},
            ],
            self.fixture_coronodes_2_links,
            [],
            force_unresolvable=True
        ),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=3,
                min_count=2,
                max_count=2,
                node_name="node5",
                node_index=3
            ),
            # downgraded to warning by force_unresolvable=True
            fixture.warn(
                report_codes.NODE_ADDRESSES_UNRESOLVABLE,
                address_list=["addrX1", "addrX2", "addrX3"]
            ),
            fixture.error(
                report_codes.NODE_ADDRESSES_DUPLICATION,
                address_list=["addrX2"]
            ),
        ]
    )
def test_empty_node(self):
    """A node dict with no keys reports a missing name and a bad address count."""
    assert_report_item_list_equal(
        config_validators.add_nodes(
            [
                {"name": "node3", "addrs": ["addr03"]},
                {},
                {"name": "node4", "addrs": ["addr04"]},
            ],
            self.fixture_coronodes_1_link,
            []
        ),
        [
            fixture.error(
                report_codes.REQUIRED_OPTION_IS_MISSING,
                option_names=["name"],
                option_type="node 2"
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=0,
                min_count=1,
                max_count=1,
                node_name=None,
                node_index=2
            ),
        ]
    )
def test_missing_node_names(self):
    """Adding a link fails fatally when a corosync node has no name."""
    before = dedent("""\
        totem {
            transport: knet
        }

        nodelist {
            node {
                ring0_addr: node1-addr0
                ring2_addr: node1-addr2
                name: node1
                nodeid: 1
            }

            node {
                ring0_addr: node2-addr0
                ring2_addr: node2-addr2
                nodeid: 2
            }
        }
    """
    )
    (self.config
        .corosync_conf.load_content(before)
        .runner.cib.load()
    )
    self.env_assist.assert_raise_library_error(
        lambda: cluster.add_link(
            self.env_assist.get_env(),
            self.node_addr_map,
            self.link_options,
        ),
        [],
    )
    self.env_assist.assert_reports(
        [
            fixture.error(
                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
                fatal=True,
            ),
            # the nameless node cannot be matched against the new addrs
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=0,
                min_count=1,
                max_count=1,
                node_name=None,
                node_index=None,
            ),
            fixture.error(
                report_codes.NODE_NOT_FOUND,
                node="node2",
                searched_types=[],
            ),
        ]
    )
def test_validation(self):
    """group_add validates ids: wrong element types, missing ids, clone members."""
    resources_before = """
        <resources>
            <group id="G">
                <primitive id="RG1" />
            </group>
            <primitive id="R1">
                <meta_attributes id="R1-meta_attributes" />
            </primitive>
            <primitive id="R2">
                <meta_attributes id="R2-meta_attributes" />
            </primitive>
            <clone id="RC1-clone">
                <primitive id="RC1" />
            </clone>
        </resources>
    """
    (self.config
        .runner.cib.load(resources=resources_before)
    )
    self.env_assist.assert_raise_library_error(
        lambda: resource.group_add(
            self.env_assist.get_env(),
            "R1-meta_attributes",
            ["R2", "R4", "R3", "R2-meta_attributes", "RC1-clone", "RC1"]
        ),
        [
            # the target group id belongs to a non-group element
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="R1-meta_attributes",
                expected_types=["group"],
                current_type="meta_attributes",
            ),
            fixture.report_not_found("R4", context_type="resources"),
            fixture.report_not_found("R3", context_type="resources"),
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="R2-meta_attributes",
                expected_types=[
                    "clone", "master", "group", "primitive", "bundle"
                ],
                current_type="meta_attributes",
            ),
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RC1-clone",
                resource_type="clone",
            ),
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RC1",
                resource_type="clone",
            ),
        ],
    )
def test_no_host_found(self):
    """get_target_list raises when all requested hosts are unknown."""
    unknown_hosts = ["node0", "node1"]
    report_list = [
        fixture.error(
            report_codes.HOST_NOT_FOUND,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            host_list=unknown_hosts
        ),
        fixture.error(report_codes.NONE_HOST_FOUND)
    ]
    assert_raise_library_error(
        lambda: self.factory.get_target_list(unknown_hosts),
        *report_list
    )
    self.report_processor.assert_reports(report_list)
def test_fails_when_remote_node_is_not_prepared(self):
    """Node without pacemaker_remote but already clustered yields three errors."""
    (self.config
        .local.load_cib()
        .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
        .http.host.check_auth(
            communication_list=[
                dict(label=NODE_NAME, dest_list=NODE_DEST_LIST)
            ],
        )
        # host reports pacemaker_remote missing, cluster services running
        .local.get_host_info(
            NODE_NAME,
            NODE_DEST_LIST,
            output=dict(
                services=dict(
                    pacemaker_remote=dict(
                        installed=False, enabled=False, running=False
                    ),
                    pacemaker=dict(
                        installed=True, enabled=False, running=True
                    ),
                    corosync=dict(
                        installed=True, enabled=False, running=True
                    ),
                ),
                cluster_configuration_exists=True,
            )
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.SERVICE_NOT_INSTALLED,
            node=NODE_NAME,
            service_list=["pacemaker_remote"],
        ),
        fixture.error(
            report_codes.HOST_ALREADY_IN_CLUSTER_SERVICES,
            host_name=NODE_NAME,
            service_list=["corosync", "pacemaker"],
        ),
        fixture.error(
            report_codes.HOST_ALREADY_IN_CLUSTER_CONFIG,
            host_name=NODE_NAME,
        ),
    ])
def test_element_exists_in_another_context(self):
    """Searching in the wrong parent reports the id as being in an unexpected context."""
    tree = etree.fromstring("""
        <cib>
            <resources>
                <group id="g1"><primitive id="a"/></group>
                <group id="g2"><primitive id="b"/></group>
            </resources>
        </cib>
    """)
    # "a" exists in g1, but we search within g2
    searcher = lib.ElementSearcher(
        "primitive",
        "a",
        tree.find('.//resources/group[@id="g2"]'),
    )
    self.assertFalse(searcher.element_found())
    self.assertIsNone(searcher.get_element())
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT,
                id="a",
                type="primitive",
                expected_context_type="group",
                expected_context_id="g2",
            ),
        ]
    )
def test_id_already_exists(self):
    """Creating a stonith resource with an id already in the CIB fails."""
    agent_name = "test_simple"
    (self.config
        .runner.pcmk.load_agent(
            agent_name=f"stonith:{agent_name}",
            agent_filename="stonith_agent_fence_simple.xml"
        )
        # CIB already contains the resource we are about to create
        .runner.cib.load(resources=self._expected_cib(expected_cib_simple))
    )
    self.env_assist.assert_raise_library_error(
        lambda: self._create(
            self.env_assist.get_env(),
            "stonith-test",
            agent_name,
            operations=[],
            meta_attributes={},
            instance_attributes={
                "must-set": "value",
                "must-set-new": "B",
            }
        ),
        [
            fixture.error(report_codes.ID_ALREADY_EXISTS, id="stonith-test")
        ],
        expected_in_processor=False
    )
def test_node_offline(self):
    """An unreachable node produces a forceable unable-to-connect report."""
    err_msg = "Failed connect to rh7-3:2224; No route to host"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(
        communication_list=[
            {"label": "rh7-1"},
            {"label": "rh7-2"},
            {
                "label": "rh7-3",
                "was_connected": False,
                "errno": 7,
                "error_msg": err_msg,
            }
        ]
    )
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
        expected_in_processor=False
    )
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="rh7-3",
            reason=err_msg,
            command="remote/check_auth"
        )
    ])
def test_fail_when_resource_not_started(self):
    """Waiting for a guest node fails when its resource never starts."""
    (self.config
        .runner.pcmk.load_state(raw_resources=dict(
            resource_id=VIRTUAL_MACHINE_ID,
            resource_agent="ocf::pacemaker:remote",
            node_name=NODE_1,
            failed="true",
        ))
        # self.config.fs is used to mock authkey file existence. Therefore,
        # filesystem mocking is active and we need to cover it. We tell the
        # file doesn't exist, because we aren't currently mocking the
        # function which reads it:
        # if os.path.isfile(settings.crm_mon_schema):
        #     etree.RelaxNG(file=settings.crm_mon_schema).assertValid(dom)
        .fs.isfile(settings.crm_mon_schema, return_value=False)
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(self.env_assist.get_env(), wait=self.wait),
        [
            fixture.error(
                report_codes.RESOURCE_DOES_NOT_RUN,
                resource_id=VIRTUAL_MACHINE_ID,
            )
        ]
    )
    self.env_assist.assert_reports(REPORTS)
def test_set_stonith_watchdog_timeout_fails_on_all_nodes(self):
    """All nodes failing the timeout reset: per-node warnings plus a final error."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    # every node responds with HTTP 400
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [dict(label=node, response_code=400, output=err_msg)]
            for node in self.node_list
        ]
    )
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node=node,
                reason=err_msg,
                command="remote/set_stonith_watchdog_timeout_to_zero"
            )
            for node in self.node_list
        ]
        +
        [
            fixture.error(
                report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
            )
        ]
    )
def test_used_addrs(self):
    """Link addresses already used by corosync or pacemaker nodes are rejected."""
    pcmk_nodes = [
        PacemakerNode("node-remote", "addr-remote")
    ]
    already_existing_addrs = [
        pcmk_nodes[0].addr,
        self.coro_nodes[0].addrs_plain[0],
    ]
    # reuse one pacemaker and one corosync address in the new link
    self.new_addrs["node2"], self.new_addrs["node3"] = (
        already_existing_addrs
    )
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs, {},
            self.coro_nodes, pcmk_nodes, self.existing_link_list,
            self.transport, constants.IP_VERSION_64
        ),
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_ALREADY_EXIST,
                address_list=already_existing_addrs,
            ),
        ]
    )
def test_missing_node_addrs(self):
    """Nodes omitted from the new-link address map are reported."""
    # drop the second node (sorted order) from the new addresses
    broken_nodes = sorted(self.new_addrs.keys())[1:2]
    for node_name in broken_nodes:
        del self.new_addrs[node_name]
    pcmk_nodes = [
        PacemakerNode("node-remote", "addr-remote")
    ]
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs, {},
            self.coro_nodes, pcmk_nodes, self.existing_link_list,
            self.transport, constants.IP_VERSION_64
        ),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=0,
                min_count=1,
                max_count=1,
                node_name=node_name,
                node_index=None,
            )
            for node_name in broken_nodes
        ]
    )
def test_too_many_links(self):
    """Adding a link beyond the knet maximum is rejected."""
    # cluster already has the maximum number of links defined
    existing_link_list = [str(x) for x in range(constants.LINKS_KNET_MAX)]
    coro_nodes = [
        node.CorosyncNode(
            f"node{i}",
            [
                node.CorosyncNodeAddress(f"addr{i}-{j}", f"{j}")
                for j in existing_link_list
            ],
            i
        )
        for i in [1, 2, 3]
    ]
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs, {},
            coro_nodes, self.pcmk_nodes, existing_link_list,
            self.transport, constants.IP_VERSION_64
        ),
        [
            fixture.error(
                report_codes
                .COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS
                ,
                links_change_count=1,
                links_new_count=(constants.LINKS_KNET_MAX + 1),
                links_limit_count=constants.LINKS_KNET_MAX,
                add_or_not_remove=True,
            )
        ]
    )
def test_need_stopped_cluster_not_stopped(self):
    """Pushing conf that needs a stopped cluster fails when corosync runs on a node."""
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                # first node reports corosync running
                {
                    "label": self.node_labels[0],
                    "output": '{"corosync":true}'
                }
            ]
            +
            [
                {
                    "label": node,
                }
                for node in self.node_labels[1:]
            ]
        )
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_not_stopped_skip_offline(self):
    """A running corosync is still an error even with skip_offline_nodes."""
    # If we know for sure that corosync is running, skip_offline doesn't
    # matter.
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                dict(
                    label="node-1",
                    output='{"corosync":true}',
                ),
                dict(
                    label="node-2",
                ),
            ]
        )
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(
            self.corosync_conf_facade, skip_offline_nodes=True
        ),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        )
    ])
def test_resources_are_in_clones_etc(self):
    """Resources wrapped in a clone or a bundle cannot be added to a group."""
    expected_reports = [
        fixture.error(
            report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
            resource_id="RC1",
            resource_type="clone",
        ),
        fixture.error(
            report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
            resource_id="RB1",
            resource_type="bundle",
        ),
    ]
    # R1 is a plain primitive and produces no report
    report_list = self._validate("G", ["RC1", "R1", "RB1"])
    assert_report_item_list_equal(report_list, expected_reports)
def test_validation_adjacent(self):
    """The adjacent resource must already be a member of the target group."""
    resources_before = """
        <resources>
            <group id="G">
                <primitive id="RG1" />
            </group>
            <group id="X">
                <primitive id="RX1" />
            </group>
            <primitive id="R1" />
        </resources>
    """
    (self.config
        .runner.cib.load(resources=resources_before)
    )
    self.env_assist.assert_raise_library_error(
        lambda: resource.group_add(
            self.env_assist.get_env(),
            "G",
            ["R1"],
            # RX1 is in group X, not in G
            adjacent_resource_id="RX1"
        ),
        [
            fixture.error(
                report_codes
                .CANNOT_GROUP_RESOURCE_ADJACENT_RESOURCE_NOT_IN_GROUP
                ,
                adjacent_resource_id="RX1",
                group_id="G",
            ),
        ],
    )
def test_cib_remote_node(self):
    """A new link address clashing with a CIB remote node's address is rejected."""
    resources = f"""
        <resources>
            <primitive class="ocf" provider="pacemaker" type="remote"
                id="R"
            >
                <instance_attributes>
                    <nvpair name="server"
                        value="{self.node_addr_map['node1']}"
                    />
                </instance_attributes>
            </primitive>
        </resources>
    """
    (self.config
        .corosync_conf.load_content(self.before)
        .runner.cib.load(resources=resources)
    )
    self.env_assist.assert_raise_library_error(
        lambda: cluster.add_link(
            self.env_assist.get_env(),
            self.node_addr_map,
            self.link_options,
        ),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_ALREADY_EXIST,
                address_list=[self.node_addr_map["node1"]],
            ),
        ]
    )
def test_no_resources_specified(self):
    """An empty resource list is reported as nothing to group."""
    expected_reports = [
        fixture.error(report_codes.CANNOT_GROUP_RESOURCE_NO_RESOURCES),
    ]
    assert_report_item_list_equal(self._validate("G", []), expected_reports)
def test_new_offline(self):
    """Unreachable new guest node yields a forceable unable-to-connect error."""
    (self.config
        .local.load_cib()
        .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
        .http.host.check_auth(
            communication_list=[
                dict(
                    label=NODE_NAME,
                    dest_list=NODE_DEST_LIST,
                    **FAIL_HTTP_KWARGS
                )
            ],
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node=NODE_NAME,
            command="remote/check_auth",
            reason="Could not resolve host",
        )
    ])
def test_fail_tag_referenced_in_constraint(self):
    """A tag referenced by a constraint cannot be removed."""
    # pylint: disable=no-self-use
    assert_report_item_list_equal(
        lib.validate_remove_tag(
            get_constraints(self.tree_each_tag_has_one_constraint),
            ["tag-location"],
        ),
        [
            fixture.error(
                # pylint: disable=line-too-long
                reports.codes.
                TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS,
                tag_id="tag-location",
                constraint_id_list=["location"],
            )
        ],
    )
def test_options_names(self):
    """Unknown nvset options are forceable; the rule stays unparsed."""
    validator = nvpair_multi.ValidateNvsetAppendNew(
        self.id_provider, {}, {"not_valid": "a"})
    assert_report_item_list_equal(
        validator.validate(),
        [
            fixture.error(
                reports.codes.INVALID_OPTIONS,
                force_code=reports.codes.FORCE_OPTIONS,
                option_names=["not_valid"],
                allowed=["id", "score"],
                option_type=None,
                allowed_patterns=[],
            ),
        ],
    )
    self.assertIsNone(validator.get_parsed_rule())
def test_all_failed(self):
    """Reload failing on every node: per-node warnings plus a final error."""
    self.config.http.corosync.reload_corosync_conf(communication_list=[
        # node 0: unparsable response
        [
            dict(
                label=self.existing_nodes[0],
                output="not a json",
            ),
        ],
        # node 1: connection failure
        [
            dict(
                label=self.existing_nodes[1],
                was_connected=False,
                errno=7,
                error_msg="error msg",
            ),
        ],
        # node 2: corosync reports reload failure
        [
            dict(
                label=self.existing_nodes[2],
                output=json.dumps(dict(code="failed", message="error msg")),
            )
        ],
    ],
    )
    self.env_assist.assert_raise_library_error(
        lambda: cluster.corosync_authkey_change(self.env_assist.get_env()))
    self.env_assist.assert_reports(
        _get_all_successful_reports(self.existing_nodes)[:-1]
        +
        [
            fixture.warn(
                reports.codes.INVALID_RESPONSE_FORMAT,
                node=self.existing_nodes[0],
            ),
            fixture.warn(
                reports.codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node=self.existing_nodes[1],
                command="remote/reload_corosync_conf",
                reason="error msg",
            ),
            fixture.warn(
                reports.codes.COROSYNC_CONFIG_RELOAD_ERROR,
                node=self.existing_nodes[2],
                reason="error msg",
            ),
            fixture.error(
                reports.codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
            ),
        ])
def assert_raises_invalid_cib_content(
    self, report, extra_reports=None, can_be_more_verbose=True,
    verbose=False,
):
    """Assert verify() raises and reports INVALID_CIB_CONTENT with *report*.

    Helper shared by the invalid-CIB test cases; *extra_reports* are
    appended to the expected report list.
    """
    extra_reports = extra_reports if extra_reports else []
    self.env_assist.assert_raise_library_error(
        lambda: verify(self.env_assist.get_env(), verbose))
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.INVALID_CIB_CONTENT,
            report=report,
            can_be_more_verbose=can_be_more_verbose,
        ),
    ] + extra_reports,
    )
def test_refuse_missing_resource_id(self, _):
    """Missing "rsc" option is reported as REQUIRED_OPTIONS_ARE_MISSING."""
    assert_raise_library_error(
        lambda: self.prepare(
            {
                "id": "id",
                "rsc-role": const.PCMK_ROLE_PROMOTED
            },
            "ticket-key",
            "",
        ),
    )
    self.report_processor.assert_reports([
        fixture.error(
            report_codes.REQUIRED_OPTIONS_ARE_MISSING,
            option_names=["rsc"],
            option_type=None,
        ),
    ])
def test_book_not_valid_id(self):
    """Booking an id starting with a digit reports INVALID_ID."""
    tree = etree.fromstring(
        '<cib><resources><group id="b"/></resources></cib>')
    id_provider = lib.IdProvider(tree)
    searcher = lib.ElementSearcher("group", "1a", tree.find(".//resources"))
    self.assertFalse(searcher.element_found())
    self.assertFalse(searcher.validate_book_id(id_provider, "group name"))
    assert_report_item_list_equal(searcher.get_errors(), [
        fixture.error(
            report_codes.INVALID_ID,
            id="1a",
            id_description="group name",
            is_first_char=True,
            invalid_character="1",
        ),
    ])
def test_metadata_load_error(self):
    """A missing agent makes describe_agent fail with the runner's messages."""
    self.config.runner.pcmk.load_agent(
        agent_is_missing=True,
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
    )
    self.env_assist.assert_raise_library_error(lambda: lib.describe_agent(
        self.env_assist.get_env(), "ocf:heartbeat:Dummy"))
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.UNABLE_TO_GET_AGENT_METADATA,
            agent="ocf:heartbeat:Dummy",
            reason=(
                "Agent ocf:heartbeat:Dummy not found or does not support "
                "meta-data: Invalid argument (22)\nMetadata query for "
                "ocf:heartbeat:Dummy failed: Input/output error"),
        )
    ],
    )
def test_node_options_invalid(self):
    """Unknown keys in a node definition are reported as invalid options."""
    assert_report_item_list_equal(
        config_validators.add_nodes([
            {
                "name": "node3",
                "addrs": ["addr03"],
                "nonsense": "abc"
            },
        ], self.fixture_coronodes_1_link, []),
        [
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["nonsense"],
                option_type="node",
                allowed=["addrs", "name"],
                allowed_patterns=[],
            ),
        ])
def test_disable_failed(self):
    """SBD disable failing on one node reports that node's error after the successes."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1])
    self.config.http.sbd.disable_sbd(communication_list=[
        {
            "label": "rh7-1"
        },
        {
            "label": "rh7-2"
        },
        # last node rejects the disable request
        {
            "label": "rh7-3",
            "response_code": 400,
            "output": err_msg
        },
    ])
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    self.env_assist.assert_reports([
        fixture.info(
            reports.codes.SERVICE_ACTION_STARTED,
            action=reports.const.SERVICE_ACTION_DISABLE,
            service="sbd",
            instance="",
        )
    ] + [
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_DISABLE,
            service="sbd",
            node=node,
            instance="",
        )
        for node in self.node_list[:2]
    ] + [
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="rh7-3",
            reason=err_msg,
            command="remote/sbd_disable",
        )
    ])
def test_id_found_for_another_tag(self):
    """An id belonging to a different element tag reports unexpected type."""
    tree = etree.fromstring(
        '<cib><resources><primitive id="a"/></resources></cib>')
    searcher = lib.ElementSearcher("group", "a", tree.find(".//resources"))
    self.assertFalse(searcher.element_found())
    self.assertIsNone(searcher.get_element())
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="a",
                expected_types=["group"],
                current_type="primitive",
            ),
        ],
    )
def test_agent_not_found(self):
    """Missing agent surfaces the runner's stderr as the report reason."""
    err_msg = "error message"
    self.config.runner.pcmk.load_agent(
        agent_name=self.name.full_name,
        agent_is_missing=True,
        stderr=err_msg,
    )
    self.env_assist.assert_raise_library_error(
        lambda: lib.get_agent_metadata(self.env_assist.get_env(), self.name
    ))
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.UNABLE_TO_GET_AGENT_METADATA,
            agent=self.name.full_name,
            reason=err_msg,
        )
    ],
    )
def test_bad_interval(self):
    """A non-time-interval value for "interval" is rejected."""
    self.env_assist.assert_raise_library_error(
        lambda: resource.get_failcounts(self.env_assist.get_env(),
                                        operation="start",
                                        interval="often"),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="interval",
                option_value="often",
                allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)",
                cannot_be_empty=False,
                forbidden_characters=None,
            ),
        ],
        expected_in_processor=False,
    )
def test_qdevice_tie_breaker_removed(self):
    """Removing the node configured as qdevice tie breaker is rejected."""
    assert_report_item_list_equal(
        config_validators.remove_nodes(
            ["node4"],
            self.fixture_nodes,
            # qdevice model "net" with node4 (id 4) as tie breaker
            ("net", {
                "tie_breaker": "4"
            }, None, None),
        ),
        [
            fixture.error(
                report_codes.NODE_USED_AS_TIE_BREAKER,
                node="node4",
                node_id=4,
            ),
        ],
    )
def test_invalid_instance_ghost(self):
    """A booth instance name containing "/" is invalid even for a ghost env."""
    # pylint: disable=no-self-use
    assert_raise_library_error(
        lambda: env.BoothEnv(
            "../../booth/booth",
            {
                "config_data": "some config data",
                "key_data": "some key data",
                "key_path": "some key path",
            },
        ),
        fixture.error(
            report_codes.BOOTH_INVALID_NAME,
            name="../../booth/booth",
            forbidden_characters="/",
        ),
    )
def test_fail_getting_cluster_status(self):
    """crm_mon failure raises CRM_MON_ERROR with stderr and stdout combined."""
    (self.config.runner.pcmk.load_state_plaintext(
        stdout="some stdout",
        stderr="some stderr",
        returncode=1,
    ))
    self.env_assist.assert_raise_library_error(
        lambda: status.full_cluster_status_plaintext(
            self.env_assist.get_env()),
        [
            fixture.error(
                report_codes.CRM_MON_ERROR,
                reason="some stderr\nsome stdout",
            ),
        ],
        expected_in_processor=False,
    )
def test_book_valid_id_used_in_id_provider(self):
    """An id already booked in the id provider cannot be booked again."""
    tree = etree.fromstring(
        '<cib><resources><group id="b"/></resources></cib>')
    id_provider = lib.IdProvider(tree)
    # book "a" first; an empty list means booking succeeded
    self.assertEqual([], id_provider.book_ids("a"))
    searcher = lib.ElementSearcher("group", "a", tree.find(".//resources"))
    self.assertFalse(searcher.element_found())
    self.assertFalse(searcher.validate_book_id(id_provider))
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.ID_ALREADY_EXISTS,
                id="a",
            ),
        ],
    )
def test_stonith_resource_is_not_running(self):
    """Restartless update is refused when the stonith resource is stopped."""
    self.config_cib(nodes_running_on=0, start_digests=False,
                    monitor_digests=False)
    self.env_assist.assert_raise_library_error(
        self.command(),
        [
            fixture.error(
                reports.codes.STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM,
                reason=
                f"resource '{self.stonith_id}' is not running on any node",
                reason_type=reports.const.
                STONITH_RESTARTLESS_UPDATE_UNABLE_TO_PERFORM_REASON_NOT_RUNNING,
            )
        ],
        expected_in_processor=False,
    )
def test_node_offline(self):
    """Booth config sync reports a forceable error for an unreachable node."""
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data="")(),
            name="open.conf"
        )
        .corosync_conf.load()
        .http.booth.send_config(
            self.name, "",
            communication_list=[
                # first node is unreachable
                dict(
                    label=self.node_list[0],
                    errno=1,
                    error_msg=self.reason,
                    was_connected=False,
                ),
                dict(
                    label=self.node_list[1],
                )
            ],
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.config_sync(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=self.node_list[1],
                name_list=[self.name]
            ),
            fixture.error(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node=self.node_list[0],
                reason=self.reason,
                command="remote/booth_set_config",
                force_code=report_codes.SKIP_OFFLINE_NODES,
            ),
        ]
    )
def test_fail_getting_cib(self):
    """A CIB load failure surfaces as CIB_LOAD_ERROR with the runner's stderr."""
    (self.config.runner.pcmk.load_state_plaintext(
        stdout="crm_mon cluster status",
    ).fs.exists(
        settings.corosync_conf_file,
        return_value=True).corosync_conf.load().runner.cib.load_content(
            "some stdout", stderr="cib load error", returncode=1))
    self.env_assist.assert_raise_library_error(
        lambda: status.full_cluster_status_plaintext(
            self.env_assist.get_env()),
        [
            fixture.error(
                report_codes.CIB_LOAD_ERROR,
                reason="cib load error",
            ),
        ],
        expected_in_processor=False,
    )
def test_invalid_value(self):
    """An invalid SBD_TIMEOUT_ACTION value is reported as a forceable error."""
    config = {
        "SBD_TIMEOUT_ACTION": "flush,noflush",
    }
    assert_report_item_list_equal(
        cmd_sbd._validate_sbd_options(config),
        [
            fixture.error(
                reports.codes.INVALID_OPTION_VALUE,
                force_code=reports.codes.FORCE_OPTIONS,
                option_name="SBD_TIMEOUT_ACTION",
                option_value="flush,noflush",
                allowed_values=self.timeout_action_allowed_values,
                cannot_be_empty=False,
                forbidden_characters=None,
            ),
        ],
    )
def test_unknown_host(self):
    """A node missing from known hosts yields a forceable HOST_NOT_FOUND."""
    # Instance of 'Config' has no 'local' member
    # pylint: disable=no-member
    self.config.env.set_known_hosts_dests({
        NODE_1: NODE_1_DEST_LIST,
        NODE_2: NODE_2_DEST_LIST,
    })
    self.config.local.load_cib().corosync_conf.load(
        node_name_list=[NODE_1, NODE_2])
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(self.env_assist.get_env()), [])
    self.env_assist.assert_reports([
        fixture.error(
            reports.codes.HOST_NOT_FOUND,
            force_code=reports.codes.SKIP_OFFLINE_NODES,
            host_list=[NODE_NAME],
        )
    ])
def test_fails_when_remote_node_returns_invalid_output(self):
    """Unparsable host-info output is reported as INVALID_RESPONSE_FORMAT."""
    # Instance of 'Config' has no 'local' member
    # pylint: disable=no-member
    (self.config.local.load_cib().corosync_conf.load(
        node_name_list=[NODE_1, NODE_2]).http.host.check_auth(
            communication_list=[
                dict(label=NODE_NAME, dest_list=NODE_DEST_LIST)
            ],
        ).local.get_host_info(NODE_NAME, NODE_DEST_LIST,
                              output="INVALID_OUTPUT"))
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(self.env_assist.get_env()), [])
    self.env_assist.assert_reports([
        fixture.error(
            reports.codes.INVALID_RESPONSE_FORMAT,
            node=NODE_NAME,
        )
    ])
def test_unresolvable_addresses(self):
    """Unresolvable new link addresses produce a forceable error."""
    (self.config.corosync_conf.load_content(self.before))
    self.env_assist.assert_raise_library_error(
        lambda: cluster.update_link(
            self.env_assist.get_env(),
            "2",
            self.node_addr_map,
            self.link_options,
        ),
        [],
    )
    self.env_assist.assert_reports([
        fixture.error(
            report_codes.NODE_ADDRESSES_UNRESOLVABLE,
            force_code=report_codes.FORCE,
            address_list=list(self.node_addr_map.values()),
        )
    ])
def test_return_error_on_not_allowed_names(self):
    """Names not in the allowed list are reported via INVALID_OPTIONS."""
    validator = validate.NamesIn(["a", "b", "c"], option_type="option")
    report_list = validator.validate({
        "x": "X",
        "y": "Y"
    })
    expected_reports = [
        fixture.error(
            reports.codes.INVALID_OPTIONS,
            option_names=["x", "y"],
            allowed=["a", "b", "c"],
            option_type="option",
            allowed_patterns=[],
        )
    ]
    assert_report_item_list_equal(report_list, expected_reports)
def test_report_when_dependency_missing(self):
    """An option present without its prerequisite is reported as missing it."""
    validator = validate.DependsOnOption(
        ["name"],
        "prerequisite",
        option_type="type1",
        prerequisite_type="type2",
    )
    expected_reports = [
        fixture.error(
            reports.codes.PREREQUISITE_OPTION_IS_MISSING,
            option_name="name",
            option_type="type1",
            prerequisite_name="prerequisite",
            prerequisite_type="type2",
        ),
    ]
    # "name" is set but "prerequisite" is not
    assert_report_item_list_equal(
        validator.validate({"name": "value"}), expected_reports
    )
def test_resources_are_not_resources(self):
    """An id of a non-resource element cannot be grouped."""
    assert_report_item_list_equal(
        self._validate("G", ["RB1-meta_attributes"]),
        [
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="RB1-meta_attributes",
                expected_types=[
                    "bundle", "clone", "group", "master", "primitive",
                ],
                current_type="meta_attributes",
            ),
        ],
    )
def test_node_name_conflict_report_is_unique(self):
    """An id clash with an existing resource is reported exactly once."""
    (self.config.runner.cib.load(resources="""
        <resources>
            <primitive class="ocf" id="node-name" provider="pacemaker"
                type="Dummy"
            />
        </resources>
    """).corosync_conf.load(
        node_name_list=[NODE_1, NODE_2]).runner.pcmk.load_agent(
            agent_name="ocf:pacemaker:remote"))
    self.env_assist.assert_raise_library_error(
        lambda: node_add_remote(self.env_assist.get_env()), [])
    self.env_assist.assert_reports(
        [fixture.error(
            reports.codes.ID_ALREADY_EXISTS,
            id=NODE_NAME,
        )])
def test_fail_tag_referenced_in_multiple_constraint(self):
    """A tag referenced by several constraints lists all of them in the report."""
    # pylint: disable=no-self-use
    assert_report_item_list_equal(
        lib.validate_remove_tag(
            get_constraints(self.tree_tag_has_multiple_constraints),
            ["multitag"],
        ),
        [
            fixture.error(
                # pylint: disable=line-too-long
                reports.codes.TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS,
                tag_id="multitag",
                constraint_id_list=sorted(
                    set(self.tag2constraint_id.values()),
                ),
            )
        ],
    )
def test_invalid_options(self):
    """Unknown totem options are reported as invalid."""
    report_list = self.call_function({
        "nonsense1": "0",
        "nonsense2": "doesnt matter",
    })
    expected_reports = [
        fixture.error(
            report_codes.INVALID_OPTIONS,
            option_names=["nonsense1", "nonsense2"],
            option_type="totem",
            allowed=self.allowed_options,
            allowed_patterns=[],
        ),
    ]
    assert_report_item_list_equal(report_list, expected_reports)
def test_tag_referenced_in_constraint(self):
    """The remove command refuses a tag referenced by a location constraint."""
    self.config.runner.cib.load(
        constraints=fixture_constraints_for_tags(
            tag[0] for tag in TAG_DEFINITIONS),
        tags=fixture_tags_xml(TAG_DEFINITIONS),
    )
    self.env_assist.assert_raise_library_error(lambda: cmd_tag.remove(
        self.env_assist.get_env(),
        ["tag1"],
    ))
    self.env_assist.assert_reports([
        fixture.error(
            # pylint: disable=line-too-long
            reports.codes.TAG_CANNOT_REMOVE_TAG_REFERENCED_IN_CONSTRAINTS,
            tag_id="tag1",
            constraint_id_list=["location-tag1"],
        ),
    ])