def test_node_addrs_mismatch_existing_links(self):
    """New nodes' address IP versions must match the existing links' versions."""
    new_nodes = [
        {"name": "node3", "addrs": ["::ffff:10:0:0:3", "addr13"]},
        {"name": "node4", "addrs": ["addr04", "10.0.0.14"]},
    ]
    existing_nodes = [
        CNode("node1", [CAddr("10.0.0.1", 1), CAddr("addr11", 2)], 1),
        CNode("node2", [CAddr("addr02", 1), CAddr("::ffff:10:0:0:2", 2)], 2),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(new_nodes, existing_nodes, []),
        [
            # link 1 is IPv4 (10.0.0.1), link 2 is IPv6 (::ffff:10:0:0:2)
            fixture.error(
                report_codes.COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK,
                address="::ffff:10:0:0:3",
                expected_address_type="IPv4",
                link_number=1,
            ),
            fixture.error(
                report_codes.COROSYNC_ADDRESS_IP_VERSION_WRONG_FOR_LINK,
                address="10.0.0.14",
                expected_address_type="IPv6",
                link_number=2,
            ),
        ],
    )
def test_invalid_option(self):
    """Knet-only options passed to the udp transport are each reported."""
    generic = {
        "knet_pmtud_interval": "1234",
        "link_mode": "active",
    }
    compression = {
        "level": "5",
        "model": "zlib",
        "threshold": "1234",
    }
    crypto = {
        "cipher": "aes256",
        "hash": "sha256",
        "model": "nss",
    }
    assert_report_item_list_equal(
        config_validators.create_transport_udp(generic, compression, crypto),
        [
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["knet_pmtud_interval", "link_mode"],
                option_type="udp/udpu transport",
                allowed=["ip_version", "netmtu"],
                allowed_patterns=[],
            ),
            # compression and crypto sections are knet-only
            fixture.error(
                report_codes.COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS,
                option_type="compression",
                actual_transport="udp/udpu",
                required_transport_list=("knet", ),
            ),
            fixture.error(
                report_codes.COROSYNC_TRANSPORT_UNSUPPORTED_OPTIONS,
                option_type="crypto",
                actual_transport="udp/udpu",
                required_transport_list=("knet", ),
            ),
        ],
    )
def test_node_addrs_count_mismatch(self):
    """New nodes must have as many addresses as the existing links (two)."""
    new_nodes = [
        {"name": "node3", "addrs": ["addr03"]},
        {"name": "node4", "addrs": ["addr04", "addr14"]},
        {"name": "node5", "addrs": ["addr05", "addr15", "addr16"]},
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes, self.fixture_coronodes_2_links, []
        ),
        [
            # node4 matches the two existing links, so only node3 and
            # node5 are reported
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=1,
                min_count=2,
                max_count=2,
                node_name="node3",
                node_index=1,
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=3,
                min_count=2,
                max_count=2,
                node_name="node5",
                node_index=3,
            ),
        ],
    )
def test_need_stopped_cluster_not_stopped(self):
    """Push fails when the conf needs a stopped cluster but corosync runs everywhere."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            {"label": node, "output": '{"corosync":true}'}
            for node in self.node_labels
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_node_addrs_to_many_udp(self):
    """The udp transport allows at most one address per node."""
    nodes = [
        {"name": "node1", "addrs": ["addr01", "addr03"]},
        {"name": "node2", "addrs": ["addr02", "addr04"]},
    ]
    assert_report_item_list_equal(
        config_validators.create("test-cluster", nodes, "udp"),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=2,
                min_count=1,
                max_count=1,
                node_name="node1",
                node_index=1,
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=2,
                min_count=1,
                max_count=1,
                node_name="node2",
                node_index=2,
            ),
        ],
    )
def test_node_addrs_unresolvable(self):
    """Unresolvable addresses are reported once, plus a duplication report."""
    # Duplicated addresses reported only once but they trigger
    # a duplicate addresses report.
    nodes = [
        {"name": "node1", "addrs": ["addr01", "addrX2"]},
        {"name": "node2", "addrs": ["addrX2", "addr05"]},
        {"name": "node3", "addrs": ["addr03", "addrX1"]},
    ]
    assert_report_item_list_equal(
        config_validators.create("test-cluster", nodes, "knet"),
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_UNRESOLVABLE,
                force_code=report_codes.FORCE_NODE_ADDRESSES_UNRESOLVABLE,
                address_list=["addrX1", "addrX2"],
            ),
            fixture.error(
                report_codes.NODE_ADDRESSES_DUPLICATION,
                address_list=["addrX2"],
            ),
        ],
    )
def test_node_addrs_to_many_knet(self):
    """The knet transport allows at most eight addresses per node."""
    nodes = [
        # nine addresses each — one over the knet limit of eight
        {"name": "node1", "addrs": [f"addr{i:02d}" for i in range(1, 10)]},
        {"name": "node2", "addrs": [f"addr{i:02d}" for i in range(11, 20)]},
    ]
    assert_report_item_list_equal(
        config_validators.create("test-cluster", nodes, "knet"),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=9,
                min_count=1,
                max_count=8,
                node_name="node1",
                node_index=1,
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=9,
                min_count=1,
                max_count=8,
                node_name="node2",
                node_index=2,
            ),
        ],
    )
def test_invalid_options(self):
    """Unknown knet link options are reported per link with the allowed list."""
    allowed_options = [
        "ip_version",
        "link_priority",
        "linknumber",
        "mcastport",
        "ping_interval",
        "ping_precision",
        "ping_timeout",
        "pong_count",
        "transport",
    ]
    link_list = [
        {
            "nonsense1": "0",
            "nonsense2": "doesnt matter",
        },
        {
            "nonsense3": "who cares",
        },
    ]
    assert_report_item_list_equal(
        config_validators.create_link_list_knet(link_list, 3),
        [
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["nonsense1", "nonsense2"],
                option_type="link",
                allowed=allowed_options,
                allowed_patterns=[],
            ),
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["nonsense3"],
                option_type="link",
                allowed=allowed_options,
                allowed_patterns=[],
            ),
        ],
    )
def test_invalid_all_values(self):
    """Each invalid udp link option value produces its own report."""
    link = {
        "bindnetaddr": "my-network",
        "broadcast": "yes",
        "mcastaddr": "my-group",
        "mcastport": "0",
        "ttl": "256",
    }
    assert_report_item_list_equal(
        config_validators.create_link_list_udp([link]),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="my-network",
                option_name="bindnetaddr",
                allowed_values="an IP address",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="yes",
                option_name="broadcast",
                allowed_values=("0", "1"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="my-group",
                option_name="mcastaddr",
                allowed_values="an IP address",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="0",
                option_name="mcastport",
                allowed_values="a port number (1-65535)",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="256",
                option_name="ttl",
                allowed_values="0..255",
            ),
        ],
    )
def test_need_stopped_cluster_comunnication_failure(self):
    """An unauthorized node makes the corosync-offline check fail for it."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            dict(label="node-1", ),
            dict(
                label="node-2",
                response_code=401,
                output="""{"notauthorized":"true"}""",
            ),
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_json_error(self):
    """Unparsable or schema-less status JSON counts as a failed offline check."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            dict(
                label="node-1",
                output="{",  # not valid json
            ),
            dict(
                label="node-2",
                # The expected key (/corosync) is missing, we don't
                # care about version 2 status key
                # (/services/corosync/running)
                output='{"services":{"corosync":{"running":true}}}',
            ),
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-1",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_dont_need_stopped_cluster_error(self):
    """A node rejecting the new conf is reported as a distribution error."""
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            {"label": "node-1"},
            {"label": "node-2", "response_code": 400, "output": "Failed"},
        ],
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.error(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_validate_values(self):
    """Malformed guest-node option values are all reported."""
    (self.config
        .local.load_cib()
        .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
    )
    bad_options = {
        "remote-addr": "*addr",
        "remote-port": "abc",
        "remote-connect-timeout": "def",
    }
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(
            self.env_assist.get_env(),
            node_name="*name",
            options=bad_options,
        ),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="remote-connect-timeout",
                option_value="def",
                allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_name="remote-port",
                option_value="abc",
                allowed_values="a port number (1-65535)",
            ),
        ],
    )
def test_invalid_all_values(self):
    """Each invalid knet link option value produces its own report."""
    link_list = [
        {
            "ip_version": "ipv5",
            "linknumber": "-1",
            "link_priority": "256",
            "mcastport": "65536",
            "transport": "tcp",
        },
        {
            "ping_interval": "-250",
            "ping_precision": "-15",
            "ping_timeout": "-750",
            "pong_count": "-10",
            "transport": "udpu",
        },
    ]
    assert_report_item_list_equal(
        config_validators.create_link_list_knet(link_list, 3),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="ipv5",
                option_name="ip_version",
                allowed_values=("ipv4", "ipv6"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-1",
                option_name="linknumber",
                allowed_values="0..3",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="256",
                option_name="link_priority",
                allowed_values="0..255",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="65536",
                option_name="mcastport",
                allowed_values="a port number (1-65535)",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="tcp",
                option_name="transport",
                allowed_values=("sctp", "udp"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-250",
                option_name="ping_interval",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-15",
                option_name="ping_precision",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-750",
                option_name="ping_timeout",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-10",
                option_name="pong_count",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="udpu",
                option_name="transport",
                allowed_values=("sctp", "udp"),
            ),
        ],
    )
def test_no_host_found(self):
    """Requesting only unknown hosts raises and reports both failures."""
    unknown_hosts = ["node0", "node1"]
    report_list = [
        fixture.error(
            report_codes.HOST_NOT_FOUND,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            host_list=unknown_hosts,
        ),
        fixture.error(report_codes.NONE_HOST_FOUND),
    ]
    assert_raise_library_error(
        lambda: self.factory.get_target_list(unknown_hosts),
        *report_list,
    )
    self.report_processor.assert_reports(report_list)
def test_invalid_all_values(self):
    """Each invalid knet transport option value produces its own report."""
    generic = {
        "ip_version": "ipv5",
        "knet_pmtud_interval": "a minute",
        "link_mode": "random",
    }
    compression = {
        "level": "maximum",
        "model": "",
        "threshold": "reasonable",
    }
    crypto = {
        "cipher": "strongest",
        "hash": "fastest",
        "model": "best",
    }
    assert_report_item_list_equal(
        config_validators.create_transport_knet(
            generic, compression, crypto
        ),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="ipv5",
                option_name="ip_version",
                allowed_values=("ipv4", "ipv6"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="a minute",
                option_name="knet_pmtud_interval",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="random",
                option_name="link_mode",
                allowed_values=("active", "passive", "rr"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="maximum",
                option_name="level",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="",
                option_name="model",
                allowed_values="a compression model e.g. zlib, lz4 or bzip2"
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="reasonable",
                option_name="threshold",
                allowed_values="a non-negative integer",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="strongest",
                option_name="cipher",
                allowed_values=("none", "aes256", "aes192", "aes128", "3des"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="fastest",
                option_name="hash",
                allowed_values=(
                    "none", "md5", "sha1", "sha256", "sha384", "sha512"
                ),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="best",
                option_name="model",
                allowed_values=("nss", "openssl"),
            ),
        ],
    )
def test_validation(self):
    """Invalid group id, missing resources and unclonable resources all reported."""
    resources_before = """ <resources> <group id="G"> <primitive id="RG1" /> </group> <primitive id="R1"> <meta_attributes id="R1-meta_attributes" /> </primitive> <primitive id="R2"> <meta_attributes id="R2-meta_attributes" /> </primitive> <clone id="RC1-clone"> <primitive id="RC1" /> </clone> </resources> """
    self.config.runner.cib.load(resources=resources_before)
    self.env_assist.assert_raise_library_error(
        lambda: resource.group_add(
            self.env_assist.get_env(),
            "R1-meta_attributes",
            ["R2", "R4", "R3", "R2-meta_attributes", "RC1-clone", "RC1"],
        ),
        [
            # the target group id refers to a meta_attributes element
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="R1-meta_attributes",
                expected_types=["group"],
                current_type="meta_attributes",
            ),
            fixture.report_not_found("R4", context_type="resources"),
            fixture.report_not_found("R3", context_type="resources"),
            fixture.error(
                report_codes.ID_BELONGS_TO_UNEXPECTED_TYPE,
                id="R2-meta_attributes",
                expected_types=[
                    "clone", "master", "group", "primitive", "bundle"
                ],
                current_type="meta_attributes",
            ),
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RC1-clone",
                resource_type="clone",
            ),
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RC1",
                resource_type="clone",
            ),
        ],
    )
def test_resources_are_in_clones_etc(self):
    """Resources wrapped in a clone or a bundle cannot be added to a group."""
    assert_report_item_list_equal(
        self._validate("G", ["RC1", "R1", "RB1"]),
        [
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RC1",
                resource_type="clone",
            ),
            fixture.error(
                report_codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
                resource_id="RB1",
                resource_type="bundle",
            ),
        ],
    )
def test_need_stopped_cluster_comunnication_failure(
    self, mock_is_systemctl
):
    """An unauthorized node fails the legacy status-based offline check."""
    mock_is_systemctl.return_value = True
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.add_communication(
        "status",
        [
            dict(
                label="node-1",
                response_code=200,
                output="""\
{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
"resources":[],"groups":[],"constraints":{},"cluster_settings":\
{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
false,"acls":{},"username":"******"}
""",
            ),
            dict(
                label="node-2",
                response_code=401,
                output="""{"notauthorized":"true"}""",
            ),
        ],
        action="remote/status",
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_nodename_not_unique(self):
    """Duplicate node names are reported; invalid (empty) names are not."""
    nodes = [
        {"name": "node1", "addrs": ["addr01"]},
        {"name": "node2", "addrs": ["addr02"]},
        {"name": "node2", "addrs": ["addr03"]},
        {"name": "node3", "addrs": ["addr04"]},
        {"name": "node1", "addrs": ["addr05"]},
        # invalid nodes are not reported as duplicate
        {"name": "", "addrs": ["addr06"]},
        {"name": "", "addrs": ["addr07"]},
    ]
    assert_report_item_list_equal(
        config_validators.create("test-cluster", nodes, "udp"),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="",
                option_name="node 6 name",
                allowed_values="a non-empty string",
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="",
                option_name="node 7 name",
                allowed_values="a non-empty string",
            ),
            fixture.error(
                report_codes.NODE_NAMES_DUPLICATION,
                name_list=["node1", "node2"],
            ),
        ],
    )
def test_more_errors(self):
    """Several removal problems are all reported together."""
    quorum_options = ("net", {"tie_breaker": "4"}, None, None)
    assert_report_item_list_equal(
        config_validators.remove_nodes(
            ["node3", "node1", "node2", "node4", "nodeX"],
            self.fixture_nodes,
            quorum_options,
        ),
        [
            fixture.error(
                report_codes.NODE_NOT_FOUND,
                node="nodeX",
                searched_types=[],
            ),
            fixture.error(report_codes.CANNOT_REMOVE_ALL_CLUSTER_NODES),
            fixture.error(
                report_codes.NODE_USED_AS_TIE_BREAKER,
                node="node4",
                node_id=4,
            ),
        ],
    )
def test_invalid_all_values(self):
    """Each invalid udp transport option value produces its own report."""
    generic = {
        "ip_version": "ipv5",
        "netmtu": "-5",
    }
    assert_report_item_list_equal(
        config_validators.create_transport_udp(generic, {}, {}),
        [
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="ipv5",
                option_name="ip_version",
                allowed_values=("ipv4", "ipv6"),
            ),
            fixture.error(
                report_codes.INVALID_OPTION_VALUE,
                option_value="-5",
                option_name="netmtu",
                allowed_values="a positive integer",
            ),
        ],
    )
def test_write_failure(self):
    """Failing to open the local booth config for writing is a fatal IO error."""
    (self.config
        .http.booth.get_config(
            self.name, self.config_data, node_labels=[self.node_name]
        )
        .fs.exists(self.config_path, False)
        .fs.open(
            self.config_path,
            mode="w",
            side_effect=EnvironmentError(0, self.reason, self.config_path),
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.pull_config(
            self.env_assist.get_env(), self.node_name
        ),
        [
            fixture.error(
                report_codes.FILE_IO_ERROR,
                reason="{}: '{}'".format(self.reason, self.config_path),
                file_role=file_roles.BOOTH_CONFIG,
                file_path=self.config_path,
                operation="write",
            )
        ],
        expected_in_processor=False,
    )
    self.env_assist.assert_reports(self.report_list[:1])
def test_network_failure(self):
    """A connection failure while fetching the booth config is reported."""
    self.config.http.booth.get_config(
        self.name,
        communication_list=[
            dict(
                label=self.node_name,
                was_connected=False,
                errno=1,
                error_msg=self.reason,
            )
        ],
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.pull_config(
            self.env_assist.get_env(), self.node_name
        ),
        [],
    )
    self.env_assist.assert_reports([
        self.report_list[0],
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            force_code=None,
            node=self.node_name,
            command="remote/booth_get_config",
            reason=self.reason,
        ),
    ])
def test_node_addrs_count_mismatch_knet(self):
    """With knet every node must define the same number of addresses."""
    nodes = [
        {"name": "node1", "addrs": ["addr01", "addr11"]},
        {"name": "node2", "addrs": ["addr02"]},
        {"name": "node3", "addrs": ["addr03", "addr13"]},
        {"name": "node4", "addrs": ["addr04"]},
        {"name": "node5", "addrs": ["addr05", "addr15", "addr16"]},
    ]
    assert_report_item_list_equal(
        config_validators.create("test-cluster", nodes, "knet"),
        [
            fixture.error(
                report_codes.COROSYNC_NODE_ADDRESS_COUNT_MISMATCH,
                node_addr_count={
                    "node1": 2,
                    "node2": 1,
                    "node3": 2,
                    "node4": 1,
                    "node5": 3,
                },
            )
        ],
    )
def test_network_request_failure(self):
    """An HTTP error while fetching the booth config is reported."""
    self.config.http.booth.get_config(
        self.name,
        communication_list=[
            dict(
                label=self.node_name,
                response_code=400,
                output=self.reason,
            )
        ],
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.pull_config(
            self.env_assist.get_env(), self.node_name
        ),
        [],
    )
    self.env_assist.assert_reports([
        self.report_list[0],
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            force_code=None,
            node=self.node_name,
            command="remote/booth_get_config",
            reason=self.reason,
        ),
    ])
def test_unable_to_set_authfile_mode(self, pwd_mock, grp_mock):
    """A chmod failure on the booth authfile is a fatal IO error after write."""
    self._set_pwd_mock(pwd_mock)
    self._set_grp_mock(grp_mock)
    (self.config
        .fs.open(
            self.authfile_path,
            self.booth_authfile_open_mock,
            mode="wb",
            name="fs.open.authfile.write",
        )
        .fs.chown(self.authfile_path, self.pcmk_uid, self.pcmk_gid)
        .fs.chmod(
            self.authfile_path,
            settings.booth_authkey_file_mode,
            side_effect=EnvironmentError(1, self.reason, self.authfile_path),
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.pull_config(
            self.env_assist.get_env(), self.node_name
        ),
        [
            fixture.error(
                report_codes.FILE_IO_ERROR,
                reason="{}: '{}'".format(self.reason, self.authfile_path),
                file_role=file_roles.BOOTH_KEY,
                file_path=self.authfile_path,
                operation="chmod",
            )
        ],
        expected_in_processor=False,
    )
    # the file content was written before chmod failed
    self.assert_authfile_written()
    self.env_assist.assert_reports(self.report_list[:1])
def test_need_stopped_cluster_not_stopped_skip_offline(self):
    """A node known to run corosync still blocks the push.

    If we know for sure that corosync is running, skip_offline doesn't
    matter.
    """
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            dict(label="node-1", output='{"corosync":true}'),
            dict(label="node-2"),
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(
            self.corosync_conf_facade, skip_offline_nodes=True
        ),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_node_addrs_not_unique(self):
    """Addresses duplicated across the new nodes are reported once each."""
    new_nodes = [
        {"name": "node3", "addrs": ["addr03", "10.0.0.3", "::ffff:10:0:0:3"]},
        {"name": "node4", "addrs": ["addr04", "10.0.0.4", "::ffff:10:0:0:4"]},
        {"name": "node5", "addrs": ["addr04", "10.0.0.3", "::ffff:10:0:0:6"]},
        {"name": "node6", "addrs": ["addr06", "10.0.0.3", "::ffff:10:0:0:6"]},
    ]
    existing_nodes = [
        CNode(
            "node1",
            [
                CAddr("addr01", 1),
                CAddr("10.0.0.1", 2),
                CAddr("::ffff:10:0:0:1", 3),
            ],
            1,
        ),
        CNode(
            "node2",
            [
                CAddr("addr02", 1),
                CAddr("10.0.0.2", 2),
                CAddr("::ffff:10:0:0:2", 3),
            ],
            2,
        ),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(new_nodes, existing_nodes, []),
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_DUPLICATION,
                address_list=["10.0.0.3", "::ffff:10:0:0:6", "addr04"],
            )
        ],
    )
def test_element_exists_in_another_context(self):
    """Searching in the wrong parent finds nothing and reports the context."""
    tree = etree.fromstring(""" <cib> <resources> <group id="g1"><primitive id="a"/></group> <group id="g2"><primitive id="b"/></group> </resources> </cib> """)
    # search for "a" (which lives in g1) inside g2
    searcher = lib.ElementSearcher(
        "primitive",
        "a",
        tree.find('.//resources/group[@id="g2"]'),
    )
    self.assertFalse(searcher.element_found())
    self.assertIsNone(searcher.get_element())
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT,
                id="a",
                type="primitive",
                expected_context_type="group",
                expected_context_id="g2",
            ),
        ],
    )