def test_invalid_and_unsupported_options_forced(self):
    """Forcing unknown options downgrades them to warnings; unsupported ones stay errors."""
    config = {
        "SBD_DELAY_START": "yes",
        "SBD_WATCHDOG_TIMEOUT": "5",
        "SBD_STARTMODE": "clean",
        "SBD_WATCHDOG_DEV": "/dev/watchdog",
        "SBD_UNKNOWN": "",
        "SBD_OPTS": " ",
        "SBD_PACEMAKER": "false",
    }
    expected_reports = [
        fixture.error(
            report_codes.INVALID_OPTIONS,
            option_names=sorted(
                ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"]
            ),
            option_type=None,
            allowed=self.allowed_sbd_options,
            allowed_patterns=[],
        ),
        fixture.warn(
            report_codes.INVALID_OPTIONS,
            option_names=["SBD_UNKNOWN"],
            option_type=None,
            allowed=self.allowed_sbd_options,
            allowed_patterns=[],
        ),
    ]
    assert_report_item_list_equal(
        cmd_sbd._validate_sbd_options(config, allow_unknown_opts=True),
        expected_reports,
    )
def test_nodename_not_unique(self):
    """Duplicate node names are reported once; empty names are not counted as duplicates."""
    new_nodes = [
        {"name": "node3", "addrs": ["addr03"]},
        {"name": "node3", "addrs": ["addr04"]},
        # invalid nodes are not reported as duplicate
        {"name": "", "addrs": ["addr05"]},
        {"name": "", "addrs": ["addr06"]},
    ]
    expected_reports = [
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_value="",
            option_name="node 3 name",
            allowed_values="a non-empty string",
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_value="",
            option_name="node 4 name",
            allowed_values="a non-empty string",
        ),
        fixture.error(
            report_codes.NODE_NAMES_DUPLICATION,
            name_list=["node3"],
        ),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes, self.fixture_coronodes_1_link, []
        ),
        expected_reports,
    )
def test_success_used_forced(self, mock_net_stop, mock_status):
    """Stopping a qdevice that is in use succeeds when proceed_if_used is set."""
    mock_status.return_value = 'Cluster "a_cluster":\n'
    lib.qdevice_stop(self.lib_env, "net", proceed_if_used=True)
    mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
    expected_reports = [
        (
            severity.WARNING,
            report_codes.QDEVICE_USED_BY_CLUSTERS,
            {"clusters": ["a_cluster"]},
        ),
        (
            severity.INFO,
            report_codes.SERVICE_STOP_STARTED,
            {"service": "quorum device"},
        ),
        (
            severity.INFO,
            report_codes.SERVICE_STOP_SUCCESS,
            {"service": "quorum device"},
        ),
    ]
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list, expected_reports
    )
def test_booked_ids(self):
    """A booked id is skipped when allocating a new id from the same base."""
    self.fixture_add_primitive_with_id("myId")
    assert_report_item_list_equal(self.provider.book_ids("myId-1"), [])
    self.assertEqual("myId-2", self.provider.allocate_id("myId"))
def test_returns_no_report_when_second_is_present(self):
    """No report is produced when one of the required options is present."""
    validator = validate.is_required_some_of(["first", "second"], "type")
    assert_report_item_list_equal(validator({"second": "value"}), [])
def test_failed(self, mock_net_stop, mock_status):
    """A service-stop failure is raised as a library error after the start report."""
    mock_status.return_value = ""
    mock_net_stop.side_effect = StopServiceError(
        "test service", "test error"
    )
    assert_raise_library_error(
        lambda: lib.qdevice_stop(self.lib_env, "net"),
        (
            severity.ERROR,
            report_codes.SERVICE_STOP_ERROR,
            {"service": "test service", "reason": "test error"},
        ),
    )
    mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
    # Only the "stop started" info made it out before the failure.
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                report_codes.SERVICE_STOP_STARTED,
                {"service": "quorum device"},
            )
        ],
    )
def test_failed(self):
    """A nonzero sbd exit code raises a device initialization error."""
    device_list = ["/dev/sdb", "/dev/vda"]
    option_dict = {
        "watchdog-timeout": "10",  # passed to sbd as -1
        "loop-timeout": "1",  # passed to sbd as -3
    }
    error_msg = "error"
    self.mock_runner.run.return_value = "", error_msg, 1
    assert_raise_library_error(
        lambda: lib_sbd.initialize_block_devices(
            self.mock_rep, self.mock_runner, device_list, option_dict
        ),
        (
            Severities.ERROR,
            report_codes.SBD_DEVICE_INITIALIZATION_ERROR,
            {"device_list": device_list, "reason": error_msg},
        ),
    )
    expected_cmd = [
        settings.sbd_binary,
        "-d", "/dev/sdb",
        "-d", "/dev/vda",
        "-3", "1",
        "-1", "10",
        "create",
    ]
    self.mock_runner.run.assert_called_once_with(expected_cmd)
    assert_report_item_list_equal(
        self.mock_rep.report_item_list,
        [
            (
                Severities.INFO,
                report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
                {"device_list": device_list},
            )
        ],
    )
def test_no_wrap(self):
    """With wrap=False the validators report empty values as errors."""
    validators = validate.wrap_with_empty_or_valid(
        self.validators, wrap=False
    )
    validators.append(validate.value_port_number("c"))
    expected_reports = [
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="a",
            option_value="",
            allowed_values=["x", "y", "z"],
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="b",
            option_value="",
            allowed_values="0..9",
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="c",
            option_value="",
            allowed_values="a port number (1-65535)",
        ),
    ]
    assert_report_item_list_equal(
        validate.run_collection_of_option_validators(
            {"a": "", "b": "", "c": ""}, validators
        ),
        expected_reports,
    )
def test_no_resources_specified(self):
    """An empty resource list is rejected with a dedicated report."""
    assert_report_item_list_equal(
        self._validate("G", []),
        [fixture.error(report_codes.CANNOT_GROUP_RESOURCE_NO_RESOURCES)],
    )
def test_node_addrs_count_mismatch(self):
    """Nodes must provide exactly as many addresses as there are links."""
    new_nodes = [
        {"name": "node3", "addrs": ["addr03"]},
        {"name": "node4", "addrs": ["addr04", "addr14"]},
        {"name": "node5", "addrs": ["addr05", "addr15", "addr16"]},
    ]
    expected_reports = [
        fixture.error(
            report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
            actual_count=1,
            min_count=2,
            max_count=2,
            node_name="node3",
            node_index=1,
        ),
        fixture.error(
            report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
            actual_count=3,
            min_count=2,
            max_count=2,
            node_name="node5",
            node_index=3,
        ),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes, self.fixture_coronodes_2_links, []
        ),
        expected_reports,
    )
def test_node_addrs_unresolvable_forced(self):
    """Forcing makes unresolvable addresses a warning; other errors remain."""
    new_nodes = [
        # Duplicated addresses reported only once but they trigger
        # a duplicate addresses report.
        {"name": "node3", "addrs": ["addr03", "addrX2"]},
        {"name": "node4", "addrs": ["addrX2", "addr14"]},
        # Extra address reported as well, it triggers its own report
        # about being an extra address.
        {"name": "node5", "addrs": ["addr05", "addrX1", "addrX3"]},
    ]
    expected_reports = [
        fixture.error(
            report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
            actual_count=3,
            min_count=2,
            max_count=2,
            node_name="node5",
            node_index=3,
        ),
        fixture.warn(
            report_codes.NODE_ADDRESSES_UNRESOLVABLE,
            address_list=["addrX1", "addrX2", "addrX3"],
        ),
        fixture.error(
            report_codes.NODE_ADDRESSES_DUPLICATION,
            address_list=["addrX2"],
        ),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes,
            self.fixture_coronodes_2_links,
            [],
            force_unresolvable=True,
        ),
        expected_reports,
    )
def test_add_last_allowed_link(self):
    """Adding a link that reaches the knet maximum produces no reports."""
    existing_link_list = [
        str(x) for x in range(constants.LINKS_KNET_MAX - 1)
    ]
    coro_nodes = [
        node.CorosyncNode(
            f"node{i}",
            [
                node.CorosyncNodeAddress(f"addr{i}-{j}", f"{j}")
                for j in existing_link_list
            ],
            i,
        )
        for i in [1, 2, 3]
    ]
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs,
            {},
            coro_nodes,
            self.pcmk_nodes,
            existing_link_list,
            self.transport,
            constants.IP_VERSION_64,
        ),
        [],
    )
def test_success(self):
    """Successful initialization reports both started and success."""
    device_list = ["/dev/sdb", "/dev/vda"]
    option_dict = {
        "watchdog-timeout": "10",  # passed to sbd as -1
        "loop-timeout": "1",  # passed to sbd as -3
    }
    self.mock_runner.run.return_value = "", "", 0
    lib_sbd.initialize_block_devices(
        self.mock_rep, self.mock_runner, device_list, option_dict
    )
    expected_cmd = [
        settings.sbd_binary,
        "-d", "/dev/sdb",
        "-d", "/dev/vda",
        "-3", "1",
        "-1", "10",
        "create",
    ]
    self.mock_runner.run.assert_called_once_with(expected_cmd)
    assert_report_item_list_equal(
        self.mock_rep.report_item_list,
        [
            (
                Severities.INFO,
                report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
                {"device_list": device_list},
            ),
            (
                Severities.INFO,
                report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
                {"device_list": device_list},
            ),
        ],
    )
def test_used_addrs(self):
    """Addresses already used by corosync or pacemaker nodes are rejected."""
    pcmk_nodes = [PacemakerNode("node-remote", "addr-remote")]
    already_existing_addrs = [
        pcmk_nodes[0].addr,
        self.coro_nodes[0].addrs_plain[0],
    ]
    self.new_addrs["node2"], self.new_addrs["node3"] = (
        already_existing_addrs
    )
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs,
            {},
            self.coro_nodes,
            pcmk_nodes,
            self.existing_link_list,
            self.transport,
            constants.IP_VERSION_64,
        ),
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_ALREADY_EXIST,
                address_list=already_existing_addrs,
            ),
        ],
    )
def test_missing_node_addrs(self):
    """Each node without an address gets a bad-addresses-count report."""
    broken_nodes = sorted(self.new_addrs.keys())[1:2]
    for node_name in broken_nodes:
        del self.new_addrs[node_name]
    pcmk_nodes = [PacemakerNode("node-remote", "addr-remote")]
    expected_reports = [
        fixture.error(
            report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
            actual_count=0,
            min_count=1,
            max_count=1,
            node_name=node_name,
            node_index=None,
        )
        for node_name in broken_nodes
    ]
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs,
            {},
            self.coro_nodes,
            pcmk_nodes,
            self.existing_link_list,
            self.transport,
            constants.IP_VERSION_64,
        ),
        expected_reports,
    )
def test_element_exists_in_another_context(self):
    """Searching for an id under a different parent yields a context error."""
    tree = etree.fromstring("""
        <cib>
            <resources>
                <group id="g1"><primitive id="a"/></group>
                <group id="g2"><primitive id="b"/></group>
            </resources>
        </cib>
    """)
    searcher = lib.ElementSearcher(
        "primitive",
        "a",
        tree.find('.//resources/group[@id="g2"]'),
    )
    self.assertFalse(searcher.element_found())
    self.assertIsNone(searcher.get_element())
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.OBJECT_WITH_ID_IN_UNEXPECTED_CONTEXT,
                id="a",
                type="primitive",
                expected_context_type="group",
                expected_context_id="g2",
            ),
        ],
    )
def test_return_error_on_not_allowed_and_banned_names_forced(self):
    """Forcing downgrades unknown names to warnings but not banned ones."""
    code = "force_code"
    report_list = validate.names_in(
        ["a", "b"],
        ["x", "a", "z", "c", "d"],
        banned_name_list=["x", "y", "z"],
        code_to_allow_extra_names=code,
        extra_names_allowed=True,
    )
    assert_report_item_list_equal(
        report_list,
        [
            fixture.warn(
                report_codes.INVALID_OPTIONS,
                option_names=["c", "d"],
                allowed=["a", "b"],
                option_type="option",
                allowed_patterns=[],
            ),
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["x", "z"],
                allowed=["a", "b"],
                option_type="option",
                allowed_patterns=[],
            ),
        ],
    )
def test_success_not_used(self, mock_net_stop, mock_status):
    """An unused qdevice stops cleanly without the force flag."""
    mock_status.return_value = ""
    lib.qdevice_stop(self.lib_env, "net", proceed_if_used=False)
    mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                report_codes.SERVICE_STOP_STARTED,
                {"service": "quorum device"},
            ),
            (
                severity.INFO,
                report_codes.SERVICE_STOP_SUCCESS,
                {"service": "quorum device"},
            ),
        ],
    )
def test_enable_failed(
    self, mock_net_setup, mock_net_enable, mock_net_start
):
    """An enable failure aborts setup after initialization succeeded."""
    mock_net_enable.side_effect = EnableServiceError(
        "test service", "test error"
    )
    assert_raise_library_error(
        lambda: lib.qdevice_setup(self.lib_env, "net", True, True),
        (
            severity.ERROR,
            report_codes.SERVICE_ENABLE_ERROR,
            {"service": "test service", "reason": "test error"},
        ),
    )
    mock_net_setup.assert_called_once_with("mock_runner")
    mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
    mock_net_start.assert_not_called()
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                report_codes.QDEVICE_INITIALIZATION_SUCCESS,
                {"model": "net"},
            )
        ],
    )
def test_remove_more_than_defined(self):
    """Removing nonexistent links counts only defined links in the limit."""
    expected_reports = [
        fixture.error(
            report_codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS,
            # We try to remove more links than defined yet only defined
            # links are counted here - nonexistent links cannot be
            # defined so they are not included in the count
            links_change_count=len(self.existing),
            # the point of the test is to not get a negative number here
            links_new_count=0,
            links_limit_count=1,
            add_or_not_remove=False,
        ),
        fixture.error(
            report_codes.COROSYNC_LINK_DOES_NOT_EXIST_CANNOT_REMOVE,
            link_list=["2"],
            existing_link_list=["0", "1", "3", "10", "11"],
        ),
    ]
    assert_report_item_list_equal(
        config_validators.remove_links(
            self.existing + ["2"], self.existing, "knet"
        ),
        expected_reports,
    )
def test_too_many_links(self):
    """Adding a link past the knet maximum is reported as an error."""
    existing_link_list = [str(x) for x in range(constants.LINKS_KNET_MAX)]
    coro_nodes = [
        node.CorosyncNode(
            f"node{i}",
            [
                node.CorosyncNodeAddress(f"addr{i}-{j}", f"{j}")
                for j in existing_link_list
            ],
            i,
        )
        for i in [1, 2, 3]
    ]
    assert_report_item_list_equal(
        config_validators.add_link(
            self.new_addrs,
            {},
            coro_nodes,
            self.pcmk_nodes,
            existing_link_list,
            self.transport,
            constants.IP_VERSION_64,
        ),
        [
            fixture.error(
                report_codes.COROSYNC_CANNOT_ADD_REMOVE_LINKS_TOO_MANY_FEW_LINKS,
                links_change_count=1,
                links_new_count=(constants.LINKS_KNET_MAX + 1),
                links_limit_count=constants.LINKS_KNET_MAX,
                add_or_not_remove=True,
            )
        ],
    )
def test_failures(self):
    """Verify reports missing stonith resources and unknown nodes."""
    resources = etree.fromstring("<resources />")
    reporter = MockLibraryReportProcessor()
    lib.verify(reporter, self.tree, resources, [])
    expected_reports = [
        (
            severity.ERROR,
            report_codes.STONITH_RESOURCES_DO_NOT_EXIST,
            {
                "stonith_ids": [
                    "d1", "d2", "d3", "d4", "d5", "dR", "dR-special"
                ],
            },
            None,
        ),
        (
            severity.ERROR,
            report_codes.NODE_NOT_FOUND,
            {"node": "nodeA"},
            None,
        ),
        (
            severity.ERROR,
            report_codes.NODE_NOT_FOUND,
            {"node": "nodeB"},
            None,
        ),
    ]
    assert_report_item_list_equal(
        reporter.report_item_list, expected_reports
    )
def test_empty_node(self):
    """An empty node dict is reported as missing both name and addresses."""
    new_nodes = [
        {"name": "node3", "addrs": ["addr03"]},
        {},
        {"name": "node4", "addrs": ["addr04"]},
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes, self.fixture_coronodes_1_link, []
        ),
        [
            fixture.error(
                report_codes.REQUIRED_OPTION_IS_MISSING,
                option_names=["name"],
                option_type="node 2",
            ),
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=0,
                min_count=1,
                max_count=1,
                node_name=None,
                node_index=2,
            ),
        ],
    )
def test_existing_id(self):
    """Booking an id already present in the cib produces a report."""
    self.fixture_add_primitive_with_id("myId")
    assert_report_item_list_equal(
        self.provider.book_ids("myId"),
        [self.fixture_report("myId")],
    )
def test_return_empty_report_on_allowed_names(self):
    """No reports when only allowed names are present."""
    report_list = validate.names_in(["a", "b", "c"], ["a", "b"])
    assert_report_item_list_equal(report_list, [])
def test_success_when_dependency_present(self):
    """No report when the prerequisite option accompanies the option."""
    validator = validate.depends_on_option("name", "prerequisite", "type")
    assert_report_item_list_equal(
        validator({"name": "value", "prerequisite": "value"}),
        [],
    )
def test_success(self):
    """Levels 1-9 are accepted as ints or (possibly zero-padded) strings."""
    reporter = MockLibraryReportProcessor()
    for level in (1, "1", 9, "9", "05"):
        lib._validate_level(reporter, level)
    assert_report_item_list_equal(reporter.report_item_list, [])
def test_pair_success(self):
    """A ValuePair holding a valid, unused id passes validation."""
    id_provider = IdProvider(
        etree.fromstring("<a><test id='used' /></a>")
    )
    validator = validate.value_id("id", "test id", id_provider)
    assert_report_item_list_equal(
        validator({"id": validate.ValuePair("correct", "correct")}),
        [],
    )
def test_missing_resources_specified(self):
    """Each resource missing from the cib gets its own not-found report."""
    assert_report_item_list_equal(
        self._validate("G", ["RX1", "RX2"]),
        [
            fixture.report_not_found("RX1", context_type="resources"),
            fixture.report_not_found("RX2", context_type="resources"),
        ],
    )
def test_success_on_valid_options(self):
    """A known ticket option produces no reports."""
    report_processor = MockLibraryReportProcessor()
    config_structure.validate_ticket_options(
        report_processor,
        {"timeout": "10"},
        allow_unknown_options=False,
    )
    assert_report_item_list_equal(
        report_processor.report_item_list, []
    )
def test_resources_are_in_clones_etc(self):
    """Resources wrapped in clones or bundles cannot be grouped."""
    expected_reports = [
        fixture.error(
            reports.codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
            resource_id="RC1",
            resource_type="primitive",
            parent_id="RC1-clone",
            parent_type="clone",
        ),
        fixture.error(
            reports.codes.CANNOT_GROUP_RESOURCE_WRONG_TYPE,
            resource_id="RB1",
            resource_type="primitive",
            parent_id="RB1-bundle",
            parent_type="bundle",
        ),
    ]
    assert_report_item_list_equal(
        self._validate("G", ["RC1", "R1", "RB1"]),
        expected_reports,
    )
def test_adjacent_resource_new_group(self):
    """An adjacent resource cannot be in a brand new (empty) group."""
    empty_group_element = etree.fromstring('<group id="G-new" />')
    assert_report_item_list_equal(
        validations.validate_move_resources_to_group(
            empty_group_element,
            _resources(self.cib, "RG1"),
            _resource(self.cib, "RG2"),
        ),
        [
            fixture.error(
                reports.codes.ADD_REMOVE_ADJACENT_ITEM_NOT_IN_THE_CONTAINER,
                container_type=(
                    reports.const.ADD_REMOVE_CONTAINER_TYPE_GROUP
                ),
                item_type=reports.const.ADD_REMOVE_ITEM_TYPE_RESOURCE,
                container_id="G-new",
                adjacent_item_id="RG2",
            ),
        ],
    )
def test_unknown_options_forced(self):
    """Unknown options become warnings when explicitly allowed."""
    config = {
        "SBD_DELAY_START": "yes",
        "SBD_WATCHDOG_TIMEOUT": "5",
        "SBD_STARTMODE": "clean",
        "SBD_UNKNOWN": "",
        "another_unknown_option": "some value",
    }
    # just make sure there is no exception raised
    assert_report_item_list_equal(
        cmd_sbd._validate_sbd_options(config, allow_unknown_opts=True),
        [
            fixture.warn(
                report_codes.INVALID_OPTIONS,
                option_names=sorted(
                    ["SBD_UNKNOWN", "another_unknown_option"]
                ),
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
            ),
        ],
    )
def test_unsupported_options(self):
    """Unsupported SBD options are reported as errors."""
    config = {
        "SBD_DELAY_START": "yes",
        "SBD_WATCHDOG_TIMEOUT": "5",
        "SBD_STARTMODE": "clean",
        "SBD_WATCHDOG_DEV": "/dev/watchdog",
        "SBD_OPTS": " ",
        "SBD_DEVICE": "/dev/vda",
    }
    assert_report_item_list_equal(
        cmd_sbd._validate_sbd_options(config),
        [
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=sorted(
                    ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_DEVICE"]
                ),
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
            ),
        ],
    )
def test_no_such_level_ignore_missing(self):
    """Removing a nonexistent level is a silent no-op with ignore_missing."""
    report_list = lib.remove_levels_by_params(
        self.tree, 9, TARGET_TYPE_NODE, "nodeB", ["d3"], True
    )
    assert_report_item_list_equal(report_list, [])
    # Nothing was removed from the topology.
    self.assertEqual(
        self.get_remaining_ids(),
        [
            "fl1", "fl2", "fl3", "fl4", "fl5",
            "fl6", "fl7", "fl8", "fl9", "fl10",
        ],
    )
def test_error(self, mock_find):
    """An existing matching level yields an already-exists error."""
    mock_find.return_value = ["element"]
    report_list = lib._validate_level_target_devices_does_not_exist(
        "tree", "level", "target_type", "target_value", ["devices"]
    )
    mock_find.assert_called_once_with(
        "tree", "level", "target_type", "target_value", ["devices"]
    )
    expected_reports = [
        (
            severity.ERROR,
            report_codes.CIB_FENCING_LEVEL_ALREADY_EXISTS,
            {
                "devices": ["devices"],
                "target_type": "target_type",
                "target_value": "target_value",
                "level": "level",
            },
            None,
        )
    ]
    assert_report_item_list_equal(report_list, expected_reports)
def assert_called_invalid(
    self,
    mock_val_level,
    mock_val_target,
    mock_val_devices,
    mock_val_dupl,
    mock_append,
    dupl_called=True,
    report_list=None,
):
    """Run add_level expecting a LibraryError and verify validator calls.

    Checks the reporter holds exactly report_list (default: empty) and
    that nothing was appended to the topology.
    """
    if report_list is None:
        report_list = []
    with self.assertRaises(LibraryError):
        lib.add_level(
            self.reporter,
            self.topology_el,
            self.resources_el,
            self.level,
            self.target_type,
            self.target_value,
            self.devices,
            self.cluster_status_nodes,
            self.force_device,
            self.force_node,
        )
    assert_report_item_list_equal(
        self.reporter.report_item_list, report_list
    )
    self.assert_validators_called(
        mock_val_level,
        mock_val_target,
        mock_val_devices,
        mock_val_dupl,
        dupl_called,
    )
    mock_append.assert_not_called()
def test_report_invalid_interval(self):
    """A non-interval value for remote-connect-timeout is rejected."""
    assert_report_item_list_equal(
        self.validate({"remote-connect-timeout": "invalid"}, "node1"),
        [
            (
                severities.ERROR,
                report_codes.INVALID_OPTION_VALUE,
                {
                    "option_name": "remote-connect-timeout",
                    "option_value": "invalid",
                    "allowed_values": (
                        "time interval (e.g. 1, 2s, 3m, 4h, ...)"
                    ),
                    "cannot_be_empty": False,
                    "forbidden_characters": None,
                },
                None,
            ),
        ],
    )
def test_stonith_action_deprecated_forced(self):
    """The deprecated stonith 'action' option becomes a warning when forced."""
    assert_report_item_list_equal(
        primitive.validate_resource_instance_attributes_create(
            _fixture_stonith(),
            {"action": "reboot", "required": "value"},
            etree.Element("resources"),
            force=True,
        ),
        [
            fixture.warn(
                reports.codes.DEPRECATED_OPTION,
                option_type="stonith",
                option_name="action",
                replaced_by=["pcmk_off_action", "pcmk_reboot_action"],
            ),
        ],
    )
def test_node_addrs_ip_version_mismatch(self):
    """Mixing IPv4 and IPv6 addresses on one link is reported."""
    new_nodes = [
        {"name": "node3", "addrs": ["::ffff:10:0:0:3"]},
        {"name": "node4", "addrs": ["10.0.0.14"]},
    ]
    coro_nodes = [
        CNode("node1", [CAddr("addr01", 1)], 1),
        CNode("node2", [CAddr("addr02", 1)], 2),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(new_nodes, coro_nodes, []),
        [
            fixture.error(
                report_codes.COROSYNC_IP_VERSION_MISMATCH_IN_LINKS,
                link_numbers=[1],
            )
        ],
    )
def test_success(self, mock_net_disable):
    """Disabling the qdevice reports a successful service action."""
    lib.qdevice_disable(self.lib_env, "net")
    mock_net_disable.assert_called_once_with(
        "mock_runner", "corosync-qnetd"
    )
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                reports.codes.SERVICE_ACTION_SUCCEEDED,
                {
                    "action": reports.const.SERVICE_ACTION_DISABLE,
                    "service": "quorum device",
                    "node": "",
                    "instance": "",
                },
            )
        ],
    )
def test_return_error_on_not_allowed_names_without_force_code(self):
    """With produce_warning and no force code, a plain warning is emitted."""
    validator = validate.NamesIn(
        ["a", "b", "c"],
        # no force code supplied; produce_warning alone downgrades the
        # report to a warning
        produce_warning=True,
    )
    assert_report_item_list_equal(
        validator.validate({"x": "X", "y": "Y"}),
        [
            fixture.warn(
                report_codes.INVALID_OPTIONS,
                option_names=["x", "y"],
                allowed=["a", "b", "c"],
                option_type=None,
                allowed_patterns=[],
            )
        ],
    )
def test_book_not_valid_id(self):
    """Booking an id that starts with a digit fails id validation."""
    tree = etree.fromstring(
        '<cib><resources><group id="b"/></resources></cib>'
    )
    id_provider = lib.IdProvider(tree)
    searcher = lib.ElementSearcher(
        "group", "1a", tree.find(".//resources")
    )
    self.assertFalse(searcher.element_found())
    self.assertFalse(searcher.validate_book_id(id_provider, "group name"))
    assert_report_item_list_equal(
        searcher.get_errors(),
        [
            fixture.error(
                report_codes.INVALID_ID_BAD_CHAR,
                id="1a",
                id_description="group name",
                is_first_char=True,
                invalid_character="1",
            ),
        ],
    )
def test_success_full(self):
    """A fully specified nvset with a rule validates and parses cleanly."""
    validator = nvpair_multi.ValidateNvsetAppendNew(
        self.id_provider,
        {"name": "value"},
        {"id": "some-id", "score": "10"},
        nvset_rule="resource ::stateful",
        rule_allows_rsc_expr=True,
        rule_allows_op_expr=True,
        rule_allows_node_attr_expr=True,
    )
    assert_report_item_list_equal(validator.validate(), [])
    self.assertEqual(
        repr(validator.get_parsed_rule()),
        "BoolExpr(operator='AND', children=["
        "RscExpr(standard=None, provider=None, type='stateful')"
        "])",
    )
def test_node_addr_empty(self):
    """Nodes with any empty address are collected into a single report."""
    new_nodes = [
        {"name": "node3", "addrs": ["", "addr13"]},
        {"name": "node4", "addrs": ["addr04", "addr14"]},
        {"name": "node5", "addrs": ["addr05", ""]},
        {"name": "node6", "addrs": ["", ""]},
        {"name": None, "addrs": ["", ""]},
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes,
            self.fixture_coronodes_2_links,
            [],
        ),
        [
            fixture.error(
                report_codes.NODE_ADDRESSES_CANNOT_BE_EMPTY,
                node_name_list=["node3", "node5", "node6"],
            ),
        ],
    )
def test_used_not_forced(self, mock_net_stop, mock_status):
    """Stopping a qdevice in use fails without the force flag."""
    mock_status.return_value = 'Cluster "a_cluster":\n'
    assert_raise_library_error(
        lambda: lib.qdevice_stop(
            self.lib_env, "net", proceed_if_used=False
        )
    )
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.ERROR,
                report_codes.QDEVICE_USED_BY_CLUSTERS,
                {"clusters": ["a_cluster"]},
                report_codes.FORCE_QDEVICE_USED,
            ),
        ],
    )
    mock_net_stop.assert_not_called()
def test_remove_ids_errors(self):
    """Duplicated ids and ids not in the tag are both reported."""
    assert_report_item_list_equal(
        self._validate(
            "tag",
            [],
            ["e1", "e1", "e2", "e2", "none", "none", "none1", "none1"],
        ),
        [
            fixture.error(
                reports.codes.TAG_ADD_REMOVE_IDS_DUPLICATION,
                duplicate_ids_list=["e1", "e2", "none", "none1"],
                add_or_not_remove=False,
            ),
            fixture.error(
                reports.codes.TAG_IDS_NOT_IN_THE_TAG,
                tag_id="tag",
                id_list=["none", "none1"],
            ),
        ],
    )
def test_new_group_not_valid_id(self):
    """Each invalid character in the group id is reported separately."""
    expected_reports = [
        fixture.error(
            report_codes.INVALID_ID_BAD_CHAR,
            id="1Gr:oup",
            id_description="group name",
            is_first_char=True,
            invalid_character="1",
        ),
        fixture.error(
            report_codes.INVALID_ID_BAD_CHAR,
            id="1Gr:oup",
            id_description="group name",
            is_first_char=False,
            invalid_character=":",
        ),
    ]
    assert_report_item_list_equal(
        self._validate("1Gr:oup", ["R1"]),
        expected_reports,
    )
def test_raises_when_duplicate_element_found(self):
    """A duplicate constraint raises and reports the duplicate list."""
    element = mock.MagicMock()
    element.tag = "constraint_type"
    report_processor = MockLibraryReportProcessor()
    assert_raise_library_error(
        lambda: constraint.check_is_without_duplication(
            report_processor,
            fixture_constraint_section(
                [etree.Element("tag", {"id": "duplicate_element"})]
            ),
            element,
            are_duplicate=lambda e1, e2: True,
            export_element=constraint.export_with_set,
        )
    )
    assert_report_item_list_equal(
        report_processor.report_item_list,
        [
            (
                severities.INFO,
                report_codes.DUPLICATE_CONSTRAINTS_LIST,
                {
                    "constraint_info_list": [
                        {
                            "resource_sets": [],
                            "options": {"id": "duplicate_element"},
                        }
                    ],
                    "constraint_type": "constraint_type",
                },
            ),
            (
                severities.ERROR,
                report_codes.DUPLICATE_CONSTRAINTS_EXIST,
                {"constraint_ids": ["duplicate_element"]},
                report_codes.FORCE,
            ),
        ],
    )
def test_success(self):
    """Successful initialization reports started followed by success."""
    device_list = ["/dev/sdb", "/dev/vda"]
    option_dict = {
        "watchdog-timeout": "10",  # passed to sbd as -1
        "loop-timeout": "1",  # passed to sbd as -3
    }
    self.mock_runner.run.return_value = "", "", 0
    lib_sbd.initialize_block_devices(
        self.mock_rep, self.mock_runner, device_list, option_dict
    )
    expected_cmd = [
        settings.sbd_binary,
        "-d", "/dev/sdb",
        "-d", "/dev/vda",
        "-3", "1",
        "-1", "10",
        "create",
    ]
    self.mock_runner.run.assert_called_once_with(expected_cmd)
    assert_report_item_list_equal(
        self.mock_rep.report_item_list,
        [
            (
                Severities.INFO,
                report_codes.SBD_DEVICE_INITIALIZATION_STARTED,
                {"device_list": device_list},
            ),
            (
                Severities.INFO,
                report_codes.SBD_DEVICE_INITIALIZATION_SUCCESS,
                {"device_list": device_list},
            ),
        ],
    )
def test_return_report_on_duplicated_intervals(self):
    """Operations whose intervals normalize to the same value are grouped."""
    operation_list = [
        {"name": "monitor", "interval": "3600s"},
        {"name": "monitor", "interval": "60m"},
        {"name": "monitor", "interval": "1h"},
        {"name": "monitor", "interval": "60s"},
        {"name": "monitor", "interval": "1m"},
        {"name": "monitor", "interval": "5s"},
    ]
    assert_report_item_list_equal(
        operations.validate_different_intervals(operation_list),
        [
            (
                severities.ERROR,
                report_codes.RESOURCE_OPERATION_INTERVAL_DUPLICATION,
                {
                    "duplications": {
                        "monitor": [
                            ["3600s", "60m", "1h"],
                            ["60s", "1m"],
                        ],
                    },
                },
            )
        ],
    )
def test_collect_all_errors_from_specifications(self):
    """ValidatorAll aggregates reports from every inner validator."""
    validator = validate.ValidatorAll(
        [
            validate.NamesIn(["x", "y"]),
            validate.MutuallyExclusive(["x", "y"]),
            validate.ValuePositiveInteger("x"),
            validate.ValueIn("y", ["a", "b"]),
        ]
    )
    expected_reports = [
        fixture.error(
            report_codes.INVALID_OPTIONS,
            option_names=["z"],
            option_type=None,
            allowed=["x", "y"],
            allowed_patterns=[],
        ),
        fixture.error(
            report_codes.MUTUALLY_EXCLUSIVE_OPTIONS,
            option_names=["x", "y"],
            option_type=None,
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_value="abcd",
            option_name="x",
            allowed_values="a positive integer",
            cannot_be_empty=False,
            forbidden_characters=None,
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_value="defg",
            option_name="y",
            allowed_values=["a", "b"],
            cannot_be_empty=False,
            forbidden_characters=None,
        ),
    ]
    assert_report_item_list_equal(
        validator.validate({"x": "abcd", "y": "defg", "z": "hijk"}),
        expected_reports,
    )
def test_report_when_is_guest(self):
    """A primitive with a remote-node meta attribute is reported as a guest."""
    primitive_xml = """
        <primitive id="resource_id">
            <meta_attributes>
                <nvpair name="remote-node" value="node1" />
            </meta_attributes>
        </primitive>
    """
    assert_report_item_list_equal(
        guest_node.validate_is_not_guest(etree.fromstring(primitive_xml)),
        [
            (
                severities.ERROR,
                report_codes.RESOURCE_IS_GUEST_NODE_ALREADY,
                {"resource_id": "resource_id"},
                None,
            ),
        ],
    )
def test_stop_failed(
    self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
):
    """A stop failure aborts destroy before disable and destroy are run."""
    mock_status.return_value = ""
    mock_net_stop.side_effect = StopServiceError(
        "test service", "test error"
    )
    assert_raise_library_error(
        lambda: lib.qdevice_destroy(self.lib_env, "net"),
        (
            severity.ERROR,
            report_codes.SERVICE_STOP_ERROR,
            {"service": "test service", "reason": "test error"},
        ),
    )
    mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
    mock_net_disable.assert_not_called()
    mock_net_destroy.assert_not_called()
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                report_codes.SERVICE_STOP_STARTED,
                {"service": "quorum device"},
            )
        ],
    )
def test_enable_failed(
    self, mock_net_setup, mock_net_enable, mock_net_start
):
    """An enable failure aborts the setup after initialization succeeded."""
    mock_net_enable.side_effect = EnableServiceError(
        "test service", "test error"
    )
    assert_raise_library_error(
        lambda: lib.qdevice_setup(self.lib_env, "net", True, True),
        (
            severity.ERROR,
            report_codes.SERVICE_ENABLE_ERROR,
            {"service": "test service", "reason": "test error"},
        ),
    )
    mock_net_setup.assert_called_once_with("mock_runner")
    mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
    mock_net_start.assert_not_called()
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.INFO,
                report_codes.QDEVICE_INITIALIZATION_SUCCESS,
                {"model": "net"},
            )
        ],
    )
def test_return_forceable_error_on_not_allowed_names(self):
    """An error severity constructed with a force code carries that code."""
    validator = validate.NamesIn(
        ["a", "b", "c"],
        option_type="some option",
        severity=reports.item.ReportItemSeverity.error("FORCE_CODE"),
    )
    assert_report_item_list_equal(
        validator.validate({"x": "X", "y": "Y"}),
        [
            fixture.error(
                reports.codes.INVALID_OPTIONS,
                force_code="FORCE_CODE",
                option_names=["x", "y"],
                allowed=["a", "b", "c"],
                option_type="some option",
                allowed_patterns=[],
            )
        ],
    )
def test_return_error_on_banned_names(self):
    """Banned names present in the options are reported as errors."""
    validator = validate.NamesIn(
        ["a", "b"], banned_name_list=["x", "y", "z"]
    )
    assert_report_item_list_equal(
        validator.validate({"x": "X", "a": "A", "z": "Z"}),
        [
            fixture.error(
                reports.codes.INVALID_OPTIONS,
                option_names=["x", "z"],
                allowed=["a", "b"],
                option_type=None,
                allowed_patterns=[],
            )
        ],
    )
def test_refuse_problem_combination(self):
    """Several booth config problems are all reported at once."""
    assert_report_item_list_equal(
        config_validators.create(["1.1.1.1"], ["1.1.1.1"]),
        [
            fixture.error(
                report_codes.BOOTH_LACK_OF_SITES,
                sites=["1.1.1.1"],
            ),
            fixture.error(
                report_codes.BOOTH_EVEN_PEERS_NUM,
                number=2,
            ),
            fixture.error(
                report_codes.BOOTH_ADDRESS_DUPLICATION,
                addresses=["1.1.1.1"],
            ),
        ],
    )
def test_all_valid(self):
    """Valid generic, compression and crypto options yield no reports."""
    generic_options = {
        "ip_version": "ipv4",
        "knet_pmtud_interval": "1234",
        "link_mode": "active",
    }
    compression_options = {
        "level": "5",
        "model": "zlib",
        "threshold": "1234",
    }
    crypto_options = {
        "cipher": "aes256",
        "hash": "sha256",
        "model": "nss",
    }
    assert_report_item_list_equal(
        self.call_function(
            generic_options, compression_options, crypto_options
        ),
        [],
    )
def test_more_errors(self):
    """Unknown node, removing all nodes, and tie-breaker use are all reported."""
    assert_report_item_list_equal(
        config_validators.remove_nodes(
            ["node3", "node1", "node2", "node4", "nodeX"],
            self.fixture_nodes,
            ("net", {"tie_breaker": "4"}, None, None),
        ),
        [
            fixture.error(
                report_codes.NODE_NOT_FOUND,
                node="nodeX",
                searched_types=[],
            ),
            fixture.error(report_codes.CANNOT_REMOVE_ALL_CLUSTER_NODES),
            fixture.error(
                report_codes.NODE_USED_AS_TIE_BREAKER,
                node="node4",
                node_id=4,
            ),
        ],
    )