def test_authfile_not_in_booth_dir(self):
    # Syncing a booth config whose authfile points outside the booth
    # directory still distributes the config to all nodes, but a warning
    # about the unsupported authfile location is reported.
    config_file_content = "authfile=/etc/my_booth.conf"
    # Fixture chain (order matters): read the local booth config file,
    # load corosync.conf for the node list, then send the config out.
    (self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_file_content)(),
        name="open.conf"
    ).corosync_conf.load().http.booth.send_config(
        self.name,
        config_file_content,
        node_labels=self.node_list,
    ))
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [
            # caused by the authfile path lying outside the booth dir
            fixture.warn(report_codes.BOOTH_UNSUPORTED_FILE_LOCATION),
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
        ] + [
            # one acceptance report per node in the cluster
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in self.node_list
        ]
    )
def test_success_authfile_exists(self, pwd_mock, grp_mock):
    # Pulling a booth config when the local authfile already exists
    # succeeds; the pre-existing key file only produces a warning.
    self._set_pwd_mock(pwd_mock)
    self._set_grp_mock(grp_mock)
    # Replace the inherited fs.exists expectation: the authfile is
    # present this time (note instead= overrides the call of that name).
    self.config.fs.exists(
        self.authfile_path,
        True,
        name="fs.exists.authfile",
        instead="fs.exists.authfile",
    )
    commands.pull_config(self.env_assist.get_env(), self.node_name)
    self.env_assist.assert_reports(self.report_list + [
        # extra report on top of the base list: key file already there
        fixture.warn(
            report_codes.FILE_ALREADY_EXISTS,
            node=None,
            file_role=file_roles.BOOTH_KEY,
            file_path=self.authfile_path,
        )
    ])
def test_set_stonith_watchdog_timeout_fails_on_all_nodes(self):
    # If resetting stonith-watchdog-timeout fails on every node, each
    # per-node failure is only a warning, but an overall error is raised
    # because the operation did not succeed anywhere.
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    # Every node answers the timeout-reset request with HTTP 400.
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [dict(label=node, response_code=400, output=err_msg)]
            for node in self.node_list
        ]
    )
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node=node,
                reason=err_msg,
                command="remote/set_stonith_watchdog_timeout_to_zero"
            )
            for node in self.node_list
        ] + [
            fixture.error(
                report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
            )
        ]
    )
def test_node_addrs_unresolvable_forced(self):
    # With force_unresolvable=True, unresolvable addresses are only a
    # warning, while the address-count and duplication problems remain
    # errors.
    new_nodes = [
        # Duplicated addresses reported only once but they trigger
        # a duplicate addresses report.
        {"name": "node3", "addrs": ["addr03", "addrX2"]},
        {"name": "node4", "addrs": ["addrX2", "addr14"]},
        # Extra address reported as well, it triggers its own report
        # about being an extra address.
        {"name": "node5", "addrs": ["addr05", "addrX1", "addrX3"]},
    ]
    expected_reports = [
        fixture.error(
            report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
            actual_count=3,
            min_count=2,
            max_count=2,
            node_name="node5",
            node_index=3,
        ),
        fixture.warn(
            report_codes.NODE_ADDRESSES_UNRESOLVABLE,
            address_list=["addrX1", "addrX2", "addrX3"],
        ),
        fixture.error(
            report_codes.NODE_ADDRESSES_DUPLICATION,
            address_list=["addrX2"],
        ),
    ]
    assert_report_item_list_equal(
        config_validators.add_nodes(
            new_nodes,
            self.fixture_coronodes_2_links,
            [],
            force_unresolvable=True,
        ),
        expected_reports,
    )
def tearDown(self):
    # Every test in this class is expected to finish with the
    # "defaults can be overridden" warning; check it once here instead
    # of repeating the assertion in each test.
    expected = [fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN)]
    self.env_assist.assert_reports(expected)
def test_different_responses(self):
    # get_cluster_sbd_config gathers the SBD config from all nodes; a
    # node that is unreachable or returns unparsable data must not break
    # the call — it ends up with config None (unreachable) or {} (junk).
    (self.config.runner.corosync.version().corosync_conf.load(
        node_name_list=[
            "node-1",
            "node-2",
            "node-3",
            "node-4",
            "node-5",
        ],
        auto_tie_breaker=True,
    ).http.add_communication(
        "get_sbd_config",
        [
            # node-1: a full, valid SBD config file
            dict(
                label="node-1",
                output=outdent("""\
                    # This file has been generated by pcs.
                    SBD_DELAY_START=no
                    SBD_OPTS="-n node-1"
                    SBD_PACEMAKER=yes
                    SBD_STARTMODE=always
                    SBD_WATCHDOG_DEV=/dev/watchdog
                    SBD_WATCHDOG_TIMEOUT=5
                    """),
                response_code=200,
            ),
            # node-2: connection failure
            dict(
                label="node-2",
                was_connected=False,
                errno=7,
                error_msg="Failed connect to node-2:2224;"
                    " No route to host",
            ),
            # node-3: single option with odd spacing
            dict(
                label="node-3",
                output="OPTION= value",
                response_code=200,
            ),
            # node-4: comments only
            dict(
                label="node-4",
                output="# just comment",
                response_code=200,
            ),
            # node-5: not a key=value line at all
            dict(
                label="node-5",
                output="invalid value",
                response_code=200,
            ),
        ],
        action="remote/get_sbd_config",
    ))
    self.assertEqual(get_cluster_sbd_config(self.env_assist.get_env()), [
        {
            'node': 'node-1',
            'config': {
                'SBD_WATCHDOG_TIMEOUT': '5',
                'SBD_WATCHDOG_DEV': '/dev/watchdog',
                'SBD_PACEMAKER': 'yes',
                'SBD_OPTS': '"-n node-1"',
                'SBD_STARTMODE': 'always',
                'SBD_DELAY_START': 'no'
            },
        },
        {
            'node': 'node-3',
            'config': {
                "OPTION": "value",
            }
        },
        {
            'node': 'node-4',
            'config': {},
        },
        {
            'node': 'node-5',
            'config': {},
        },
        # the unreachable node is listed last with config set to None
        {
            'node': 'node-2',
            'config': None,
        },
    ])
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason="Failed connect to node-2:2224; No route to host",
            command="remote/get_sbd_config",
        ),
        fixture.warn(
            report_codes.UNABLE_TO_GET_SBD_CONFIG,
            node="node-2",
            reason="",
        ),
    ])
def test_success_enable(self):
    # Enabling SBD while skipping offline nodes: only node-1 is checked,
    # gets the config, has the watchdog timeout removed and sbd enabled;
    # node-2 is merely reported as omitted.
    (self.config
        # node-1 reports sbd installed and a working watchdog
        .http.add_communication(
            "check_sbd",
            [
                dict(label="node-1"),
            ],
            output=json.dumps({
                "sbd":{
                    "installed": True,
                    "enabled": False,
                    "running": False
                },
                "watchdog":{
                    "path": "/dev/watchdog",
                    "exist": True,
                }
            }),
            response_code=200,
            action="remote/check_sbd",
            param_list=[
                ("watchdog", "/dev/watchdog"),
                ("device_list", [])
            ],
        )
        .corosync_conf.load(
            node_name_list=["node-1", "node-2"],
            auto_tie_breaker=True,
            name="corosync_conf.load-extra",
        )
        # the generated SBD config is pushed to node-1 only
        .http.add_communication(
            "set_sbd_config",
            [
                dict(label="node-1"),
            ],
            output=json.dumps({
                "sbd":{
                    "installed": True,
                    "enabled": False,
                    "running": False
                },
                "watchdog":{
                    "path": "/dev/watchdog",
                    "exist": True,
                }
            }),
            response_code=200,
            action="remote/set_sbd_config",
            param_list=[("config", outdent(
                """\
                # This file has been generated by pcs.
                SBD_DELAY_START=no
                SBD_OPTS="-n node-1"
                SBD_PACEMAKER=yes
                SBD_STARTMODE=always
                SBD_WATCHDOG_DEV=/dev/watchdog
                SBD_WATCHDOG_TIMEOUT=5
                """
            ))],
        )
        .http.add_communication(
            "remove_stonith_watchdog_timeout",
            [
                dict(label="node-1"),
            ],
            output="OK",
            response_code=200,
            action="remote/remove_stonith_watchdog_timeout",
        )
        .http.add_communication(
            "sbd_enable",
            [
                dict(label="node-1"),
            ],
            output="SBD enabled",
            response_code=200,
            action="remote/sbd_enable",
        )
    )
    enable_sbd(
        self.env_assist.get_env(),
        default_watchdog=None,
        watchdog_dict={},
        sbd_options={},
        ignore_offline_nodes=True,
    )
    self.env_assist.assert_reports([
        fixture.info(report_codes.SBD_ENABLING_STARTED),
        fixture.warn(
            report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
        ),
        # node-2 is offline and explicitly ignored
        fixture.warn(report_codes.OMITTING_NODE, node="node-2"),
        fixture.info(report_codes.SBD_CHECK_STARTED),
        fixture.info(report_codes.SBD_CHECK_SUCCESS, node="node-1"),
        fixture.info(report_codes.SBD_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.SBD_CONFIG_ACCEPTED_BY_NODE, node="node-1"
        ),
        fixture.info(
            report_codes.SERVICE_ENABLE_SUCCESS,
            node="node-1",
            instance=None,
            service="sbd",
        ),
    ])
def test_qdevice_reload_failures_skip_offline(self):
    # Pushing corosync.conf with skip_offline_nodes=True: per-node
    # failures during distribution and qdevice client restart are
    # downgraded to warnings and the push still completes.
    self.corosync_conf_facade.need_qdevice_reload = True
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            communication_list=[
                dict(
                    label="node-1",
                ),
                # node-2 is unreachable during config distribution
                dict(
                    label="node-2",
                    errno=8,
                    error_msg="failure",
                    was_connected=False,
                ),
            ]
        )
        .runner.systemctl.is_active("corosync", is_active=False)
        .http.corosync.qdevice_client_stop(
            communication_list=[
                dict(
                    label="node-1",
                ),
                # node-2 rejects the stop request
                dict(
                    label="node-2",
                    response_code=400,
                    output="error",
                ),
            ]
        )
        .http.corosync.qdevice_client_start(
            communication_list=[
                # node-1 is unreachable for the start request
                dict(
                    label="node-1",
                    errno=8,
                    error_msg="failure",
                    was_connected=False,
                ),
                dict(
                    label="node-2",
                ),
            ]
        )
    )
    env = self.env_assistant.get_env()
    env.push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason="failure",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
        fixture.info(
            report_codes.SERVICE_STOP_SUCCESS,
            node="node-1",
            service="corosync-qdevice",
            instance=None,
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-2",
            reason="error",
            command="remote/qdevice_client_stop",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-1",
            reason="failure",
            command="remote/qdevice_client_start",
        ),
        fixture.info(
            report_codes.SERVICE_START_SUCCESS,
            node="node-2",
            service="corosync-qdevice",
            instance=None,
        ),
    ])
def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
    # If we don't know if corosync is running, skip_offline matters.
    # Both the offline check and the distribution fail on individual
    # nodes, but with skip_offline_nodes=True those become warnings.
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                dict(
                    label="node-1",
                    output="{"  # not valid json
                ),
                # node-2 answers 401 -> not authorized
                dict(
                    label="node-2",
                    response_code=401,
                    output="""{"notauthorized":"true"}"""
                ),
            ]
        )
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            communication_list=[
                dict(
                    label="node-1",
                ),
                dict(
                    label="node-2",
                    response_code=401,
                    output="""{"notauthorized":"true"}""",
                )
            ]
        )
        .runner.systemctl.is_active("corosync", is_active=False)
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        # unparsable status from node-1 -> check error, only a warning
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/status",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_comunnication_failure_skip_offline(
    self, mock_is_systemctl
):
    # One node reports corosync stopped, the other is not authorized;
    # with skip_offline_nodes=True the push proceeds and the auth
    # failures are only warnings.
    mock_is_systemctl.return_value = True
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config.http.add_communication(
        "status",
        [
            # node-1 returns a full pcsd status with corosync offline
            dict(
                label="node-1",
                response_code=200,
                output="""\
{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
"resources":[],"groups":[],"constraints":{},"cluster_settings":\
{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
false,"acls":{},"username":"******"}
""",
            ),
            # node-2 answers 401 -> not authorized
            dict(label="node-2", response_code=401,
                output="""{"notauthorized":"true"}"""),
        ],
        action="remote/status",
    ).http.add_communication(
        "set_corosync_conf",
        [
            dict(
                label="node-1",
                response_code=200,
                output="Succeeded",
            ),
            dict(
                label="node-2",
                response_code=401,
                output="""{"notauthorized":"true"}""",
            )
        ],
        action="remote/set_corosync_conf",
        param_list=[("corosync_conf", self.corosync_conf_text)],
    ).runner.systemctl.is_active("corosync", is_active=False))
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True)
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/status",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
    ])
def test_default_different_results_on_different_nodes(self):
    # get_cluster_sbd_status queries every node; any node whose answer
    # is unauthorized, unreachable, incomplete or unparsable falls back
    # to a status of all-None values instead of failing the call.
    node_name_list = ["node-1", "node-2", "node-3", "node-4", "node-5"]
    (self.config.env.set_known_nodes(node_name_list).corosync_conf.load(
        node_name_list=node_name_list).http.add_communication(
            "check_sbd",
            [
                # node-1: not authorized
                dict(
                    label="node-1",
                    output='{"notauthorized":"true"}',
                    response_code=401,
                ),
                # node-2: name resolution failure
                dict(
                    label="node-2",
                    was_connected=False,
                    errno=6,
                    error_msg="Could not resolve host: node-2;"
                        " Name or service not known",
                ),
                # node-3: complete, valid answer
                dict(
                    label="node-3",
                    output=json.dumps({
                        "sbd": {
                            "installed": True,
                            "enabled": False,
                            "running": False
                        },
                        "watchdog": {
                            "path": "",
                            "exist": False
                        },
                        "device_list": []
                    }),
                    response_code=200,
                ),
                # node-4: valid json but the "sbd" key is missing
                dict(
                    label="node-4",
                    output=json.dumps({
                        "watchdog": {
                            "path": "",
                            "exist": False
                        },
                        "device_list": []
                    }),
                    response_code=200,
                ),
                # node-5: not json at all
                dict(
                    label="node-5",
                    output="invalid json",
                    response_code=200,
                ),
            ],
            action="remote/check_sbd",
            param_list=[("watchdog", ""), ("device_list", "[]")],
        ))
    # status used for every node whose answer could not be processed
    default_status = {
        'running': None,
        'enabled': None,
        'installed': None,
    }
    self.assertEqual(get_cluster_sbd_status(self.env_assist.get_env()), [
        {
            'node': 'node-3',
            'status': {
                'running': False,
                'enabled': False,
                'installed': True,
            }
        },
        {
            'node': 'node-1',
            'status': default_status
        },
        {
            'node': 'node-2',
            'status': default_status
        },
        {
            'node': 'node-4',
            'status': default_status
        },
        {
            'node': 'node-5',
            'status': default_status
        },
    ])
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/check_sbd",
        ),
        warn_unable_to_get_sbd_status(node="node-1", reason=""),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason=
                "Could not resolve host: node-2; Name or service not known",
            command="remote/check_sbd",
        ),
        warn_unable_to_get_sbd_status(node="node-2", reason=""),
        # missing "sbd" key surfaces as a KeyError message
        warn_unable_to_get_sbd_status(node="node-4", reason="'sbd'"),
        warn_unable_to_get_sbd_status(
            node="node-5",
            # the reason differs in python3
            #reason="No JSON object could be decoded",
        ),
    ])