def test_success_node_offline_skip_offline(self):
    """SBD is disabled on online nodes; the offline node is only warned about."""
    err_msg = "Failed connect to rh7-3:2224; No route to host"
    online_nodes_list = ["rh7-2", "rh7-3"]
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    # rh7-1 fails to connect (errno 7); the other nodes authenticate fine.
    self.config.http.host.check_auth(
        communication_list=[{
            "label": "rh7-1",
            "was_connected": False,
            "errno": 7,
            "error_msg": err_msg,
        }, {
            "label": "rh7-2"
        }, {
            "label": "rh7-3"
        }])
    # stonith-watchdog-timeout is only set via the first online node.
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=online_nodes_list[:1])
    self.config.http.sbd.disable_sbd(node_labels=online_nodes_list)
    disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True)
    self.env_assist.assert_reports(
        [fixture.warn(report_codes.OMITTING_NODE, node="rh7-1")]
        + [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd", node=node, instance=None)
            for node in online_nodes_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)
        ])
def test_dont_need_stopped_cluster(self):
    """Config is distributed to all nodes and reloaded on the first one only."""
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            node_labels=self.node_labels
        )
        # reload is attempted on a single node; one successful reload is enough
        .http.corosync.reload_corosync_conf(
            node_labels=self.node_labels[:1]
        )
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_RELOADED,
            node="node-1"
        ),
    ])
def test_authfile_not_in_booth_dir(self):
    """An authfile outside the booth dir is warned about but config still syncs."""
    config_file_content = "authfile=/etc/my_booth.conf"
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data=config_file_content)(),
            name="open.conf"
        )
        .corosync_conf.load()
        .http.booth.send_config(
            self.name, config_file_content,
            node_labels=self.node_list,
        )
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [
            fixture.warn(report_codes.BOOTH_UNSUPPORTED_FILE_LOCATION),
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
        ]
        + [
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in self.node_list
        ]
    )
def test_reload_corosync_not_running_anywhere(self):
    """Every node reports corosync not running, so reload is only a warning."""
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels)
    # each inner list is one round of reload attempts; every node answers
    # "not_running", so the reload is tried on each node in turn
    self.config.http.corosync.reload_corosync_conf(communication_list=[
        [
            {
                "label": node,
                "response_code": 200,
                "output": json.dumps(dict(code="not_running",
                    message="not running")),
            },
        ]
        for node in self.node_labels])
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade)
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-2",
        ),
    ])
def test_no_authfile(self):
    """Config without an authfile is distributed as-is with no warnings."""
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data="")(),
            name="open.conf"
        )
        .corosync_conf.load()
        .http.booth.send_config(
            self.name, "",
            node_labels=self.node_list,
        )
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)]
        + [
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in self.node_list
        ]
    )
def test_disable_failed(self):
    """A failed disable on one node raises an error after the others succeed."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1])
    # rh7-3 rejects the disable request with HTTP 400
    self.config.http.sbd.disable_sbd(communication_list=[{
        "label": "rh7-1"
    }, {
        "label": "rh7-2"
    }, {
        "label": "rh7-3",
        "response_code": 400,
        "output": err_msg
    }])
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    self.env_assist.assert_reports(
        [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd", node=node, instance=None)
            for node in self.node_list[:2]
        ]
        + [
            fixture.error(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node="rh7-3",
                reason=err_msg,
                command="remote/sbd_disable")
        ])
def test_dont_need_stopped_cluster_error_skip_offline(self):
    """With skip_offline, a node that rejects the config is only warned about."""
    (self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            {
                "label": "node-1",
            },
            {
                "label": "node-2",
                "response_code": 400,
                "output": "Failed"
            },
        ])
        # corosync is running locally, so the config is reloaded locally
        .runner.systemctl.is_active("corosync")
        .runner.corosync.reload())
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True)
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-2",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_RELOADED)
    ])
def test_some_nodes_unknown_forced(self):
    """With skip_offline, unknown hosts are warned about and certs go to the rest."""
    (self.config.env.set_known_nodes(self.node_names[1:])
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert",
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key",
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=self.node_names[1:],
        ))
    pcsd.synchronize_ssl_certificate(self.env_assist.get_env(),
        skip_offline=True)
    self.env_assist.assert_reports([
        fixture.warn(report_codes.HOST_NOT_FOUND,
            host_list=[self.node_names[0]]),
    ] + [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
            node_name_list=self.node_names[1:],
        )
    ] + [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
            node=node,
        )
        for node in self.node_names[1:]
    ])
def test_success(self):
    """pcsd SSL cert and key are read from disk and sent to every node."""
    (self.config
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=self.node_names
        )
    )
    pcsd.synchronize_ssl_certificate(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
                node_name_list=self.node_names
            )
        ]
        + [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
                node=node,
            )
            for node in self.node_names
        ]
    )
def test_running_on_not_specified_node(self):
    """Action succeeds when the resource ends up on a node other than requested."""
    self.success_config(
        self.state_running_node1,
        self.state_running_node2,
        action_node="node1",
    )
    self.lib_action(self.env_assist.get_env(), "A", node="node1",
        wait=self.timeout)
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
        fixture.info(
            report_codes.WAIT_FOR_IDLE_STARTED,
            timeout=self.timeout,
        ),
        # after waiting, the resource is reported running on node2
        fixture.report_resource_running(
            "A", {"Started": ["node2"]},
        ),
    ])
def test_some_node_names_missing(self):
    """Nodes without names in corosync.conf are skipped with a warning."""
    self.corosync_conf_name = "corosync-some-node-names.conf"
    self.node_list = ["rh7-2"]
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1])
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
        fixture.info(
            reports.codes.SERVICE_ACTION_STARTED,
            action=reports.const.SERVICE_ACTION_DISABLE,
            service="sbd",
            instance="",
        ),
    ] + [
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_DISABLE,
            service="sbd",
            node=node,
            instance="",
        )
        for node in self.node_list
    ] + [
        fixture.warn(
            report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)
    ])
def test_success(self):
    """SBD is disabled on all nodes; a cluster-restart warning is emitted."""
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    # stonith-watchdog-timeout is reset via the first node only
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def test_need_stopped_cluster_not_stopped_skip_offline(self):
    """A node with corosync running blocks the push even with skip_offline."""
    # If we know for sure that corosync is running, skip_offline doesn't
    # matter.
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(communication_list=[
        dict(
            label="node-1",
            output=corosync_running_check_response(True),
        ),
        dict(label="node-2"),
    ])
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade,
            skip_offline_nodes=True),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_not_stopped_skip_offline(self):
    """A node with corosync running blocks the push even with skip_offline."""
    # If we know for sure that corosync is running, skip_offline doesn't
    # matter.
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                dict(
                    label="node-1",
                    output='{"corosync":true}',
                ),
                dict(
                    label="node-2",
                ),
            ]
        )
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(
            self.corosync_conf_facade, skip_offline_nodes=True
        ),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        )
    ])
def test_success(self):
    """SBD is disabled on all nodes using the service-action report codes."""
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.info(
                reports.codes.SERVICE_ACTION_STARTED,
                action=reports.const.SERVICE_ACTION_DISABLE,
                service="sbd",
                instance="",
            )
        ]
        + [
            fixture.info(
                reports.codes.SERVICE_ACTION_SUCCEEDED,
                action=reports.const.SERVICE_ACTION_DISABLE,
                service="sbd",
                node=node,
                instance="",
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def test_wait_fail(self):
    """A failing wait-for-idle turns into a WAIT_FOR_IDLE_ERROR library error."""
    self.config.runner.pcmk.wait(timeout=self.timeout, stderr="wait error",
        returncode=1)
    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(
            self.env_assist.get_env(), "A", wait=self.timeout),
        [
            fixture.error(
                report_codes.WAIT_FOR_IDLE_ERROR,
                reason="wait error",
            ),
        ],
        expected_in_processor=False,
    )
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
        fixture.info(
            report_codes.WAIT_FOR_IDLE_STARTED,
            timeout=self.timeout,
        ),
    ])
def fixture_reports_new_node_unreachable(node_name, omitting=False):
    """Build the expected report list for a new node that cannot be reached.

    The leading warning depends on why the node is unreachable: OMITTING_NODE
    when the node is deliberately skipped (omitting=True), HOST_NOT_FOUND
    otherwise. The trailing reports are the same in both cases.
    """
    if omitting:
        head = fixture.warn(
            report_codes.OMITTING_NODE,
            node=node_name,
        )
    else:
        head = fixture.warn(
            report_codes.HOST_NOT_FOUND,
            host_list=[node_name],
        )
    tail = [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_SKIPPED,
            reason_type="unreachable",
            file_list=["pacemaker authkey"],
            node_list=[node_name],
        ),
        fixture.info(
            report_codes.SERVICE_COMMANDS_ON_NODES_SKIPPED,
            reason_type="unreachable",
            action_list=[
                "pacemaker_remote start",
                "pacemaker_remote enable"
            ],
            node_list=[node_name],
        ),
    ]
    return [head] + tail
def test_unknown_node_force(self):
    """With SKIP_OFFLINE force flag, unknown hosts are warned and the rest cleaned."""
    existing_nodes = self.remote_nodes[1:] + self.local_nodes[1:]
    self.config.env.set_known_nodes(existing_nodes)
    self._fixture_load_configs()
    self.config.http.files.remove_files(
        node_labels=existing_nodes,
        pcs_disaster_recovery_conf=True,
    )
    dr.destroy(
        self.env_assist.get_env(),
        force_flags=[report_codes.SKIP_OFFLINE_NODES],
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.HOST_NOT_FOUND,
            host_list=self.local_nodes[:1] + self.remote_nodes[:1],
        ),
    ] + [
        fixture.info(
            report_codes.FILES_REMOVE_FROM_NODES_STARTED,
            file_list=[DR_CONF],
            node_list=existing_nodes,
        )
    ] + [
        fixture.info(
            report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
            file_description=DR_CONF,
            node=node,
        )
        for node in existing_nodes
    ])
def test_need_stopped_cluster_not_stopped(self):
    """Push fails when corosync is still running on the first node."""
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                {
                    "label": self.node_labels[0],
                    "output": '{"corosync":true}'
                }
            ] + [
                {
                    "label": node,
                }
                for node in self.node_labels[1:]
            ]
        )
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_some_node_names_missing(self):
    """Nodes without names in corosync.conf are skipped with a warning."""
    nodes = ["rh7-2"]
    (self.config.corosync_conf.load(
        filename="corosync-some-node-names.conf",
        instead="corosync_conf.load",
    )
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert",
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key",
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=nodes,
        ))
    pcsd.synchronize_ssl_certificate(self.env_assist.get_env())
    self.env_assist.assert_reports([
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
            node_name_list=nodes,
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ] + [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
            node=node,
        )
        for node in nodes
    ])
def test_dont_need_stopped_cluster_error(self):
    """Distribution failure on one node is a forcible (SKIP_OFFLINE) error."""
    (self.config.http.corosync.set_corosync_conf(self.corosync_conf_text,
        communication_list=[
            {
                "label": "node-1",
            },
            {
                "label": "node-2",
                "response_code": 400,
                "output": "Failed"
            },
        ]))
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [])
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.error(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_comunnication_failure(self):
    """An unauthorized node fails the corosync-offline check with forcible errors."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(communication_list=[
        dict(label="node-1"),
        dict(
            label="node-2",
            response_code=401,
            output='{"notauthorized":"true"}',
        ),
    ])
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [])
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
            command="remote/status",
            reason="HTTP error: 401",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_success(self):
    """After the action and wait, the resource is reported running on node1."""
    # cluster state returned by crm_mon after the action completes
    resources_state = """
        <resources>
            <resource id="A" role="Started" nodes_running_on="1">
                <node name="node1" id="1" cached="false" />
            </resource>
        </resources>
    """
    self.config.runner.pcmk.wait(timeout=self.timeout)
    self.config.runner.pcmk.load_state(resources=resources_state)
    self.lib_action(self.env_assist.get_env(), "A", wait=self.timeout)
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
        fixture.info(
            report_codes.WAIT_FOR_IDLE_STARTED,
            timeout=self.timeout,
        ),
        fixture.report_resource_running("A", {"Started": ["node1"]}),
    ])
def test_state_after_action_not_xml(self):
    """Non-XML cluster state after the action raises BAD_CLUSTER_STATE_FORMAT."""
    self.config.runner.pcmk.load_state()
    self.config_pcmk_action()
    self.config.runner.pcmk.wait(timeout=self.timeout)
    # the post-action state load returns garbage instead of XML
    self.config.runner.pcmk.load_state(name="runner.pcmk.load_state.after",
        stdout="state stdout")
    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(
            self.env_assist.get_env(), "A", wait=self.timeout),
        [
            fixture.error(report_codes.BAD_CLUSTER_STATE_FORMAT, ),
        ],
        expected_in_processor=False,
    )
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
        fixture.info(
            report_codes.WAIT_FOR_IDLE_STARTED,
            timeout=self.timeout,
        ),
    ])
def test_success(self):
    """pcsd SSL cert and key are read from disk and sent to every node."""
    (self.config.fs.open(
        settings.pcsd_cert_location,
        mock.mock_open(read_data=self.pcsd_ssl_cert)(),
        name="fs.open.pcsd_ssl_cert",
    )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key",
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=self.node_names,
        ))
    pcsd.synchronize_ssl_certificate(self.env_assist.get_env())
    self.env_assist.assert_reports([
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
            node_name_list=self.node_names,
        )
    ] + [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
            node=node,
        )
        for node in self.node_names
    ])
def test_set_stonith_watchdog_timeout_fails_on_some_nodes(self):
    """Watchdog-timeout reset failures are warnings; disabling still proceeds."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    # each inner list is one attempt; rh7-1 is unreachable, rh7-2 returns
    # HTTP 400, rh7-3 finally succeeds
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [
                {
                    "label": "rh7-1",
                    "was_connected": False,
                    "errno": 7,
                    "error_msg": err_msg,
                }
            ],
            [{"label": "rh7-2", "response_code": 400, "output": "FAILED",}],
            [{"label": "rh7-3"}],
        ]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node="rh7-1",
                reason=err_msg,
                command="remote/set_stonith_watchdog_timeout_to_zero",
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node="rh7-2",
                reason="FAILED",
                command="remote/set_stonith_watchdog_timeout_to_zero",
            ),
        ]
        + [
            fixture.info(
                reports.codes.SERVICE_ACTION_STARTED,
                action=reports.const.SERVICE_ACTION_DISABLE,
                service="sbd",
                instance="",
            )
        ]
        + [
            fixture.info(
                reports.codes.SERVICE_ACTION_SUCCEEDED,
                action=reports.const.SERVICE_ACTION_DISABLE,
                service="sbd",
                node=node,
                instance="",
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def test_fail_communication(self):
    """A node rejecting the new cert produces an error after others succeed."""
    error = "an error"
    (self.config
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            communication_list=[
                {
                    "label": self.node_names[0],
                    "response_code": 400,
                    "output": error,
                }
            ] + [
                dict(label=node)
                for node in self.node_names[1:]
            ]
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: pcsd.synchronize_ssl_certificate(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
                node_name_list=self.node_names
            )
        ]
        + [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
                node=node,
            )
            for node in self.node_names[1:]
        ]
        + [
            fixture.error(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node=self.node_names[0],
                command="remote/set_certs",
                reason=error
            )
        ]
    )
def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
    """Unknown corosync status downgrades to warnings when skip_offline is set."""
    # If we don't know if corosync is running, skip_offline matters.
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(communication_list=[
        dict(
            label="node-1",
            response_code=401,
            output='{"notauthorized":"true"}',
        ),
        dict(label="node-2", output="{"),  # not valid json
    ])
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            dict(
                label="node-1",
                response_code=401,
                output='{"notauthorized":"true"}',
            ),
            dict(label="node-2"),
        ],
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True)
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/status",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-1",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
    ])
def test_reload_not_successful(self):
    """Reload failing on every node raises UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE."""
    self.corosync_conf_facade.need_qdevice_reload = True
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            node_labels=self.node_labels
        )
        # node-1: corosync not running; node-2: unparsable response
        .http.corosync.reload_corosync_conf(
            communication_list=[
                [
                    {
                        "label": self.node_labels[0],
                        "response_code": 200,
                        "output": json.dumps(
                            dict(code="not_running", message="not running")
                        ),
                    },
                ],
                [
                    {
                        "label": self.node_labels[1],
                        "response_code": 200,
                        "output": "not a json",
                    },
                ],
            ]
        )
    )
    self.env_assistant.assert_raise_library_error(
        lambda: self.env_assistant.get_env().push_corosync_conf(
            self.corosync_conf_facade
        ),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-1"
        ),
        fixture.warn(
            report_codes.INVALID_RESPONSE_FORMAT,
            node="node-2"
        ),
        fixture.error(
            report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
        ),
    ])
def _test_minimal(self, local_cluster_size, recovery_cluster_size):
    """Set a recovery site: DR config is distributed to both clusters.

    Parametrized helper; local_cluster_size and recovery_cluster_size give
    the node counts of the local and the recovery cluster respectively.
    """
    local_nodes = generate_nodes(local_cluster_size)
    remote_nodes = generate_nodes(recovery_cluster_size, prefix="recovery-")
    # the node used as the entry point into the recovery cluster
    orig_node = remote_nodes[-1]
    cfg = self.config
    cfg.env.set_known_nodes(local_nodes + remote_nodes)
    cfg.raw_file.exists(
        file_type_codes.PCS_DR_CONFIG,
        settings.pcsd_dr_config_location,
        exists=False,
    )
    cfg.corosync_conf.load_content(corosync_conf_fixture(local_nodes))
    cfg.http.corosync.get_corosync_conf(
        corosync_conf_fixture(remote_nodes), node_labels=[orig_node])
    cfg.http.files.put_files(
        node_labels=remote_nodes,
        pcs_disaster_recovery_conf=dr_cfg_fixture(DrRole.RECOVERY,
            DrRole.PRIMARY, local_nodes),
        name="distribute_remote",
    )
    cfg.http.files.put_files(
        node_labels=local_nodes,
        pcs_disaster_recovery_conf=dr_cfg_fixture(DrRole.PRIMARY,
            DrRole.RECOVERY, remote_nodes),
        name="distribute_local",
    )
    dr.set_recovery_site(self.env_assist.get_env(), orig_node)
    self.env_assist.assert_reports([
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=[DR_CFG_DESC],
            node_list=remote_nodes,
        )
    ] + [
        fixture.info(
            report_codes.FILE_DISTRIBUTION_SUCCESS,
            file_description=DR_CFG_DESC,
            node=node,
        )
        for node in remote_nodes
    ] + [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=[DR_CFG_DESC],
            node_list=local_nodes,
        )
    ] + [
        fixture.info(
            report_codes.FILE_DISTRIBUTION_SUCCESS,
            file_description=DR_CFG_DESC,
            node=node,
        )
        for node in local_nodes
    ])
def test_set_stonith_watchdog_timeout_fails_on_some_nodes(self):
    """Watchdog-timeout reset failures are warnings; disabling still proceeds."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    # rh7-1 is unreachable, rh7-2 returns HTTP 400, rh7-3 succeeds
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [{
                "label": "rh7-1",
                "was_connected": False,
                "errno": 7,
                "error_msg": err_msg,
            }],
            [{
                "label": "rh7-2",
                "response_code": 400,
                "output": "FAILED",
            }],
            [{"label": "rh7-3"}]
        ]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node="rh7-1",
                reason=err_msg,
                command="remote/set_stonith_watchdog_timeout_to_zero"
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node="rh7-2",
                reason="FAILED",
                command="remote/set_stonith_watchdog_timeout_to_zero"
            )
        ]
        + [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def setUp(self):
    """Prepare a DR distribution scenario where the last local node fails.

    Sets up the mock config up to (and including) distributing the DR config
    to the recovery cluster, and prepares self.success_communication and
    self.expected_reports for the individual tests to extend.
    """
    self.env_assist, self.config = get_env_tools(self)
    local_nodes = generate_nodes(4)
    self.remote_nodes = generate_nodes(3, prefix="recovery-")
    # entry point into the recovery cluster
    self.node = self.remote_nodes[0]
    # the last local node is the one expected to fail in the tests
    self.failed_nodes = local_nodes[-1:]
    successful_nodes = local_nodes[:-1]
    self.config.env.set_known_nodes(local_nodes + self.remote_nodes)
    self.config.raw_file.exists(
        file_type_codes.PCS_DR_CONFIG,
        settings.pcsd_dr_config_location,
        exists=False,
    )
    self.config.corosync_conf.load_content(
        corosync_conf_fixture(local_nodes))
    self.config.http.corosync.get_corosync_conf(corosync_conf_fixture(
        self.remote_nodes), node_labels=[self.node])
    self.config.http.files.put_files(
        node_labels=self.remote_nodes,
        pcs_disaster_recovery_conf=dr_cfg_fixture(DrRole.RECOVERY,
            DrRole.PRIMARY, local_nodes),
        name="distribute_remote",
    )
    self.success_communication = [
        dict(label=node) for node in successful_nodes
    ]
    self.expected_reports = [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=[DR_CFG_DESC],
            node_list=self.remote_nodes,
        )
    ] + [
        fixture.info(
            report_codes.FILE_DISTRIBUTION_SUCCESS,
            file_description=DR_CFG_DESC,
            node=node,
        )
        for node in self.remote_nodes
    ] + [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=[DR_CFG_DESC],
            node_list=local_nodes,
        )
    ] + [
        fixture.info(
            report_codes.FILE_DISTRIBUTION_SUCCESS,
            file_description=DR_CFG_DESC,
            node=node,
        )
        for node in successful_nodes
    ]
def test_qdevice_reload(self):
    """Pushing the config also restarts the qdevice client on all nodes."""
    self.corosync_conf_facade.need_qdevice_reload = True
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels)
    self.config.http.corosync.reload_corosync_conf(
        node_labels=self.node_labels[:1])
    # qdevice client is stopped on all nodes, then started on all nodes
    self.config.http.corosync.qdevice_client_stop(
        node_labels=self.node_labels)
    self.config.http.corosync.qdevice_client_start(
        node_labels=self.node_labels)
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade)
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_RELOADED,
            node="node-1",
        ),
        fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_STOP,
            service="corosync-qdevice",
            node="node-1",
            instance="",
        ),
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_STOP,
            service="corosync-qdevice",
            node="node-2",
            instance="",
        ),
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_START,
            service="corosync-qdevice",
            node="node-1",
            instance="",
        ),
        fixture.info(
            reports.codes.SERVICE_ACTION_SUCCEEDED,
            action=reports.const.SERVICE_ACTION_START,
            service="corosync-qdevice",
            node="node-2",
            instance="",
        ),
    ])
def _success_reports(self):
    """Return the expected reports for removing the DR config from all nodes."""
    all_nodes = self.remote_nodes + self.local_nodes
    expected = [
        fixture.info(
            report_codes.FILES_REMOVE_FROM_NODES_STARTED,
            file_list=[DR_CONF],
            node_list=all_nodes,
        )
    ]
    for node in all_nodes:
        expected.append(
            fixture.info(
                report_codes.FILE_REMOVE_FROM_NODE_SUCCESS,
                file_description=DR_CONF,
                node=node,
            )
        )
    return expected
def test_validate_values(self):
    """Invalid remote-port and remote-connect-timeout values are reported."""
    (self.config
        .local.load_cib()
    )
    self.env_assist.assert_raise_library_error(
        lambda: node_add_guest(
            self.env_assist.get_env(),
            options={
                "remote-addr": "*addr",
                "remote-port": "abc",
                "remote-connect-timeout": "def",
            }
        ),
        []
    )
    self.env_assist.assert_reports([
        fixture.info(
            report_codes.COROSYNC_NODE_CONFLICT_CHECK_SKIPPED,
            reason_type="not_live_cib",
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="remote-connect-timeout",
            option_value="def",
            allowed_values="time interval (e.g. 1, 2s, 3m, 4h, ...)"
        ),
        fixture.error(
            report_codes.INVALID_OPTION_VALUE,
            option_name="remote-port",
            option_value="abc",
            allowed_values="a port number (1-65535)"
        )
    ])
def test_unknown_host_addr_not_specified(self):
    """With no known hosts and no address given, the node name is used as address."""
    # expected meta attributes written to the CIB
    meta_attributes = """
        <meta_attributes id="virtual_machine_id-meta_attributes">
            <nvpair id="virtual_machine_id-meta_attributes-remote-addr"
                name="remote-addr" value="node-name"
            />
            <nvpair id="virtual_machine_id-meta_attributes-remote-node"
                name="remote-node" value="node-name"
            />
            <nvpair id="virtual_machine_id-meta_attributes-remote-port"
                name="remote-port" value="99"
            />
        </meta_attributes>
    """
    self.config.env.set_known_hosts_dests(dict())
    (self.config
        .local.load_cib()
        .local.push_cib(meta_attributes=meta_attributes)
    )
    node_add_guest(self.env_assist.get_env(), options={"remote-port": "99"})
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.USING_KNOWN_HOST_ADDRESS_FOR_HOST,
                host_name=NODE_NAME,
                address=NODE_NAME,
            ),
        ]
        + fixture_reports_not_live_cib(NODE_NAME)
    )
def test_state_after_action_fail(self):
    """A failing post-action state load raises CRM_MON_ERROR."""
    self.config.runner.pcmk.load_state()
    self.config_pcmk_action()
    self.config.runner.pcmk.wait(timeout=10)
    # the post-action state load exits non-zero
    self.config.runner.pcmk.load_state(
        name="runner.pcmk.load_state.after",
        stdout="state stdout", stderr="state stderr", returncode=1
    )
    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(
            self.env_assist.get_env(), "A", wait="10"
        ),
        [
            fixture.error(
                report_codes.CRM_MON_ERROR,
                reason="state stderr\nstate stdout",
            ),
        ],
        expected_in_processor=False
    )
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
    ])
def test_state_after_action_not_xml(self):
    """Non-XML cluster state after the action raises BAD_CLUSTER_STATE_FORMAT."""
    self.config.runner.pcmk.load_state()
    self.config_pcmk_action()
    self.config.runner.pcmk.wait(timeout=10)
    # the post-action state load returns garbage instead of XML
    self.config.runner.pcmk.load_state(
        name="runner.pcmk.load_state.after", stdout="state stdout"
    )
    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(
            self.env_assist.get_env(), "A", wait="10"
        ),
        [
            fixture.error(
                report_codes.BAD_CLUSTER_STATE_FORMAT,
            ),
        ],
        expected_in_processor=False
    )
    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
    ])
def test_running_on_specified_node(self):
    """The action errors out when the resource already runs on the node."""
    self.success_config(
        self.state_running_node1,
        self.state_running_node1,
        action_node="node1",
    )

    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(
            self.env_assist.get_env(), "A", node="node1", wait="10"
        ),
        [
            fixture.report_resource_running(
                "A", {"Started": ["node1"]}, severity=severities.ERROR
            ),
        ],
    )
    success_report = fixture.info(
        self.report_code_pcmk_success,
        resource_id="A",
        stdout="pcmk std out",
        stderr="pcmk std err",
    )
    self.env_assist.assert_reports([success_report])
def test_success(self):
    """The resource ends up running on node1; success reports are emitted."""
    state_xml = """ <resources> <resource id="A" role="Started" nodes_running_on="1"> <node name="node1" id="1" cached="false" /> </resource> </resources> """
    resources_state = etree_to_str(
        fixture.complete_state_resources(etree.fromstring(state_xml))
    )
    self.config.runner.pcmk.wait(timeout=10)
    self.config.runner.pcmk.load_state(resources=resources_state)

    self.lib_action(self.env_assist.get_env(), "A", wait="10")

    self.env_assist.assert_reports([
        fixture.info(
            self.report_code_pcmk_success,
            resource_id="A",
            stdout="pcmk std out",
            stderr="pcmk std err",
        ),
        fixture.report_resource_running("A", {"Started": ["node1"]}),
    ])
def test_wait_fail(self):
    """A failing wait call raises WAIT_FOR_IDLE_ERROR."""
    self.config.runner.pcmk.wait(
        timeout=10, stderr="wait error", returncode=1
    )

    self.env_assist.assert_raise_library_error(
        lambda: self.lib_action(self.env_assist.get_env(), "A", wait="10"),
        [
            fixture.error(
                report_codes.WAIT_FOR_IDLE_ERROR, reason="wait error"
            ),
        ],
        expected_in_processor=False,
    )
    success_report = fixture.info(
        self.report_code_pcmk_success,
        resource_id="A",
        stdout="pcmk std out",
        stderr="pcmk std err",
    )
    self.env_assist.assert_reports([success_report])
def test_reload_on_another_node(self):
    """If reload is not possible on the first node, the next one is tried."""
    not_running_response = json.dumps(
        dict(code="not_running", message="not running")
    )
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels
    )
    # First node reports corosync not running; second node succeeds.
    self.config.http.corosync.reload_corosync_conf(
        communication_list=[
            [
                {
                    "label": self.node_labels[0],
                    "response_code": 200,
                    "output": not_running_response,
                },
            ],
            [
                {"label": self.node_labels[1]},
            ],
        ]
    )

    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )

    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node="node-1"
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node="node-2"
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE, node="node-1"
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_RELOADED, node="node-2"),
    ])
def test_some_nodes_unknown_forced(self):
    """skip_offline lets the sync proceed, warning about the unknown host."""
    known_nodes = self.node_names[1:]
    self.config.env.set_known_nodes(known_nodes)
    self.config.fs.open(
        settings.pcsd_cert_location,
        mock.mock_open(read_data=self.pcsd_ssl_cert)(),
        name="fs.open.pcsd_ssl_cert",
    )
    self.config.fs.open(
        settings.pcsd_key_location,
        mock.mock_open(read_data=self.pcsd_ssl_key)(),
        name="fs.open.pcsd_ssl_key",
    )
    self.config.http.host.send_pcsd_cert(
        cert=self.pcsd_ssl_cert,
        key=self.pcsd_ssl_key,
        node_labels=known_nodes,
    )

    pcsd.synchronize_ssl_certificate(
        self.env_assist.get_env(), skip_offline=True
    )

    expected = [
        fixture.warn(
            report_codes.HOST_NOT_FOUND, host_list=[self.node_names[0]]
        ),
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
            node_name_list=known_nodes,
        ),
    ]
    expected += [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS, node=node
        )
        for node in known_nodes
    ]
    self.env_assist.assert_reports(expected)
def test_node_offline(self):
    """A connection error on one node is reported; the other node succeeds."""
    self.config.fs.open(
        self.config_path, mock.mock_open(read_data="")(), name="open.conf"
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        "",
        communication_list=[
            dict(
                label=self.node_list[0],
                errno=1,
                error_msg=self.reason,
                was_connected=False,
            ),
            dict(label=self.node_list[1]),
        ],
    )

    self.env_assist.assert_raise_library_error(
        lambda: commands.config_sync(self.env_assist.get_env()), []
    )
    self.env_assist.assert_reports([
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=self.node_list[1],
            name_list=[self.name],
        ),
        # The offline node's error is forcible via SKIP_OFFLINE_NODES.
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node=self.node_list[0],
            reason=self.reason,
            command="remote/booth_set_config",
            force_code=report_codes.SKIP_OFFLINE_NODES,
        ),
    ])
def test_some_node_names_missing(self):
    """Nodes without names in corosync.conf are skipped with a warning."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    auth_file_content = b"auth"
    nodes = ["rh7-2"]
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_content)(),
        name="open.conf",
    )
    self.config.fs.open(
        auth_file_path,
        mock.mock_open(read_data=auth_file_content)(),
        mode="rb",
        name="open.authfile",
    )
    self.config.corosync_conf.load(
        filename="corosync-some-node-names.conf"
    )
    self.config.http.booth.send_config(
        self.name,
        config_content,
        authfile=auth_file,
        authfile_data=auth_file_content,
        node_labels=nodes,
    )

    commands.config_sync(self.env_assist.get_env(), self.name)

    expected = [
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ]
    expected += [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in nodes
    ]
    self.env_assist.assert_reports(expected)
def test_some_node_names_missing(self):
    """Nodes lacking names in corosync.conf are skipped with a warning."""
    nodes = ["rh7-2"]
    self.config.corosync_conf.load(
        filename="corosync-some-node-names.conf",
        instead="corosync_conf.load",
    )
    self.config.fs.open(
        settings.pcsd_cert_location,
        mock.mock_open(read_data=self.pcsd_ssl_cert)(),
        name="fs.open.pcsd_ssl_cert",
    )
    self.config.fs.open(
        settings.pcsd_key_location,
        mock.mock_open(read_data=self.pcsd_ssl_key)(),
        name="fs.open.pcsd_ssl_key",
    )
    self.config.http.host.send_pcsd_cert(
        cert=self.pcsd_ssl_cert, key=self.pcsd_ssl_key, node_labels=nodes
    )

    pcsd.synchronize_ssl_certificate(self.env_assist.get_env())

    expected = [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
            node_name_list=nodes,
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ]
    expected += [
        fixture.info(
            report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS, node=node
        )
        for node in nodes
    ]
    self.env_assist.assert_reports(expected)
def test_authfile_not_accessible(self):
    """An unreadable authfile only produces a warning; config still syncs."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_content)(),
        name="open.conf",
    )
    # Reading the authfile raises an EnvironmentError.
    self.config.fs.open(
        auth_file_path,
        mode="rb",
        name="open.authfile",
        side_effect=EnvironmentError(0, self.reason, auth_file_path),
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name, config_content, node_labels=self.node_list
    )

    commands.config_sync(self.env_assist.get_env(), self.name)

    expected = [
        fixture.warn(
            report_codes.FILE_IO_ERROR,
            reason="{}: '{}'".format(self.reason, auth_file_path),
            file_role=file_roles.BOOTH_KEY,
            file_path=auth_file_path,
            operation="read",
        ),
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
    ]
    expected += [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in self.node_list
    ]
    self.env_assist.assert_reports(expected)
def test_node_failure(self):
    """A non-200 response from one node is reported as a forcible error."""
    self.config.fs.open(
        self.config_path, mock.mock_open(read_data="")(), name="open.conf"
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        "",
        communication_list=[
            dict(
                label=self.node_list[0],
                response_code=400,
                output=self.reason,
            ),
            dict(label=self.node_list[1]),
        ],
    )

    self.env_assist.assert_raise_library_error(
        lambda: commands.config_sync(self.env_assist.get_env()), []
    )
    self.env_assist.assert_reports([
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=self.node_list[1],
            name_list=[self.name],
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node=self.node_list[0],
            reason=self.reason,
            command="remote/booth_set_config",
            force_code=report_codes.SKIP_OFFLINE_NODES,
        ),
    ])
def test_disable_failed(self):
    """A 400 response from one node while disabling sbd is an error."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(
        communication_list=[
            {"label": "rh7-1"},
            {"label": "rh7-2"},
            {"label": "rh7-3", "response_code": 400, "output": err_msg},
        ]
    )

    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()), []
    )

    expected = [fixture.info(report_codes.SBD_DISABLING_STARTED)]
    expected += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in self.node_list[:2]
    ]
    expected.append(
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="rh7-3",
            reason=err_msg,
            command="remote/sbd_disable",
        )
    )
    self.env_assist.assert_reports(expected)
def setUp(self):
    """Prepare the env assistant, booth identifiers and expected reports."""
    self.env_assist, self.config = get_env_tools(self)
    self.name = "booth"
    self.node_name = "node"
    self.config_data = "config"
    self.config_path = _get_booth_file_path("{}.conf".format(self.name))
    fetch_report = fixture.info(
        report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
        node=self.node_name,
        config=self.name,
    )
    accepted_report = fixture.info(
        report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
        node=None,
        name_list=[self.name],
    )
    self.report_list = [fetch_report, accepted_report]
    self.config.env.set_booth({"name": self.name})
def test_unknown_host_skip_offline_authkey_distribution(
    self, generate_binary_key
):
    """The authkey is still distributed to known cluster nodes when the
    new node is unreachable and skip_offline is set.
    """
    generate_binary_key.return_value = b"password"
    self.config.env.set_known_hosts_dests({
        NODE_1: NODE_1_DEST_LIST,
        NODE_2: NODE_2_DEST_LIST,
    })
    self.config.local.load_cib()
    self.config.corosync_conf.load(node_name_list=[NODE_1, NODE_2])
    self.config.local.authkey_exists(return_value=False)
    self.config.local.distribute_authkey(
        communication_list=[
            dict(label=NODE_1, dest_list=NODE_1_DEST_LIST),
            dict(label=NODE_2, dest_list=NODE_2_DEST_LIST),
        ],
        pcmk_authkey_content=generate_binary_key.return_value,
    )
    self.config.local.push_cib()

    node_add_guest(self.env_assist.get_env(), skip_offline_nodes=True)

    generate_binary_key.assert_called_once_with(random_bytes_count=384)
    distribution_reports = [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=["pacemaker authkey"],
            node_list=[NODE_1, NODE_2],
        ),
    ]
    distribution_reports += [
        fixture.info(
            report_codes.FILE_DISTRIBUTION_SUCCESS,
            file_description="pacemaker authkey",
            node=node,
        )
        for node in [NODE_1, NODE_2]
    ]
    self.env_assist.assert_reports(
        fixture_reports_new_node_unreachable(NODE_NAME)
        + distribution_reports
    )
def test_can_skip_all_offline(self, generate_binary_key):
    """With skip_offline, every communication failure becomes a warning."""
    generate_binary_key.return_value = b"password"
    self.config.local.load_cib()
    self.config.corosync_conf.load(node_name_list=[NODE_1, NODE_2])
    # The new node fails the auth check.
    self.config.http.host.check_auth(
        communication_list=[
            dict(
                label=NODE_NAME,
                dest_list=NODE_DEST_LIST,
                **FAIL_HTTP_KWARGS
            )
        ],
    )
    self.config.local.authkey_exists(return_value=False)
    # Authkey distribution fails on both cluster nodes as well.
    self.config.local.distribute_authkey(
        communication_list=[
            dict(
                label=NODE_1,
                dest_list=NODE_1_DEST_LIST,
                **FAIL_HTTP_KWARGS,
            ),
            dict(
                label=NODE_2,
                dest_list=NODE_2_DEST_LIST,
                **FAIL_HTTP_KWARGS,
            ),
        ],
        pcmk_authkey_content=generate_binary_key.return_value,
    )
    self.config.local.push_cib()

    node_add_guest(self.env_assist.get_env(), skip_offline_nodes=True)

    expected = list(
        fixture_reports_new_node_unreachable(NODE_NAME, omitting=True)
    )
    expected.append(
        fixture.info(
            report_codes.FILES_DISTRIBUTION_STARTED,
            file_list=["pacemaker authkey"],
            node_list=[NODE_1, NODE_2],
        )
    )
    expected += [
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node=node,
            command="remote/put_file",
            reason="Could not resolve host",
        )
        for node in [NODE_1, NODE_2]
    ]
    self.env_assist.assert_reports(expected)
def test_success_node_offline_skip_offline(self):
    """An offline node is omitted with a warning; sbd is disabled on the rest."""
    err_msg = "Failed connect to rh7-3:2224; No route to host"
    online_nodes_list = ["rh7-2", "rh7-3"]
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(
        communication_list=[
            {
                "label": "rh7-1",
                "was_connected": False,
                "errno": 7,
                "error_msg": err_msg,
            },
            {"label": "rh7-2"},
            {"label": "rh7-3"},
        ]
    )
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=online_nodes_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=online_nodes_list)

    disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True)

    expected = [
        fixture.warn(report_codes.OMITTING_NODE, node="rh7-1"),
        fixture.info(report_codes.SBD_DISABLING_STARTED),
    ]
    expected += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in online_nodes_list
    ]
    expected.append(
        fixture.warn(
            report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
        )
    )
    self.env_assist.assert_reports(expected)