def test_authfile_not_in_booth_dir(self):
    """An authfile outside the booth dir triggers a warning; config is sent."""
    config_file_content = "authfile=/etc/my_booth.conf"
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_file_content)(),
        name="open.conf",
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        config_file_content,
        node_labels=self.node_list,
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    expected_reports = [
        fixture.warn(report_codes.BOOTH_UNSUPORTED_FILE_LOCATION),
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
    ]
    expected_reports += [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in self.node_list
    ]
    self.env_assist.assert_reports(expected_reports)
def test_authfile_not_accessible(self):
    """An unreadable authfile yields an IO-error warning; config still sent."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_content)(),
        name="open.conf",
    )
    # reading the authfile itself fails
    self.config.fs.open(
        auth_file_path,
        mode="rb",
        name="open.authfile",
        side_effect=EnvironmentError(0, self.reason, auth_file_path),
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        config_content,
        node_labels=self.node_list,
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    expected_reports = [
        fixture.warn(
            report_codes.FILE_IO_ERROR,
            reason="{}: '{}'".format(self.reason, auth_file_path),
            file_role=file_roles.BOOTH_KEY,
            file_path=auth_file_path,
            operation="read",
        ),
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
    ]
    expected_reports += [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in self.node_list
    ]
    self.env_assist.assert_reports(expected_reports)
def test_no_authfile(self):
    """A config without an authfile entry is distributed as-is."""
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data="")(),
        name="open.conf",
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        "",
        node_labels=self.node_list,
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    accepted = [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in self.node_list
    ]
    self.env_assist.assert_reports(
        [fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)]
        + accepted
    )
def test_need_stopped_cluster_not_stopped_skip_offline(self):
    # If we know for sure that corosync is running, skip_offline doesn't
    # matter.
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            {"label": "node-1", "output": '{"corosync":true}'},
            {"label": "node-2"},
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(
            self.corosync_conf_facade, skip_offline_nodes=True
        ),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        ),
    ])
def test_success_node_offline_skip_offline(self):
    """SBD is disabled on reachable nodes; the offline node is omitted."""
    err_msg = "Failed connect to rh7-3:2224; No route to host"
    online_nodes_list = ["rh7-2", "rh7-3"]
    communication = [
        dict(
            label="rh7-1",
            was_connected=False,
            errno=7,
            error_msg=err_msg,
        ),
        dict(label="rh7-2"),
        dict(label="rh7-3"),
    ]
    self.config.runner.corosync.version()
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(communication_list=communication)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=online_nodes_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=online_nodes_list)
    disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True)
    expected = [
        fixture.warn(report_codes.OMITTING_NODE, node="rh7-1"),
        fixture.info(report_codes.SBD_DISABLING_STARTED),
    ]
    expected += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in online_nodes_list
    ]
    expected.append(
        fixture.warn(report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)
    )
    self.env_assist.assert_reports(expected)
def test_node_offline_skip_offline(self):
    """An unreachable node only warns when skip_offline_nodes is set."""
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data="")(),
        name="open.conf",
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        "",
        communication_list=[
            {
                "label": self.node_list[0],
                "errno": 1,
                "error_msg": self.reason,
                "was_connected": False,
            },
            {"label": self.node_list[1]},
        ],
    )
    commands.config_sync(self.env_assist.get_env(), skip_offline_nodes=True)
    self.env_assist.assert_reports([
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=self.node_list[1],
            name_list=[self.name],
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node=self.node_list[0],
            reason=self.reason,
            command="remote/booth_set_config",
        ),
    ])
def test_node_failure_skip_offline(self):
    """An HTTP 400 from a node only warns when skip_offline_nodes is set."""
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data="")(),
        name="open.conf",
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        "",
        communication_list=[
            {
                "label": self.node_list[0],
                "response_code": 400,
                "output": self.reason,
            },
            {"label": self.node_list[1]},
        ],
    )
    commands.config_sync(self.env_assist.get_env(), skip_offline_nodes=True)
    self.env_assist.assert_reports([
        fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=self.node_list[1],
            name_list=[self.name],
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node=self.node_list[0],
            reason=self.reason,
            command="remote/booth_set_config",
        ),
    ])
def fixture_reports_new_node_unreachable(node_name, omitting=False):
    """Build the reports expected when a new node cannot be contacted.

    node_name -- name of the unreachable node
    omitting -- when True the leading warning is OMITTING_NODE,
        otherwise HOST_NOT_FOUND
    """
    if omitting:
        head = fixture.warn(report_codes.OMITTING_NODE, node=node_name)
    else:
        head = fixture.warn(
            report_codes.HOST_NOT_FOUND, host_list=[node_name]
        )
    return [
        head,
        fixture.info(
            report_codes.FILES_DISTRIBUTION_SKIPPED,
            reason_type="unreachable",
            file_list=["pacemaker authkey"],
            node_list=[node_name],
        ),
        fixture.info(
            report_codes.SERVICE_COMMANDS_ON_NODES_SKIPPED,
            reason_type="unreachable",
            action_list=["pacemaker_remote start", "pacemaker_remote enable"],
            node_list=[node_name],
        ),
    ]
def test_disable_failed(self):
    """A failed sbd_disable on one node raises and is reported as an error."""
    err_msg = "Error"
    self.config.runner.corosync.version()
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(
        communication_list=[
            dict(label="rh7-1"),
            dict(label="rh7-2"),
            dict(label="rh7-3", response_code=400, output=err_msg),
        ]
    )
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    reports = [fixture.info(report_codes.SBD_DISABLING_STARTED)]
    reports += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in self.node_list[:2]
    ]
    reports.append(
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="rh7-3",
            reason=err_msg,
            command="remote/sbd_disable",
        )
    )
    self.env_assist.assert_reports(reports)
def test_need_stopped_cluster_comunnication_failure(self):
    """A 401 during the offline check is a forcible error without skip_offline."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            {"label": "node-1"},
            {
                "label": "node-2",
                "response_code": 401,
                "output": """{"notauthorized":"true"}""",
            },
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_success(self):
    """Config and its authfile are both read and distributed to all nodes."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    auth_file_content = b"auth"
    self.config.fs.open(
        self.config_path,
        mock.mock_open(read_data=config_content)(),
        name="open.conf",
    )
    self.config.fs.open(
        auth_file_path,
        mock.mock_open(read_data=auth_file_content)(),
        mode="rb",
        name="open.authfile",
    )
    self.config.corosync_conf.load()
    self.config.http.booth.send_config(
        self.name,
        config_content,
        authfile=auth_file,
        authfile_data=auth_file_content,
        node_labels=self.node_list,
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    accepted = [
        fixture.info(
            report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
            node=node,
            name_list=[self.name],
        )
        for node in self.node_list
    ]
    self.env_assist.assert_reports(
        [fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)]
        + accepted
    )
def test_dont_need_stopped_cluster_error_skip_offline(self):
    """With skip_offline, a node refusing the config only produces warnings."""
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            dict(label="node-1"),
            dict(label="node-2", response_code=400, output="Failed"),
        ],
    )
    self.config.runner.systemctl.is_active("corosync")
    self.config.runner.corosync.reload()
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-2",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_RELOADED),
    ])
def test_dont_need_stopped_cluster_error(self):
    """Without skip_offline, a node refusing the config is a forcible error."""
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            dict(label="node-1"),
            dict(label="node-2", response_code=400, output="Failed"),
        ],
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.error(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_not_stopped_skip_offline(
        self, mock_is_systemctl):
    # node-1 reports corosync as running, so pushing the new corosync.conf
    # must fail even with skip_offline_nodes=True -- the reports below show
    # skip_offline does not downgrade the COROSYNC_RUNNING_ON_NODE error.
    mock_is_systemctl.return_value = True
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config.http.add_communication(
        "status",
        [
            dict(
                label="node-1",
                # full cluster status JSON; "corosync":true is the key part
                output="""\
{"uptime":"0 days, 06:36:00","corosync":true,"pacemaker":true,"cman":false,\
"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":\
["node-1"],"corosync_offline":["node-2"],"pacemaker_online":["node-1"],\
"pacemaker_offline":["node-2"],"pacemaker_standby":[],"cluster_name":\
"cluster_name","resources":[],"groups":[],"constraints":{},"cluster_settings":\
{"have-watchdog":"false","dc-version":"1.1.16-11.el7-94ff4df",\
"cluster-infrastructure":"corosync","cluster-name":"cluster_name"},\
"node_id":"1","node_attr":{},"fence_levels":{},"need_ring1_address":false,\
"is_cman_with_udpu_transport":false,"acls":{"role":{},"group":{},"user":{},\
"target":{}},"username":"******"} """,
            ),
            dict(
                label="node-2",
                # full cluster status JSON; "corosync":false is the key part
                output="""\
{"uptime":"0 days, 06:35:58","corosync":false,"pacemaker":false,"cman":false,\
"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
"resources":[],"groups":[],"constraints":{},"cluster_settings":\
{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
false,"acls":{},"username":"******"} """,
            ),
        ],
        action="remote/status",
        response_code=200,
    ))
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(
            self.corosync_conf_facade, skip_offline_nodes=True),
        [])
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.error(
            report_codes.COROSYNC_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-2",
        )
    ])
def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
    # If we don't know if corosync is running, skip_offline matters.
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            {"label": "node-1", "output": "{"},  # not valid json
            {
                "label": "node-2",
                "response_code": 401,
                "output": """{"notauthorized":"true"}""",
            },
        ]
    )
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text,
        communication_list=[
            {"label": "node-1"},
            {
                "label": "node-2",
                "response_code": 401,
                "output": """{"notauthorized":"true"}""",
            },
        ],
    )
    self.config.runner.systemctl.is_active("corosync", is_active=False)
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/status",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-2",
            reason="HTTP error: 401",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_comunnication_failure(
        self, mock_is_systemctl):
    # node-1 answers the status check (corosync stopped); node-2 replies
    # 401 not-authorized. Without skip_offline_nodes this makes the push
    # fail with forcible (SKIP_OFFLINE_NODES) errors for node-2.
    mock_is_systemctl.return_value = True
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config.http.add_communication(
        "status",
        [
            dict(
                label="node-1",
                response_code=200,
                # full cluster status JSON; "corosync":false is the key part
                output="""\
{"uptime":"0 days, 00:11:52","corosync":false,"pacemaker":false,"cman":false,\
"corosync_enabled":false,"pacemaker_enabled":false,"pacemaker_remote":false,\
"pacemaker_remote_enabled":false,"pcsd_enabled":true,"corosync_online":[],\
"corosync_offline":["node-1","node-2"],"pacemaker_online":[],\
"pacemaker_offline":[],"pacemaker_standby":[],"cluster_name":"cluster_name",\
"resources":[],"groups":[],"constraints":{},"cluster_settings":\
{"error":"Unable to get configuration settings"},"node_id":"","node_attr":{},\
"fence_levels":{},"need_ring1_address":false,"is_cman_with_udpu_transport":\
false,"acls":{},"username":"******"} """,
            ),
            dict(label="node-2",
                 response_code=401,
                 output="""{"notauthorized":"true"}"""),
        ],
        action="remote/status",
    ))
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [])
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.info(
            report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
            node="node-1",
        ),
        fixture.error(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node="node-2",
        ),
    ])
def test_need_stopped_cluster_json_error(self):
    """Unparsable or schema-less status answers are forcible check errors."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            # not valid json
            {"label": "node-1", "output": "{"},
            # The expected key (/corosync) is missing, we don't
            # care about version 2 status key
            # (/services/corosync/running)
            {
                "label": "node-2",
                "output": '{"services":{"corosync":{"running":true}}}',
            },
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    check_errors = [
        fixture.error(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            force_code=report_codes.SKIP_OFFLINE_NODES,
            node=node,
        )
        for node in ["node-1", "node-2"]
    ]
    self.env_assistant.assert_reports(
        [fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED)]
        + check_errors
    )
def test_unknown_host_addr_not_specified(self):
    """With no known hosts and no address given, the node name is used."""
    meta_attributes = """ <meta_attributes id="virtual_machine_id-meta_attributes"> <nvpair id="virtual_machine_id-meta_attributes-remote-addr" name="remote-addr" value="node-name" /> <nvpair id="virtual_machine_id-meta_attributes-remote-node" name="remote-node" value="node-name" /> <nvpair id="virtual_machine_id-meta_attributes-remote-port" name="remote-port" value="99" /> </meta_attributes> """
    self.config.env.set_known_hosts_dests({})
    self.config.local.load_cib()
    self.config.local.push_cib(meta_attributes=meta_attributes)
    node_add_guest(self.env_assist.get_env(), options={"remote-port": "99"})
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.USING_KNOWN_HOST_ADDRESS_FOR_HOST,
                host_name=NODE_NAME,
                address=NODE_NAME,
            ),
        ]
        + fixture_reports_not_live_cib(NODE_NAME)
    )
def test_need_stopped_cluster_not_stopped(self):
    """Pushing fails when corosync is running on every node."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        communication_list=[
            {"label": node, "output": '{"corosync":true}'}
            for node in self.node_labels
        ]
    )
    env = self.env_assistant.get_env()
    self.env_assistant.assert_raise_library_error(
        lambda: env.push_corosync_conf(self.corosync_conf_facade),
        [],
    )
    running_errors = [
        fixture.error(report_codes.COROSYNC_RUNNING_ON_NODE, node=node)
        for node in ["node-1", "node-2"]
    ]
    self.env_assistant.assert_reports(
        [fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED)]
        + running_errors
    )
def test_success(self):
    """SBD is disabled on all nodes; a cluster restart warning is emitted."""
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    expected = [fixture.info(report_codes.SBD_DISABLING_STARTED)]
    expected += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in self.node_list
    ]
    expected.append(
        fixture.warn(report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)
    )
    self.env_assist.assert_reports(expected)
def test_qdevice_reload(self):
    """Config push with need_qdevice_reload stops and starts qdevice clients."""
    self.corosync_conf_facade.need_qdevice_reload = True
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels
    )
    self.config.runner.systemctl.is_active("corosync", is_active=False)
    self.config.http.corosync.qdevice_client_stop(
        node_labels=self.node_labels
    )
    self.config.http.corosync.qdevice_client_start(
        node_labels=self.node_labels
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    nodes = ["node-1", "node-2"]
    reports = [fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)]
    reports += [
        fixture.info(report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node=node)
        for node in nodes
    ]
    reports.append(fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED))
    reports += [
        fixture.info(
            report_codes.SERVICE_STOP_SUCCESS,
            node=node,
            service="corosync-qdevice",
            instance=None,
        )
        for node in nodes
    ]
    reports += [
        fixture.info(
            report_codes.SERVICE_START_SUCCESS,
            node=node,
            service="corosync-qdevice",
            instance=None,
        )
        for node in nodes
    ]
    self.env_assistant.assert_reports(reports)
def setUp(self):
    """Prepare env tools, booth naming, and the expected report list."""
    self.env_assist, self.config = get_env_tools(self)
    self.name = "booth"
    self.node_name = "node"
    self.config_data = "config"
    self.config_path = _get_booth_file_path("{}.conf".format(self.name))
    fetching_report = fixture.info(
        report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
        node=self.node_name,
        config=self.name,
    )
    accepted_report = fixture.info(
        report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
        node=None,
        name_list=[self.name],
    )
    self.report_list = [fetching_report, accepted_report]
    self.config.env.set_booth({"name": self.name})
def test_get_and_push_cib_version_upgrade_needed(self):
    """Requesting a newer CIB version triggers an upgrade and a reload."""
    self.config.runner.cib.load(name="load_cib_old")
    self.config.runner.cib.upgrade()
    self.config.runner.cib.load(filename="cib-empty-2.8.xml")
    env = self.env_assist.get_env()
    env.get_cib((2, 8, 0))
    self.env_assist.assert_reports([
        fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL),
    ])
def test_qdevice_reload_corosync_stopped(self):
    """Qdevice client start is skipped on nodes where corosync is stopped."""
    self.corosync_conf_facade.need_qdevice_reload = True
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels
    )
    self.config.runner.systemctl.is_active("corosync", is_active=False)
    self.config.http.corosync.qdevice_client_stop(
        node_labels=self.node_labels
    )
    self.config.http.corosync.qdevice_client_start(
        communication_list=[
            {"label": label, "output": "corosync is not running, skipping"}
            for label in self.node_labels
        ]
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    nodes = ["node-1", "node-2"]
    reports = [fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)]
    reports += [
        fixture.info(report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node=node)
        for node in nodes
    ]
    reports.append(fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED))
    reports += [
        fixture.info(
            report_codes.SERVICE_STOP_SUCCESS,
            node=node,
            service="corosync-qdevice",
            instance=None,
        )
        for node in nodes
    ]
    reports += [
        fixture.info(
            report_codes.SERVICE_START_SKIPPED,
            node=node,
            service="corosync-qdevice",
            instance=None,
            reason="corosync is not running",
        )
        for node in nodes
    ]
    self.env_assistant.assert_reports(reports)
def test_dont_need_stopped_cluster(self):
    """Config is distributed and corosync reloaded when it is running."""
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels
    )
    self.config.runner.systemctl.is_active("corosync")
    self.config.runner.corosync.reload()
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    accepted = [
        fixture.info(report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node=node)
        for node in ["node-1", "node-2"]
    ]
    self.env_assistant.assert_reports(
        [fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)]
        + accepted
        + [fixture.info(report_codes.COROSYNC_CONFIG_RELOADED)]
    )
def test_cib_upgrade(self):
    """An old-schema CIB is upgraded before the bundle command pushes it."""
    self.config.runner.cib.load(
        name="load_cib_old_version",
        filename=self.upgraded_cib_filename,
        before="runner.cib.load",
    )
    self.config.runner.cib.upgrade(before="runner.cib.load")
    self.config.env.push_cib(
        resources=self.fixture_resources_bundle_simple
    )
    self.run_bundle_cmd()
    self.env_assist.assert_reports([
        fixture.info(report_codes.CIB_UPGRADE_SUCCESSFUL),
    ])
def test_need_stopped_cluster(self):
    """Offline check passes, then the config is distributed to all nodes."""
    self.corosync_conf_facade.need_stopped_cluster = True
    self.config.http.corosync.check_corosync_offline(
        node_labels=self.node_labels
    )
    self.config.http.corosync.set_corosync_conf(
        self.corosync_conf_text, node_labels=self.node_labels
    )
    self.config.runner.systemctl.is_active("corosync", is_active=False)
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    nodes = ["node-1", "node-2"]
    reports = [fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED)]
    reports += [
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_ON_NODE, node=node)
        for node in nodes
    ]
    reports.append(
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)
    )
    reports += [
        fixture.info(report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE, node=node)
        for node in nodes
    ]
    self.env_assistant.assert_reports(reports)
def fixture_reports_not_live_cib(node_name):
    """Build the reports expected when working on a non-live CIB.

    node_name -- node for which distribution/commands are skipped
    """
    reports = [
        fixture.info(
            report_codes.COROSYNC_NODE_CONFLICT_CHECK_SKIPPED,
            reason_type="not_live_cib",
        )
    ]
    reports.append(
        fixture.info(
            report_codes.FILES_DISTRIBUTION_SKIPPED,
            reason_type="not_live_cib",
            file_list=["pacemaker authkey"],
            node_list=[node_name],
        )
    )
    reports.append(
        fixture.info(
            report_codes.SERVICE_COMMANDS_ON_NODES_SKIPPED,
            reason_type="not_live_cib",
            action_list=[
                "pacemaker_remote start",
                "pacemaker_remote enable",
            ],
            node_list=[node_name],
        )
    )
    return reports
def test_set_stonith_watchdog_timeout_fails_on_some_nodes(self):
    """Watchdog-timeout failures are warnings; disabling still proceeds."""
    err_msg = "Error"
    self.config.runner.corosync.version()
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [
                dict(
                    label="rh7-1",
                    was_connected=False,
                    errno=7,
                    error_msg=err_msg,
                )
            ],
            [dict(label="rh7-2", response_code=400, output="FAILED")],
            [dict(label="rh7-3")],
        ]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    expected = [
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="rh7-1",
            reason=err_msg,
            command="remote/set_stonith_watchdog_timeout_to_zero",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="rh7-2",
            reason="FAILED",
            command="remote/set_stonith_watchdog_timeout_to_zero",
        ),
        fixture.info(report_codes.SBD_DISABLING_STARTED),
    ]
    expected += [
        fixture.info(
            report_codes.SERVICE_DISABLE_SUCCESS,
            service="sbd",
            node=node,
            instance=None,
        )
        for node in self.node_list
    ]
    expected.append(
        fixture.warn(report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES)
    )
    self.env_assist.assert_reports(expected)
def test_unknown_host_addr_not_specified(self):
    """With no known hosts and no address given, the node name is used."""
    self.config.env.set_known_hosts_dests({})
    self.config.runner.cib.load()
    self.config.runner.pcmk.load_agent(agent_name="ocf:pacemaker:remote")
    self.config.env.push_cib(
        resources=FIXTURE_RESOURCES_TEMPLATE.format(server=NODE_NAME)
    )
    node_add_remote(self.env_assist.get_env(), no_node_addr=True)
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.USING_KNOWN_HOST_ADDRESS_FOR_HOST,
                host_name=NODE_NAME,
                address=NODE_NAME,
            ),
        ]
        + fixture_reports_not_live_cib(NODE_NAME)
    )