def test_corosync_conf_not_set_need_offline_success(
    self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
    mock_check_offline, mock_qdevice_reload
):
    corosync_data = open(rc("corosync.conf")).read()
    new_corosync_data = corosync_data.replace("version: 2", "version: 3")
    mock_get_corosync.return_value = corosync_data
    mock_is_running.return_value = False
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_corosync_conf_live)
    self.assertEqual(corosync_data, env.get_corosync_conf_data())
    self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
    self.assertEqual(2, mock_get_corosync.call_count)
    conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
    conf_facade._need_stopped_cluster = True
    env.push_corosync_conf(conf_facade)
    mock_check_offline.assert_called_once_with(
        "mock node communicator", self.mock_reporter, "mock node list", False
    )
    mock_distribute.assert_called_once_with(
        "mock node communicator", self.mock_reporter, "mock node list",
        new_corosync_data, False
    )
    mock_reload.assert_not_called()
    mock_qdevice_reload.assert_not_called()
def test_corosync_conf_not_set(
    self, mock_get_corosync, mock_distribute, mock_reload
):
    corosync_data = open(rc("corosync.conf")).read()
    new_corosync_data = corosync_data.replace("version: 2", "version: 3")
    mock_get_corosync.return_value = corosync_data
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_corosync_conf_live)
    self.assertEqual(corosync_data, env.get_corosync_conf_data())
    self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
    self.assertEqual(2, mock_get_corosync.call_count)
    env.push_corosync_conf(
        CorosyncConfigFacade.from_string(new_corosync_data)
    )
    mock_distribute.assert_called_once_with(
        "mock node communicator", self.mock_reporter, "mock node list",
        new_corosync_data, False
    )
    mock_reload.assert_called_once_with("mock cmd runner")
def test_get_cluster_conf_live(self, mock_get_local_cluster_conf):
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cluster_conf_data=None
    )
    mock_get_local_cluster_conf.return_value = "cluster.conf data"
    self.assertEqual("cluster.conf data", env.get_cluster_conf_data())
    mock_get_local_cluster_conf.assert_called_once_with()
def test_success_create(self):
    resource_xml = '<primitive id="resourceA" class="service" type="exim"/>'
    cib = (
        self.create_cib()
            .append_to_first_tag_name('resources', resource_xml)
    )
    env = Env(self.mock_logger, self.mock_reporter, cib_data=str(cib))
    ticket_command.create(env, "ticketA", "resourceA", {
        "loss-policy": "fence",
        "rsc-role": "master"
    })
    assert_xml_equal(
        env.get_cib_xml(),
        str(cib.append_to_first_tag_name(
            'constraints',
            """
            <rsc_ticket
                id="ticket-ticketA-resourceA-Master"
                rsc="resourceA"
                rsc-role="Master"
                ticket="ticketA"
                loss-policy="fence"
            />
            """
        ))
    )
def test_node_communicator_all_options(self, mock_comm):
    expected_comm = mock.MagicMock()
    mock_comm.return_value = expected_comm
    user = "******"
    groups = ["some", "group"]
    tokens = {"node": "token"}
    timeout = 10
    env = LibraryEnvironment(
        self.mock_logger,
        self.mock_reporter,
        user_login=user,
        user_groups=groups,
        auth_tokens_getter=lambda: tokens,
        request_timeout=timeout
    )
    comm = env.node_communicator()
    self.assertEqual(expected_comm, comm)
    mock_comm.assert_called_once_with(
        self.mock_logger, self.mock_reporter, tokens, user, groups, timeout
    )
def test_get_cluster_conf(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    facade_obj = env.get_cluster_conf()
    self.assertTrue(isinstance(facade_obj, ClusterConfFacade))
    assert_xml_equal(
        '<cluster/>',
        etree.tostring(facade_obj._config).decode()
    )
def test_push_cib_not_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner", '<cib/>', False
    )
    self.assertEqual([], env.report_processor.report_item_list)
def test_get_cib_no_version_live(
    self, mock_get_cib_xml, mock_ensure_cib_version
):
    mock_get_cib_xml.return_value = '<cib/>'
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
    self.assertEqual(1, mock_get_cib_xml.call_count)
    self.assertEqual(0, mock_ensure_cib_version.call_count)
    self.assertFalse(env.cib_upgraded)
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner", '<cib/>'
    )
    self.assertFalse(env.cib_upgraded)
def test_cmd_runner_no_options(self, mock_runner):
    expected_runner = mock.MagicMock()
    mock_runner.return_value = expected_runner
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    runner = env.cmd_runner()
    self.assertEqual(expected_runner, runner)
    mock_runner.assert_called_once_with(
        self.mock_logger, self.mock_reporter, {}
    )
def test_get_cib_upgrade_live(
    self, mock_get_cib_xml, mock_ensure_cib_version
):
    mock_get_cib_xml.return_value = '<cib/>'
    mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    assert_xml_equal(
        '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
    )
    self.assertEqual(1, mock_get_cib_xml.call_count)
    self.assertEqual(1, mock_ensure_cib_version.call_count)
    self.assertTrue(env.cib_upgraded)
def test_cib_not_set(self, mock_get_cib, mock_push_cib):
    cib_data = "test cib data"
    new_cib_data = "new test cib data"
    mock_get_cib.return_value = cib_data
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_cib_live)
    self.assertEqual(cib_data, env._get_cib_xml())
    self.assertEqual(1, mock_get_cib.call_count)
    env._push_cib_xml(new_cib_data)
    self.assertEqual(1, mock_push_cib.call_count)
def test_node_communicator_no_options(self, mock_comm):
    expected_comm = mock.MagicMock()
    mock_comm.return_value = expected_comm
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    comm = env.node_communicator()
    self.assertEqual(expected_comm, comm)
    mock_comm.assert_called_once_with(
        self.mock_logger, self.mock_reporter, {}, None, []
    )
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with(
        "mock cmd runner", '<cib/>', True
    )
    assert_report_item_list_equal(
        env.report_processor.report_item_list,
        [(
            severity.INFO,
            report_codes.CIB_UPGRADE_SUCCESSFUL,
            {}
        )]
    )
def setUp(self):
    self.mock_log = mock.MagicMock(spec_set=logging.Logger)
    self.mock_run = mock.MagicMock(spec_set=CommandRunner)
    self.mock_rep = MockLibraryReportProcessor()
    self.mock_env = LibraryEnvironment(
        self.mock_log, self.mock_rep, cib_data="<cib/>"
    )
def test_cmd_runner_all_options(self, mock_runner):
    expected_runner = mock.MagicMock()
    mock_runner.return_value = expected_runner
    user = "******"
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, user_login=user
    )
    runner = env.cmd_runner()
    self.assertEqual(expected_runner, runner)
    mock_runner.assert_called_once_with(
        self.mock_logger, self.mock_reporter, {"CIB_user": user}
    )
class PushCib(TestCase):
    def setUp(self):
        self.env = LibraryEnvironment(
            mock.MagicMock(logging.Logger),
            MockLibraryReportProcessor()
        )

    def test_run_only_push_when_without_wait(self, wait_for_idle, push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"))
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_not_called()

    def test_run_wait_when_wait_specified(self, wait_for_idle, push_cib_xml):
        self.env.push_cib(etree.fromstring("<cib/>"), 10)
        push_cib_xml.assert_called_once_with("<cib/>")
        wait_for_idle.assert_called_once_with(self.env.cmd_runner(), 10)
def test_cib_set(self, mock_get_cib, mock_push_cib):
    cib_data = "test cib data"
    new_cib_data = "new test cib data"
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cib_data=cib_data
    )
    self.assertFalse(env.is_cib_live)
    self.assertEqual(cib_data, env._get_cib_xml())
    self.assertEqual(0, mock_get_cib.call_count)
    env._push_cib_xml(new_cib_data)
    self.assertEqual(0, mock_push_cib.call_count)
    self.assertEqual(new_cib_data, env._get_cib_xml())
    self.assertEqual(0, mock_get_cib.call_count)
def test_get_cib_upgrade_live(
    self, mock_get_cib_xml, mock_ensure_cib_version
):
    mock_get_cib_xml.return_value = '<cib/>'
    mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    assert_xml_equal(
        '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
    )
    self.assertEqual(1, mock_get_cib_xml.call_count)
    self.assertEqual(1, mock_ensure_cib_version.call_count)
    assert_report_item_list_equal(
        env.report_processor.report_item_list,
        [(
            severity.INFO,
            report_codes.CIB_UPGRADE_SUCCESSFUL,
            {}
        )]
    )
    self.assertTrue(env.cib_upgraded)
def test_corosync_conf_set(
    self, mock_get_corosync, mock_distribute, mock_reload,
    mock_check_offline, mock_qdevice_reload
):
    corosync_data = "totem {\n version: 2\n}\n"
    new_corosync_data = "totem {\n version: 3\n}\n"
    env = LibraryEnvironment(
        self.mock_logger,
        self.mock_reporter,
        corosync_conf_data=corosync_data
    )
    self.assertFalse(env.is_corosync_conf_live)
    self.assertEqual(corosync_data, env.get_corosync_conf_data())
    self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
    self.assertEqual(0, mock_get_corosync.call_count)
    env.push_corosync_conf(
        CorosyncConfigFacade.from_string(new_corosync_data)
    )
    self.assertEqual(0, mock_distribute.call_count)
    self.assertEqual(new_corosync_data, env.get_corosync_conf_data())
    self.assertEqual(0, mock_get_corosync.call_count)
    mock_check_offline.assert_not_called()
    mock_reload.assert_not_called()
    mock_qdevice_reload.assert_not_called()
def test_dump_cib_file(self, mock_tmpfile, mock_runner):
    expected_runner = mock.MagicMock()
    mock_runner.return_value = expected_runner
    mock_instance = mock.MagicMock()
    mock_instance.name = rc("file.tmp")
    mock_tmpfile.return_value = mock_instance
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cib_data="<cib />"
    )
    runner = env.cmd_runner()
    self.assertEqual(expected_runner, runner)
    mock_runner.assert_called_once_with(
        self.mock_logger,
        self.mock_reporter,
        {
            "LC_ALL": "C",
            "CIB_file": rc("file.tmp"),
        }
    )
    mock_tmpfile.assert_called_once_with("<cib />")
def test_corosync_conf_not_set_need_offline_fail(
    self, mock_get_corosync, mock_distribute, mock_reload,
    mock_check_offline, mock_qdevice_reload
):
    corosync_data = open(rc("corosync.conf")).read()
    new_corosync_data = corosync_data.replace("version: 2", "version: 3")
    mock_get_corosync.return_value = corosync_data

    def raiser(dummy_communicator, dummy_reporter, dummy_nodes, dummy_force):
        raise LibraryError(
            reports.corosync_not_running_check_node_error("test node")
        )

    mock_check_offline.side_effect = raiser
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_corosync_conf_live)
    self.assertEqual(corosync_data, env.get_corosync_conf_data())
    self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
    self.assertEqual(2, mock_get_corosync.call_count)
    conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
    conf_facade._need_stopped_cluster = True
    assert_raise_library_error(
        lambda: env.push_corosync_conf(conf_facade),
        (
            severity.ERROR,
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            {"node": "test node"}
        )
    )
    mock_check_offline.assert_called_once_with(
        "mock node communicator", self.mock_reporter, "mock node list", False
    )
    mock_distribute.assert_not_called()
    mock_reload.assert_not_called()
    mock_qdevice_reload.assert_not_called()
class RemoveRecipientTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="value"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_recipient_not_found(self):
        assert_raise_library_error(
            lambda: cmd_alert.remove_recipient(self.mock_env, "recipient"),
            (Severities.ERROR, report_codes.ID_NOT_FOUND, {"id": "recipient"})
        )

    def test_success(self):
        cmd_alert.remove_recipient(self.mock_env, "alert-recipient")
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient-1" value="value"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
def setUp(self):
    self.mock_log = mock.MagicMock(spec_set=logging.Logger)
    self.mock_run = mock.MagicMock(spec_set=CommandRunner)
    self.mock_rep = MockLibraryReportProcessor()
    cib = """
        <cib validate-with="pacemaker-2.5">
            <configuration>
                <alerts>
                    <alert id="alert" path="path"/>
                    <alert id="alert-1" path="/path"/>
                </alerts>
            </configuration>
        </cib>
    """
    self.mock_env = LibraryEnvironment(
        self.mock_log, self.mock_rep, cib_data=cib
    )
def setUp(self):
    self.mock_log = mock.MagicMock(spec_set=logging.Logger)
    self.mock_run = mock.MagicMock(spec_set=CommandRunner)
    self.mock_rep = MockLibraryReportProcessor()
    cib = """
        <cib validate-with="pacemaker-2.5">
            <configuration>
                <alerts>
                    <alert id="alert" path="path">
                        <recipient id="alert-recipient" value="value1"/>
                        <recipient id="alert-recipient-1" value="value" description="d">
                            <meta_attributes id="alert-recipient-1-meta_attributes">
                                <nvpair id="alert-recipient-1-meta_attributes-attr1" name="attr1" value="val1"/>
                                <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                            </meta_attributes>
                            <instance_attributes id="alert-recipient-1-instance_attributes">
                                <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="val1"/>
                            </instance_attributes>
                        </recipient>
                    </alert>
                </alerts>
            </configuration>
        </cib>
    """
    self.mock_env = LibraryEnvironment(
        self.mock_log, self.mock_rep, cib_data=cib
    )
class RemoveAlertTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path"/>
                        <alert id="alert-1" path="/path"/>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_success(self):
        cmd_alert.remove_alert(self.mock_env, "alert")
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert-1" path="/path"/>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )

    def test_not_existing_alert(self):
        assert_raise_library_error(
            lambda: cmd_alert.remove_alert(self.mock_env, "unknown"),
            (
                Severities.ERROR,
                report_codes.CIB_ALERT_NOT_FOUND,
                {"alert": "unknown"}
            )
        )
def setUp(self):
    self.mock_logger = mock.MagicMock(logging.Logger)
    self.mock_reporter = MockLibraryReportProcessor()
    self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.metadata = """
        <resource-agent>
            <shortdesc>short desc</shortdesc>
            <longdesc>long desc</longdesc>
            <parameters>
            </parameters>
            <actions>
            </actions>
        </resource-agent>
    """
    self.description = {
        "name": "ocf:test:Dummy",
        "shortdesc": "short desc",
        "longdesc": "long desc",
        "parameters": [],
        "actions": [],
    }
class UpdateRecipientTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="value" description="d">
                                <meta_attributes id="alert-recipient-1-meta_attributes">
                                    <nvpair id="alert-recipient-1-meta_attributes-attr1" name="attr1" value="val1"/>
                                    <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                                </meta_attributes>
                                <instance_attributes id="alert-recipient-1-instance_attributes">
                                    <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="val1"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_empty_value(self):
        assert_raise_library_error(
            lambda: cmd_alert.update_recipient(
                self.mock_env, "alert-recipient-1", {}, {}, recipient_value=""
            ),
            (
                Severities.ERROR,
                report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
                {"recipient": ""}
            )
        )

    def test_recipient_not_found(self):
        assert_raise_library_error(
            lambda: cmd_alert.update_recipient(
                self.mock_env, "recipient", {}, {}
            ),
            (
                Severities.ERROR,
                report_codes.ID_NOT_FOUND,
                {"id": "recipient", "id_description": "Recipient"}
            )
        )

    def test_update_all(self):
        cmd_alert.update_recipient(
            self.mock_env,
            "alert-recipient-1",
            {"attr1": "value"},
            {"attr1": "", "attr3": "new_val"},
            recipient_value="new_val",
            description="desc"
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="new_val" description="desc">
                                <meta_attributes id="alert-recipient-1-meta_attributes">
                                    <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                                    <nvpair id="alert-recipient-1-meta_attributes-attr3" name="attr3" value="new_val"/>
                                </meta_attributes>
                                <instance_attributes id="alert-recipient-1-instance_attributes">
                                    <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="value"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
def setUp(self):
    self.env = LibraryEnvironment(
        mock.MagicMock(logging.Logger),
        MockLibraryReportProcessor()
    )
def test_push_cib_not_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with("mock cmd runner", '<cib/>')
    self.assertEqual([], env.report_processor.report_item_list)
class RemoveRecipientTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient1" value="value1"/>
                            <recipient id="alert-recipient2" value="value2"/>
                        </alert>
                        <alert id="alert2" path="path">
                            <recipient id="alert2-recipient3" value="value3"/>
                            <recipient id="alert2-recipient4" value="value4"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_recipient_not_found(self):
        report_list = [
            (Severities.ERROR, report_codes.ID_NOT_FOUND, {"id": "recipient"}),
            (
                Severities.ERROR,
                report_codes.ID_NOT_FOUND,
                {"id": "alert2-recipient1"}
            )
        ]
        assert_raise_library_error(
            lambda: cmd_alert.remove_recipient(
                self.mock_env,
                ["recipient", "alert-recipient1", "alert2-recipient1"]
            ),
            *report_list
        )
        assert_report_item_list_equal(
            self.mock_rep.report_item_list, report_list
        )

    def test_one_recipient(self):
        cmd_alert.remove_recipient(self.mock_env, ["alert-recipient1"])
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient2" value="value2"/>
                        </alert>
                        <alert id="alert2" path="path">
                            <recipient id="alert2-recipient3" value="value3"/>
                            <recipient id="alert2-recipient4" value="value4"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
        self.assertEqual([], self.mock_rep.report_item_list)

    def test_multiple_recipients(self):
        cmd_alert.remove_recipient(
            self.mock_env,
            ["alert-recipient1", "alert-recipient2", "alert2-recipient4"]
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path"/>
                        <alert id="alert2" path="path">
                            <recipient id="alert2-recipient3" value="value3"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
        self.assertEqual([], self.mock_rep.report_item_list)

    def test_no_recipient(self):
        cmd_alert.remove_recipient(self.mock_env, [])
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient1" value="value1"/>
                            <recipient id="alert-recipient2" value="value2"/>
                        </alert>
                        <alert id="alert2" path="path">
                            <recipient id="alert2-recipient3" value="value3"/>
                            <recipient id="alert2-recipient4" value="value4"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
        self.assertEqual([], self.mock_rep.report_item_list)
def update_scsi_devices(
    env: LibraryEnvironment,
    stonith_id: str,
    set_device_list: Iterable[str],
    force_flags: Container[reports.types.ForceCode] = (),
) -> None:
    """
    Update scsi fencing devices without a restart and without affecting
    other resources.

    env -- provides all for communication with externals
    stonith_id -- id of the stonith resource
    set_device_list -- paths to the scsi devices that would be set for the
        stonith resource
    force_flags -- list of flags codes
    """
    if not is_getting_resource_digest_supported(env.cmd_runner()):
        raise LibraryError(
            ReportItem.error(
                reports.messages.StonithRestartlessUpdateOfScsiDevicesNotSupported()
            )
        )
    cib = env.get_cib()
    if not set_device_list:
        env.report_processor.report(
            ReportItem.error(
                reports.messages.InvalidOptionValue(
                    "devices", "", None, cannot_be_empty=True
                )
            )
        )
    (
        stonith_el,
        report_list,
    ) = stonith.validate_stonith_restartless_update(cib, stonith_id)
    if env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()
    # for mypy; this should not happen because an exception would have been
    # raised above
    if stonith_el is None:
        raise AssertionError("stonith element is None")
    stonith.update_scsi_devices_without_restart(
        env.cmd_runner(),
        env.get_cluster_state(),
        stonith_el,
        IdProvider(cib),
        set_device_list,
    )
    # Unfencing
    cluster_nodes_names, nodes_report_list = get_existing_nodes_names(
        env.get_corosync_conf(),
        error_on_missing_name=True,
    )
    env.report_processor.report_list(nodes_report_list)
    (
        target_report_list,
        cluster_nodes_target_list,
    ) = env.get_node_target_factory().get_target_list_with_reports(
        cluster_nodes_names,
        allow_skip=False,
    )
    env.report_processor.report_list(target_report_list)
    if env.report_processor.has_errors:
        raise LibraryError()
    com_cmd: AllSameDataMixin = GetCorosyncOnlineTargets(
        env.report_processor,
        skip_offline_targets=reports.codes.SKIP_OFFLINE_NODES in force_flags,
    )
    com_cmd.set_targets(cluster_nodes_target_list)
    online_corosync_target_list = run_and_raise(
        env.get_node_communicator(), com_cmd
    )
    com_cmd = Unfence(env.report_processor, sorted(set_device_list))
    com_cmd.set_targets(online_corosync_target_list)
    run_and_raise(env.get_node_communicator(), com_cmd)
    env.push_cib()
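# A minimal usage sketch, not part of the original module: it shows how a
# caller might drive update_scsi_devices against a live cluster. The stonith
# id and device paths are made up for illustration; only names already used
# above (LibraryEnvironment, reports.codes.SKIP_OFFLINE_NODES) are assumed to
# be in scope.
def _example_update_scsi_devices(env: LibraryEnvironment) -> None:
    # "fence-scsi" stands in for an existing fence_scsi stonith resource id
    update_scsi_devices(
        env,
        "fence-scsi",
        ["/dev/disk/by-id/scsi-1", "/dev/disk/by-id/scsi-2"],
        force_flags=[reports.codes.SKIP_OFFLINE_NODES],
    )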
def status_all_sites_plaintext(
    env: LibraryEnvironment,
    hide_inactive_resources: bool = False,
    verbose: bool = False,
) -> List[Mapping[str, Any]]:
    """
    Return the local site's and all remote sites' status as plaintext

    env -- LibraryEnvironment
    hide_inactive_resources -- if True, do not display non-running resources
    verbose -- if True, display more info
    """
    # The command does not provide an option to skip offline / unreachable /
    # misbehaving nodes.
    # The point of such skipping is to stop a command if it is unable to make
    # changes on all nodes. The user can then decide to proceed anyway and
    # make changes on the skipped nodes later manually.
    # This command only reads from nodes, so it automatically asks other nodes
    # if one is offline / misbehaving.
    class SiteData:
        def __init__(
            self,
            local: bool,
            role: DrRole,
            target_list: Iterable[RequestTarget],
        ) -> None:
            self.local = local
            self.role = role
            self.target_list = target_list
            self.status_loaded = False
            self.status_plaintext = ""

    if env.ghost_file_codes:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentRequired(env.ghost_file_codes)
            )
        )

    report_processor = env.report_processor
    report_list, dr_config = _load_dr_config(env.get_dr_env().config)
    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    site_data_list = []
    target_factory = env.get_node_target_factory()

    # get local nodes
    local_nodes, report_list = get_existing_nodes_names(env.get_corosync_conf())
    report_processor.report_list(report_list)
    report_list, local_targets = target_factory.get_target_list_with_reports(
        local_nodes,
        skip_non_existing=True,
    )
    report_processor.report_list(report_list)
    site_data_list.append(SiteData(True, dr_config.local_role, local_targets))

    # get remote sites' nodes
    for conf_remote_site in dr_config.get_remote_site_list():
        (
            report_list,
            remote_targets,
        ) = target_factory.get_target_list_with_reports(
            conf_remote_site.node_name_list,
            skip_non_existing=True,
        )
        report_processor.report_list(report_list)
        site_data_list.append(
            SiteData(False, conf_remote_site.role, remote_targets)
        )
    if report_processor.has_errors:
        raise LibraryError()

    # get all statuses
    for site_data in site_data_list:
        com_cmd = GetFullClusterStatusPlaintext(
            report_processor,
            hide_inactive_resources=hide_inactive_resources,
            verbose=verbose,
        )
        com_cmd.set_targets(site_data.target_list)
        site_data.status_loaded, site_data.status_plaintext = run_com_cmd(
            env.get_node_communicator(), com_cmd
        )

    return [
        dto.to_dict(
            DrSiteStatusDto(
                local_site=site_data.local,
                site_role=site_data.role,
                status_plaintext=site_data.status_plaintext,
                status_successfully_obtained=site_data.status_loaded,
            )
        )
        for site_data in site_data_list
    ]
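# A hedged usage sketch, not from the source: print the plaintext status of
# every disaster recovery site returned by status_all_sites_plaintext. The
# dict keys mirror the DrSiteStatusDto fields used above and are assumed to
# survive dto.to_dict() unchanged.
def _example_print_dr_status(env: LibraryEnvironment) -> None:
    for site in status_all_sites_plaintext(env, verbose=True):
        label = "local" if site["local_site"] else "remote"
        if site["status_successfully_obtained"]:
            print(f"--- {label} site, role {site['site_role']} ---")
            print(site["status_plaintext"])
        else:
            print(f"--- {label} site: status could not be obtained ---")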
def cib_tags_section(env: LibraryEnvironment) -> Iterator[Element]:
    yield get_tags(env.get_cib(REQUIRED_CIB_VERSION))
    env.push_cib()
def test_is_cluster_conf_live_not_live(self):
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cluster_conf_data="data"
    )
    self.assertFalse(env.is_cluster_conf_live)
def node_add_remote(
    env: LibraryEnvironment,
    node_name: str,
    node_addr: Optional[str],
    operations: Iterable[Mapping[str, str]],
    meta_attributes: Mapping[str, str],
    instance_attributes: Mapping[str, str],
    skip_offline_nodes: bool = False,
    allow_incomplete_distribution: bool = False,
    allow_pacemaker_remote_service_fail: bool = False,
    allow_invalid_operation: bool = False,
    allow_invalid_instance_attributes: bool = False,
    use_default_operations: bool = True,
    wait: WaitType = False,
):
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    create an ocf:pacemaker:remote resource and use it as a remote node

    env -- provides all for communication with externals
    node_name -- the name of the new node
    node_addr -- the address of the new node or None for default
    operations -- attributes for each entered operation
    meta_attributes -- attributes for primitive/meta_attributes
    instance_attributes -- attributes for primitive/instance_attributes
    skip_offline_nodes -- if True, ignore when some nodes are offline
    allow_incomplete_distribution -- if True, allow this command to finish
        successfully even if file distribution did not succeed
    allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    allow_invalid_operation -- if True, allow to use operations that are not
        listed in a resource agent metadata
    allow_invalid_instance_attributes -- if True, allow to use instance
        attributes that are not listed in a resource agent metadata and allow
        to omit required instance_attributes
    use_default_operations -- if True, add operations specified in a resource
        agent metadata to the resource
    wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)
    report_processor = env.report_processor

    cib = env.get_cib(
        minimal_version=get_required_cib_version_for_primitive(operations)
    )
    id_provider = IdProvider(cib)
    if env.is_cib_live:
        corosync_conf: Optional[CorosyncConfigFacade] = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB,
                )
            )
        )
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    try:
        resource_agent_facade = ResourceAgentFacadeFactory(
            env.cmd_runner(), report_processor
        ).facade_from_parsed_name(remote_node.AGENT_NAME)
    except ResourceAgentError as e:
        report_processor.report(resource_agent_error_to_report_item(e))
        raise LibraryError() from e

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default node_addr to an address from known-hosts
        if node_addr is None:
            if new_target:
                node_addr = new_target.first_addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
                )
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source
                    )
                )
            )
    else:
        # default node_addr to an address from known-hosts
        if node_addr is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                node_addr = known_hosts[0].dest.addr
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                node_addr = node_name
                node_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
                )
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, node_addr, node_addr_source
                    )
                )
            )

    # validate inputs
    report_list = remote_node.validate_create(
        existing_nodes_names,
        existing_nodes_addrs,
        resource_agent_facade.metadata,
        node_name,
        node_addr,
        instance_attributes,
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    # validation + cib setup
    # TODO extract the validation to a separate function
    try:
        remote_resource_element = remote_node.create(
            env.report_processor,
            resource_agent_facade,
            get_resources(cib),
            id_provider,
            node_addr,
            node_name,
            operations,
            meta_attributes,
            instance_attributes,
            allow_invalid_operation,
            allow_invalid_instance_attributes,
            use_default_operations,
        )
    except LibraryError as e:
        # Check unique id conflict with check against nodes. Until validation
        # resource create is not separated, we need to make unique post
        # validation.
        already_exists = []
        unified_report_list = []
        for report_item in report_list + list(e.args):
            # pylint: disable=no-member
            dto_obj = report_item.message.to_dto()
            if dto_obj.code not in (
                reports.codes.ID_ALREADY_EXISTS,
                reports.codes.RESOURCE_INSTANCE_ATTR_VALUE_NOT_UNIQUE,
            ):
                unified_report_list.append(report_item)
            elif (
                "id" in dto_obj.payload
                and dto_obj.payload["id"] not in already_exists
            ):
                unified_report_list.append(report_item)
                already_exists.append(dto_obj.payload["id"])
        report_list = unified_report_list

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, remote_resource_element.attrib["id"])
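# A hedged usage sketch, not part of the source: adding a remote node with
# mostly default behaviour. The node name, address and other values are
# illustrative; empty operations/meta/instance attributes let the resource
# agent defaults apply.
def _example_node_add_remote(env: LibraryEnvironment) -> None:
    node_add_remote(
        env,
        "remote-node-1",
        "192.0.2.10",  # or None to fall back to known-hosts / the node name
        operations=[],
        meta_attributes={},
        instance_attributes={},
        wait=False,
    )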
def node_add_guest(
    env: LibraryEnvironment,
    node_name,
    resource_id,
    options,
    skip_offline_nodes=False,
    allow_incomplete_distribution=False,
    allow_pacemaker_remote_service_fail=False,
    wait: WaitType = False,
):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements
    """
    Make a guest node from the specified resource

    LibraryEnvironment env -- provides all for communication with externals
    string node_name -- name of the guest node
    string resource_id -- specifies resource that should become a guest node
    dict options -- guest node options (remote-port, remote-addr,
        remote-connect-timeout)
    bool skip_offline_nodes -- if True, ignore when some nodes are offline
    bool allow_incomplete_distribution -- if True, allow this command to
        finish successfully even if file distribution did not succeed
    bool allow_pacemaker_remote_service_fail -- if True, allow this command to
        finish successfully even if starting/enabling pacemaker_remote did not
        succeed
    mixed wait -- a flag for controlling waiting for pacemaker idle mechanism
    """
    wait_timeout = env.ensure_wait_satisfiable(wait)
    report_processor = env.report_processor

    cib = env.get_cib()
    id_provider = IdProvider(cib)
    corosync_conf: Optional[CorosyncConfigFacade]
    if env.is_cib_live:
        corosync_conf = env.get_corosync_conf()
    else:
        corosync_conf = None
        report_processor.report(
            ReportItem.info(
                reports.messages.CorosyncNodeConflictCheckSkipped(
                    reports.const.REASON_NOT_LIVE_CIB,
                )
            )
        )
    (
        existing_nodes_names,
        existing_nodes_addrs,
        report_list,
    ) = get_existing_nodes_names_addrs(corosync_conf, cib)
    if env.is_cib_live:
        # We just reported corosync checks are going to be skipped so we
        # shouldn't complain about errors related to corosync nodes
        report_processor.report_list(report_list)

    existing_target_list = []
    if env.is_cib_live:
        target_factory = env.get_node_target_factory()
        existing_target_list, new_target_list = _get_targets_for_add(
            target_factory,
            report_processor,
            existing_nodes_names,
            [node_name],
            skip_offline_nodes,
        )
        new_target = new_target_list[0] if new_target_list else None
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            if new_target:
                new_addr = new_target.first_addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source
                    )
                )
            )
    else:
        # default remote-addr to an address from known-hosts
        if "remote-addr" not in options or options["remote-addr"] is None:
            known_hosts = env.get_known_hosts([node_name])
            if known_hosts:
                new_addr = known_hosts[0].dest.addr
                new_addr_source = (
                    reports.const.DEFAULT_ADDRESS_SOURCE_KNOWN_HOSTS
                )
            else:
                new_addr = node_name
                new_addr_source = reports.const.DEFAULT_ADDRESS_SOURCE_HOST_NAME
            options["remote-addr"] = new_addr
            report_processor.report(
                ReportItem.info(
                    reports.messages.UsingDefaultAddressForHost(
                        node_name, new_addr, new_addr_source
                    )
                )
            )

    # validate inputs
    report_list = guest_node.validate_set_as_guest(
        cib, existing_nodes_names, existing_nodes_addrs, node_name, options
    )
    searcher = ElementSearcher(primitive.TAG, resource_id, get_resources(cib))
    if searcher.element_found():
        resource_element = searcher.get_element()
        report_list.extend(guest_node.validate_is_not_guest(resource_element))
    else:
        report_list.extend(searcher.get_errors())

    report_processor.report_list(report_list)
    if report_processor.has_errors:
        raise LibraryError()

    # everything validated, let's set it up
    guest_node.set_as_guest(
        resource_element,
        id_provider,
        node_name,
        options.get("remote-addr", None),
        options.get("remote-port", None),
        options.get("remote-connect-timeout", None),
    )

    if env.is_cib_live:
        _prepare_pacemaker_remote_environment(
            env,
            report_processor,
            existing_target_list,
            new_target,
            node_name,
            skip_offline_nodes,
            allow_incomplete_distribution,
            allow_pacemaker_remote_service_fail,
        )
    else:
        report_processor.report_list(
            _reports_skip_new_node(node_name, "not_live_cib")
        )

    env.push_cib(wait_timeout=wait_timeout)
    if wait_timeout >= 0:
        _ensure_resource_running(env, resource_id)
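# A hedged usage sketch, not part of the source: turning an existing resource
# (for example a VirtualDomain primitive) into a guest node. The resource id
# and the option values are illustrative; the option keys follow the
# docstring above (remote-addr, remote-port, remote-connect-timeout).
def _example_node_add_guest(env: LibraryEnvironment) -> None:
    node_add_guest(
        env,
        "guest-node-1",
        "vm-resource",
        {"remote-addr": "192.0.2.20", "remote-port": "3121"},
    )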
def _ensure_resource_running(env: LibraryEnvironment, resource_id):
    if env.report_processor.report(
        state.ensure_resource_running(env.get_cluster_state(), resource_id)
    ).has_errors:
        raise LibraryError()
def test_usergroups_set(self):
    groups = ["some", "group"]
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, user_groups=groups
    )
    self.assertEqual(groups, env.user_groups)
def remove_levels_by_params(
    lib_env: LibraryEnvironment,
    level=None,
    target_type=None,
    target_value=None,
    devices=None,
    ignore_if_missing=False,
    target_may_be_a_device=False,
):
    """
    Remove specified fencing level(s).

    LibraryEnvironment lib_env -- environment
    int|string level -- level (index) of the fencing level to remove
    constant target_type -- the removed fencing level target value type
    mixed target_value -- the removed fencing level target value
    Iterable devices -- list of stonith devices of the removed fencing level
    bool ignore_if_missing -- when True, do not report if level not found
    bool target_may_be_a_device -- enables backward compatibility mode for the
        old CLI
    """
    topology_el = get_fencing_topology(lib_env.get_cib())
    report_list = cib_fencing_topology.remove_levels_by_params(
        topology_el,
        level,
        target_type,
        target_value,
        devices,
        ignore_if_missing,
    )

    if not target_may_be_a_device or target_type != TARGET_TYPE_NODE:
        if lib_env.report_processor.report_list(report_list).has_errors:
            raise LibraryError()
        lib_env.push_cib()
        return

    # backward compatibility mode
    # CLI command parameters are: level, node, stonith, stonith... Both the
    # node and the stonith list are optional. If the node is omitted and the
    # stonith list is present, there is no way to tell the two apart, since
    # the parameters are not labeled. Hence the pre-lib code tried both:
    # first it assumed the first parameter is a node, and if that fence level
    # did not exist, it assumed the first parameter is a device. Since it was
    # only possible to specify a node as a target back then, this mode is
    # enabled only in that case.
    # The CLI has no way to figure out what the first parameter is, therefore
    # the lib must try both cases if asked to do so.
    if not has_errors(report_list):
        lib_env.report_processor.report_list(report_list)
        lib_env.push_cib()
        return

    level_not_found = False
    for report_item in report_list:
        if report_item.code == report_codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST:
            level_not_found = True
            break
    if not level_not_found:
        lib_env.report_processor.report_list(report_list)
        raise LibraryError()

    target_and_devices = [target_value]
    if devices:
        target_and_devices.extend(devices)
    report_list_second = cib_fencing_topology.remove_levels_by_params(
        topology_el,
        level,
        None,
        None,
        target_and_devices,
        ignore_if_missing,
    )
    if not has_errors(report_list_second):
        lib_env.report_processor.report_list(report_list_second)
        lib_env.push_cib()
        return

    lib_env.report_processor.report_list(report_list)
    lib_env.report_processor.report_list(report_list_second)
    raise LibraryError()
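# A hedged usage sketch, not part of the source: removing a fencing level the
# way the old CLI did, where the first positional parameter may be either a
# node or a device. The level, node name and device id are illustrative.
def _example_remove_fencing_level(lib_env: LibraryEnvironment) -> None:
    remove_levels_by_params(
        lib_env,
        level="2",
        target_type=TARGET_TYPE_NODE,
        target_value="node1",
        devices=["fence-ipmi-node1"],
        # triggers the backward compatibility retry documented above
        target_may_be_a_device=True,
    )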
class UpdateRecipientTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="value" description="d">
                                <meta_attributes id="alert-recipient-1-meta_attributes">
                                    <nvpair id="alert-recipient-1-meta_attributes-attr1" name="attr1" value="val1"/>
                                    <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                                </meta_attributes>
                                <instance_attributes id="alert-recipient-1-instance_attributes">
                                    <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="val1"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_empty_value(self):
        assert_raise_library_error(
            lambda: cmd_alert.update_recipient(
                self.mock_env, "alert-recipient-1", {}, {}, recipient_value=""
            ),
            (
                Severities.ERROR,
                report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
                {"recipient": ""}
            )
        )

    def test_recipient_not_found(self):
        assert_raise_library_error(
            lambda: cmd_alert.update_recipient(
                self.mock_env, "recipient", {}, {}
            ),
            (
                Severities.ERROR,
                report_codes.ID_NOT_FOUND,
                {"id": "recipient", "id_description": "Recipient"}
            )
        )

    def test_update_all(self):
        cmd_alert.update_recipient(
            self.mock_env,
            "alert-recipient-1",
            {"attr1": "value"},
            {"attr1": "", "attr3": "new_val"},
            recipient_value="new_val",
            description="desc"
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="new_val" description="desc">
                                <meta_attributes id="alert-recipient-1-meta_attributes">
                                    <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                                    <nvpair id="alert-recipient-1-meta_attributes-attr3" name="attr3" value="new_val"/>
                                </meta_attributes>
                                <instance_attributes id="alert-recipient-1-instance_attributes">
                                    <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="value"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
def setUp(self):
    self.env = LibraryEnvironment(
        mock.MagicMock(logging.Logger), MockLibraryReportProcessor()
    )
def full_cluster_status_plaintext(
    env: LibraryEnvironment,
    hide_inactive_resources: bool = False,
    verbose: bool = False,
) -> str:
    """
    Return full cluster status as plaintext

    env -- LibraryEnvironment
    hide_inactive_resources -- if True, do not display non-running resources
    verbose -- if True, display more info
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals

    # validation
    if not env.is_cib_live and env.is_corosync_conf_live:
        raise LibraryError(
            reports.live_environment_not_consistent(
                [file_type_codes.CIB],
                [file_type_codes.COROSYNC_CONF],
            )
        )
    if env.is_cib_live and not env.is_corosync_conf_live:
        raise LibraryError(
            reports.live_environment_not_consistent(
                [file_type_codes.COROSYNC_CONF],
                [file_type_codes.CIB],
            )
        )

    # initialization
    runner = env.cmd_runner()
    report_processor = env.report_processor
    live = env.is_cib_live and env.is_corosync_conf_live
    is_sbd_running = False

    # load status, cib, corosync.conf
    status_text, warning_list = get_cluster_status_text(
        runner, hide_inactive_resources, verbose
    )
    corosync_conf = env.get_corosync_conf()
    cib = env.get_cib()
    if verbose:
        ticket_status_text, ticket_status_stderr, ticket_status_retval = (
            get_ticket_status_text(runner)
        )
    # get extra info if live
    if live:
        try:
            is_sbd_running = is_service_running(runner, get_sbd_service_name())
        except LibraryError:
            pass
        local_services_status = _get_local_services_status(runner)
        if verbose:
            node_name_list, node_names_report_list = get_existing_nodes_names(
                corosync_conf
            )
            report_processor.report_list(node_names_report_list)
            node_reachability = _get_node_reachability(
                env.get_node_target_factory(),
                env.get_node_communicator(),
                report_processor,
                node_name_list,
            )

    # check stonith configuration
    warning_list = list(warning_list)
    warning_list.extend(_stonith_warnings(cib, is_sbd_running))

    # put it all together
    if report_processor.has_errors:
        raise LibraryError()

    parts = []
    parts.append(f"Cluster name: {corosync_conf.get_cluster_name()}")
    if warning_list:
        parts.extend(["", "WARNINGS:"] + warning_list + [""])
    parts.append(status_text)
    if verbose:
        parts.extend(["", "Tickets:"])
        if ticket_status_retval != 0:
            ticket_warning_parts = [
                "WARNING: Unable to get information about tickets"
            ]
            if ticket_status_stderr:
                ticket_warning_parts.extend(
                    indent(ticket_status_stderr.splitlines())
                )
            parts.extend(indent(ticket_warning_parts))
        else:
            parts.extend(indent(ticket_status_text.splitlines()))
    if live:
        if verbose:
            parts.extend(["", "PCSD Status:"])
            parts.extend(indent(
                _format_node_reachability(node_name_list, node_reachability)
            ))
        parts.extend(["", "Daemon Status:"])
        parts.extend(indent(
            _format_local_services_status(local_services_status)
        ))
    return "\n".join(parts)
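# A minimal usage sketch, not from the source. Note that the validation at
# the top of the command requires the CIB and corosync.conf to be either both
# live or both provided as files; otherwise it raises LibraryError.
def _example_print_cluster_status(env: LibraryEnvironment) -> None:
    print(
        full_cluster_status_plaintext(
            env, hide_inactive_resources=False, verbose=True
        )
    )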
def _defaults_update(
    env: LibraryEnvironment,
    cib_section_name: str,
    nvset_id: Optional[str],
    nvpairs: Mapping[str, str],
    pcs_command: reports.types.PcsCommand,
) -> None:
    cib = env.get_cib()
    id_provider = IdProvider(cib)

    if nvset_id is None:
        # Backward compatibility code to support an old use case where no id
        # was requested and provided and the first meta_attributes nvset was
        # created / updated. However, we check that there is only one nvset
        # present in the CIB to prevent breaking the configuration with
        # multiple nvsets in place.
        # This is to be supported as it provides means of easily managing
        # defaults if only one set of defaults is needed.
        # TODO move this to a separate lib command.
        if not nvpairs:
            return

        # Do not create new defaults element if we are only removing values
        # from it.
        only_removing = True
        for value in nvpairs.values():
            if value != "":
                only_removing = False
                break
        if only_removing and not sections.exists(cib, cib_section_name):
            env.report_processor.report(
                ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
            )
            return

        nvset_elements = nvpair_multi.find_nvsets(
            sections.get(cib, cib_section_name), nvpair_multi.NVSET_META
        )
        if len(nvset_elements) > 1:
            env.report_processor.report(
                reports.item.ReportItem.error(
                    reports.messages.CibNvsetAmbiguousProvideNvsetId(
                        pcs_command
                    )
                )
            )
            raise LibraryError()
        env.report_processor.report(
            ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
        )
        if len(nvset_elements) == 1:
            nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
        elif only_removing:
            # do not create new nvset if there is none and we are only
            # removing nvpairs
            return
        else:
            nvpair_multi.nvset_append_new(
                sections.get(cib, cib_section_name),
                id_provider,
                get_pacemaker_version_by_which_cib_was_validated(cib),
                nvpair_multi.NVSET_META,
                nvpairs,
                {},
            )
        env.push_cib()
        return

    nvset_elements, report_list = nvpair_multi.find_nvsets_by_ids(
        sections.get(cib, cib_section_name), [nvset_id]
    )
    if env.report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    nvpair_multi.nvset_update(nvset_elements[0], id_provider, nvpairs)
    env.report_processor.report(
        ReportItem.warning(reports.messages.DefaultsCanBeOverriden())
    )
    env.push_cib()
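# A hedged usage sketch, not part of the source: update the single
# meta_attributes nvset of a defaults section without naming it, relying on
# the backward compatible nvset_id=None path above. The "rsc_defaults"
# section name is an illustrative assumption; the pcs_command value is left
# to the caller instead of guessing a constant.
def _example_defaults_update(
    env: LibraryEnvironment, pcs_command: reports.types.PcsCommand
) -> None:
    _defaults_update(
        env,
        "rsc_defaults",  # assumed CIB section name
        nvset_id=None,   # no id: only valid with at most one nvset present
        nvpairs={"resource-stickiness": "100", "migration-threshold": ""},
        pcs_command=pcs_command,
    )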
def test_is_cluster_conf_live_live(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_cluster_conf_live)
def test_user_not_set(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertEqual(None, env.user_login)
def set_recovery_site(env: LibraryEnvironment, node_name: str) -> None:
    """
    Set up disaster recovery with the local cluster being the primary site

    env
    node_name -- a known host from the recovery site
    """
    # pylint: disable=too-many-locals
    if env.ghost_file_codes:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentRequired(env.ghost_file_codes)
            )
        )
    report_processor = env.report_processor
    dr_env = env.get_dr_env()
    if dr_env.config.raw_file.exists():
        report_processor.report(
            ReportItem.error(reports.messages.DrConfigAlreadyExist())
        )
    target_factory = env.get_node_target_factory()

    local_nodes, report_list = get_existing_nodes_names(
        env.get_corosync_conf(), error_on_missing_name=True
    )
    report_processor.report_list(report_list)

    if node_name in local_nodes:
        report_processor.report(
            ReportItem.error(reports.messages.NodeInLocalCluster(node_name))
        )

    report_list, local_targets = target_factory.get_target_list_with_reports(
        local_nodes, allow_skip=False, report_none_host_found=False
    )
    report_processor.report_list(report_list)

    report_list, remote_targets = target_factory.get_target_list_with_reports(
        [node_name], allow_skip=False, report_none_host_found=False
    )
    report_processor.report_list(report_list)

    if report_processor.has_errors:
        raise LibraryError()

    # TODO The new file framework doesn't support network communication yet.
    com_cmd = GetCorosyncConf(env.report_processor)
    com_cmd.set_targets(remote_targets)
    corosync_conf_instance = FileInstance.for_corosync_conf()
    try:
        remote_cluster_nodes, report_list = get_existing_nodes_names(
            cast(
                CorosyncConfigFacade,
                corosync_conf_instance.raw_to_facade(
                    run_and_raise(
                        env.get_node_communicator(), com_cmd
                    ).encode("utf-8")
                ),
            ),
            error_on_missing_name=True,
        )
    except ParserErrorException as e:
        report_processor.report_list(
            corosync_conf_instance.toolbox.parser.exception_to_report_list(
                e,
                file_type_codes.COROSYNC_CONF,
                None,
                force_code=None,
                is_forced_or_warning=False,
            )
        )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    # ensure we have tokens for all nodes of the remote cluster
    report_list, remote_targets = target_factory.get_target_list_with_reports(
        remote_cluster_nodes, allow_skip=False, report_none_host_found=False
    )
    if report_processor.report_list(report_list).has_errors:
        raise LibraryError()

    dr_config_exporter = get_file_toolbox(
        file_type_codes.PCS_DR_CONFIG
    ).exporter
    # create dr config for the remote cluster
    remote_dr_cfg = dr_env.create_facade(DrRole.RECOVERY)
    remote_dr_cfg.add_site(DrRole.PRIMARY, local_nodes)
    # send the config to all nodes of the remote cluster
    distribute_file_cmd = DistributeFilesWithoutForces(
        env.report_processor,
        node_communication_format.pcs_dr_config_file(
            dr_config_exporter.export(remote_dr_cfg.config)
        ),
    )
    distribute_file_cmd.set_targets(remote_targets)
    run_and_raise(env.get_node_communicator(), distribute_file_cmd)

    # create a new dr config, with the local cluster as the primary site
    local_dr_cfg = dr_env.create_facade(DrRole.PRIMARY)
    local_dr_cfg.add_site(DrRole.RECOVERY, remote_cluster_nodes)
    distribute_file_cmd = DistributeFilesWithoutForces(
        env.report_processor,
        node_communication_format.pcs_dr_config_file(
            dr_config_exporter.export(local_dr_cfg.config)
        ),
    )
    distribute_file_cmd.set_targets(local_targets)
    run_and_raise(env.get_node_communicator(), distribute_file_cmd)
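# A minimal usage sketch, not from the source: configure disaster recovery
# with the local cluster as the primary site. The node name must be a known
# host from the recovery cluster; the value here is illustrative.
def _example_set_recovery_site(env: LibraryEnvironment) -> None:
    set_recovery_site(env, "recovery-cluster-node-1")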
def test_user_set(self):
    user = "******"
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, user_login=user
    )
    self.assertEqual(user, env.user_login)
def test_push_cib_upgraded_live(self, mock_replace_cib):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    env._cib_upgraded = True
    env.push_cib(etree.XML('<cib/>'))
    mock_replace_cib.assert_called_once_with("mock cmd runner", '<cib/>')
    self.assertFalse(env.cib_upgraded)
def test_report_processor(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertEqual(self.mock_reporter, env.report_processor)
class UpdateAlertTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data="<cib/>"
        )

    def test_update_all(self):
        self.mock_env._push_cib_xml("""
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="my-alert" path="/my/path" description="my description">
                            <instance_attributes id="my-alert-instance_attributes">
                                <nvpair id="my-alert-instance_attributes-instance" name="instance" value="value"/>
                                <nvpair id="my-alert-instance_attributes-another" name="another" value="val"/>
                            </instance_attributes>
                            <meta_attributes id="my-alert-meta_attributes">
                                <nvpair id="my-alert-meta_attributes-meta1" name="meta1" value="val1"/>
                            </meta_attributes>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """)
        cmd_alert.update_alert(
            self.mock_env,
            "my-alert",
            "/another/one",
            {"instance": "", "my-attr": "its_val"},
            {"meta1": "val2"},
            ""
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="my-alert" path="/another/one">
                            <instance_attributes id="my-alert-instance_attributes">
                                <nvpair id="my-alert-instance_attributes-another" name="another" value="val"/>
                                <nvpair id="my-alert-instance_attributes-my-attr" name="my-attr" value="its_val"/>
                            </instance_attributes>
                            <meta_attributes id="my-alert-meta_attributes">
                                <nvpair id="my-alert-meta_attributes-meta1" name="meta1" value="val2"/>
                            </meta_attributes>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )

    def test_update_instance_attribute(self):
        self.mock_env._push_cib_xml("""
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="my-alert" path="/my/path" description="my description">
                            <instance_attributes id="my-alert-instance_attributes">
                                <nvpair id="my-alert-instance_attributes-instance" name="instance" value="value"/>
                            </instance_attributes>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """)
        cmd_alert.update_alert(
            self.mock_env, "my-alert", None, {"instance": "new_val"}, {}, None
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="my-alert" path="/my/path" description="my description">
                            <instance_attributes id="my-alert-instance_attributes">
                                <nvpair id="my-alert-instance_attributes-instance" name="instance" value="new_val"/>
                            </instance_attributes>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )

    def test_alert_doesnt_exist(self):
        self.mock_env._push_cib_xml("""
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path"/>
                    </alerts>
                </configuration>
            </cib>
        """)
        assert_raise_library_error(
            lambda: cmd_alert.update_alert(
                self.mock_env, "unknown", "test", {}, {}, None
            ),
            (
                Severities.ERROR,
                report_codes.CIB_ALERT_NOT_FOUND,
                {"alert": "unknown"}
            )
        )
def setUp(self):
    self.env = LibraryEnvironment(
        mock.MagicMock(logging.Logger), MockLibraryReportProcessor()
    )
    self.create = partial(self.get_create(), self.env)
def test_get_cluster_conf_not_live(self, mock_get_local_cluster_conf):
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cluster_conf_data="data"
    )
    self.assertEqual("data", env.get_cluster_conf_data())
    self.assertEqual(0, mock_get_local_cluster_conf.call_count)
def test_get_cluster_conf(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    facade_obj = env.get_cluster_conf()
    self.assertTrue(isinstance(facade_obj, ClusterConfFacade))
    assert_xml_equal(
        '<cluster/>', etree.tostring(facade_obj._config).decode()
    )
class AddRecipientTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        cib = """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
        """
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data=cib
        )

    def test_alert_not_found(self):
        assert_raise_library_error(
            lambda: cmd_alert.add_recipient(
                self.mock_env, "unknown", "recipient", {}, {}
            ),
            (
                Severities.ERROR,
                report_codes.CIB_ALERT_NOT_FOUND,
                {"alert": "unknown"}
            )
        )

    def test_value_not_defined(self):
        assert_raise_library_error(
            lambda: cmd_alert.add_recipient(
                self.mock_env, "unknown", "", {}, {}
            ),
            (
                Severities.ERROR,
                report_codes.REQUIRED_OPTION_IS_MISSING,
                {"option_name": "value"}
            )
        )

    def test_recipient_already_exists(self):
        assert_raise_library_error(
            lambda: cmd_alert.add_recipient(
                self.mock_env, "alert", "value1", {}, {},
                recipient_id="alert-recipient"
            ),
            (
                Severities.ERROR,
                report_codes.ID_ALREADY_EXISTS,
                {"id": "alert-recipient"}
            )
        )

    def test_without_id(self):
        cmd_alert.add_recipient(
            self.mock_env,
            "alert",
            "value",
            {"attr1": "val1"},
            {"attr2": "val2", "attr1": "val1"}
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="alert-recipient-1" value="value">
                                <meta_attributes id="alert-recipient-1-meta_attributes">
                                    <nvpair id="alert-recipient-1-meta_attributes-attr1" name="attr1" value="val1"/>
                                    <nvpair id="alert-recipient-1-meta_attributes-attr2" name="attr2" value="val2"/>
                                </meta_attributes>
                                <instance_attributes id="alert-recipient-1-instance_attributes">
                                    <nvpair id="alert-recipient-1-instance_attributes-attr1" name="attr1" value="val1"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )

    def test_with_id(self):
        cmd_alert.add_recipient(
            self.mock_env,
            "alert",
            "value",
            {"attr1": "val1"},
            {"attr2": "val2", "attr1": "val1"},
            recipient_id="my-recipient"
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5">
                <configuration>
                    <alerts>
                        <alert id="alert" path="path">
                            <recipient id="alert-recipient" value="value1"/>
                            <recipient id="my-recipient" value="value">
                                <meta_attributes id="my-recipient-meta_attributes">
                                    <nvpair id="my-recipient-meta_attributes-attr1" name="attr1" value="val1"/>
                                    <nvpair id="my-recipient-meta_attributes-attr2" name="attr2" value="val2"/>
                                </meta_attributes>
                                <instance_attributes id="my-recipient-instance_attributes">
                                    <nvpair id="my-recipient-instance_attributes-attr1" name="attr1" value="val1"/>
                                </instance_attributes>
                            </recipient>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
def test_get_cluster_conf_not_live(self, mock_get_local_cluster_conf):
    env = LibraryEnvironment(
        self.mock_logger, self.mock_reporter, cluster_conf_data="data"
    )
    self.assertEqual("data", env.get_cluster_conf_data())
    self.assertEqual(0, mock_get_local_cluster_conf.call_count)
class CreateAlertTest(TestCase):
    def setUp(self):
        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
        self.mock_rep = MockLibraryReportProcessor()
        self.mock_env = LibraryEnvironment(
            self.mock_log, self.mock_rep, cib_data="<cib/>"
        )

    def test_no_path(self, mock_ensure_cib_version):
        assert_raise_library_error(
            lambda: cmd_alert.create_alert(
                self.mock_env, None, None, None, None
            ),
            (
                Severities.ERROR,
                report_codes.REQUIRED_OPTION_IS_MISSING,
                {"option_name": "path"}
            )
        )
        mock_ensure_cib_version.assert_not_called()

    def test_upgrade_needed(self, mock_ensure_cib_version):
        original_cib_xml = """
            <cib validate-with="pacemaker-2.4.1">
                <configuration>
                </configuration>
            </cib>
        """
        self.mock_env._push_cib_xml(original_cib_xml)
        mock_ensure_cib_version.return_value = etree.XML("""
            <cib validate-with="pacemaker-2.5.0">
                <configuration>
                </configuration>
            </cib>
        """)
        cmd_alert.create_alert(
            self.mock_env,
            "my-alert",
            "/my/path",
            {"instance": "value", "another": "val"},
            {"meta1": "val1"},
            "my description"
        )
        assert_xml_equal(
            """
            <cib validate-with="pacemaker-2.5.0">
                <configuration>
                    <alerts>
                        <alert id="my-alert" path="/my/path" description="my description">
                            <meta_attributes id="my-alert-meta_attributes">
                                <nvpair id="my-alert-meta_attributes-meta1" name="meta1" value="val1"/>
                            </meta_attributes>
                            <instance_attributes id="my-alert-instance_attributes">
                                <nvpair id="my-alert-instance_attributes-another" name="another" value="val"/>
                                <nvpair id="my-alert-instance_attributes-instance" name="instance" value="value"/>
                            </instance_attributes>
                        </alert>
                    </alerts>
                </configuration>
            </cib>
            """,
            self.mock_env._get_cib_xml()
        )
        self.assertEqual(1, mock_ensure_cib_version.call_count)
def test_is_cman_cluster(self, mock_is_cman):
    mock_is_cman.return_value = True
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertTrue(env.is_cman_cluster)
    self.assertTrue(env.is_cman_cluster)
    self.assertEqual(1, mock_is_cman.call_count)
def setUp(self):
    self.mock_log = mock.MagicMock(spec_set=logging.Logger)
    self.mock_rep = MockLibraryReportProcessor()
    self.mock_env = LibraryEnvironment(self.mock_log, self.mock_rep)
def test_usergroups_not_set(self):
    env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
    self.assertEqual([], env.user_groups)