def test_more_nodes(self):
    """Maintenance mode can be set and unset for several nodes at once."""
    self.assert_maintenance_none()
    self.assert_pcs_success("node maintenance rh7-1 rh7-2")
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-1: maintenance=on
             rh7-2: maintenance=on
            """
        )
    )
    self.fixture_maintenance_all()
    self.assert_pcs_success("node unmaintenance rh7-1 rh7-2")
    # rh7-3 was not named on the command line, so it keeps its attribute
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-3: maintenance=on
            """
        )
    )
def test_more_nodes(self):
    """Standby mode can be set and unset for several nodes at once."""
    self.assert_standby_none()
    self.assert_pcs_success("node standby rh7-1 rh7-2")
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-1: standby=on
             rh7-2: standby=on
            """
        )
    )
    self.fixture_standby_all()
    self.assert_pcs_success("node unstandby rh7-1 rh7-2")
    # rh7-3 was not named on the command line, so it keeps its attribute
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-3: standby=on
            """
        )
    )
def _test_multiple(self):
    """Several alerts can be processed by one '{self.command}' call."""
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             No alerts defined
            """
        )
    )
    self.assert_pcs_success("alert create path=test id=alert1")
    self.assert_pcs_success("alert create path=test id=alert2")
    self.assert_pcs_success("alert create path=test id=alert3")
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert1 (path=test)
             Alert: alert2 (path=test)
             Alert: alert3 (path=test)
            """
        )
    )
    self.assert_pcs_success(f"alert {self.command} alert1 alert3")
    # only the alert not named on the command line remains
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert2 (path=test)
            """
        )
    )
def test_returns_the_same_text_when_not_indented(self):
    """outdent is a no-op when the smallest indentation is already zero."""
    text = "\n".join([
        "first line",
        " second line",
        " third line",
    ])
    self.assertEqual(text, outdent(text))
def test_success_remove_nodes_votes(self):
    """Adding a qdevice drops per-node quorum_votes from the config."""
    config = open(rc("corosync-3nodes.conf")).read()
    config_votes = config.replace("node {", "node {\nquorum_votes: 2")
    facade = lib.ConfigFacade.from_string(config_votes)
    facade.add_quorum_device(
        "net",
        {"host": "127.0.0.1", "algorithm": "lms"},
        {},
        {}
    )
    ac(
        config.replace(
            "    provider: corosync_votequorum\n",
            outdent("""\
                    provider: corosync_votequorum

                    device {
                        model: net

                        net {
                            algorithm: lms
                            host: 127.0.0.1
                        }
                    }
                """)
        ),
        facade.config.export()
    )
    self.assertFalse(facade.need_stopped_cluster)
    self.assertFalse(facade.need_qdevice_reload)
def test_success_net_minimal_ffsplit(self):
    """ffsplit algorithm forces 'votes: 1' into the device section."""
    config = open(rc("corosync-3nodes.conf")).read()
    facade = lib.ConfigFacade.from_string(config)
    facade.add_quorum_device(
        "net",
        {"host": "127.0.0.1", "algorithm": "ffsplit"},
        {},
        {}
    )
    ac(
        config.replace(
            "    provider: corosync_votequorum\n",
            outdent("""\
                    provider: corosync_votequorum

                    device {
                        model: net
                        votes: 1

                        net {
                            algorithm: ffsplit
                            host: 127.0.0.1
                        }
                    }
                """)
        ),
        facade.config.export()
    )
    self.assertFalse(facade.need_stopped_cluster)
    self.assertFalse(facade.need_qdevice_reload)
def test_all(self):
    """A bundle with every option group set is shown back correctly."""
    self.assert_pcs_success(
        """
        resource bundle create B1
        container docker image=pcs:test promoted-max=2 replicas=4
            options='a b c'
        network host-interface=eth0 host-netmask=24 control-port=12345
        port-map id=B1-port-map-1001 internal-port=2002 port=2000
        port-map range=3000-3300
        storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
        storage-map id=my-storage-map source-dir=/tmp/docker2a
            target-dir=/tmp/docker2b
        meta target-role=Stopped is-managed=false
        """
    )
    self.assert_pcs_success(
        "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
    )
    self.assert_pcs_success("resource config B1", outdent(
        """\
        Bundle: B1
         Docker: image=pcs:test options="a b c" promoted-max=2 replicas=4
         Network: control-port=12345 host-interface=eth0 host-netmask=24
         Port Mapping:
          internal-port=2002 port=2000 (B1-port-map-1001)
          range=3000-3300 (B1-port-map-3000-3300)
         Storage Mapping:
          source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
          source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
         Meta Attrs: is-managed=false target-role=Stopped
         Resource: A (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (A-monitor-interval-10s)
        """
    ))
def fixture_nolive_remove_report(host_list):
    """Expected report text for removing remote nodes with -f (no live cluster)."""
    return outdent(
        """\
        Running action(s) 'pacemaker_remote disable', 'pacemaker_remote stop' on {hosts} was skipped because the command does not run on a live cluster (e.g. -f was used). Please, run the action(s) manually.
        Removing 'pacemaker authkey' from {hosts} was skipped because the command does not run on a live cluster (e.g. -f was used). Please, remove the file(s) manually.
        """
    ).format(hosts=", ".join("'{0}'".format(host) for host in host_list))
def fixture_nolive_add_report(node_name):
    """Expected report text for adding a remote node with -f (no live cluster)."""
    return outdent(f"""\
        Unable to check if there is a conflict with nodes set in corosync because the command does not run on a live cluster (e.g. -f was used)
        Distribution of 'pacemaker authkey' to '{node_name}' was skipped because the command does not run on a live cluster (e.g. -f was used). Please, distribute the file(s) manually.
        Running action(s) 'pacemaker_remote enable', 'pacemaker_remote start' on '{node_name}' was skipped because the command does not run on a live cluster (e.g. -f was used). Please, run the action(s) manually.
        """
    )
def _test_usage(self):
    """Calling the command without arguments prints its usage text."""
    self.assert_pcs_fail(
        f"alert recipient {self.command}",
        stdout_start=outdent(f"""
            Usage: pcs alert <command>
                recipient {self.command} <""")
    )
def fixture_logger_call_debug_data(url, data):
    """Expected logger.debug call for node communication debug output."""
    send_msg = outdent("""\
        Communication debug info for calling: {url}
        --Debug Communication Info Start--
        {data}
        --Debug Communication Info End--"""
    )
    return mock.call.debug(send_msg.format(url=url, data=data))
def test_device_not_defined(self, mock_sbd_config, mock_config_exists):
    """No SBD_DEVICE in the sbd config means an empty device list."""
    mock_config_exists.return_value = True
    mock_sbd_config.return_value = outdent("""
        SBD_WATCHDOG=/dev/watchdog
        SBD_WATCHDOG_TIMEOUT=10
        """)
    self.assertEqual([], lib_sbd.get_local_sbd_device_list())
    mock_config_exists.assert_called_once_with(settings.sbd_config)
    mock_sbd_config.assert_called_once_with()
def test_minimal(self):
    """A bundle created with only the mandatory image option is shown back."""
    self.assert_pcs_success(
        "resource bundle create B1 container docker image=pcs:test"
    )
    self.assert_pcs_success("resource config B1", outdent(
        """\
        Bundle: B1
         Docker: image=pcs:test
        """
    ))
def _test_multiple(self):
    """Several recipients can be processed by one '{self.command}' call."""
    self.assert_pcs_success("alert create path=test id=alert1")
    self.assert_pcs_success("alert create path=test id=alert2")
    self.assert_pcs_success(
        "alert recipient add alert1 value=rec_value1 id=rec1"
    )
    self.assert_pcs_success(
        "alert recipient add alert1 value=rec_value2 id=rec2"
    )
    self.assert_pcs_success(
        "alert recipient add alert2 value=rec_value3 id=rec3"
    )
    self.assert_pcs_success(
        "alert recipient add alert2 value=rec_value4 id=rec4"
    )
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert1 (path=test)
              Recipients:
               Recipient: rec1 (value=rec_value1)
               Recipient: rec2 (value=rec_value2)
             Alert: alert2 (path=test)
              Recipients:
               Recipient: rec3 (value=rec_value3)
               Recipient: rec4 (value=rec_value4)
            """
        )
    )
    self.assert_pcs_success(
        f"alert recipient {self.command} rec1 rec2 rec4"
    )
    # only rec3 survives; alert1 has no recipients left at all
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert1 (path=test)
             Alert: alert2 (path=test)
              Recipients:
               Recipient: rec3 (value=rec_value3)
            """
        )
    )
def _test_success_remove_by_node_name(self):
    """A remote node can be removed by its node name."""
    self.fixture_remote_node()
    self.assert_effect(
        f"cluster node {self.command} NODE-NAME",
        "<resources/>",
        fixture_nolive_remove_report(["NODE-NAME"]) + outdent(
            """\
            Deleting Resource - NODE-NAME
            """
        )
    )
def test_more_errors(self):
    """All validation errors are reported at once, with or without --force."""
    self.assert_pcs_fail_regardless_of_force(
        "resource bundle create B#1 container docker replicas=x",
        outdent(
            """\
            Error: invalid bundle name 'B#1', '#' is not a valid character for a bundle name
            Error: required container option 'image' is missing
            Error: 'x' is not a valid replicas value, use a positive integer
            """
        )
    )
def _test_no_recipient(self):
    """Nonexistent recipients are reported and nothing is changed."""
    self.assert_pcs_success("alert create path=test id=alert1")
    self.assert_pcs_success(
        "alert recipient add alert1 value=rec_value1 id=rec1"
    )
    self.assert_pcs_fail(
        f"alert recipient {self.command} rec1 rec2 rec3",
        outdent("""\
            Error: recipient 'rec2' does not exist
            Error: recipient 'rec3' does not exist
            """
        )
    )
    # the whole command failed, so rec1 must still be present
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert1 (path=test)
              Recipients:
               Recipient: rec1 (value=rec_value1)
            """
        )
    )
def test_very_ugly_indented_text(self):
    """outdent keeps relative indentation when stripping the common prefix."""
    self.assertEqual(
        """\
Cluster Name: test99
 Options:
""",
        outdent("""\
            Cluster Name: test99
             Options:
            """
        )
    )
def test_meta(self):
    """--disabled is translated to the target-role=Stopped meta attribute."""
    self.assert_pcs_success("""
        resource bundle create B1 container docker image=pcs:test --disabled
        """)
    self.assert_pcs_success("resource config B1", outdent(
        """\
        Bundle: B1
         Docker: image=pcs:test
         Meta Attrs: target-role=Stopped
        """
    ))
def assert_maintenance_all(self):
    """Assert that every cluster node has maintenance=on set."""
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-1: maintenance=on
             rh7-2: maintenance=on
             rh7-3: maintenance=on
            """
        )
    )
def assert_standby_all(self):
    """Assert that every cluster node has standby=on set."""
    self.assert_pcs_success(
        "node attribute",
        outdent("""\
            Node Attributes:
             rh7-1: standby=on
             rh7-2: standby=on
             rh7-3: standby=on
            """
        )
    )
def test_multiple_devices(self, mock_sbd_config, mock_config_exists):
    """A semicolon-separated SBD_DEVICE value is split into a list."""
    mock_config_exists.return_value = True
    mock_sbd_config.return_value = outdent("""
        SBD_WATCHDOG=/dev/watchdog
        SBD_WATCHDOG_TIMEOUT=10
        SBD_DEVICE="/dev/vda;/dev/sda"
        """)
    self.assertEqual(
        ["/dev/vda", "/dev/sda"],
        lib_sbd.get_local_sbd_device_list()
    )
    mock_config_exists.assert_called_once_with(settings.sbd_config)
    mock_sbd_config.assert_called_once_with()
def test_remove_the_smallest_indentation(self):
    """outdent strips exactly the smallest indentation found in the text."""
    self.assertEqual(
        "\n".join([
            " first line",
            "second line",
            " third line",
        ]),
        outdent("\n".join([
            "  first line",
            " second line",
            "  third line",
        ]))
    )
def _test_one(self):
    """A single recipient is removed by the tested command."""
    self.assert_pcs_success("alert create path=test")
    self.assert_pcs_success(
        "alert recipient add alert value=rec_value id=rec"
    )
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert (path=test)
              Recipients:
               Recipient: rec (value=rec_value)
            """
        )
    )
    self.assert_pcs_success(f"alert recipient {self.command} rec")
    self.assert_pcs_success(
        "alert config",
        outdent("""\
            Alerts:
             Alert: alert (path=test)
            """
        )
    )
def test_one_device(self):
    """A single discovered watchdog is parsed from the sbd output."""
    self.config.runner.sbd.list_watchdogs(outdent("""
        Discovered 1 watchdog devices:

        [1] /dev/watchdog
        Identity: i6300ESB timer
        Driver: <unknown>
        """))
    self.assertEqual(
        {
            "/dev/watchdog": _watchdog_fixture("i6300ESB timer", None)
        },
        get_local_available_watchdogs(self.env_assist.get_env())
    )
def quorum_status(
    self, node_list=None, stdout=None, stderr="", returncode=0,
    name="runner.corosync.quorum_status",
):
    """Register an expected corosync-quorumtool call.

    Exactly one of node_list / stdout must be given; when node_list is
    given, a realistic quorumtool output is generated from it.
    """
    if bool(node_list) == bool(stdout):
        raise AssertionError(
            "Exactly one of 'node_list', 'stdout' must be specified"
        )
    if node_list:
        stdout = outdent("""\
            Quorum information
            ------------------
            Date:             Fri Jan 16 13:03:28 2015
            Quorum provider:  corosync_votequorum
            Nodes:            {nodes_num}
            Node ID:          1
            Ring ID:          19860
            Quorate:          Yes\n
            Votequorum information
            ----------------------
            Expected votes:   {nodes_num}
            Highest expected: {nodes_num}
            Total votes:      {nodes_num}
            Quorum:           {quorum_num}
            Flags:            Quorate\n
            Membership information
            ----------------------
                Nodeid      Votes    Qdevice Name
            {nodes}\
            """).format(
            nodes_num=len(node_list),
            # simple majority
            quorum_num=(len(node_list) // 2) + 1,
            nodes="".join([
                _quorum_status_node_fixture(node_id, node)
                for node_id, node in enumerate(node_list, 1)
            ])
        )
    self.__calls.place(
        name,
        RunnerCall(
            "{binary} -p".format(
                binary=os.path.join(
                    settings.corosync_binaries, "corosync-quorumtool"
                ),
            ),
            stdout=stdout,
            stderr=stderr,
            returncode=returncode
        ),
    )
def test_container(self):
    """Container options of a bundle are shown back, sorted."""
    self.assert_pcs_success(
        """
        resource bundle create B1
        container docker image=pcs:test promoted-max=2 replicas=4
            options='a b c'
        """
    )
    self.assert_pcs_success("resource config B1", outdent(
        """\
        Bundle: B1
         Docker: image=pcs:test options="a b c" promoted-max=2 replicas=4
        """
    ))
def test_success(self, mock_is_systemctl):
    """Service names are parsed from chkconfig output on non-systemd hosts."""
    mock_is_systemctl.return_value = False
    self.mock_runner.run.return_value = (
        outdent("""\
            pcsd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
            sbd 0:off 1:on 2:on 3:on 4:on 5:on 6:off
            pacemaker 0:off 1:off 2:off 3:off 4:off 5:off 6:off
            """),
        "",
        0
    )
    self.assertEqual(
        lib.get_non_systemd_services(self.mock_runner),
        ["pcsd", "sbd", "pacemaker"]
    )
    self.assertEqual(mock_is_systemctl.call_count, 1)
    self.mock_runner.run.assert_called_once_with([_chkconfig])
def test_warning_old_push(self):
    """Old crm_feature_set triggers a full-CIB-push warning, then succeeds."""
    self.assert_pcs_success(
        "resource create dummy ocf:pacemaker:Dummy --no-default-ops",
        "Warning: Replacing the whole CIB instead of applying a diff, "
        "a race condition may happen if the CIB is pushed more than "
        "once simultaneously. To fix this, upgrade pacemaker to get "
        "crm_feature_set at least 3.0.9, current is 3.0.8.\n"
    )
    self.assert_pcs_success(
        "resource config",
        outdent("""\
            Resource: dummy (class=ocf provider=pacemaker type=Dummy)
             Operations: monitor interval=10s timeout=20s (dummy-monitor-interval-10s)
            """)
    )
def test_network(self):
    """Network options of a bundle are shown back, sorted."""
    self.assert_pcs_success(
        """
        resource bundle create B1 container docker image=pcs:test
        network host-interface=eth0 host-netmask=24 control-port=12345
        """
    )
    self.assert_pcs_success("resource config B1", outdent(
        """\
        Bundle: B1
         Docker: image=pcs:test
         Network: control-port=12345 host-interface=eth0 host-netmask=24
        """
    ))
def test_success_net_full(self):
    """All generic, model and heuristics options end up in the config."""
    config = _read_file("corosync-3nodes.conf")
    facade = _get_facade(config)
    facade.add_quorum_device(
        "net",
        {
            "host": "127.0.0.1",
            "port": "4433",
            "algorithm": "ffsplit",
            "connect_timeout": "12345",
            "force_ip_version": "4",
            "tie_breaker": "lowest",
        },
        {
            "timeout": "23456",
            "sync_timeout": "34567"
        },
        {
            "mode": "on",
            "timeout": "5",
            "sync_timeout": "15",
            "interval": "30",
            "exec_ping": 'ping -q -c 1 "127.0.0.1"',
            "exec_ls": "test -f /tmp/test",
        },
    )
    ac(
        config.replace(
            "    provider: corosync_votequorum\n",
            outdent("""\
                    provider: corosync_votequorum

                    device {
                        sync_timeout: 34567
                        timeout: 23456
                        model: net
                        votes: 1

                        net {
                            algorithm: ffsplit
                            connect_timeout: 12345
                            force_ip_version: 4
                            host: 127.0.0.1
                            port: 4433
                            tie_breaker: lowest
                        }

                        heuristics {
                            exec_ls: test -f /tmp/test
                            exec_ping: ping -q -c 1 "127.0.0.1"
                            interval: 30
                            mode: on
                            sync_timeout: 15
                            timeout: 5
                        }
                    }
                """),
        ),
        facade.config.export(),
    )
    self.assertFalse(facade.need_stopped_cluster)
    self.assertFalse(facade.need_qdevice_reload)
def fixture_nolive_remove_report(host_list):
    """Expected report text for removing remote nodes with -f (no live cluster)."""
    return outdent("""\
        Running action(s) 'pacemaker_remote disable', 'pacemaker_remote stop' on {hosts} was skipped because the command does not run on a live cluster (e.g. -f was used). Please, run the action(s) manually.
        Removing 'pacemaker authkey' from {hosts} was skipped because the command does not run on a live cluster (e.g. -f was used). Please, remove the file(s) manually.
        """).format(hosts=", ".join("'{0}'".format(host) for host in host_list))
def fixture_nolive_add_report(node_name):
    """Expected report text for adding a remote node with -f (no live cluster)."""
    return outdent(f"""\
        Unable to check if there is a conflict with nodes set in corosync because the command does not run on a live cluster (e.g. -f was used)
        Distribution of 'pacemaker authkey' to '{node_name}' was skipped because the command does not run on a live cluster (e.g. -f was used). Please, distribute the file(s) manually.
        Running action(s) 'pacemaker_remote enable', 'pacemaker_remote start' on '{node_name}' was skipped because the command does not run on a live cluster (e.g. -f was used). Please, run the action(s) manually.
        """)
class PcsConfigTagsTest(TestTagMixin, TestCase):
    """Check that tags and related sections show up in 'pcs config' output."""

    # Template of the whole expected "pcs config" output; the placeholders
    # are filled either with the empty_* or the expected_* snippets below.
    config_template = dedent("""\
        Cluster Name: test99

        Corosync Nodes:
         rh7-1 rh7-2
        {pacemaker_nodes}
        {resources}
        {stonith_devices}{fencing_levels}{constraints}
        Alerts:
         No alerts defined

        Resources Defaults:
         No defaults set
        Operations Defaults:
         No defaults set

        Cluster Properties:
        {tags}
        Quorum:
          Options:
        """)
    empty_pacemaker_nodes = "Pacemaker Nodes:"
    empty_resources = "\nResources:\n"
    empty_stonith_devices = "Stonith Devices:\n"
    empty_fencing_levels = "Fencing Levels:\n"
    empty_constraints = outdent("""
        Location Constraints:
        Ordering Constraints:
        Colocation Constraints:
        Ticket Constraints:
        """)
    empty_tags = outdent("""
        Tags:
         No tags defined
        """)
    expected_pacemaker_nodes = outdent("""\
        Pacemaker Nodes:
         rh-1 rh-2
        """)
    expected_resources = outdent(
        # pylint: disable=line-too-long
        """\
        Resources:
         Resource: not-in-tags (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (not-in-tags-monitor-interval-10s)
         Resource: x1 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x1-monitor-interval-10s)
         Resource: x2 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x2-monitor-interval-10s)
         Resource: x3 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x3-monitor-interval-10s)
         Resource: y1 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (y1-monitor-interval-10s)
         Clone: y2-clone
          Resource: y2 (class=ocf provider=pacemaker type=Dummy)
           Operations: monitor interval=10s timeout=20s (y2-monitor-interval-10s)
        """)
    expected_stonith_devices = outdent("""\
        Stonith Devices:
         Resource: fence-rh-1 (class=stonith type=fence_xvm)
          Operations: monitor interval=60s (fence-rh-1-monitor-interval-60s)
         Resource: fence-rh-2 (class=stonith type=fence_xvm)
          Operations: monitor interval=60s (fence-rh-2-monitor-interval-60s)
         Resource: fence-kdump (class=stonith type=fence_kdump)
          Attributes: pcmk_host_list="rh-1 rh-2"
          Operations: monitor interval=60s (fence-kdump-monitor-interval-60s)
        """)
    expected_fencing_levels = outdent("""\
        Fencing Levels:
          Target: rh-1
            Level 1 - fence-kdump
            Level 2 - fence-rh-1
          Target: rh-2
            Level 1 - fence-kdump
            Level 2 - fence-rh-2
        """)
    expected_tags = outdent("""
        Tags:
         tag1
           x1
           x2
           x3
         tag2
           y1
           x2
         tag3
           y2-clone
         tag-mixed-stonith-devices-and-resources
           fence-rh-2
           y1
           fence-rh-1
           x3
        """)
    expected_constraints = outdent("""
        Location Constraints:
          Resource: x1
            Enabled on:
              Node: rh7-1 (score:INFINITY) (id:cx1)
          Resource: x2
            Enabled on:
              Node: rh7-1 (score:INFINITY) (id:cx2)
        Ordering Constraints:
        Colocation Constraints:
        Ticket Constraints:
        """)

    def setUp(self):
        super().setUp()
        self.pcs_runner.mock_settings = {
            "corosync_conf_file": rc("corosync.conf")
        }

    def fixture_expected_config(
        self,
        constraints=empty_constraints,
        pacemaker_nodes=empty_pacemaker_nodes,
        resources=empty_resources,
        stonith_devices=empty_stonith_devices,
        fencing_levels=empty_fencing_levels,
        tags=empty_tags,
    ):
        # Fill the template; the defaults describe an empty cluster config.
        return self.config_template.format(
            constraints=constraints,
            pacemaker_nodes=pacemaker_nodes,
            resources=resources,
            stonith_devices=stonith_devices,
            fencing_levels=fencing_levels,
            tags=tags,
        )

    def test_config_no_tags(self):
        write_file_to_tmpfile(empty_cib, self.temp_cib)
        self.pcs_runner.mock_settings = {
            "corosync_conf_file": rc("corosync.conf")
        }
        self.assert_pcs_success(["config"], self.fixture_expected_config())

    def test_config_tags_defined(self):
        self.assert_pcs_success(
            ["config"],
            self.fixture_expected_config(
                constraints=self.expected_constraints,
                pacemaker_nodes=self.expected_pacemaker_nodes,
                resources=self.expected_resources,
                stonith_devices=self.expected_stonith_devices,
                fencing_levels=self.expected_fencing_levels,
                tags=self.expected_tags,
            ),
        )
def _test_usage(self):
    """Wrong arguments for the booth command print its usage text."""
    self.assert_pcs_fail(
        f"booth {self.command} a b",
        stdout_start=outdent(f"""
            Usage: pcs booth <command>
                {self.command}
            """)
    )
def test_full_2(self):
    """A full commented corosync.conf parses and re-exports normalized."""
    string = outdent(
        """\
        # Please read the corosync.conf.5 manual page
        totem {
            version: 2

            crypto_cipher: none
            crypto_hash: none

            interface {
                ringnumber: 0
                bindnetaddr: 10.16.35.0
                mcastport: 5405
                ttl: 1
            }
            transport: udpu
        }

        logging {
            fileline: off
            to_logfile: yes
            to_syslog: yes
            logfile: /var/log/cluster/corosync.log
            debug: off
            timestamp: on
            logger_subsys {
                subsys: QUORUM
                debug: off
            }
        }

        nodelist {
            node {
                ring0_addr: 10.16.35.101
                nodeid: 1
            }

            node {
                ring0_addr: 10.16.35.102
                nodeid: 2
            }

            node {
                ring0_addr: 10.16.35.103
            }

            node {
                ring0_addr: 10.16.35.104
            }

            node {
                ring0_addr: 10.16.35.105
            }
        }

        quorum {
            # Enable and configure quorum subsystem (default: off)
            # see also corosync.conf.5 and votequorum.5
            #provider: corosync_votequorum
        }
        """
    )
    # comments are dropped and attributes are emitted before sections
    parsed = outdent(
        """\
        totem {
            version: 2
            crypto_cipher: none
            crypto_hash: none
            transport: udpu

            interface {
                ringnumber: 0
                bindnetaddr: 10.16.35.0
                mcastport: 5405
                ttl: 1
            }
        }

        logging {
            fileline: off
            to_logfile: yes
            to_syslog: yes
            logfile: /var/log/cluster/corosync.log
            debug: off
            timestamp: on

            logger_subsys {
                subsys: QUORUM
                debug: off
            }
        }

        nodelist {
            node {
                ring0_addr: 10.16.35.101
                nodeid: 1
            }

            node {
                ring0_addr: 10.16.35.102
                nodeid: 2
            }

            node {
                ring0_addr: 10.16.35.103
            }

            node {
                ring0_addr: 10.16.35.104
            }

            node {
                ring0_addr: 10.16.35.105
            }
        }

        quorum {
        }
        """
    )
    self.assertEqual(str(config_parser.parse_string(string)), parsed)
def test_env(self, mock_popen):
    """env_extend merges into, but does not mutate, the runner's own env."""
    expected_stdout = "expected output"
    expected_stderr = "expected stderr"
    expected_retval = 123
    command = ["a_command"]
    command_str = "a_command"
    mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
    mock_process.communicate.return_value = (
        expected_stdout,
        expected_stderr,
    )
    mock_process.returncode = expected_retval
    mock_popen.return_value = mock_process
    global_env = {"a": "a", "b": "b"}
    runner = lib.CommandRunner(
        self.mock_logger, self.mock_reporter, global_env.copy()
    )
    # {C} is for check that no python template conflict appear
    real_stdout, real_stderr, real_retval = runner.run(
        command, env_extend={"b": "B", "c": "{C}"}
    )
    # check that env_extend did not affect initial env of runner
    # pylint: disable=protected-access
    self.assertEqual(runner._env_vars, global_env)
    self.assertEqual(real_stdout, expected_stdout)
    self.assertEqual(real_stderr, expected_stderr)
    self.assertEqual(real_retval, expected_retval)
    mock_process.communicate.assert_called_once_with(None)
    self.assert_popen_called_with(
        mock_popen,
        command,
        {"env": {"a": "a", "b": "B", "c": "{C}"}, "stdin": DEVNULL,},
    )
    logger_calls = [
        mock.call(
            outdent(
                """\
                Running: {0}
                Environment:
                  a=a
                  b=B
                  c={1}"""
            ).format(command_str, "{C}")
        ),
        mock.call(
            outdent(
                """\
                Finished running: {0}
                Return value: {1}
                --Debug Stdout Start--
                {2}
                --Debug Stdout End--
                --Debug Stderr Start--
                {3}
                --Debug Stderr End--"""
            ).format(
                command_str,
                expected_retval,
                expected_stdout,
                expected_stderr,
            )
        ),
    ]
    self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
    self.mock_logger.debug.assert_has_calls(logger_calls)
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.DEBUG,
                report_codes.RUN_EXTERNAL_PROCESS_STARTED,
                {
                    "command": command_str,
                    "stdin": None,
                    "environment": {"a": "a", "b": "B", "c": "{C}"},
                },
            ),
            (
                severity.DEBUG,
                report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
                {
                    "command": command_str,
                    "return_value": expected_retval,
                    "stdout": expected_stdout,
                    "stderr": expected_stderr,
                },
            ),
        ],
    )
def test_section_del(self):
    """Removing nested sections updates both the export and parent links."""
    root = config_parser.Section("")
    child1 = config_parser.Section("child1")
    child2 = config_parser.Section("child2")
    childa1 = config_parser.Section("childA")
    childa2 = config_parser.Section("childA")
    childa3 = config_parser.Section("childA")
    childa4 = config_parser.Section("childA")
    childb1 = config_parser.Section("childB")
    childb2 = config_parser.Section("childB")
    childa1.add_attribute("id", "1")
    childa2.add_attribute("id", "2")
    childa3.add_attribute("id", "3")
    childa4.add_attribute("id", "4")
    childb1.add_attribute("id", "5")
    childb2.add_attribute("id", "6")
    root.add_section(child1)
    root.add_section(child2)
    child1.add_section(childa1)
    child1.add_section(childa2)
    child1.add_section(childb1)
    child2.add_section(childa3)
    child2.add_section(childb2)
    child2.add_section(childa4)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }

            child2 {
                childA {
                    id: 3
                }

                childB {
                    id: 6
                }

                childA {
                    id: 4
                }
            }
            """
        ),
    )
    child2.del_section(childb2)
    self.assertEqual(childb2.parent, None)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }

            child2 {
                childA {
                    id: 3
                }

                childA {
                    id: 4
                }
            }
            """
        ),
    )
    root.del_section(child2)
    self.assertEqual(child2.parent, None)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }
            """
        ),
    )
    # deleting a section that is no longer (or never was) a child fails
    self.assertRaises(ValueError, root.del_section, child2)
    self.assertEqual(childa1.parent.name, "child1")
    self.assertRaises(ValueError, child2.del_section, childa1)
    self.assertEqual(childa1.parent.name, "child1")
    child1.del_section(childb1)
    self.assertEqual(childb1.parent, None)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }
            }
            """
        ),
    )
    child1.del_section(childa1)
    self.assertEqual(childa1.parent, None)
    child1.del_section(childa2)
    self.assertEqual(childa2.parent, None)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
            }
            """
        ),
    )
    root.del_section(child1)
    self.assertEqual(child1.parent, None)
    self.assertEqual(str(root), "")
def test_section_get(self):
    """get_sections returns all children, optionally filtered by name."""
    root = config_parser.Section("")
    child1 = config_parser.Section("child1")
    child2 = config_parser.Section("child2")
    childa1 = config_parser.Section("childA")
    childa2 = config_parser.Section("childA")
    childa3 = config_parser.Section("childA")
    childa4 = config_parser.Section("childA")
    childb1 = config_parser.Section("childB")
    childb2 = config_parser.Section("childB")
    childa1.add_attribute("id", "1")
    childa2.add_attribute("id", "2")
    childa3.add_attribute("id", "3")
    childa4.add_attribute("id", "4")
    childb1.add_attribute("id", "5")
    childb2.add_attribute("id", "6")
    root.add_section(child1)
    root.add_section(child2)
    child1.add_section(childa1)
    child1.add_section(childa2)
    child1.add_section(childb1)
    child2.add_section(childa3)
    child2.add_section(childb2)
    child2.add_section(childa4)
    self.assertEqual(
        str(root),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }

            child2 {
                childA {
                    id: 3
                }

                childB {
                    id: 6
                }

                childA {
                    id: 4
                }
            }
            """
        ),
    )
    # no name filter: every direct child section is returned
    self.assertEqual(
        "---\n".join([str(x) for x in root.get_sections()]),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }
            ---
            child2 {
                childA {
                    id: 3
                }

                childB {
                    id: 6
                }

                childA {
                    id: 4
                }
            }
            """
        ),
    )
    self.assertEqual(
        "---\n".join([str(x) for x in root.get_sections("child1")]),
        outdent(
            """\
            child1 {
                childA {
                    id: 1
                }

                childA {
                    id: 2
                }

                childB {
                    id: 5
                }
            }
            """
        ),
    )
    self.assertEqual(
        "---\n".join([str(x) for x in child1.get_sections("childA")]),
        outdent(
            """\
            childA {
                id: 1
            }
            ---
            childA {
                id: 2
            }
            """
        ),
    )
    # the filter only matches direct children, not grandchildren
    self.assertEqual(
        "---\n".join([str(x) for x in child1.get_sections("child2")]),
        ""
    )
def test_str(self):
    """Export format: attributes first, then nested sections in braces."""
    root = config_parser.Section("root")
    self.assertEqual(str(root), "")
    root.add_attribute("name1", "value1")
    self.assertEqual(str(root), "name1: value1\n")
    root.add_attribute("name2", "value2")
    root.add_attribute("name2", "value2a")
    root.add_attribute("name3", "value3")
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3
            """
        ),
    )
    child1 = config_parser.Section("child1")
    root.add_section(child1)
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3

            child1 {
            }
            """
        ),
    )
    child1.add_attribute("name1.1", "value1.1")
    child1.add_attribute("name1.2", "value1.2")
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3

            child1 {
                name1.1: value1.1
                name1.2: value1.2
            }
            """
        ),
    )
    child2 = config_parser.Section("child2")
    child2.add_attribute("name2.1", "value2.1")
    root.add_section(child2)
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3

            child1 {
                name1.1: value1.1
                name1.2: value1.2
            }

            child2 {
                name2.1: value2.1
            }
            """
        ),
    )
    child2a = config_parser.Section("child2a")
    child2a.add_attribute("name2.a.1", "value2.a.1")
    child2.add_section(child2a)
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3

            child1 {
                name1.1: value1.1
                name1.2: value1.2
            }

            child2 {
                name2.1: value2.1

                child2a {
                    name2.a.1: value2.a.1
                }
            }
            """
        ),
    )
    child3 = config_parser.Section("child3")
    root.add_section(child3)
    child3.add_section(config_parser.Section("child3a"))
    child3.add_section(config_parser.Section("child3b"))
    self.assertEqual(
        str(root),
        outdent(
            """\
            name1: value1
            name2: value2
            name2: value2a
            name3: value3

            child1 {
                name1.1: value1.1
                name1.2: value1.2
            }

            child2 {
                name2.1: value2.1

                child2a {
                    name2.a.1: value2.a.1
                }
            }

            child3 {
                child3a {
                }

                child3b {
                }
            }
            """
        ),
    )
class PcsConfigTagsTest(TestTagMixin, TestCase):
    """Check that tags show up in 'pcs config' output."""

    # Template of the whole expected "pcs config" output; the placeholders
    # are filled either with the empty_* or the expected_* snippets below.
    config_template = dedent(
        """\
        Cluster Name: test99

        Corosync Nodes:
         rh7-1 rh7-2
        Pacemaker Nodes:
        {resources}
        Stonith Devices:
        Fencing Levels:
        {constraints}
        Alerts:
         No alerts defined

        Resources Defaults:
         No defaults set
        Operations Defaults:
         No defaults set

        Cluster Properties:
        {tags}
        Quorum:
          Options:
        """
    )
    empty_resources = "\nResources:\n"
    empty_constraints = outdent(
        """
        Location Constraints:
        Ordering Constraints:
        Colocation Constraints:
        Ticket Constraints:
        """
    )
    empty_tags = outdent(
        """
        Tags:
         No tags defined
        """
    )
    expected_resources = outdent(
        # pylint: disable=line-too-long
        """
        Resources:
         Resource: not-in-tags (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (not-in-tags-monitor-interval-10s)
         Resource: x1 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x1-monitor-interval-10s)
         Resource: x2 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x2-monitor-interval-10s)
         Resource: x3 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (x3-monitor-interval-10s)
         Resource: y1 (class=ocf provider=pacemaker type=Dummy)
          Operations: monitor interval=10s timeout=20s (y1-monitor-interval-10s)
         Clone: y2-clone
          Resource: y2 (class=ocf provider=pacemaker type=Dummy)
           Operations: monitor interval=10s timeout=20s (y2-monitor-interval-10s)
        """
    )
    expected_tags = outdent(
        """
        Tags:
         tag1
           x1
           x2
           x3
         tag2
           y1
           x2
         tag3
           y2-clone
        """
    )
    expected_constraints = outdent(
        """
        Location Constraints:
          Resource: x1
            Enabled on:
              Node: rh7-1 (score:INFINITY) (id:cx1)
          Resource: x2
            Enabled on:
              Node: rh7-1 (score:INFINITY) (id:cx2)
        Ordering Constraints:
        Colocation Constraints:
        Ticket Constraints:
        """
    )

    def setUp(self):
        super(PcsConfigTagsTest, self).setUp()
        self.pcs_runner.mock_settings = {
            "corosync_conf_file": rc("corosync.conf")
        }

    def fixture_expected_config(
        self,
        constraints=empty_constraints,
        resources=empty_resources,
        tags=empty_tags,
    ):
        # Fill the template; the defaults describe an empty cluster config.
        return self.config_template.format(
            constraints=constraints,
            resources=resources,
            tags=tags,
        )

    def test_config_no_tags(self):
        write_file_to_tmpfile(empty_cib, self.temp_cib)
        self.pcs_runner.mock_settings = {
            "corosync_conf_file": rc("corosync.conf")
        }
        self.assert_pcs_success("config", self.fixture_expected_config())

    def test_config_tags_defined(self):
        self.assert_pcs_success(
            "config",
            self.fixture_expected_config(
                constraints=self.expected_constraints,
                resources=self.expected_resources,
                tags=self.expected_tags,
            ),
        )
def test_sections_unexpected_closing_brace(self):
    """A closing brace with no open section raises a parse error."""
    string = outdent("""\
        }
        """)
    self.assertRaises(
        config_parser.UnexpectedClosingBraceException,
        config_parser.parse_string,
        string
    )
def test_sections_missing_closing_brace(self):
    """A section left open at end of input raises a parse error."""
    string = outdent("""\
        section1 {
        """)
    self.assertRaises(
        config_parser.MissingClosingBraceException,
        config_parser.parse_string,
        string
    )
def test_full_1(self):
    """Parse a full real-world corosync.conf (the stock example file) and
    check the exported form: comments are dropped, options and nested
    sections are kept, whitespace is normalized.

    NOTE(review): the literals below were reconstructed from
    whitespace-mangled source; blank-line placement inside the input
    string is a best guess (the parser's output does not depend on it).
    """
    # pylint: disable=line-too-long
    # Input: copy of the upstream corosync.conf example, comments included.
    string = outdent(
        """\
        # Please read the corosync.conf.5 manual page
        totem {
            version: 2

            # crypto_cipher and crypto_hash: Used for mutual node authentication.
            # If you choose to enable this, then do remember to create a shared
            # secret with "corosync-keygen".
            # enabling crypto_cipher, requires also enabling of crypto_hash.
            crypto_cipher: none
            crypto_hash: none

            # interface: define at least one interface to communicate
            # over. If you define more than one interface stanza, you must
            # also set rrp_mode.
            interface {
                # Rings must be consecutively numbered, starting at 0.
                ringnumber: 0
                # This is normally the *network* address of the
                # interface to bind to. This ensures that you can use
                # identical instances of this configuration file
                # across all your cluster nodes, without having to
                # modify this option.
                bindnetaddr: 192.168.1.0
                # However, if you have multiple physical network
                # interfaces configured for the same subnet, then the
                # network address alone is not sufficient to identify
                # the interface Corosync should bind to. In that case,
                # configure the *host* address of the interface
                # instead:
                # bindnetaddr: 192.168.1.1
                # When selecting a multicast address, consider RFC
                # 2365 (which, among other things, specifies that
                # 239.255.x.x addresses are left to the discretion of
                # the network administrator). Do not reuse multicast
                # addresses across multiple Corosync clusters sharing
                # the same network.
                mcastaddr: 239.255.1.1
                # Corosync uses the port you specify here for UDP
                # messaging, and also the immediately preceding
                # port. Thus if you set this to 5405, Corosync sends
                # messages over UDP ports 5405 and 5404.
                mcastport: 5405
                # Time-to-live for cluster communication packets. The
                # number of hops (routers) that this ring will allow
                # itself to pass. Note that multicast routing must be
                # specifically enabled on most network routers.
                ttl: 1
            }
        }

        logging {
            # Log the source file and line where messages are being
            # generated. When in doubt, leave off. Potentially useful for
            # debugging.
            fileline: off
            # Log to standard error. When in doubt, set to no. Useful when
            # running in the foreground (when invoking "corosync -f")
            to_stderr: no
            # Log to a log file. When set to "no", the "logfile" option
            # must not be set.
            to_logfile: yes
            logfile: /var/log/cluster/corosync.log
            # Log to the system log daemon. When in doubt, set to yes.
            to_syslog: yes
            # Log debug messages (very verbose). When in doubt, leave off.
            debug: off
            # Log messages with time stamps. When in doubt, set to on
            # (unless you are only logging to syslog, where double
            # timestamps can be annoying).
            timestamp: on
            logger_subsys {
                subsys: QUORUM
                debug: off
            }
        }

        quorum {
            # Enable and configure quorum subsystem (default: off)
            # see also corosync.conf.5 and votequorum.5
            #provider: corosync_votequorum
        }
        """
    )
    # Expected export: all comments stripped; the commented-out "provider"
    # line means the quorum section comes back empty.
    parsed = outdent(
        """\
        totem {
            version: 2
            crypto_cipher: none
            crypto_hash: none
            interface {
                ringnumber: 0
                bindnetaddr: 192.168.1.0
                mcastaddr: 239.255.1.1
                mcastport: 5405
                ttl: 1
            }
        }

        logging {
            fileline: off
            to_stderr: no
            to_logfile: yes
            logfile: /var/log/cluster/corosync.log
            to_syslog: yes
            debug: off
            timestamp: on
            logger_subsys {
                subsys: QUORUM
                debug: off
            }
        }

        quorum {
        }
        """
    )
    self.assertEqual(str(config_parser.parse_string(string)), parsed)
def test_different_responses(self):
    """get_cluster_sbd_config gathers the SBD config from every cluster
    node and degrades gracefully: an unreachable node yields config=None
    plus warning reports, while unparsable config lines are skipped.
    """
    node_name_list = ["node-1", "node-2", "node-3", "node-4", "node-5"]
    (
        self.config.env.set_known_nodes(node_name_list)
        .corosync_conf.load(
            node_name_list=node_name_list,
            auto_tie_breaker=True,
        )
        .http.add_communication(
            "get_sbd_config",
            [
                # node-1: full, well-formed config file
                dict(
                    label="node-1",
                    output=outdent(
                        """\
                        # This file has been generated by pcs.
                        SBD_DELAY_START=no
                        SBD_OPTS="-n node-1"
                        SBD_PACEMAKER=yes
                        SBD_STARTMODE=always
                        SBD_WATCHDOG_DEV=/dev/watchdog
                        SBD_WATCHDOG_TIMEOUT=5
                        """
                    ),
                    response_code=200,
                ),
                # node-2: connection fails entirely (curl errno 7)
                dict(
                    label="node-2",
                    was_connected=False,
                    errno=7,
                    error_msg="Failed connect to node-2:2224;"
                        " No route to host",
                ),
                # node-3: space after '=' — value is parsed and stripped
                dict(
                    label="node-3",
                    output="OPTION= value",
                    response_code=200,
                ),
                # node-4: comment-only file -> empty config dict
                dict(
                    label="node-4",
                    output="# just comment",
                    response_code=200,
                ),
                # node-5: line without '=' is ignored -> empty config dict
                dict(
                    label="node-5",
                    output="invalid value",
                    response_code=200,
                ),
            ],
            action="remote/get_sbd_config",
        )
    )
    self.assertEqual(
        get_cluster_sbd_config(self.env_assist.get_env()),
        [
            {
                "node": "node-1",
                "config": {
                    "SBD_WATCHDOG_TIMEOUT": "5",
                    "SBD_WATCHDOG_DEV": "/dev/watchdog",
                    "SBD_PACEMAKER": "yes",
                    "SBD_OPTS": '"-n node-1"',
                    "SBD_STARTMODE": "always",
                    "SBD_DELAY_START": "no",
                },
            },
            {"node": "node-3", "config": {"OPTION": "value",}},
            {"node": "node-4", "config": {},},
            {"node": "node-5", "config": {},},
            # the unreachable node is listed last, with config=None
            {"node": "node-2", "config": None,},
        ],
    )
    # the connection failure must be reported as warnings, not errors
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node="node-2",
                reason="Failed connect to node-2:2224; No route to host",
                command="remote/get_sbd_config",
            ),
            fixture.warn(
                report_codes.UNABLE_TO_GET_SBD_CONFIG,
                node="node-2",
                reason="",
            ),
        ]
    )
def test_stdin(self, mock_popen):
    """CommandRunner.run with stdin_string feeds the string to the child
    process, returns (stdout, stderr, retval) from it, and includes the
    stdin text in both the debug log entries and the report items.
    """
    expected_stdout = "expected output"
    expected_stderr = "expected stderr"
    expected_retval = 123
    command = ["a_command"]
    command_str = "a_command"
    stdin = "stdin string"

    # Fake Popen process: communicate() returns canned stdout/stderr and
    # returncode is preset; spec_set keeps the runner honest about which
    # attributes it touches.
    mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
    mock_process.communicate.return_value = (
        expected_stdout,
        expected_stderr,
    )
    mock_process.returncode = expected_retval
    mock_popen.return_value = mock_process

    runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
    real_stdout, real_stderr, real_retval = runner.run(
        command, stdin_string=stdin
    )

    self.assertEqual(real_stdout, expected_stdout)
    self.assertEqual(real_stderr, expected_stderr)
    self.assertEqual(real_retval, expected_retval)
    # the stdin string must reach communicate() and Popen must be opened
    # with stdin=PIPE (-1)
    mock_process.communicate.assert_called_once_with(stdin)
    self.assert_popen_called_with(
        mock_popen, command, {"env": {}, "stdin": -1}
    )
    # exactly two debug log entries: "Running" (with the stdin dump) and
    # "Finished running" (with retval, stdout, stderr)
    logger_calls = [
        mock.call(
            outdent(
                """\
                Running: {0}
                Environment:
                --Debug Input Start--
                {1}
                --Debug Input End--"""
            ).format(command_str, stdin)
        ),
        mock.call(
            outdent(
                """\
                Finished running: {0}
                Return value: {1}
                --Debug Stdout Start--
                {2}
                --Debug Stdout End--
                --Debug Stderr Start--
                {3}
                --Debug Stderr End--"""
            ).format(
                command_str,
                expected_retval,
                expected_stdout,
                expected_stderr,
            )
        ),
    ]
    self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
    self.mock_logger.debug.assert_has_calls(logger_calls)
    # the same start/finish information must go through the reporter
    assert_report_item_list_equal(
        self.mock_reporter.report_item_list,
        [
            (
                severity.DEBUG,
                report_codes.RUN_EXTERNAL_PROCESS_STARTED,
                {
                    "command": command_str,
                    "stdin": stdin,
                    "environment": dict(),
                },
            ),
            (
                severity.DEBUG,
                report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
                {
                    "command": command_str,
                    "return_value": expected_retval,
                    "stdout": expected_stdout,
                    "stderr": expected_stderr,
                },
            ),
        ],
    )