def fixture_reports_new_node_unreachable(node_name, omitting=False):
    """Return the reports expected when a new node cannot be contacted.

    node_name -- name of the unreachable node
    omitting -- True when the node is known but skipped, False when unknown
    """
    if omitting:
        head = [
            fixture.warn(
                report_codes.OMITTING_NODE,
                node=node_name,
            ),
        ]
    else:
        head = [
            fixture.warn(
                report_codes.HOST_NOT_FOUND,
                host_list=[node_name],
            ),
        ]
    # Both variants share the same trailing "skipped" reports.
    return head + [
        fixture.info(
            report_codes.FILES_DISTRIBUTION_SKIPPED,
            reason_type="unreachable",
            file_list=["pacemaker authkey"],
            node_list=[node_name],
        ),
        fixture.info(
            report_codes.SERVICE_COMMANDS_ON_NODES_SKIPPED,
            reason_type="unreachable",
            action_list=[
                "pacemaker_remote start", "pacemaker_remote enable"
            ],
            node_list=[node_name],
        ),
    ]
def test_reload_not_successful(self):
    """Reload fails everywhere: one node not running, one returns bad JSON."""
    self.corosync_conf_facade.need_qdevice_reload = True
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text, node_labels=self.node_labels
        )
        .http.corosync.reload_corosync_conf(
            communication_list=[
                # first node: corosync not running, so reload not possible
                [
                    {
                        "label": self.node_labels[0],
                        "response_code": 200,
                        "output": json.dumps(
                            dict(code="not_running", message="not running")
                        ),
                    },
                ],
                # second node: unparsable response
                [
                    {
                        "label": self.node_labels[1],
                        "response_code": 200,
                        "output": "not a json",
                    },
                ],
            ]
        )
    )
    self.env_assistant.assert_raise_library_error(
        lambda: self.env_assistant.get_env().push_corosync_conf(
            self.corosync_conf_facade
        ),
        []
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-1"
        ),
        fixture.warn(
            report_codes.INVALID_RESPONSE_FORMAT,
            node="node-2"
        ),
        fixture.error(
            report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE
        ),
    ])
def test_set_stonith_watchdog_timeout_fails_on_some_nodes(self):
    """Watchdog-timeout reset failing on some nodes only produces warnings."""
    err_msg = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            # rh7-1: connection error; rh7-2: HTTP 400; rh7-3: success
            [{
                "label": "rh7-1",
                "was_connected": False,
                "errno": 7,
                "error_msg": err_msg,
            }],
            [{
                "label": "rh7-2",
                "response_code": 400,
                "output": "FAILED",
            }],
            [{"label": "rh7-3"}]
        ]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node="rh7-1",
                reason=err_msg,
                command="remote/set_stonith_watchdog_timeout_to_zero"
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node="rh7-2",
                reason="FAILED",
                command="remote/set_stonith_watchdog_timeout_to_zero"
            )
        ]
        + [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def test_can_skip_all_offline(self, generate_binary_key):
    """Adding a guest node succeeds even if every node is offline, if forced."""
    generate_binary_key.return_value = b"password"
    (self.config
        .local.load_cib()
        .corosync_conf.load(node_name_list=[NODE_1, NODE_2])
        .http.host.check_auth(
            communication_list=[
                # the new node is unreachable
                dict(
                    label=NODE_NAME,
                    dest_list=NODE_DEST_LIST,
                    **FAIL_HTTP_KWARGS
                )
            ],
        )
        .local.authkey_exists(return_value=False)
        .local.distribute_authkey(
            # authkey distribution fails on the existing cluster nodes too
            communication_list=[
                dict(
                    label=NODE_1,
                    dest_list=NODE_1_DEST_LIST,
                    **FAIL_HTTP_KWARGS,
                ),
                dict(
                    label=NODE_2,
                    dest_list=NODE_2_DEST_LIST,
                    **FAIL_HTTP_KWARGS,
                ),
            ],
            pcmk_authkey_content=generate_binary_key.return_value,
        )
        .local.push_cib()
    )
    node_add_guest(self.env_assist.get_env(), skip_offline_nodes=True)
    self.env_assist.assert_reports(
        fixture_reports_new_node_unreachable(NODE_NAME, omitting=True) + [
            fixture.info(
                report_codes.FILES_DISTRIBUTION_STARTED,
                file_list=["pacemaker authkey"],
                node_list=[NODE_1, NODE_2],
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node=NODE_1,
                command="remote/put_file",
                reason="Could not resolve host",
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                node=NODE_2,
                command="remote/put_file",
                reason="Could not resolve host",
            ),
        ])
def test_unknown_agent_forced(self):
    """Creating a stonith device with a missing agent succeeds when forced."""
    agent = "test_unknown"
    (self.config
        .runner.pcmk.load_agent(
            agent_name=f"stonith:{agent}",
            agent_is_missing=True,
        )
        .runner.cib.load()
        .env.push_cib(resources=self._expected_cib(expected_cib_unknown))
    )
    self._create(
        self.env_assist.get_env(),
        "stonith-test",
        agent,
        operations=[],
        meta_attributes={},
        instance_attributes={},
        allow_absent_agent=True,
    )
    # The missing metadata is reported only as a warning because it was
    # explicitly allowed.
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.UNABLE_TO_GET_AGENT_METADATA,
            agent="test_unknown",
            reason=(
                "Agent stonith:test_unknown not found or does not support "
                "meta-data: Invalid argument (22)\n"
                "Metadata query for stonith:test_unknown failed: "
                "Input/output error"
            )
        ),
    ])
def test_some_node_names_missing(self):
    """SBD config is fetched from named nodes only; missing names just warn."""
    (self.config
        .env.set_known_nodes(["rh7-2"])
        .corosync_conf.load(filename="corosync-some-node-names.conf")
        .http.add_communication(
            "get_sbd_config",
            [
                dict(
                    label="rh7-2",
                    output="OPTION=value",
                    response_code=200,
                ),
            ],
            action="remote/get_sbd_config",
        )
    )
    config_list = get_cluster_sbd_config(self.env_assist.get_env())
    self.assertEqual(
        config_list,
        [
            {"node": "rh7-2", "config": {"OPTION": "value"}},
        ]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ])
def test_success(self):
    """Disabling SBD succeeds on every node of the cluster."""
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    expected_reports = (
        [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
    self.env_assist.assert_reports(expected_reports)
def test_all_node_names_missing(self):
    """Cert sync fails when corosync.conf defines no node names at all."""
    (self.config
        .corosync_conf.load(
            filename="corosync-no-node-names.conf",
            instead="corosync_conf.load"
        )
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
    )
    self.env_assist.assert_raise_library_error(
        lambda: pcsd.synchronize_ssl_certificate(self.env_assist.get_env()),
        []
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
                fatal=False,
            ),
            fixture.error(
                report_codes.COROSYNC_CONFIG_NO_NODES_DEFINED,
            ),
        ]
    )
def test_success_config_and_authfile_exists(self, pwd_mock, grp_mock):
    """Pulling booth config warns when both the config and the key exist."""
    self._set_pwd_mock(pwd_mock)
    self._set_grp_mock(grp_mock)
    (self.config
        .fs.exists(self.config_path, True, instead="fs.exists")
        .fs.exists(
            self.authfile_path,
            True,
            name="fs.exists.authfile",
            instead="fs.exists.authfile",
        )
    )
    commands.pull_config(self.env_assist.get_env(), self.node_name)
    # one overwrite warning per pre-existing file
    existing_files = [
        (file_roles.BOOTH_CONFIG, self.config_path),
        (file_roles.BOOTH_KEY, self.authfile_path),
    ]
    self.env_assist.assert_reports(
        self.report_list + [
            fixture.warn(
                report_codes.FILE_ALREADY_EXISTS,
                node=None,
                file_role=role,
                file_path=path,
            )
            for role, path in existing_files
        ]
    )
def test_all_node_names_missing(self):
    """Booth config sync raises when corosync.conf has no node names."""
    key_file = "auth.file"
    key_file_path = os.path.join(settings.booth_config_dir, key_file)
    booth_cfg_content = "authfile={}".format(key_file_path)
    key_content = b"auth"
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data=booth_cfg_content)(),
            name="open.conf"
        )
        .fs.open(
            key_file_path,
            mock.mock_open(read_data=key_content)(),
            mode="rb",
            name="open.authfile",
        )
        .corosync_conf.load(filename="corosync-no-node-names.conf")
    )
    self.env_assist.assert_raise_library_error(
        lambda: commands.config_sync(self.env_assist.get_env(), self.name),
        [
            fixture.error(
                report_codes.COROSYNC_CONFIG_NO_NODES_DEFINED,
            ),
        ]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ])
def test_return_error_on_not_allowed_and_banned_names_forced(self):
    """Banned names remain errors even when extra names are force-allowed."""
    force_code = "force_code"
    assert_report_item_list_equal(
        validate.names_in(
            ["a", "b"],
            ["x", "a", "z", "c", "d"],
            banned_name_list=["x", "y", "z"],
            code_to_allow_extra_names=force_code,
            extra_names_allowed=True,
        ),
        [
            # extra (unknown) names are downgraded to a warning
            fixture.warn(
                report_codes.INVALID_OPTIONS,
                option_names=["c", "d"],
                allowed=["a", "b"],
                option_type="option",
                allowed_patterns=[],
            ),
            # banned names cannot be forced
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=["x", "z"],
                allowed=["a", "b"],
                option_type="option",
                allowed_patterns=[],
            ),
        ]
    )
def test_node_addrs_unresolvable_forced(self):
    """Unresolvable addresses become warnings when forced; other errors stay."""
    assert_report_item_list_equal(
        config_validators.add_nodes(
            [
                # Duplicated addresses reported only once but they trigger
                # a duplicate addresses report.
                {"name": "node3", "addrs": ["addr03", "addrX2"]},
                {"name": "node4", "addrs": ["addrX2", "addr14"]},
                # Extra address reported as well, it triggers its own report
                # about being an extra address.
                {"name": "node5", "addrs": ["addr05", "addrX1", "addrX3"]},
            ],
            self.fixture_coronodes_2_links,
            [],
            force_unresolvable=True
        ),
        [
            fixture.error(
                report_codes.COROSYNC_BAD_NODE_ADDRESSES_COUNT,
                actual_count=3,
                min_count=2,
                max_count=2,
                node_name="node5",
                node_index=3
            ),
            fixture.warn(
                report_codes.NODE_ADDRESSES_UNRESOLVABLE,
                address_list=["addrX1", "addrX2", "addrX3"]
            ),
            fixture.error(
                report_codes.NODE_ADDRESSES_DUPLICATION,
                address_list=["addrX2"]
            ),
        ]
    )
def test_set_stonith_watchdog_timeout_fails_on_all_nodes(self):
    """The command aborts when no node accepts the watchdog timeout reset."""
    failure = "Error"
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        communication_list=[
            [dict(label=node, response_code=400, output=failure)]
            for node in self.node_list
        ]
    )
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [],
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
                node=node,
                reason=failure,
                command="remote/set_stonith_watchdog_timeout_to_zero"
            )
            for node in self.node_list
        ]
        + [
            fixture.error(
                report_codes.UNABLE_TO_PERFORM_OPERATION_ON_ANY_NODE,
            )
        ]
    )
def test_authfile_not_in_booth_dir(self):
    """A key outside the booth dir is warned about and not distributed."""
    booth_cfg = "authfile=/etc/my_booth.conf"
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data=booth_cfg)(),
            name="open.conf"
        )
        .corosync_conf.load()
        .http.booth.send_config(
            self.name,
            booth_cfg,
            node_labels=self.node_list,
        )
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [
            fixture.warn(report_codes.BOOTH_UNSUPPORTED_FILE_LOCATION),
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
        ]
        + [
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in self.node_list
        ]
    )
def test_invalid_and_unsupported_options_forced(self):
    """Unknown SBD options warn when allowed; unsupported ones stay errors."""
    sbd_config = {
        "SBD_DELAY_START": "yes",
        "SBD_WATCHDOG_TIMEOUT": "5",
        "SBD_STARTMODE": "clean",
        "SBD_WATCHDOG_DEV": "/dev/watchdog",
        "SBD_UNKNOWN": "",
        "SBD_OPTS": " ",
        "SBD_PACEMAKER": "false",
    }
    report_list = cmd_sbd._validate_sbd_options(
        sbd_config, allow_unknown_opts=True
    )
    assert_report_item_list_equal(
        report_list,
        [
            # options managed by pcs itself can never be set manually
            fixture.error(
                report_codes.INVALID_OPTIONS,
                option_names=sorted(
                    ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"]
                ),
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
            ),
            # unknown options were explicitly allowed
            fixture.warn(
                report_codes.INVALID_OPTIONS,
                option_names=["SBD_UNKNOWN"],
                option_type=None,
                allowed=self.allowed_sbd_options,
                allowed_patterns=[],
            ),
        ]
    )
def push_full_forced_reports(version):
    """Reports expected when a full CIB push is forced by crm_feature_set.

    version -- the crm_feature_set found in the CIB
    """
    warning = fixture.warn(
        report_codes.CIB_PUSH_FORCED_FULL_DUE_TO_CRM_FEATURE_SET,
        current_set=version,
        required_set="3.0.9"
    )
    return [warning]
def test_reload_corosync_not_running_anywhere(self):
    """Push succeeds with warnings when no node runs corosync to reload."""
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text, node_labels=self.node_labels
        )
        .http.corosync.reload_corosync_conf(
            # every node answers "not running"
            communication_list=[
                [
                    {
                        "label": node,
                        "response_code": 200,
                        "output": json.dumps(
                            dict(code="not_running", message="not running")
                        ),
                    },
                ]
                for node in self.node_labels
            ]
        )
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-1"
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
            node="node-2"
        ),
    ])
def test_success_node_offline_skip_offline(self):
    """The offline node is omitted; SBD is disabled on the reachable ones."""
    err_msg = "Failed connect to rh7-3:2224; No route to host"
    online_nodes_list = ["rh7-2", "rh7-3"]
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(
        communication_list=[
            # rh7-1 is unreachable, the other two nodes respond
            {
                "label": "rh7-1",
                "was_connected": False,
                "errno": 7,
                "error_msg": err_msg,
            },
            {"label": "rh7-2"},
            {"label": "rh7-3"}
        ]
    )
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=online_nodes_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=online_nodes_list)
    disable_sbd(self.env_assist.get_env(), ignore_offline_nodes=True)
    self.env_assist.assert_reports(
        [fixture.warn(report_codes.OMITTING_NODE, node="rh7-1")]
        + [fixture.info(report_codes.SBD_DISABLING_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in online_nodes_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
def test_multiple_not_found_skip_offline(self):
    """Unknown hosts are skipped with a single warning listing them all."""
    known = "host0"
    unknown_hosts = ["node0", "node1"]
    target_list = self.factory.get_target_list(
        [known] + unknown_hosts, skip_non_existing=True
    )
    self.assert_equal_known_host_target(
        self.known_hosts[known], target_list[0]
    )
    self.report_processor.assert_reports([
        fixture.warn(report_codes.HOST_NOT_FOUND, host_list=unknown_hosts)
    ])
def test_no_host_found_skip_offline(self):
    """An error is raised when none of the requested hosts is known."""
    unknown_hosts = ["node0", "node1"]
    not_found = fixture.warn(
        report_codes.HOST_NOT_FOUND, host_list=unknown_hosts
    )
    none_found = fixture.error(report_codes.NONE_HOST_FOUND)
    assert_raise_library_error(
        lambda: self.factory.get_target_list(
            unknown_hosts, skip_non_existing=True
        ),
        none_found
    )
    self.report_processor.assert_reports([not_found, none_found])
def test_some_node_names_missing(self):
    """SBD status is collected from named nodes only; missing names warn."""
    (self.config
        .env.set_known_nodes(["rh7-2"])
        .corosync_conf.load(filename="corosync-some-node-names.conf")
        .http.add_communication(
            "check_sbd",
            [
                dict(
                    label="rh7-2",
                    output=json.dumps({
                        "sbd":{
                            "installed": True,
                            "enabled": False,
                            "running":False
                        },
                        "watchdog":{
                            "path":"",
                            "exist":False
                        },
                        "device_list":[]
                    }),
                    response_code=200,
                ),
            ],
            action="remote/check_sbd",
            param_list=[("watchdog", ""), ("device_list", "[]")],
        )
    )
    result = get_cluster_sbd_status(self.env_assist.get_env())
    self.assertEqual(
        result,
        [
            {
                "node": "rh7-2",
                "status": {
                    "running": False,
                    "enabled": False,
                    "installed": True,
                }
            },
        ]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ])
def test_some_node_names_missing(self):
    """SBD is disabled on named nodes only; missing names just warn."""
    self.corosync_conf_name = "corosync-some-node-names.conf"
    self.node_list = ["rh7-2"]
    self.config.corosync_conf.load(filename=self.corosync_conf_name)
    self.config.http.host.check_auth(node_labels=self.node_list)
    self.config.http.pcmk.set_stonith_watchdog_timeout_to_zero(
        node_labels=self.node_list[:1]
    )
    self.config.http.sbd.disable_sbd(node_labels=self.node_list)
    disable_sbd(self.env_assist.get_env())
    expected_reports = (
        [
            fixture.warn(
                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
                fatal=False,
            ),
            fixture.info(report_codes.SBD_DISABLING_STARTED)
        ]
        + [
            fixture.info(
                report_codes.SERVICE_DISABLE_SUCCESS,
                service="sbd",
                node=node,
                instance=None
            )
            for node in self.node_list
        ]
        + [
            fixture.warn(
                report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES
            )
        ]
    )
    self.env_assist.assert_reports(expected_reports)
def test_all_node_names_missing(self):
    """Disabling SBD fails when corosync.conf defines no node names."""
    self.config.corosync_conf.load(filename="corosync-no-node-names.conf")
    self.env_assist.assert_raise_library_error(
        lambda: disable_sbd(self.env_assist.get_env()),
        [fixture.error(report_codes.COROSYNC_CONFIG_NO_NODES_DEFINED)]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
            fatal=False,
        ),
    ])
def test_success_config_exists(self):
    """Pulling booth config over an existing file emits a warning."""
    self.config.fs.exists(self.config_path, True, instead="fs.exists")
    commands.pull_config(self.env_assist.get_env(), self.node_name)
    overwrite_warning = fixture.warn(
        report_codes.FILE_ALREADY_EXISTS,
        node=None,
        file_role=file_roles.BOOTH_CONFIG,
        file_path=self.config_path,
    )
    self.env_assist.assert_reports(self.report_list + [overwrite_warning])
def test_some_nodes_unknown_forced(self):
    """Unknown nodes are skipped with a warning when skip_offline is set."""
    (self.config
        .env.set_known_nodes(self.node_names[1:])
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=self.node_names[1:]
        )
    )
    pcsd.synchronize_ssl_certificate(
        self.env_assist.get_env(), skip_offline=True
    )
    self.env_assist.assert_reports(
        [
            # the first node is not known, so it is skipped
            fixture.warn(
                report_codes.HOST_NOT_FOUND,
                host_list=[self.node_names[0]]
            ),
        ]
        + [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
                node_name_list=self.node_names[1:]
            )
        ]
        + [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
                node=node,
            )
            for node in self.node_names[1:]
        ]
    )
def test_invalid_value_forced(self):
    """An invalid SBD option value is downgraded to a warning when forced."""
    sbd_config = {
        "SBD_TIMEOUT_ACTION": "flush,noflush",
    }
    report_list = cmd_sbd._validate_sbd_options(
        sbd_config, allow_invalid_option_values=True
    )
    assert_report_item_list_equal(
        report_list,
        [
            fixture.warn(
                report_codes.INVALID_OPTION_VALUE,
                option_name="SBD_TIMEOUT_ACTION",
                option_value="flush,noflush",
                allowed_values=self.timeout_action_allowed_values,
            ),
        ]
    )
def test_some_node_names_missing(self):
    """Booth config is synced to named nodes only; missing names warn."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    auth_file_content = b"auth"
    nodes = ["rh7-2"]
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data=config_content)(),
            name="open.conf"
        )
        .fs.open(
            auth_file_path,
            mock.mock_open(read_data=auth_file_content)(),
            mode="rb",
            name="open.authfile",
        )
        .corosync_conf.load(filename="corosync-some-node-names.conf")
        .http.booth.send_config(
            self.name,
            config_content,
            authfile=auth_file,
            authfile_data=auth_file_content,
            node_labels=nodes,
        )
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED),
            fixture.warn(
                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
                fatal=False,
            ),
        ]
        + [
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in nodes
        ]
    )
def test_some_node_names_missing(self):
    """Certs are synced to named nodes only; missing names just warn."""
    nodes = ["rh7-2"]
    (self.config
        .corosync_conf.load(
            filename="corosync-some-node-names.conf",
            instead="corosync_conf.load"
        )
        .fs.open(
            settings.pcsd_cert_location,
            mock.mock_open(read_data=self.pcsd_ssl_cert)(),
            name="fs.open.pcsd_ssl_cert"
        )
        .fs.open(
            settings.pcsd_key_location,
            mock.mock_open(read_data=self.pcsd_ssl_key)(),
            name="fs.open.pcsd_ssl_key"
        )
        .http.host.send_pcsd_cert(
            cert=self.pcsd_ssl_cert,
            key=self.pcsd_ssl_key,
            node_labels=nodes
        )
    )
    pcsd.synchronize_ssl_certificate(self.env_assist.get_env())
    self.env_assist.assert_reports(
        [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_DISTRIBUTION_STARTED,
                node_name_list=nodes
            ),
            fixture.warn(
                report_codes.COROSYNC_CONFIG_MISSING_NAMES_OF_NODES,
                fatal=False,
            ),
        ]
        + [
            fixture.info(
                report_codes.PCSD_SSL_CERT_AND_KEY_SET_SUCCESS,
                node=node,
            )
            for node in nodes
        ]
    )
def test_authfile_not_accessible(self):
    """An unreadable authfile only warns; the config is still distributed."""
    auth_file = "auth.file"
    auth_file_path = os.path.join(settings.booth_config_dir, auth_file)
    config_content = "authfile={}".format(auth_file_path)
    (self.config
        .fs.open(
            self.config_path,
            mock.mock_open(read_data=config_content)(),
            name="open.conf"
        )
        .fs.open(
            auth_file_path,
            mode="rb",
            name="open.authfile",
            # reading the key file fails
            side_effect=EnvironmentError(0, self.reason, auth_file_path),
        )
        .corosync_conf.load()
        .http.booth.send_config(
            self.name,
            config_content,
            node_labels=self.node_list,
        )
    )
    commands.config_sync(self.env_assist.get_env(), self.name)
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.FILE_IO_ERROR,
                reason="{}: '{}'".format(self.reason, auth_file_path),
                file_role=file_roles.BOOTH_KEY,
                file_path=auth_file_path,
                operation="read",
            ),
            fixture.info(report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED)
        ]
        + [
            fixture.info(
                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                node=node,
                name_list=[self.name]
            )
            for node in self.node_list
        ]
    )
def test_cib_not_available_forced(self):
    """Adding a link proceeds with a warning when the CIB cannot be loaded."""
    (self.config
        .corosync_conf.load_content(self.before)
        .runner.cib.load(stderr="an error", returncode=1)
        .env.push_corosync_conf(corosync_conf_text=self.after)
    )
    cluster.add_link(
        self.env_assist.get_env(),
        self.node_addr_map,
        self.link_options,
        force_flags=[report_codes.FORCE]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.CIB_LOAD_ERROR_GET_NODES_FOR_VALIDATION,
        ),
    ])
def test_describe(self):
    """list_agents describes available agents, warning about broken ones."""
    self.config.runner.pcmk.load_agent(
        agent_name="ocf:test:Delay",
        stdout=self._fixture_agent_metadata("ocf:test:Delay"),
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.delay",
    )
    # Stateful agent metadata is missing - it is skipped with a warning.
    self.config.runner.pcmk.load_agent(
        agent_name="ocf:test:Stateful",
        agent_is_missing=True,
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.stateful",
    )
    self.config.runner.pcmk.load_agent(
        agent_name="service:corosync",
        stdout=self._fixture_agent_metadata("service:corosync"),
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.corosync",
    )
    self.config.runner.pcmk.load_agent(
        agent_name="service:pacemaker_remote",
        stdout=self._fixture_agent_metadata("service:pacemaker_remote"),
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.pacemaker_remote",
    )
    # fields shared by every agent description below
    agent_stub = {
        "parameters": [],
        "actions": [],
        "default_actions": [
            {
                "interval": "60s",
                "name": "monitor",
                "OCF_CHECK_LEVEL": None,
                "automatic": False,
                "on_target": False,
                "role": None,
                "start-delay": None,
                "timeout": None,
            }
        ],
    }
    self.assertEqual(
        lib.list_agents(self.env_assist.get_env(), True, None),
        [
            dict(
                name="ocf:test:Delay",
                standard="ocf",
                provider="test",
                type="Delay",
                shortdesc="short ocf:test:Delay",
                longdesc="long ocf:test:Delay",
                **agent_stub,
            ),
            dict(
                name="service:corosync",
                standard="service",
                provider=None,
                type="corosync",
                shortdesc="short service:corosync",
                longdesc="long service:corosync",
                **agent_stub,
            ),
            dict(
                name="service:pacemaker_remote",
                standard="service",
                provider=None,
                type="pacemaker_remote",
                shortdesc="short service:pacemaker_remote",
                longdesc="long service:pacemaker_remote",
                **agent_stub,
            ),
        ],
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.UNABLE_TO_GET_AGENT_METADATA,
                agent="ocf:test:Stateful",
                reason=(
                    "Agent ocf:test:Stateful not found or does not support "
                    "meta-data: Invalid argument (22)\nMetadata query for "
                    "ocf:test:Stateful failed: Input/output error"
                ),
            )
        ]
    )
def test_parse_error_forced(self):
    """A JSON parse error is reported as a warning when forced."""
    report_list = self._parse_error("force code", True)
    assert_report_item_list_equal(
        report_list,
        [
            fixture.warn(
                report_codes.PARSE_ERROR_JSON_FILE,
                **self._parse_error_report_args()
            ),
        ]
    )
def test_need_stopped_cluster_comunnication_failures_skip_offline(self):
    """Unknown corosync status on some nodes is tolerated with skip_offline."""
    # If we don't know if corosync is running, skip_offline matters.
    self.corosync_conf_facade.need_stopped_cluster = True
    (self.config
        .http.corosync.check_corosync_offline(
            communication_list=[
                dict(
                    label="node-1",
                    response_code=401,
                    output="""{"notauthorized":"true"}"""
                ),
                dict(
                    label="node-2",
                    output="{"  # not valid json
                ),
            ]
        )
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            communication_list=[
                dict(
                    label="node-1",
                    response_code=401,
                    output="""{"notauthorized":"true"}""",
                ),
                dict(
                    label="node-2",
                )
            ]
        )
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/status",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-1",
        ),
        fixture.warn(
            report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
            node="node-2",
        ),
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
    ])
def test_dont_need_stopped_cluster_error_skip_offline(self):
    """Distribute/reload failures on one node only warn with skip_offline."""
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            communication_list=[
                {
                    "label": "node-1",
                    "response_code": 400,
                    "output": "Failed"
                },
                {
                    "label": "node-2",
                },
            ]
        )
        .http.corosync.reload_corosync_conf(
            communication_list=[
                # reload fails on the first node, succeeds on the second
                [
                    {
                        "label": self.node_labels[0],
                        "response_code": 400,
                        "output": "Failed"
                    },
                ],
                [
                    {
                        "label": self.node_labels[1],
                    },
                ],
            ]
        )
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-1",
            command="remote/set_corosync_conf",
            reason="Failed",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-1",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-2",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-1",
            command="remote/reload_corosync_conf",
            reason="Failed",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_RELOADED,
            node="node-2"
        ),
    ])
def test_qdevice_reload_failures_skip_offline(self):
    """Qdevice reload failures become warnings with skip_offline_nodes."""
    self.corosync_conf_facade.need_qdevice_reload = True
    (self.config
        .http.corosync.set_corosync_conf(
            self.corosync_conf_text,
            communication_list=[
                dict(
                    label="node-1",
                ),
                # node-2 is unreachable during distribution
                dict(
                    label="node-2",
                    errno=8,
                    error_msg="failure",
                    was_connected=False,
                ),
            ]
        )
        .http.corosync.reload_corosync_conf(
            communication_list=[
                [
                    {
                        "label": self.node_labels[0],
                        "response_code": 400,
                        "output": "Failed"
                    },
                ],
                [
                    {
                        "label": self.node_labels[1],
                    },
                ],
            ]
        )
        .http.corosync.qdevice_client_stop(
            communication_list=[
                dict(
                    label="node-1",
                ),
                dict(
                    label="node-2",
                    response_code=400,
                    output="error",
                ),
            ]
        )
        .http.corosync.qdevice_client_start(
            communication_list=[
                dict(
                    label="node-1",
                    errno=8,
                    error_msg="failure",
                    was_connected=False,
                ),
                dict(
                    label="node-2",
                ),
            ]
        )
    )
    env = self.env_assistant.get_env()
    env.push_corosync_conf(
        self.corosync_conf_facade, skip_offline_nodes=True
    )
    self.env_assistant.assert_reports([
        fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED),
        fixture.info(
            report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
            node="node-1",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason="failure",
            command="remote/set_corosync_conf",
        ),
        fixture.warn(
            report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
            node="node-2",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-1",
            command="remote/reload_corosync_conf",
            reason="Failed",
        ),
        fixture.info(
            report_codes.COROSYNC_CONFIG_RELOADED,
            node="node-2"
        ),
        fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED),
        fixture.info(
            report_codes.SERVICE_STOP_SUCCESS,
            node="node-1",
            service="corosync-qdevice",
            instance=None,
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
            node="node-2",
            reason="error",
            command="remote/qdevice_client_stop",
        ),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-1",
            reason="failure",
            command="remote/qdevice_client_start",
        ),
        fixture.info(
            report_codes.SERVICE_START_SUCCESS,
            node="node-2",
            service="corosync-qdevice",
            instance=None,
        ),
    ])
def test_describe(self):
    """list_agents lists stonith agents and warns on metadata failures."""
    self.config.runner.pcmk.load_agent(
        agent_name="stonith:fence_apc",
        stdout=self._fixture_agent_metadata("stonith:fence_apc"),
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.fence_apc",
    )
    self.config.runner.pcmk.load_fenced_metadata(
        stdout=_fixture_fenced_xml)
    # fence_dummy metadata is missing - skipped with the warning below
    self.config.runner.pcmk.load_agent(
        agent_name="stonith:fence_dummy",
        agent_is_missing=True,
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.fence_dummy",
    )
    self.config.runner.pcmk.load_agent(
        agent_name="stonith:fence_xvm",
        stdout=self._fixture_agent_metadata("stonith:fence_xvm"),
        env={"PATH": "/usr/sbin:/bin:/usr/bin"},
        name="runner.pcmk.load_agent.fence_xvm",
    )
    # fields shared by every returned agent description
    agent_stub = {
        "parameters": [
            _fixture_parameter("own-param", "testing own parameter")
        ] + _fixture_fenced_parsed,
        "actions": [],
        "default_actions": [{
            "name": "monitor",
            "interval": "60s",
            "OCF_CHECK_LEVEL": None,
            "automatic": False,
            "on_target": False,
            "role": None,
            "start-delay": None,
            "timeout": None,
        }],
    }
    self.assertEqual(
        lib.list_agents(self.env_assist.get_env(), True, None),
        [
            dict(
                name="stonith:fence_apc",
                standard="stonith",
                provider=None,
                type="fence_apc",
                shortdesc="short stonith:fence_apc",
                longdesc="long stonith:fence_apc",
                **agent_stub,
            ),
            dict(
                name="stonith:fence_xvm",
                standard="stonith",
                provider=None,
                type="fence_xvm",
                shortdesc="short stonith:fence_xvm",
                longdesc="long stonith:fence_xvm",
                **agent_stub,
            ),
        ],
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.UNABLE_TO_GET_AGENT_METADATA,
            agent="stonith:fence_dummy",
            reason=(
                "Agent stonith:fence_dummy not found or does not support "
                "meta-data: Invalid argument (22)\nMetadata query for "
                "stonith:fence_dummy failed: Input/output error"),
        )
    ])
def setUp(self):
    # pylint: disable=invalid-name
    """Set up env tools and the warning every test in this class expects."""
    self.env_assist, self.config = get_env_tools(self)
    self.reports = [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
def test_instance_meta_and_operations(self):
    """Invalid operation and instance attributes pass with the force flags."""
    agent_name = "test_simple"
    (self.config
        .runner.pcmk.load_agent(
            agent_name=f"stonith:{agent_name}",
            agent_filename="stonith_agent_fence_simple.xml"
        )
        .runner.cib.load()
        .runner.pcmk.load_fenced_metadata()
        .env.push_cib(
            resources=self._expected_cib(expected_cib_simple_forced)
        )
    )
    self._create(
        self.env_assist.get_env(),
        "stonith-test",
        agent_name,
        operations=[
            # not a valid stonith action - forced below
            {"name": "bad-action"},
        ],
        meta_attributes={
            "metaname": "metavalue",
        },
        instance_attributes={
            # not defined by the agent - forced below
            "undefined": "attribute"
        },
        allow_invalid_operation=True,
        allow_invalid_instance_attributes=True,
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.INVALID_OPTION_VALUE,
            option_value="bad-action",
            option_name="operation name",
            allowed_values=[
                "on", "off", "reboot", "status", "list", "list-status",
                "monitor", "metadata", "validate-all",
            ],
        ),
        fixture.warn(
            report_codes.INVALID_OPTIONS,
            option_names=["undefined"],
            option_type="stonith",
            allowed=[
                "may-set", "must-set", "must-set-new", "must-set-old",
                "pcmk_action_limit", "pcmk_delay_base", "pcmk_delay_max",
                "pcmk_host_argument", "pcmk_host_check", "pcmk_host_list",
                "pcmk_host_map", "pcmk_list_action", "pcmk_list_retries",
                "pcmk_list_timeout", "pcmk_monitor_action",
                "pcmk_monitor_retries", "pcmk_monitor_timeout",
                "pcmk_off_action", "pcmk_off_retries", "pcmk_off_timeout",
                "pcmk_on_action", "pcmk_on_retries", "pcmk_on_timeout",
                "pcmk_reboot_action", "pcmk_reboot_retries",
                "pcmk_reboot_timeout", "pcmk_status_action",
                "pcmk_status_retries", "pcmk_status_timeout",
                "priority",
            ],
            allowed_patterns=[]
        ),
        fixture.warn(
            report_codes.REQUIRED_OPTION_IS_MISSING,
            option_names=["must-set", "must-set-new"],
            option_type="stonith",
        ),
    ])
def test_node_issues(self):
    """Per-node communication problems produce warnings; status still succeeds."""
    self._set_up(local_node_count=7)
    self._fixture_load_configs()
    (
        self.config.http.status.get_full_cluster_status_plaintext(
            name="http.status.get_full_cluster_status_plaintext.local",
            cluster_status_plaintext=self.local_status,
            # one failure mode per local node; only the last node answers OK
            communication_list=[
                [
                    dict(
                        label=self.local_node_name_list[0],
                        was_connected=False,
                    )
                ],
                [
                    dict(
                        label=self.local_node_name_list[1],
                        response_code=401,
                    )
                ],
                [
                    dict(
                        label=self.local_node_name_list[2],
                        response_code=500,
                    )
                ],
                [
                    dict(
                        label=self.local_node_name_list[3],
                        response_code=404,
                    )
                ],
                [
                    dict(
                        label=self.local_node_name_list[4],
                        output="invalid data",
                    )
                ],
                [
                    dict(
                        label=self.local_node_name_list[5],
                        output=json.dumps(dict(status="success")),
                    )
                ],
                [dict(label=self.local_node_name_list[6],)],
            ],
        ).http.status.get_full_cluster_status_plaintext(
            name="http.status.get_full_cluster_status_plaintext.remote",
            node_labels=self.remote_node_name_list[:1],
            cluster_status_plaintext=self.remote_status,
        )
    )
    result = dr.status_all_sites_plaintext(self.env_assist.get_env())
    self.assertEqual(result, self._fixture_result())
    self.env_assist.assert_reports(
        [
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
                command="remote/cluster_status_plaintext",
                node="node1",
                reason=None,
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
                command="remote/cluster_status_plaintext",
                node="node2",
                reason="HTTP error: 401",
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR,
                command="remote/cluster_status_plaintext",
                node="node3",
                reason="HTTP error: 500",
            ),
            fixture.warn(
                report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
                command="remote/cluster_status_plaintext",
                node="node4",
                reason="HTTP error: 404",
            ),
            fixture.warn(
                report_codes.INVALID_RESPONSE_FORMAT,
                node="node5",
            ),
            fixture.warn(
                report_codes.INVALID_RESPONSE_FORMAT,
                node="node6",
            ),
        ]
    )
def test_different_responses(self):
    """SBD config is gathered from nodes returning differently shaped answers.

    A well-formed config, an unreachable node, a line with extra
    whitespace, a comment-only file and an unparsable file are all
    handled; only the unreachable node yields warnings and a None config.
    """
    node_name_list = ["node-1", "node-2", "node-3", "node-4", "node-5"]
    (self.config
        .env.set_known_nodes(node_name_list)
        .corosync_conf.load(
            node_name_list=node_name_list,
            auto_tie_breaker=True,
        )
        .http.add_communication(
            "get_sbd_config",
            [
                # complete, well-formed config
                dict(
                    label="node-1",
                    output=outdent(
                        """\
                        # This file has been generated by pcs.
                        SBD_DELAY_START=no
                        SBD_OPTS="-n node-1"
                        SBD_PACEMAKER=yes
                        SBD_STARTMODE=always
                        SBD_WATCHDOG_DEV=/dev/watchdog
                        SBD_WATCHDOG_TIMEOUT=5
                        """
                    ),
                    response_code=200,
                ),
                # unreachable node
                dict(
                    label="node-2",
                    was_connected=False,
                    errno=7,
                    error_msg="Failed connect to node-2:2224;"
                        " No route to host"
                    ,
                ),
                # space after "=" is tolerated, value gets stripped
                dict(
                    label="node-3",
                    output="OPTION= value",
                    response_code=200,
                ),
                # comment-only file -> empty config dict
                dict(
                    label="node-4",
                    output="# just comment",
                    response_code=200,
                ),
                # unparsable line -> empty config dict
                dict(
                    label="node-5",
                    output="invalid value",
                    response_code=200,
                ),
            ],
            action="remote/get_sbd_config",
        )
    )
    # note: the unreachable node is reported last, with config=None
    self.assertEqual(
        get_cluster_sbd_config(self.env_assist.get_env()),
        [
            {
                'node': 'node-1',
                'config': {
                    'SBD_WATCHDOG_TIMEOUT': '5',
                    'SBD_WATCHDOG_DEV': '/dev/watchdog',
                    'SBD_PACEMAKER': 'yes',
                    'SBD_OPTS': '"-n node-1"',
                    'SBD_STARTMODE': 'always',
                    'SBD_DELAY_START': 'no'
                },
            },
            {
                'node': 'node-3',
                'config': {
                    "OPTION": "value",
                }
            },
            {
                'node': 'node-4',
                'config': {},
            },
            {
                'node': 'node-5',
                'config': {},
            },
            {
                'node': 'node-2',
                'config': None,
            },
        ]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason="Failed connect to node-2:2224; No route to host",
            command="remote/get_sbd_config",
        ),
        fixture.warn(
            report_codes.UNABLE_TO_GET_SBD_CONFIG,
            node="node-2",
            reason="",
        ),
    ])
def tearDown(self):
    """Check that every test emitted exactly the 'defaults can be overridden' warning."""
    expected_reports = [
        fixture.warn(report_codes.DEFAULTS_CAN_BE_OVERRIDEN),
    ]
    self.env_assist.assert_reports(expected_reports)
def test_default_different_results_on_different_nodes(self):
    """SBD status gathered from nodes answering in assorted broken ways.

    Only node-3 answers with a usable payload; every other node falls
    back to the all-None default status and produces a warning.
    """
    node_name_list = ["node-1", "node-2", "node-3", "node-4", "node-5"]
    (self.config
        .env.set_known_nodes(node_name_list)
        .corosync_conf.load(node_name_list=node_name_list)
        .http.add_communication(
            "check_sbd",
            [
                # authentication failure
                dict(
                    label="node-1",
                    output='{"notauthorized":"true"}',
                    response_code=401,
                ),
                # unreachable node
                dict(
                    label="node-2",
                    was_connected=False,
                    errno=6,
                    error_msg="Could not resolve host: node-2;"
                        " Name or service not known"
                    ,
                ),
                # the only fully valid response
                dict(
                    label="node-3",
                    output=json.dumps({
                        "sbd":{
                            "installed": True,
                            "enabled": False,
                            "running":False
                        },
                        "watchdog":{
                            "path":"",
                            "exist":False
                        },
                        "device_list":[]
                    }),
                    response_code=200,
                ),
                # valid JSON but the "sbd" key is missing
                dict(
                    label="node-4",
                    output=json.dumps({
                        "watchdog":{
                            "path":"",
                            "exist":False
                        },
                        "device_list":[]
                    }),
                    response_code=200,
                ),
                # response is not JSON at all
                dict(
                    label="node-5",
                    output="invalid json",
                    response_code=200,
                ),
            ],
            action="remote/check_sbd",
            param_list=[("watchdog", ""), ("device_list", "[]")],
        )
    )
    # status used whenever a node's answer cannot be interpreted
    default_status = {
        'running': None,
        'enabled': None,
        'installed': None,
    }
    self.assertEqual(
        get_cluster_sbd_status(self.env_assist.get_env()),
        [
            {
                'node': 'node-3',
                'status': {
                    'running': False,
                    'enabled': False,
                    'installed': True,
                }
            },
            {
                'node': 'node-1',
                'status': default_status
            },
            {
                'node': 'node-2',
                'status': default_status
            },
            {
                'node': 'node-4',
                'status': default_status
            },
            {
                'node': 'node-5',
                'status': default_status
            },
        ]
    )
    self.env_assist.assert_reports([
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
            node="node-1",
            reason="HTTP error: 401",
            command="remote/check_sbd",
        ),
        warn_unable_to_get_sbd_status(node="node-1", reason=""),
        fixture.warn(
            report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
            node="node-2",
            reason=
                "Could not resolve host: node-2; Name or service not known"
            ,
            command="remote/check_sbd",
        ),
        warn_unable_to_get_sbd_status(node="node-2", reason=""),
        # missing "sbd" key surfaces as the KeyError repr
        warn_unable_to_get_sbd_status(node="node-4", reason="'sbd'"),
        warn_unable_to_get_sbd_status(
            node="node-5",
            #the reason differs in python3
            #reason="No JSON object could be decoded",
        ),
    ])
def test_success_full(self):
    """Create a defaults nvset with a custom id, score and a full rule.

    The rule exercises every expression kind: rsc_expression, defined,
    gt with a typed value, and all date_expression variants (lt,
    in_range with end / duration / open start, and date_spec).
    """
    defaults_xml = f"""
        <{self.tag}>
            <meta_attributes id="my-id" score="10">
                <rule id="my-id-rule" boolean-op="and" score="INFINITY">
                    <rsc_expression id="my-id-rule-rsc-ocf-pacemaker-Dummy"
                        class="ocf" provider="pacemaker" type="Dummy"
                    />
                    <rule id="my-id-rule-rule" boolean-op="or" score="0">
                        <expression id="my-id-rule-rule-expr"
                            operation="defined" attribute="attr1"
                        />
                        <expression id="my-id-rule-rule-expr-1"
                            attribute="attr2" operation="gt"
                            type="number" value="5"
                        />
                        <date_expression id="my-id-rule-rule-expr-2"
                            operation="lt" end="2020-08-07"
                        />
                        <date_expression id="my-id-rule-rule-expr-3"
                            operation="in_range"
                            start="2020-09-01" end="2020-09-11"
                        />
                        <date_expression id="my-id-rule-rule-expr-4"
                            operation="in_range" start="2020-10-01"
                        >
                            <duration id="my-id-rule-rule-expr-4-duration"
                                months="1"
                            />
                        </date_expression>
                        <date_expression id="my-id-rule-rule-expr-5"
                            operation="date_spec"
                        >
                            <date_spec id="my-id-rule-rule-expr-5-datespec"
                                years="2021-2022"
                            />
                        </date_expression>
                        <date_expression id="my-id-rule-rule-expr-6"
                            operation="in_range" end="2020-12-11"
                        />
                    </rule>
                </rule>
                <nvpair id="my-id-name1" name="name1" value="value1" />
                <nvpair id="my-id-2name" name="2na#me" value="value2" />
            </meta_attributes>
        </{self.tag}>
    """
    # rules require CIB schema 3.4, so load a 3.4 CIB instead of the default
    self.config.runner.cib.load(
        filename="cib-empty-3.4.xml", instead="runner.cib.load"
    )
    self.config.env.push_cib(optional_in_conf=defaults_xml)
    self.command(
        self.env_assist.get_env(),
        {"name1": "value1", "2na#me": "value2"},
        {"id": "my-id", "score": "10"},
        nvset_rule=(
            "resource ocf:pacemaker:Dummy and "
            "(defined attr1 or attr2 gt number 5 or date lt 2020-08-07 or "
            "date in_range 2020-09-01 to 2020-09-11 or "
            "date in_range 2020-10-01 to duration months=1 or "
            "date-spec years=2021-2022 or "
            "date in_range to 2020-12-11)"
        ),
    )
    self.env_assist.assert_reports(
        [fixture.warn(reports.codes.DEFAULTS_CAN_BE_OVERRIDEN)]
    )
def test_qdevice_reload_corosync_stopped(self):
    """Qdevice reload while corosync is stopped on every node.

    Corosync conf reload is reported as not possible and the qdevice
    client start is skipped on each node, yet the push succeeds overall.
    """
    self.corosync_conf_facade.need_qdevice_reload = True
    not_running_body = json.dumps(dict(code="not_running", message=""))
    (
        self.config.http.corosync.set_corosync_conf(
            self.corosync_conf_text, node_labels=self.node_labels
        )
        .http.corosync.reload_corosync_conf(
            communication_list=[
                [
                    {
                        "label": label,
                        "response_code": 200,
                        "output": not_running_body,
                    },
                ]
                for label in self.node_labels
            ]
        )
        .http.corosync.qdevice_client_stop(node_labels=self.node_labels)
        .http.corosync.qdevice_client_start(
            communication_list=[
                {
                    "label": label,
                    "output": "corosync is not running, skipping",
                }
                for label in self.node_labels
            ]
        )
    )
    self.env_assistant.get_env().push_corosync_conf(
        self.corosync_conf_facade
    )
    nodes = ["node-1", "node-2"]
    self.env_assistant.assert_reports(
        [fixture.info(report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED)]
        + [
            fixture.info(
                report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
                node=node,
            )
            for node in nodes
        ]
        + [
            # corosync not running -> reload cannot happen on any node
            fixture.warn(
                report_codes.COROSYNC_CONFIG_RELOAD_NOT_POSSIBLE,
                node=node,
            )
            for node in nodes
        ]
        + [fixture.info(report_codes.QDEVICE_CLIENT_RELOAD_STARTED)]
        + [
            fixture.info(
                report_codes.SERVICE_STOP_SUCCESS,
                node=node,
                service="corosync-qdevice",
                instance=None,
            )
            for node in nodes
        ]
        + [
            # start is skipped on each node since corosync is down
            fixture.info(
                report_codes.SERVICE_START_SKIPPED,
                node=node,
                service="corosync-qdevice",
                instance=None,
                reason="corosync is not running",
            )
            for node in nodes
        ]
    )
def test_unsupported_bundle_container_type(self):
    """A bundle with an unknown container element is still listed.

    The bundle's container_type comes back as None and a warning about
    the unsupported container type is reported.
    """
    self.config.runner.cib.load(resources="""
        <resources>
            <bundle id="B1">
                <unsupported image="pcs:test"/>
                <primitive id="R1" class="ocf" type="Dummy" provider="pacemaker"/>
            </bundle>
            <primitive id="R2" class="ocf" type="Dummy" provider="pacemaker"/>
        </resources>
    """)

    def _dummy_primitive(resource_id):
        # both primitives are identical except for their id
        return CibResourcePrimitiveDto(
            id=resource_id,
            agent_name=ResourceAgentNameDto(
                standard="ocf", provider="pacemaker", type="Dummy"
            ),
            description=None,
            operations=[],
            meta_attributes=[],
            instance_attributes=[],
            utilization=[],
        )

    expected = ListCibResourcesDto(
        primitives=[_dummy_primitive("R1"), _dummy_primitive("R2")],
        clones=[],
        groups=[],
        bundles=[
            CibResourceBundleDto(
                id="B1",
                description=None,
                member_id="R1",
                # unknown container element -> no container type reported
                container_type=None,
                container_options=None,
                network=None,
                port_mappings=[],
                storage_mappings=[],
                meta_attributes=[],
                instance_attributes=[],
            ),
        ],
    )
    self.assertEqual(
        expected,
        resource.get_configured_resources(self.env_assist.get_env()),
    )
    self.env_assist.assert_reports([
        fixture.warn(
            reports.codes.RESOURCE_BUNDLE_UNSUPPORTED_CONTAINER_TYPE,
            bundle_id="B1",
            supported_container_types=sorted(GENERIC_CONTAINER_TYPES),
            updating_options=False,
        )
    ])
def test_full(self):
    """List all nvsets, including one carrying a fully featured rule.

    crm_rule is reported as missing, so every rule's in-effect status is
    CibRuleInEffectStatus.UNKNOWN and a warning about status detection
    not being supported is emitted.
    """
    defaults_xml = f"""
        <{self.tag}>
            <meta_attributes id="{self.tag}-meta_attributes">
                <rule id="{self.tag}-meta_attributes-rule"
                    boolean-op="and" score="INFINITY"
                >
                    <rsc_expression
                        id="{self.tag}-meta_attributes-rule-rsc-Dummy"
                        class="ocf" provider="pacemaker" type="Dummy"
                    />
                    <rule id="{self.tag}-meta_attributes-rule-rule"
                        boolean-op="or"
                    >
                        <expression
                            id="{self.tag}-meta_attributes-rule-rule-expr"
                            operation="defined" attribute="attr1"
                        />
                        <expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-1"
                            attribute="attr2" operation="gt"
                            type="integer" value="5"
                        />
                        <date_expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-2"
                            operation="lt" end="2020-08-07"
                        />
                        <date_expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-3"
                            operation="in_range"
                            start="2020-09-01" end="2020-09-11"
                        />
                        <date_expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-4"
                            operation="in_range" start="2020-10-01"
                        >
                            <duration
                                id="{self.tag}-meta_attributes-rule-rule-expr-4-duration"
                                months="1"
                            />
                        </date_expression>
                        <date_expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-5"
                            operation="date_spec"
                        >
                            <date_spec
                                id="{self.tag}-meta_attributes-rule-rule-expr-5-datespec"
                                years="2021-2022"
                            />
                        </date_expression>
                        <date_expression
                            id="{self.tag}-meta_attributes-rule-rule-expr-6"
                            operation="in_range" end="2020-12-11"
                        />
                    </rule>
                </rule>
                <nvpair id="my-id-pair1" name="name1" value="value1" />
                <nvpair id="my-id-pair2" name="name2" value="value2" />
            </meta_attributes>
            <instance_attributes id="instance">
                <nvpair id="instance-pair" name="inst" value="ance" />
            </instance_attributes>
            <meta_attributes id="meta-plain" score="123">
                <nvpair id="my-id-pair3" name="name1" value="value1" />
            </meta_attributes>
        </{self.tag}>
    """
    self.config.runner.cib.load(
        filename="cib-empty-3.4.xml", optional_in_conf=defaults_xml
    )
    # crm_rule binary is not present -> rule status cannot be detected
    self.config.fs.isfile(
        (os.path.join(settings.pacemaker_binaries, "crm_rule")),
        return_value=False,
    )
    self.assertEqual(
        [
            CibNvsetDto(
                f"{self.tag}-meta_attributes",
                CibNvsetType.META,
                {},
                CibRuleExpressionDto(
                    f"{self.tag}-meta_attributes-rule",
                    CibRuleExpressionType.RULE,
                    CibRuleInEffectStatus.UNKNOWN,
                    {"boolean-op": "and", "score": "INFINITY"},
                    None,
                    None,
                    [
                        CibRuleExpressionDto(
                            f"{self.tag}-meta_attributes-rule-rsc-Dummy",
                            CibRuleExpressionType.RSC_EXPRESSION,
                            CibRuleInEffectStatus.UNKNOWN,
                            {
                                "class": "ocf",
                                "provider": "pacemaker",
                                "type": "Dummy",
                            },
                            None,
                            None,
                            [],
                            "resource ocf:pacemaker:Dummy",
                        ),
                        CibRuleExpressionDto(
                            f"{self.tag}-meta_attributes-rule-rule",
                            CibRuleExpressionType.RULE,
                            CibRuleInEffectStatus.UNKNOWN,
                            {"boolean-op": "or"},
                            None,
                            None,
                            [
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr",
                                    CibRuleExpressionType.EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "operation": "defined",
                                        "attribute": "attr1",
                                    },
                                    None,
                                    None,
                                    [],
                                    "defined attr1",
                                ),
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-1",
                                    CibRuleExpressionType.EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "attribute": "attr2",
                                        "operation": "gt",
                                        "type": "integer",
                                        "value": "5",
                                    },
                                    None,
                                    None,
                                    [],
                                    "attr2 gt integer 5",
                                ),
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-2",
                                    CibRuleExpressionType.DATE_EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "operation": "lt",
                                        "end": "2020-08-07",
                                    },
                                    None,
                                    None,
                                    [],
                                    "date lt 2020-08-07",
                                ),
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-3",
                                    CibRuleExpressionType.DATE_EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "operation": "in_range",
                                        "start": "2020-09-01",
                                        "end": "2020-09-11",
                                    },
                                    None,
                                    None,
                                    [],
                                    "date in_range 2020-09-01 to 2020-09-11",
                                ),
                                # duration variant: date_spec is None,
                                # duration DTO is filled in
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-4",
                                    CibRuleExpressionType.DATE_EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "operation": "in_range",
                                        "start": "2020-10-01",
                                    },
                                    None,
                                    CibRuleDateCommonDto(
                                        f"{self.tag}-meta_attributes-rule-rule-expr-4-duration",
                                        {"months": "1"},
                                    ),
                                    [],
                                    "date in_range 2020-10-01 to duration months=1",
                                ),
                                # date_spec variant: date_spec DTO is
                                # filled in, duration is None
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-5",
                                    CibRuleExpressionType.DATE_EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {"operation": "date_spec"},
                                    CibRuleDateCommonDto(
                                        f"{self.tag}-meta_attributes-rule-rule-expr-5-datespec",
                                        {"years": "2021-2022"},
                                    ),
                                    None,
                                    [],
                                    "date-spec years=2021-2022",
                                ),
                                CibRuleExpressionDto(
                                    f"{self.tag}-meta_attributes-rule-rule-expr-6",
                                    CibRuleExpressionType.DATE_EXPRESSION,
                                    CibRuleInEffectStatus.UNKNOWN,
                                    {
                                        "operation": "in_range",
                                        "end": "2020-12-11",
                                    },
                                    None,
                                    None,
                                    [],
                                    "date in_range to 2020-12-11",
                                ),
                            ],
                            "defined attr1 or attr2 gt integer 5 or "
                            "date lt 2020-08-07 or "
                            "date in_range 2020-09-01 to 2020-09-11 or "
                            "date in_range 2020-10-01 to duration months=1 "
                            "or date-spec years=2021-2022 or "
                            "date in_range to 2020-12-11",
                        ),
                    ],
                    "resource ocf:pacemaker:Dummy and "
                    "(defined attr1 or attr2 gt integer 5 or "
                    "date lt 2020-08-07 or "
                    "date in_range 2020-09-01 to 2020-09-11 or "
                    "date in_range 2020-10-01 to duration months=1 or "
                    "date-spec years=2021-2022 or "
                    "date in_range to 2020-12-11)",
                ),
                [
                    CibNvpairDto("my-id-pair1", "name1", "value1"),
                    CibNvpairDto("my-id-pair2", "name2", "value2"),
                ],
            ),
            CibNvsetDto(
                "instance",
                CibNvsetType.INSTANCE,
                {},
                None,
                [CibNvpairDto("instance-pair", "inst", "ance")],
            ),
            CibNvsetDto(
                "meta-plain",
                CibNvsetType.META,
                {"score": "123"},
                None,
                [CibNvpairDto("my-id-pair3", "name1", "value1")],
            ),
        ],
        self.command(self.env_assist.get_env(), True),
    )
    self.env_assist.assert_reports(
        [
            fixture.warn(
                reports.codes.RULE_IN_EFFECT_STATUS_DETECTION_NOT_SUPPORTED
            ),
        ]
    )