def test_success_no_fence_history(self):
    """Fence history option is omitted when the daemon does not support it."""
    # Swap the default patch for one reporting no fence-history support.
    self.mock_fencehistory_supported.stop()
    self.mock_fencehistory_supported = mock.patch(
        "pcs.lib.pacemaker.live.is_fence_history_supported_status",
        return_value=False,
    )
    self.mock_fencehistory_supported.start()

    runner = self.get_runner()
    status, warnings = lib.get_cluster_status_text(runner, False, True)

    # Verbose flags are present, but --fence-history must not be.
    expected_cmd = [
        "/usr/sbin/crm_mon",
        "--one-shot",
        "--inactive",
        "--show-detail",
        "--show-node-attributes",
        "--failcounts",
    ]
    runner.run.assert_called_once_with(expected_cmd)
    self.assertEqual(self.expected_stdout, status)
    self.assertEqual(warnings, [])
def test_error(self):
    """A nonzero crm_mon exit code raises a CRM_MON_ERROR report."""
    runner = self.get_runner("stdout", "stderr", 1)
    # The reported reason is stderr followed by stdout.
    assert_raise_library_error(
        lambda: lib.get_cluster_status_text(runner, False, False),
        (
            fixture.error(
                report_codes.CRM_MON_ERROR,
                reason="stderr\nstdout",
            )
        ),
    )
    runner.run.assert_called_once_with(
        ["/usr/sbin/crm_mon", "--one-shot", "--inactive"]
    )
def test_success_minimal(self):
    """Default call runs crm_mon with only the minimal options."""
    runner = self.get_runner()

    status, warnings = lib.get_cluster_status_text(runner, False, False)

    runner.run.assert_called_once_with(
        ["/usr/sbin/crm_mon", "--one-shot", "--inactive"]
    )
    self.assertEqual(self.expected_stdout, status)
    self.assertEqual(warnings, [])
def test_warnings(self):
    """Non-verbose mode filters DEBUG: lines out of stderr warnings."""
    runner = self.get_runner(stderr="msgA\nDEBUG: msgB\nmsgC\nDEBUG: msgd\n")

    status, warnings = lib.get_cluster_status_text(runner, False, False)

    runner.run.assert_called_once_with(
        ["/usr/sbin/crm_mon", "--one-shot", "--inactive"]
    )
    self.assertEqual(self.expected_stdout, status)
    # Only the non-DEBUG lines survive.
    self.assertEqual(warnings, ["msgA", "msgC"])
def test_success_hide_inactive_verbose(self):
    """Hiding inactive resources drops --inactive; verbose adds its flags."""
    runner = self.get_runner()

    status, warnings = lib.get_cluster_status_text(runner, True, True)

    expected_cmd = [
        "/usr/sbin/crm_mon",
        "--one-shot",
        "--show-detail",
        "--show-node-attributes",
        "--failcounts",
        "--fence-history=3",
    ]
    runner.run.assert_called_once_with(expected_cmd)
    self.assertEqual(self.expected_stdout, status)
    self.assertEqual(warnings, [])
def test_warnings_verbose(self):
    """Verbose mode keeps every stderr line, including DEBUG: ones."""
    runner = self.get_runner(stderr="msgA\nDEBUG: msgB\nmsgC\nDEBUG: msgd\n")

    status, warnings = lib.get_cluster_status_text(runner, False, True)

    expected_cmd = [
        "/usr/sbin/crm_mon",
        "--one-shot",
        "--inactive",
        "--show-detail",
        "--show-node-attributes",
        "--failcounts",
        "--fence-history=3",
    ]
    runner.run.assert_called_once_with(expected_cmd)
    self.assertEqual(self.expected_stdout, status)
    self.assertEqual(
        warnings, ["msgA", "DEBUG: msgB", "msgC", "DEBUG: msgd"]
    )
def full_cluster_status_plaintext(
    env: LibraryEnvironment,
    hide_inactive_resources: bool = False,
    verbose: bool = False,
) -> str:
    """
    Return full cluster status as plaintext

    env -- LibraryEnvironment
    hide_inactive_resources -- if True, do not display non-running resources
    verbose -- if True, display more info
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-statements

    # validation
    # CIB and corosync.conf must both be live or both be provided as files;
    # a mixed environment is rejected up front with an explicit report.
    if not env.is_cib_live and env.is_corosync_conf_live:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentNotConsistent(
                    [file_type_codes.CIB],
                    [file_type_codes.COROSYNC_CONF],
                )
            )
        )
    if env.is_cib_live and not env.is_corosync_conf_live:
        raise LibraryError(
            ReportItem.error(
                reports.messages.LiveEnvironmentNotConsistent(
                    [file_type_codes.COROSYNC_CONF],
                    [file_type_codes.CIB],
                )
            )
        )

    # initialization
    runner = env.cmd_runner()
    report_processor = env.report_processor
    # Both validated above to match, so live means "fully live environment".
    live = env.is_cib_live and env.is_corosync_conf_live
    is_sbd_running = False

    # load status, cib, corosync.conf
    status_text, warning_list = get_cluster_status_text(
        runner, hide_inactive_resources, verbose
    )
    corosync_conf = None
    # If we are live on a remote node, we have no corosync.conf.
    # TODO Use the new file framework so the path is not exposed.
    if not live or os.path.exists(settings.corosync_conf_file):
        corosync_conf = env.get_corosync_conf()
    cib = env.get_cib()
    # Ticket status is only needed for the verbose "Tickets:" section below.
    if verbose:
        (
            ticket_status_text,
            ticket_status_stderr,
            ticket_status_retval,
        ) = get_ticket_status_text(runner)
    # get extra info if live
    if live:
        try:
            is_sbd_running = is_service_running(runner, get_sbd_service_name())
        except LibraryError:
            # Best-effort: if SBD state cannot be determined, keep False.
            pass
        local_services_status = _get_local_services_status(runner)
        if verbose and corosync_conf:
            node_name_list, node_names_report_list = get_existing_nodes_names(
                corosync_conf
            )
            report_processor.report_list(node_names_report_list)
            node_reachability = _get_node_reachability(
                env.get_node_target_factory(),
                env.get_node_communicator(),
                report_processor,
                node_name_list,
            )

    # check stonith configuration
    warning_list = list(warning_list)
    warning_list.extend(_stonith_warnings(cib, is_sbd_running))

    # put it all together
    if report_processor.has_errors:
        raise LibraryError()

    # Prefer the cluster name from corosync.conf; without it (e.g. remote
    # node), fall back to the cluster-name property stored in the CIB.
    cluster_name = (
        corosync_conf.get_cluster_name()
        if corosync_conf
        else nvpair.get_value(
            "cluster_property_set", get_crm_config(cib), "cluster-name", ""
        )
    )
    parts = []
    parts.append(f"Cluster name: {cluster_name}")
    if warning_list:
        parts.extend(["", "WARNINGS:"] + warning_list + [""])
    parts.append(status_text)
    if verbose:
        parts.extend(["", "Tickets:"])
        if ticket_status_retval != 0:
            ticket_warning_parts = [
                "WARNING: Unable to get information about tickets"
            ]
            if ticket_status_stderr:
                ticket_warning_parts.extend(
                    indent(ticket_status_stderr.splitlines())
                )
            parts.extend(indent(ticket_warning_parts))
        else:
            parts.extend(indent(ticket_status_text.splitlines()))
    if live:
        # PCSD reachability was only gathered when verbose and corosync.conf
        # is available, matching the guard above.
        if verbose and corosync_conf:
            parts.extend(["", "PCSD Status:"])
            parts.extend(
                indent(
                    _format_node_reachability(node_name_list, node_reachability)
                )
            )
        parts.extend(["", "Daemon Status:"])
        parts.extend(
            indent(_format_local_services_status(local_services_status))
        )
    return "\n".join(parts)
def full_cluster_status_plaintext(
    env: LibraryEnvironment,
    hide_inactive_resources: bool = False,
    verbose: bool = False,
) -> str:
    """
    Return full cluster status as plaintext

    env -- LibraryEnvironment
    hide_inactive_resources -- if True, do not display non-running resources
    verbose -- if True, display more info
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # validation
    # CIB and corosync.conf must both be live or both be provided as files;
    # a mixed environment is rejected up front.
    if not env.is_cib_live and env.is_corosync_conf_live:
        raise LibraryError(
            reports.live_environment_not_consistent(
                [file_type_codes.CIB],
                [file_type_codes.COROSYNC_CONF],
            ))
    if env.is_cib_live and not env.is_corosync_conf_live:
        raise LibraryError(
            reports.live_environment_not_consistent(
                [file_type_codes.COROSYNC_CONF],
                [file_type_codes.CIB],
            ))
    # initialization
    runner = env.cmd_runner()
    report_processor = SimpleReportProcessor(env.report_processor)
    # Both validated above to match, so live means "fully live environment".
    live = env.is_cib_live and env.is_corosync_conf_live
    is_sbd_running = False
    # load status, cib, corosync.conf
    status_text, warning_list = get_cluster_status_text(
        runner, hide_inactive_resources, verbose)
    corosync_conf = env.get_corosync_conf()
    cib = env.get_cib()
    # Ticket status is only needed for the verbose "Tickets:" section below.
    if verbose:
        ticket_status_text, ticket_status_stderr, ticket_status_retval = (
            get_ticket_status_text(runner))
    # get extra info if live
    if live:
        try:
            is_sbd_running = is_service_running(runner, get_sbd_service_name())
        except LibraryError:
            # Best-effort: if SBD state cannot be determined, keep False.
            pass
        local_services_status = _get_local_services_status(runner)
        if verbose:
            node_name_list, node_names_report_list = get_existing_nodes_names(
                corosync_conf)
            report_processor.report_list(node_names_report_list)
            node_reachability = _get_node_reachability(
                env.get_node_target_factory(),
                env.get_node_communicator(),
                report_processor,
                node_name_list,
            )
    # check stonith configuration
    warning_list = list(warning_list)
    warning_list.extend(_stonith_warnings(cib, is_sbd_running))
    # put it all together
    if report_processor.has_errors:
        raise LibraryError()
    parts = []
    parts.append(f"Cluster name: {corosync_conf.get_cluster_name()}")
    if warning_list:
        parts.extend(["", "WARNINGS:"] + warning_list + [""])
    parts.append(status_text)
    if verbose:
        parts.extend(["", "Tickets:"])
        if ticket_status_retval != 0:
            ticket_warning_parts = [
                "WARNING: Unable to get information about tickets"
            ]
            if ticket_status_stderr:
                ticket_warning_parts.extend(
                    indent(ticket_status_stderr.splitlines()))
            parts.extend(indent(ticket_warning_parts))
        else:
            parts.extend(indent(ticket_status_text.splitlines()))
    if live:
        # Node reachability was only gathered in the verbose live branch
        # above, matching this guard.
        if verbose:
            parts.extend(["", "PCSD Status:"])
            parts.extend(
                indent(
                    _format_node_reachability(node_name_list, node_reachability)))
        parts.extend(["", "Daemon Status:"])
        parts.extend(
            indent(_format_local_services_status(local_services_status)))
    return "\n".join(parts)