def config_show(argv):
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(["config"])
    print()
    config_show_cib()
    if (
        utils.hasCorosyncConf()
        and (
            utils.is_rhel6()
            or
            (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
        )
    ):
        # with corosync 1 and cman, uid gid is part of cluster.conf file
        # with corosync 2, uid gid is in a separate directory
        cluster.cluster_uidgid([], True)
    if (
        "--corosync_conf" in utils.pcs_options
        or
        (not utils.is_rhel6() and utils.hasCorosyncConf())
    ):
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            utils.process_library_reports(e.args)

def config_show(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file, when getting cluster name on remote node
        (corosync.conf doesn't exist)
      * --corosync_conf - corosync.conf file
    """
    modifiers.ensure_only_supported("-f", "--corosync_conf")
    if argv:
        raise CmdLineInputError()
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(lib, ["config"], modifiers.get_subset("-f"))
    print()
    print("\n".join(_config_show_cib_lines(lib)))
    if (
        utils.hasCorosyncConf()
        and not modifiers.is_specified("-f")
        and not modifiers.is_specified("--corosync_conf")
    ):
        cluster.cluster_uidgid(lib, [], modifiers.get_subset(), silent_list=True)
    if modifiers.is_specified("--corosync_conf") or utils.hasCorosyncConf():
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            process_library_reports(e.args)

def full_status():
    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = ["crm_mon", "--one-shot"]
    if "--hide-inactive" not in utils.pcs_options:
        monitor_command.append('--inactive')
    if "--full" in utils.pcs_options:
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"])

    output, retval = utils.run(monitor_command)
    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print("Cluster name: %s" % cluster_name)

    status_stonith_check()

    if (
        not utils.usefile
        and not utils.is_rhel6()
        and utils.corosyncPacemakerNodeCheck()
    ):
        print(
            "WARNING: corosync and pacemaker node names do not match (IPs used in setup?)"
        )

    print(output)

    if not utils.usefile:
        if "--full" in utils.pcs_options and utils.hasCorosyncConf():
            print_pcsd_daemon_status()
            print()
        utils.serviceStatus(" ")

def full_status():
    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = ["crm_mon", "--one-shot"]
    if "--hide-inactive" not in utils.pcs_options:
        monitor_command.append('--inactive')
    if "--full" in utils.pcs_options:
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"]
        )

    output, retval = utils.run(monitor_command)
    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print("Cluster name: %s" % cluster_name)

    if utils.stonithCheck():
        print("WARNING: no stonith devices and stonith-enabled is not false")

    if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck():
        print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")

    print(output)

    if not utils.usefile:
        if "--full" in utils.pcs_options:
            print_pcsd_daemon_status()
            print()
        utils.serviceStatus(" ")

def config_show(argv):
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(["config"])
    print()
    config_show_cib()
    cluster.cluster_uidgid([], True)
    if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            utils.process_library_reports(e.args)

def full_status():
    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = ["crm_mon", "--one-shot"]
    if "--hide-inactive" not in utils.pcs_options:
        monitor_command.append('--inactive')
    if "--full" in utils.pcs_options:
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"])

    output, retval = utils.run(monitor_command)
    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print("Cluster name: %s" % cluster_name)

    status_stonith_check()

    print(output)

    if "--full" in utils.pcs_options:
        tickets, retval = utils.run(["crm_ticket", "-L"])
        if retval != 0:
            print("WARNING: Unable to get information about tickets")
            print()
        elif tickets:
            print("Tickets:")
            print("\n".join(indent(tickets.split("\n"))))

    if not utils.usefile:
        if "--full" in utils.pcs_options and utils.hasCorosyncConf():
            print_pcsd_daemon_status()
            print()
        utils.serviceStatus(" ")

def config_show(lib, argv, modifiers):
    """
    Options:
      * -f - CIB file, when getting cluster name on remote node
        (corosync.conf doesn't exist)
      * --corosync_conf - corosync.conf file
    """
    modifiers.ensure_only_supported("-f", "--corosync_conf")
    if argv:
        raise CmdLineInputError()
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(lib, ["config"], modifiers.get_subset("-f"))
    print()
    print("\n".join(_config_show_cib_lines(lib)))
    if (
        utils.hasCorosyncConf()
        and not modifiers.is_specified("-f")
        and not modifiers.is_specified("--corosync_conf")
    ):
        cluster.cluster_uidgid(
            lib, [], modifiers.get_subset(), silent_list=True
        )
    if (
        modifiers.is_specified("--corosync_conf")
        or utils.hasCorosyncConf()
    ):
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            utils.process_library_reports(e.args)

def full_status(lib, argv, modifiers):
    """
    Options:
      * --hide-inactive - hide inactive resources
      * --full - show full details, node attributes and failcounts
      * -f - CIB file, crm_mon accepts CIB_file environment variable
      * --corosync_conf - file corosync.conf
      * --request-timeout - HTTP timeout for node authorization check
    """
    modifiers.ensure_only_supported(
        "--hide-inactive", "--full", "-f", "--corosync_conf",
        "--request-timeout",
    )
    if argv:
        raise CmdLineInputError()
    if (
        modifiers.is_specified("--hide-inactive")
        and modifiers.is_specified("--full")
    ):
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = [
        os.path.join(settings.pacemaker_binaries, "crm_mon"), "--one-shot"
    ]
    if not modifiers.get("--hide-inactive"):
        monitor_command.append('--inactive')
    if modifiers.get("--full"):
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"])
        # by default, pending and failed actions are displayed
        # with --full, we display the whole history
        if is_fence_history_supported():
            monitor_command.append("--fence-history=3")

    stdout, stderr, retval = utils.cmd_runner().run(monitor_command)
    if retval != 0:
        utils.err("cluster is not currently running on this node")

    warnings = []
    if stderr.strip():
        for line in stderr.strip().splitlines():
            if line.startswith("DEBUG: "):
                if modifiers.get("--full"):
                    warnings.append(line)
            else:
                warnings.append(line)
    warnings.extend(status_stonith_check(modifiers))

    print("Cluster name: %s" % utils.getClusterName())
    if warnings:
        print()
        print("WARNINGS:")
        print("\n".join(warnings))
        print()
    print(stdout)

    if modifiers.get("--full"):
        tickets, retval = utils.run(["crm_ticket", "-L"])
        if retval != 0:
            print("WARNING: Unable to get information about tickets")
            print()
        elif tickets:
            print("Tickets:")
            print("\n".join(indent(tickets.split("\n"))))

    if not (
        modifiers.is_specified("-f")
        or modifiers.is_specified("--corosync_conf")
    ):
        # do this only if in live environment
        if modifiers.get("--full"):
            print_pcsd_daemon_status(lib, modifiers)
            print()
        utils.serviceStatus(" ")

def config_show(argv):
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(["config"])
    print()
    config_show_cib()
    cluster.cluster_uidgid([], True)

def get_cluster_name():
    """
    Return the cluster name. Example of output: 'mycluster'
    """
    return utils.getClusterName()
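
# Note: several snippets above pass lists of lines through indent(...) before
# joining them with "\n" for display (quorum config, ticket listings). The
# helper itself is not part of this section; the sketch below is an assumption
# about its shape (a list of lines in, a list of lines out, non-empty lines
# prefixed with a fixed number of spaces), not the project's actual
# implementation.
def indent(line_list, indent_step=2):
    """Return a copy of line_list with each non-empty line indented."""
    return [
        "{0}{1}".format(" " * indent_step, line) if line else line
        for line in line_list
    ]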