def sbd_setup_block_device(lib, argv, modifiers):
    """
    Initialize (wipe) the given block devices for SBD use.

    Options:
      * --force - do not show warning about wiping the devices
    """
    modifiers.ensure_only_supported("--force")
    parsed = parse_args.prepare_options(
        argv, allowed_repeatable_options=("device",)
    )
    devices = parsed.get("device", [])
    if not devices:
        raise CmdLineInputError("No device defined")
    if not modifiers.get("--force"):
        prompt = (
            "WARNING: All current content on device(s) '{device}' will be"
            " overwritten. Are you sure you want to continue? [y/N] "
        ).format(device="', '".join(devices))
        reply = utils.get_terminal_input(prompt)
        if reply.lower() not in ("y", "yes"):
            print("Canceled")
            return
    # everything except the repeatable "device" option is passed through
    device_options = {
        key: val for key, val in parsed.items() if key != "device"
    }
    lib.sbd.initialize_block_devices(devices, device_options)
def sbd_setup_block_device(lib, argv, modifiers):
    """
    Wipe and initialize block devices so SBD can use them.

    Options:
      * --force - do not show warning about wiping the devices
    """
    modifiers.ensure_only_supported("--force")
    opts = parse_args.prepare_options(
        argv, allowed_repeatable_options=("device",)
    )
    devices = opts.get("device", [])
    if not devices:
        raise CmdLineInputError("No device defined")
    force = modifiers.get("--force")
    if not force:
        message = (
            "WARNING: All current content on device(s) '{device}' will be"
            " overwritten. Are you sure you want to continue? [y/N] "
        ).format(device="', '".join(devices))
        if utils.get_terminal_input(message).lower() not in ("y", "yes"):
            print("Canceled")
            return
    # strip the repeatable "device" option; the rest are initialize options
    remaining = {k: v for k, v in opts.items() if k != "device"}
    lib.sbd.initialize_block_devices(devices, remaining)
def stonith_confirm(lib, argv, modifiers):
    """
    Manually confirm to the cluster that a node has been fenced.

    Options:
      * --force - do not warn user
    """
    # lib is unused here; discard it explicitly, matching the convention used
    # by the sibling implementation of this command in this file
    del lib
    modifiers.ensure_only_supported("--force")
    if len(argv) != 1:
        utils.err("must specify one (and only one) node to confirm fenced")
    node = argv.pop(0)
    if not modifiers.get("--force"):
        answer = utils.get_terminal_input(
            (
                "WARNING: If node {node} is not powered off or it does"
                + " have access to shared resources, data corruption and/or"
                + " cluster failure may occur. Are you sure you want to"
                + " continue? [y/N] "
            ).format(node=node)
        )
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    args = ["stonith_admin", "-C", node]
    output, retval = utils.run(args)
    if retval != 0:
        utils.err("unable to confirm fencing of node '%s'\n" % node + output)
    else:
        print("Node: %s confirmed fenced" % node)
def stonith_confirm(lib, argv, modifiers):
    """
    Tell pacemaker (via stonith_admin) that a node was manually fenced.

    Options:
      * --force - do not warn user
    """
    del lib
    modifiers.ensure_only_supported("--force")
    if len(argv) != 1:
        utils.err("must specify one (and only one) node to confirm fenced")
    node = argv.pop(0)
    skip_warning = modifiers.get("--force")
    if not skip_warning:
        prompt = (
            "WARNING: If node {node} is not powered off or it does"
            " have access to shared resources, data corruption and/or"
            " cluster failure may occur. Are you sure you want to"
            " continue? [y/N] "
        ).format(node=node)
        if utils.get_terminal_input(prompt).lower() not in ("y", "yes"):
            print("Canceled")
            return
    output, retval = utils.run(["stonith_admin", "-C", node])
    if retval == 0:
        print("Node: %s confirmed fenced" % node)
    else:
        utils.err("unable to confirm fencing of node '%s'\n" % node + output)
def quorum_unblock_cmd(lib, argv, modifiers):
    """
    Options:
      * --force - no error when removing non existing property and no warning
        about this action
    """
    modifiers.ensure_only_supported("--force")
    if argv:
        raise CmdLineInputError()

    # Only meaningful while corosync is still in wait_for_all mode (waiting
    # for every configured node to join before granting quorum).
    output, retval = utils.run(
        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
    )
    if retval != 0:
        utils.err("unable to check quorum status")
    if output.split("=")[-1].strip() != "1":
        utils.err("cluster is not waiting for nodes to establish quorum")

    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if report_list:
        utils.process_library_reports(report_list)

    # Nodes configured in corosync.conf but not currently joined.
    unjoined_nodes = set(all_nodes) - set(utils.getCorosyncActiveNodes())
    if not unjoined_nodes:
        utils.err("no unjoined nodes found")
    if not modifiers.get("--force"):
        answer = utils.get_terminal_input(
            (
                "WARNING: If node(s) {nodes} are not powered off or they do"
                + " have access to shared resources, data corruption and/or"
                + " cluster failure may occur. Are you sure you want to"
                + " continue? [y/N] "
            ).format(nodes=", ".join(unjoined_nodes))
        )
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    for node in unjoined_nodes:
        # pass --force so no warning will be displayed
        stonith.stonith_confirm(
            lib, [node], parse_args.InputModifiers({"--force": ""})
        )

    # Tell corosync to stop waiting for the remaining nodes.
    output, retval = utils.run(
        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
    )
    if retval != 0:
        utils.err("unable to cancel waiting for nodes")
    print("Quorum unblocked")

    # Flip startup-fencing to the opposite value and immediately restore it.
    # NOTE(review): presumably this forces a CIB write so pacemaker
    # re-evaluates the property — TODO confirm the intent.
    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
    utils.set_cib_property(
        "startup-fencing",
        "false" if startup_fencing.lower() != "false" else "true"
    )
    utils.set_cib_property("startup-fencing", startup_fencing)
    print("Waiting for nodes canceled")
def quorum_unblock_cmd(lib, argv, modifiers):
    """
    Options:
      * --force - no error when removing non existing property and no warning
        about this action
    """
    modifiers.ensure_only_supported("--force")
    if argv:
        raise CmdLineInputError()

    # Bail out unless corosync is actually waiting for all nodes to join
    # (wait_for_all); unblocking makes no sense otherwise.
    output, retval = utils.run(
        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
    )
    if retval != 0:
        utils.err("unable to check quorum status")
    if output.split("=")[-1].strip() != "1":
        utils.err("cluster is not waiting for nodes to establish quorum")

    all_nodes, report_list = get_existing_nodes_names(
        utils.get_corosync_conf_facade()
    )
    if report_list:
        process_library_reports(report_list)

    # Configured-but-not-joined nodes; these are the ones being given up on.
    unjoined_nodes = set(all_nodes) - set(utils.getCorosyncActiveNodes())
    if not unjoined_nodes:
        utils.err("no unjoined nodes found")
    if not modifiers.get("--force"):
        answer = utils.get_terminal_input(
            (
                "WARNING: If node(s) {nodes} are not powered off or they do"
                + " have access to shared resources, data corruption and/or"
                + " cluster failure may occur. Are you sure you want to"
                + " continue? [y/N] "
            ).format(nodes=", ".join(unjoined_nodes))
        )
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    for node in unjoined_nodes:
        # pass --force so no warning will be displayed
        stonith.stonith_confirm(
            lib, [node], parse_args.InputModifiers({"--force": ""})
        )

    # Cancel corosync's wait so quorum can be established without them.
    output, retval = utils.run(
        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
    )
    if retval != 0:
        utils.err("unable to cancel waiting for nodes")
    print("Quorum unblocked")

    # Toggle startup-fencing away and back.
    # NOTE(review): looks like a trick to force a CIB modification so the
    # property is reprocessed — confirm against pcs history.
    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
    utils.set_cib_property(
        "startup-fencing",
        "false" if startup_fencing.lower() != "false" else "true"
    )
    utils.set_cib_property("startup-fencing", startup_fencing)
    print("Waiting for nodes canceled")
def non_root_run(argv_cmd): """ This function will run commands which has to be run as root for users which are not root. If it required to run such command as root it will do that by sending it to the local pcsd and then it will exit. """ # specific commands need to be run under root account, pass them to pcsd # don't forget to allow each command in pcsd.rb in "post /run_pcs do" root_command_list = [ ['cluster', 'auth', '...'], ['cluster', 'corosync', '...'], ['cluster', 'destroy', '...'], ['cluster', 'disable', '...'], ['cluster', 'enable', '...'], ['cluster', 'node', '...'], ['cluster', 'pcsd-status', '...'], ['cluster', 'start', '...'], ['cluster', 'stop', '...'], ['cluster', 'sync', '...'], # ['config', 'restore', '...'], # handled in config.config_restore ['host', 'auth', '...'], ['host', 'deauth', '...'], ['pcsd', 'deauth', '...'], ['pcsd', 'sync-certificates'], ["quorum", "device", "status", "..."], ["quorum", "status", "..."], ['status', 'corosync', '...'], ['status', 'quorum', '...'], ['status', 'pcsd', '...'], ] orig_argv = argv_cmd[:] for root_cmd in root_command_list: if ((argv_cmd == root_cmd) or (root_cmd[-1] == "..." and argv_cmd[:len(root_cmd) - 1] == root_cmd[:-1])): # handle interactivity of 'pcs cluster auth' if argv_cmd[0:2] in [["cluster", "auth"], ["host", "auth"]]: if "-u" not in utils.pcs_options: username = utils.get_terminal_input('Username: ') orig_argv.extend(["-u", username]) if "-p" not in utils.pcs_options: password = utils.get_terminal_password() orig_argv.extend(["-p", password]) # call the local pcsd err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( orig_argv) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) if std_out.strip(): print(std_out) if std_err.strip(): sys.stderr.write(std_err) sys.exit(exitcode)
def sbd_watchdog_test(lib, argv, modifiers):
    """
    Test the watchdog device by letting it force-reboot this system.

    Options:
      no options
    """
    # Reject any command-line modifiers; the sibling implementation of this
    # command performs the same check, this variant was missing it.
    modifiers.ensure_only_supported()
    if len(argv) > 1:
        raise CmdLineInputError()
    print("Warning: This operation is expected to force-reboot this system "
        "without following any shutdown procedures.")
    if utils.get_terminal_input("Proceed? [no/yes]: ") != "yes":
        return
    # optional positional argument: path to the watchdog device to test
    watchdog = None
    if len(argv) == 1:
        watchdog = argv[0]
    lib.sbd.test_local_watchdog(watchdog)
def quorum_unblock_cmd(argv):
    """
    Cancel corosync's wait_for_all so quorum can be established without the
    nodes that never joined; those nodes are confirmed as fenced first.
    Takes no positional arguments; honors --force in utils.pcs_options.
    """
    if len(argv) > 0:
        usage.quorum(["unblock"])
        sys.exit(1)

    if utils.is_rhel6():
        utils.err("operation is not supported on CMAN clusters")

    # Only applicable while corosync is still waiting for all nodes to join.
    output, retval = utils.run(
        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
    )
    if retval != 0:
        utils.err("unable to check quorum status")
    if output.split("=")[-1].strip() != "1":
        utils.err("cluster is not waiting for nodes to establish quorum")

    # Nodes present in corosync.conf but not currently active.
    unjoined_nodes = (
        set(utils.getNodesFromCorosyncConf())
        - set(utils.getCorosyncActiveNodes())
    )
    if not unjoined_nodes:
        utils.err("no unjoined nodes found")
    if "--force" not in utils.pcs_options:
        answer = utils.get_terminal_input(
            (
                "WARNING: If node(s) {nodes} are not powered off or they do"
                + " have access to shared resources, data corruption and/or"
                + " cluster failure may occur. Are you sure you want to"
                + " continue? [y/N] "
            ).format(nodes=", ".join(unjoined_nodes))
        )
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    for node in unjoined_nodes:
        # skip_question: the user already confirmed above (or used --force)
        stonith.stonith_confirm([node], skip_question=True)

    output, retval = utils.run(
        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
    )
    if retval != 0:
        utils.err("unable to cancel waiting for nodes")
    print("Quorum unblocked")

    # Flip startup-fencing to its opposite value and restore it.
    # NOTE(review): presumably done to force a CIB update — TODO confirm.
    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
    utils.set_cib_property(
        "startup-fencing",
        "false" if startup_fencing.lower() != "false" else "true"
    )
    utils.set_cib_property("startup-fencing", startup_fencing)
    print("Waiting for nodes canceled")
def sbd_setup_block_device(lib, argv, modifiers):
    """
    Wipe and initialize the devices given via the "device" modifier for SBD.
    """
    devices = modifiers["device"]
    if not devices:
        raise CmdLineInputError("No device defined")
    options = parse_args.prepare_options(argv)
    confirmed = modifiers["force"]
    if not confirmed:
        prompt = (
            "WARNING: All current content on device(s) '{device}' will be"
            " overwritten. Are you sure you want to continue? [y/N] "
        ).format(device="', '".join(devices))
        if utils.get_terminal_input(prompt).lower() not in ("y", "yes"):
            print("Canceled")
            return
    lib.sbd.initialize_block_devices(devices, options)
def sbd_watchdog_test(lib, argv, modifiers):
    """
    Trigger a test of the local watchdog device (force-reboots the machine).

    Options:
      no options
    """
    modifiers.ensure_only_supported()
    if len(argv) > 1:
        raise CmdLineInputError()
    print(
        "Warning: This operation is expected to force-reboot this system "
        "without following any shutdown procedures."
    )
    answer = utils.get_terminal_input("Proceed? [no/yes]: ")
    if answer != "yes":
        return
    # at most one positional argument (checked above): the watchdog device
    watchdog = argv[0] if argv else None
    lib.sbd.test_local_watchdog(watchdog)
def stonith_confirm(argv, skip_question=False):
    """
    Confirm fencing of a single node, optionally skipping the safety prompt.
    """
    if len(argv) != 1:
        utils.err("must specify one (and only one) node to confirm fenced")
    node = argv.pop(0)
    ask_user = not skip_question and "--force" not in utils.pcs_options
    if ask_user:
        prompt = (
            "WARNING: If node {node} is not powered off or it does"
            " have access to shared resources, data corruption and/or"
            " cluster failure may occur. Are you sure you want to"
            " continue? [y/N] "
        ).format(node=node)
        if utils.get_terminal_input(prompt).lower() not in ("y", "yes"):
            print("Canceled")
            return
    output, retval = utils.run(["stonith_admin", "-C", node])
    if retval == 0:
        print("Node: %s confirmed fenced" % node)
    else:
        utils.err("unable to confirm fencing of node '%s'\n" % node + output)
def stonith_confirm(argv, skip_question=False):
    """
    Confirm to the cluster that a node has been fenced manually.
    """
    if len(argv) != 1:
        utils.err("must specify one (and only one) node to confirm fenced")
    node = argv.pop(0)
    # prompt unless the caller or --force asked to skip the question
    if not (skip_question or "--force" in utils.pcs_options):
        message = (
            "WARNING: If node {node} is not powered off or it does"
            " have access to shared resources, data corruption and/or"
            " cluster failure may occur. Are you sure you want to"
            " continue? [y/N] "
        ).format(node=node)
        answer = utils.get_terminal_input(message)
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    cmd = ["stonith_admin", "-C", node]
    output, retval = utils.run(cmd)
    if retval != 0:
        utils.err("unable to confirm fencing of node '%s'\n" % node + output)
    else:
        print("Node: %s confirmed fenced" % node)
def _non_root_run(argv_cmd):
    """
    This function will run commands which has to be run as root for users which
    are not root. If it required to run such command as root it will do that by
    sending it to the local pcsd and then it will exit.
    """
    # matching the commands both in here and in pcsd expects -o and --options
    # to be at the end of a command
    argv_and_options = argv_cmd[:]
    for option, value in utils.pcs_options.items():
        if parse_args.is_option_expecting_value(option):
            argv_and_options.extend([option, value])
        else:
            argv_and_options.append(option)

    # specific commands need to be run under root account, pass them to pcsd
    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
    # A trailing "..." matches any further arguments of the command.
    root_command_list = [
        ["cluster", "auth", "..."],
        ["cluster", "corosync", "..."],
        ["cluster", "destroy", "..."],
        ["cluster", "disable", "..."],
        ["cluster", "enable", "..."],
        ["cluster", "node", "..."],
        ["cluster", "pcsd-status", "..."],
        ["cluster", "start", "..."],
        ["cluster", "stop", "..."],
        ["cluster", "sync", "..."],
        # ['config', 'restore', '...'], # handled in config.config_restore
        ["host", "auth", "..."],
        ["host", "deauth", "..."],
        ["pcsd", "deauth", "..."],
        ["pcsd", "sync-certificates"],
        ["quorum", "device", "status", "..."],
        ["quorum", "status", "..."],
        ["status"],
        ["status", "corosync", "..."],
        ["status", "pcsd", "..."],
        ["status", "quorum", "..."],
        ["status", "status", "..."],
    ]

    for root_cmd in root_command_list:
        if (argv_and_options == root_cmd) or (
            root_cmd[-1] == "..."
            and argv_and_options[: len(root_cmd) - 1] == root_cmd[:-1]
        ):
            # handle interactivity of 'pcs cluster auth'
            if argv_and_options[0:2] in [["cluster", "auth"], ["host", "auth"]]:
                if "-u" not in utils.pcs_options:
                    # NOTE(review): the source here was garbled
                    # ('"Username: "******"-u", username])'); restored from
                    # the sibling non_root_run implementations in this file.
                    username = utils.get_terminal_input("Username: ")
                    argv_and_options.extend(["-u", username])
                if "-p" not in utils.pcs_options:
                    password = utils.get_terminal_password()
                    argv_and_options.extend(["-p", password])
            # call the local pcsd
            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
                argv_and_options
            )
            if err_msgs:
                for msg in err_msgs:
                    utils.err(msg, False)
                sys.exit(1)
            if std_out.strip():
                print(std_out)
            if std_err.strip():
                sys.stderr.write(std_err)
            sys.exit(exitcode)
def main(argv=None):
    """
    CLI entry point: parse global options, dispatch to the sub-command,
    and forward root-only commands to the local pcsd for non-root users.
    """
    # shell-completion mode: print suggestions and exit immediately
    if completion.has_applicable_environment(os.environ):
        print(
            completion.make_suggestions(
                os.environ, usage.generate_completion_tree_from_usage()))
        sys.exit()
    argv = argv if argv else sys.argv[1:]
    utils.subprocess_setup()
    global filename, usefile
    # keep the untouched argv for forwarding to pcsd later
    orig_argv = argv[:]
    utils.pcs_options = {}
    argv = parse_args.upgrade_args(argv)

    # we want to support optional arguments for --wait, so if an argument
    # is specified with --wait (ie. --wait=30) then we use them
    waitsecs = None
    new_argv = []
    for arg in argv:
        if arg.startswith("--wait="):
            tempsecs = arg.replace("--wait=", "")
            if len(tempsecs) > 0:
                waitsecs = tempsecs
                arg = "--wait"
        new_argv.append(arg)
    argv = new_argv

    try:
        pcs_options, dummy_argv = getopt.gnu_getopt(
            parse_args.filter_out_non_option_negative_numbers(argv),
            parse_args.PCS_SHORT_OPTIONS,
            parse_args.PCS_LONG_OPTIONS,
        )
    except getopt.GetoptError as err:
        print(err)
        usage.main()
        sys.exit(1)
    argv = parse_args.filter_out_options(argv)

    # --full changes what --version prints (capabilities list), so scan first
    full = False
    for option, dummy_value in pcs_options:
        if option == "--full":
            full = True
            break

    for o, a in pcs_options:
        if not o in utils.pcs_options:
            # repeatable options are collected into lists
            if o in ["--watchdog", "--device"]:
                a = [a]
            utils.pcs_options[o] = a
        else:
            # If any options are a list then they've been entered twice which isn't valid
            if o not in ["--watchdog", "--device"]:
                utils.err("%s can only be used once" % o)
            else:
                utils.pcs_options[o].append(a)
        if o == "-h" or o == "--help":
            if len(argv) == 0:
                usage.main()
                sys.exit()
            else:
                # turn "pcs <cmd> -h" into "pcs <cmd> help"
                argv = [argv[0], "help"] + argv[1:]
        elif o == "-f":
            usefile = True
            filename = a
            utils.usefile = usefile
            utils.filename = filename
        elif o == "--corosync_conf":
            settings.corosync_conf_file = a
        elif o == "--cluster_conf":
            settings.cluster_conf_file = a
        elif o == "--version":
            print(settings.pcs_version)
            if full:
                print(" ".join(
                    sorted([
                        feat["id"]
                        for feat in capabilities.get_pcs_capabilities()
                    ])))
            sys.exit()
        elif o == "--fullhelp":
            usage.full_usage()
            sys.exit()
        elif o == "--wait":
            # value extracted from --wait=<n> earlier, may be None
            utils.pcs_options[o] = waitsecs
        elif o == "--request-timeout":
            request_timeout_valid = False
            try:
                timeout = int(a)
                if timeout > 0:
                    utils.pcs_options[o] = timeout
                    request_timeout_valid = True
            except ValueError:
                pass
            if not request_timeout_valid:
                utils.err(
                    ("'{0}' is not a valid --request-timeout value, use "
                    "a positive integer").format(a))

    if len(argv) == 0:
        usage.main()
        sys.exit(1)

    # create a dummy logger
    # we do not have a log file for cli (yet), but library requires a logger
    logger = logging.getLogger("old_cli")
    logger.propagate = 0
    logger.handlers = []

    command = argv.pop(0)
    if (command == "-h" or command == "help"):
        usage.main()
        return
    # sub-command dispatch table; lambdas adapt commands already migrated to
    # the (lib, argv, modifiers) calling convention
    cmd_map = {
        "resource": resource.resource_cmd,
        "cluster": cluster.cluster_cmd,
        "stonith": stonith.stonith_cmd,
        "property": prop.property_cmd,
        "constraint": constraint.constraint_cmd,
        "acl": lambda argv: acl.acl_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "status": status.status_cmd,
        "config": config.config_cmd,
        "pcsd": pcsd.pcsd_cmd,
        "node": lambda argv: node.node_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "quorum": lambda argv: quorum.quorum_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "qdevice": lambda argv: qdevice.qdevice_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "alert": lambda args: alert.alert_cmd(
            utils.get_library_wrapper(), args, utils.get_modificators()),
        "booth": lambda argv: booth.booth_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
    }
    if command not in cmd_map:
        usage.main()
        sys.exit(1)

    # root can run everything directly, also help can be displayed,
    # working on a local file also do not need to run under root
    if (os.getuid() == 0) or (argv and argv[0] == "help") or usefile:
        cmd_map[command](argv)
        return

    # specific commands need to be run under root account, pass them to pcsd
    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
    root_command_list = [
        ['cluster', 'auth', '...'],
        ['cluster', 'corosync', '...'],
        ['cluster', 'destroy', '...'],
        ['cluster', 'disable', '...'],
        ['cluster', 'enable', '...'],
        ['cluster', 'node', '...'],
        ['cluster', 'pcsd-status', '...'],
        ['cluster', 'setup', '...'],
        ['cluster', 'start', '...'],
        ['cluster', 'stop', '...'],
        ['cluster', 'sync', '...'],
        # ['config', 'restore', '...'], # handled in config.config_restore
        ['pcsd', 'sync-certificates'],
        ['status', 'nodes', 'corosync-id'],
        ['status', 'nodes', 'pacemaker-id'],
        ['status', 'pcsd', '...'],
    ]
    argv_cmd = argv[:]
    argv_cmd.insert(0, command)
    for root_cmd in root_command_list:
        # exact match, or prefix match when the pattern ends with "..."
        if ((argv_cmd == root_cmd)
                or (root_cmd[-1] == "..."
                    and argv_cmd[:len(root_cmd) - 1] == root_cmd[:-1])):
            # handle interactivity of 'pcs cluster auth'
            if argv_cmd[0:2] == ["cluster", "auth"]:
                if "-u" not in utils.pcs_options:
                    username = utils.get_terminal_input('Username: ')
                    orig_argv.extend(["-u", username])
                if "-p" not in utils.pcs_options:
                    password = utils.get_terminal_password()
                    orig_argv.extend(["-p", password])
            # call the local pcsd
            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
                orig_argv, True)
            if err_msgs:
                for msg in err_msgs:
                    utils.err(msg, False)
                sys.exit(1)
            if std_out.strip():
                print(std_out)
            if std_err.strip():
                sys.stderr.write(std_err)
            sys.exit(exitcode)
            return
    cmd_map[command](argv)
def non_root_run(argv_cmd): """ This function will run commands which has to be run as root for users which are not root. If it required to run such command as root it will do that by sending it to the local pcsd and then it will exit. """ # specific commands need to be run under root account, pass them to pcsd # don't forget to allow each command in pcsd.rb in "post /run_pcs do" root_command_list = [ ['cluster', 'auth', '...'], ['cluster', 'corosync', '...'], ['cluster', 'destroy', '...'], ['cluster', 'disable', '...'], ['cluster', 'enable', '...'], ['cluster', 'node', '...'], ['cluster', 'pcsd-status', '...'], ['cluster', 'start', '...'], ['cluster', 'stop', '...'], ['cluster', 'sync', '...'], # ['config', 'restore', '...'], # handled in config.config_restore ['host', 'auth', '...'], ['host', 'deauth', '...'], ['pcsd', 'deauth', '...'], ['pcsd', 'sync-certificates'], ["quorum", "device", "status", "..."], ["quorum", "status", "..."], ['status', 'corosync', '...'], ['status', 'quorum', '...'], ['status', 'pcsd', '...'], ] orig_argv = argv_cmd[:] for root_cmd in root_command_list: if ( (argv_cmd == root_cmd) or ( root_cmd[-1] == "..." and argv_cmd[:len(root_cmd)-1] == root_cmd[:-1] ) ): # handle interactivity of 'pcs cluster auth' if argv_cmd[0:2] in [["cluster", "auth"], ["host", "auth"]]: if "-u" not in utils.pcs_options: username = utils.get_terminal_input('Username: ') orig_argv.extend(["-u", username]) if "-p" not in utils.pcs_options: password = utils.get_terminal_password() orig_argv.extend(["-p", password]) # call the local pcsd err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd( orig_argv ) if err_msgs: for msg in err_msgs: utils.err(msg, False) sys.exit(1) if std_out.strip(): print(std_out) if std_err.strip(): sys.stderr.write(std_err) sys.exit(exitcode)
def main(argv=None):
    """
    CLI entry point: parse global options, dispatch to the sub-command,
    and forward root-only commands to the local pcsd for non-root users.
    """
    # shell-completion mode: print suggestions and exit immediately
    if completion.has_applicable_environment(os.environ):
        print(completion.make_suggestions(
            os.environ,
            usage.generate_completion_tree_from_usage()
        ))
        sys.exit()
    argv = argv if argv else sys.argv[1:]
    utils.subprocess_setup()
    global filename, usefile
    # keep the untouched argv for forwarding to pcsd later
    orig_argv = argv[:]
    utils.pcs_options = {}
    argv = parse_args.upgrade_args(argv)

    # we want to support optional arguments for --wait, so if an argument
    # is specified with --wait (ie. --wait=30) then we use them
    waitsecs = None
    new_argv = []
    for arg in argv:
        if arg.startswith("--wait="):
            tempsecs = arg.replace("--wait=","")
            if len(tempsecs) > 0:
                waitsecs = tempsecs
                arg = "--wait"
        new_argv.append(arg)
    argv = new_argv

    try:
        pcs_options, dummy_argv = getopt.gnu_getopt(
            parse_args.filter_out_non_option_negative_numbers(argv),
            parse_args.PCS_SHORT_OPTIONS,
            parse_args.PCS_LONG_OPTIONS,
        )
    except getopt.GetoptError as err:
        print(err)
        usage.main()
        sys.exit(1)
    argv = parse_args.filter_out_options(argv)

    for o, a in pcs_options:
        if not o in utils.pcs_options:
            # --watchdog is repeatable and collected into a list
            if o == "--watchdog":
                a = [a]
            utils.pcs_options[o] = a
        else:
            # If any options are a list then they've been entered twice which isn't valid
            if o != "--watchdog":
                utils.err("%s can only be used once" % o)
            else:
                utils.pcs_options[o].append(a)
        if o == "-h" or o == "--help":
            if len(argv) == 0:
                usage.main()
                sys.exit()
            else:
                # turn "pcs <cmd> -h" into "pcs <cmd> help"
                argv = [argv[0], "help" ] + argv[1:]
        elif o == "-f":
            usefile = True
            filename = a
            utils.usefile = usefile
            utils.filename = filename
        elif o == "--corosync_conf":
            settings.corosync_conf_file = a
        elif o == "--cluster_conf":
            settings.cluster_conf_file = a
        elif o == "--version":
            print(settings.pcs_version)
            sys.exit()
        elif o == "--fullhelp":
            usage.full_usage()
            sys.exit()
        elif o == "--wait":
            # value extracted from --wait=<n> earlier, may be None
            utils.pcs_options[o] = waitsecs
        elif o == "--request-timeout":
            request_timeout_valid = False
            try:
                timeout = int(a)
                if timeout > 0:
                    utils.pcs_options[o] = timeout
                    request_timeout_valid = True
            except ValueError:
                pass
            if not request_timeout_valid:
                utils.err(
                    (
                        "'{0}' is not a valid --request-timeout value, use "
                        "a positive integer"
                    ).format(a)
                )

    if len(argv) == 0:
        usage.main()
        sys.exit(1)

    # create a dummy logger
    # we do not have a log file for cli (yet), but library requires a logger
    logger = logging.getLogger("old_cli")
    logger.propagate = 0
    logger.handlers = []

    command = argv.pop(0)
    if (command == "-h" or command == "help"):
        usage.main()
        return
    # sub-command dispatch table; lambdas adapt commands already migrated to
    # the (lib, argv, modifiers) calling convention
    cmd_map = {
        "resource": resource.resource_cmd,
        "cluster": cluster.cluster_cmd,
        "stonith": stonith.stonith_cmd,
        "property": prop.property_cmd,
        "constraint": constraint.constraint_cmd,
        "acl": lambda argv: acl.acl_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "status": status.status_cmd,
        "config": config.config_cmd,
        "pcsd": pcsd.pcsd_cmd,
        "node": lambda argv: node.node_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "quorum": lambda argv: quorum.quorum_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "qdevice": lambda argv: qdevice.qdevice_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "alert": lambda args: alert.alert_cmd(
            utils.get_library_wrapper(), args, utils.get_modificators()
        ),
        "booth": lambda argv: booth.booth_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
    }
    if command not in cmd_map:
        usage.main()
        sys.exit(1)

    # root can run everything directly, also help can be displayed,
    # working on a local file also do not need to run under root
    if (os.getuid() == 0) or (argv and argv[0] == "help") or usefile:
        cmd_map[command](argv)
        return

    # specific commands need to be run under root account, pass them to pcsd
    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
    root_command_list = [
        ['cluster', 'auth', '...'],
        ['cluster', 'corosync', '...'],
        ['cluster', 'destroy', '...'],
        ['cluster', 'disable', '...'],
        ['cluster', 'enable', '...'],
        ['cluster', 'node', '...'],
        ['cluster', 'pcsd-status', '...'],
        ['cluster', 'setup', '...'],
        ['cluster', 'start', '...'],
        ['cluster', 'stop', '...'],
        ['cluster', 'sync', '...'],
        # ['config', 'restore', '...'], # handled in config.config_restore
        ['pcsd', 'sync-certificates'],
        ['status', 'nodes', 'corosync-id'],
        ['status', 'nodes', 'pacemaker-id'],
        ['status', 'pcsd', '...'],
    ]
    argv_cmd = argv[:]
    argv_cmd.insert(0, command)
    for root_cmd in root_command_list:
        # exact match, or prefix match when the pattern ends with "..."
        if (
            (argv_cmd == root_cmd)
            or
            (
                root_cmd[-1] == "..."
                and
                argv_cmd[:len(root_cmd)-1] == root_cmd[:-1]
            )
        ):
            # handle interactivity of 'pcs cluster auth'
            if argv_cmd[0:2] == ["cluster", "auth"]:
                if "-u" not in utils.pcs_options:
                    username = utils.get_terminal_input('Username: ')
                    orig_argv.extend(["-u", username])
                if "-p" not in utils.pcs_options:
                    password = utils.get_terminal_password()
                    orig_argv.extend(["-p", password])
            # call the local pcsd
            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
                orig_argv, True
            )
            if err_msgs:
                for msg in err_msgs:
                    utils.err(msg, False)
                sys.exit(1)
            if std_out.strip():
                print(std_out)
            if std_err.strip():
                sys.stderr.write(std_err)
            sys.exit(exitcode)
            return
    cmd_map[command](argv)
def main(argv=None):
    """
    CLI entry point (oldest variant): normalize legacy options, parse global
    options with inline getopt tables, dispatch the sub-command, and forward
    root-only commands to the local pcsd for non-root users.
    """
    # shell-completion mode: print suggestions and exit immediately
    if completion.has_applicable_environment(os.environ):
        print(
            completion.make_suggestions(
                os.environ, usage.generate_completion_tree_from_usage()))
        sys.exit()
    argv = argv if argv else sys.argv[1:]
    utils.subprocess_setup()
    global filename, usefile
    # keep the untouched argv for forwarding to pcsd later
    orig_argv = argv[:]
    utils.pcs_options = {}
    modified_argv = []
    real_argv = []
    try:
        # we change --cloneopt to "clone" for backwards compatibility
        new_argv = []
        for arg in argv:
            if arg == "--cloneopt" or arg == "--clone":
                new_argv.append("clone")
            elif arg.startswith("--cloneopt="):
                new_argv.append("clone")
                new_argv.append(arg.split('=', 1)[1])
            else:
                new_argv.append(arg)
        argv = new_argv

        # we want to support optional arguments for --wait, so if an argument
        # is specified with --wait (ie. --wait=30) then we use them
        waitsecs = None
        new_argv = []
        for arg in argv:
            if arg.startswith("--wait="):
                tempsecs = arg.replace("--wait=", "")
                if len(tempsecs) > 0:
                    waitsecs = tempsecs
                    arg = "--wait"
            new_argv.append(arg)
        argv = new_argv

        # h = help, f = file,
        # p = password (cluster auth), u = user (cluster auth),
        # V = verbose (cluster verify)
        pcs_short_options = "hf:p:u:V"
        pcs_long_options = [
            "debug", "version", "help", "fullhelp",
            "force", "skip-offline", "autocorrect", "interactive",
            "autodelete",
            "all", "full", "groups", "local", "wait", "config",
            "start", "enable", "disabled", "off",
            "pacemaker", "corosync",
            "no-default-ops", "defaults", "nodesc",
            "clone", "master", "name=", "group=", "node=",
            "from=", "to=", "after=", "before=",
            "transport=", "rrpmode=", "ipv6",
            "addr0=", "bcast0=", "mcast0=", "mcastport0=", "ttl0=",
            "broadcast0",
            "addr1=", "bcast1=", "mcast1=", "mcastport1=", "ttl1=",
            "broadcast1",
            "wait_for_all=", "auto_tie_breaker=", "last_man_standing=",
            "last_man_standing_window=",
            "token=", "token_coefficient=", "consensus=", "join=",
            "miss_count_const=", "fail_recv_const=",
            "corosync_conf=", "cluster_conf=",
            "booth-conf=", "booth-key=",
            "remote", "watchdog=",
            # in pcs status - do not display resource status on inactive node
            "hide-inactive",
        ]
        # pull out negative number arguments and add them back after getopt
        # (getopt would otherwise reject e.g. "-1" or "-INFINITY" scores)
        prev_arg = ""
        for arg in argv:
            if len(arg) > 0 and arg[0] == "-":
                if arg[1:].isdigit() or arg[1:].startswith("INFINITY"):
                    real_argv.append(arg)
                else:
                    modified_argv.append(arg)
            else:
                # If previous argument required an argument, then this arg
                # should not be added back in
                if not prev_arg or (
                    not (prev_arg[0] == "-"
                        and prev_arg[1:] in pcs_short_options)
                    and not (prev_arg[0:2] == "--"
                        and (prev_arg[2:] + "=") in pcs_long_options)):
                    real_argv.append(arg)
                modified_argv.append(arg)
            prev_arg = arg

        pcs_options, argv = getopt.gnu_getopt(
            modified_argv, pcs_short_options, pcs_long_options)
    except getopt.GetoptError as err:
        print(err)
        usage.main()
        sys.exit(1)
    # restore the positional arguments (incl. negative numbers) saved above
    argv = real_argv
    for o, a in pcs_options:
        if not o in utils.pcs_options:
            # --watchdog is repeatable and collected into a list
            if o == "--watchdog":
                a = [a]
            utils.pcs_options[o] = a
        else:
            # If any options are a list then they've been entered twice which isn't valid
            if o != "--watchdog":
                utils.err("%s can only be used once" % o)
            else:
                utils.pcs_options[o].append(a)
        if o == "-h" or o == "--help":
            if len(argv) == 0:
                usage.main()
                sys.exit()
            else:
                # turn "pcs <cmd> -h" into "pcs <cmd> help"
                argv = [argv[0], "help"] + argv[1:]
        elif o == "-f":
            usefile = True
            filename = a
            utils.usefile = usefile
            utils.filename = filename
        elif o == "--corosync_conf":
            settings.corosync_conf_file = a
        elif o == "--cluster_conf":
            settings.cluster_conf_file = a
        elif o == "--version":
            print(settings.pcs_version)
            sys.exit()
        elif o == "--fullhelp":
            usage.full_usage()
            sys.exit()
        elif o == "--wait":
            # value extracted from --wait=<n> earlier, may be None
            utils.pcs_options[o] = waitsecs

    if len(argv) == 0:
        usage.main()
        sys.exit(1)

    # create a dummy logger
    # we do not have a log file for cli (yet), but library requires a logger
    logger = logging.getLogger("old_cli")
    logger.propagate = 0
    logger.handlers = []

    command = argv.pop(0)
    if (command == "-h" or command == "help"):
        usage.main()
        return
    # sub-command dispatch table; lambdas adapt commands already migrated to
    # the (lib, argv, modifiers) calling convention
    cmd_map = {
        "resource": resource.resource_cmd,
        "cluster": cluster.cluster_cmd,
        "stonith": stonith.stonith_cmd,
        "property": prop.property_cmd,
        "constraint": constraint.constraint_cmd,
        "acl": lambda argv: acl.acl_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "status": status.status_cmd,
        "config": config.config_cmd,
        "pcsd": pcsd.pcsd_cmd,
        "node": node.node_cmd,
        "quorum": lambda argv: quorum.quorum_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "qdevice": lambda argv: qdevice.qdevice_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
        "alert": lambda args: alert.alert_cmd(
            utils.get_library_wrapper(), args, utils.get_modificators()),
        "booth": lambda argv: booth.booth_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()),
    }
    if command not in cmd_map:
        usage.main()
        sys.exit(1)

    # root can run everything directly, also help can be displayed,
    # working on a local file also do not need to run under root
    if (os.getuid() == 0) or (argv and argv[0] == "help") or usefile:
        cmd_map[command](argv)
        return

    # specific commands need to be run under root account, pass them to pcsd
    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
    root_command_list = [
        ['cluster', 'auth', '...'],
        ['cluster', 'corosync', '...'],
        ['cluster', 'destroy', '...'],
        ['cluster', 'disable', '...'],
        ['cluster', 'enable', '...'],
        ['cluster', 'node', '...'],
        ['cluster', 'pcsd-status', '...'],
        ['cluster', 'setup', '...'],
        ['cluster', 'start', '...'],
        ['cluster', 'stop', '...'],
        ['cluster', 'sync', '...'],
        # ['config', 'restore', '...'], # handled in config.config_restore
        ['pcsd', 'sync-certificates'],
        ['status', 'nodes', 'corosync-id'],
        ['status', 'nodes', 'pacemaker-id'],
        ['status', 'pcsd', '...'],
    ]
    argv_cmd = argv[:]
    argv_cmd.insert(0, command)
    for root_cmd in root_command_list:
        # exact match, or prefix match when the pattern ends with "..."
        if ((argv_cmd == root_cmd)
            or (root_cmd[-1] == "..."
                and argv_cmd[:len(root_cmd) - 1] == root_cmd[:-1])):
            # handle interactivity of 'pcs cluster auth'
            if argv_cmd[0:2] == ["cluster", "auth"]:
                if "-u" not in utils.pcs_options:
                    username = utils.get_terminal_input('Username: ')
                    orig_argv.extend(["-u", username])
                if "-p" not in utils.pcs_options:
                    password = utils.get_terminal_password()
                    orig_argv.extend(["-p", password])
            # call the local pcsd
            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
                orig_argv, True)
            if err_msgs:
                for msg in err_msgs:
                    utils.err(msg, False)
                sys.exit(1)
            if std_out.strip():
                print(std_out)
            if std_err.strip():
                sys.stderr.write(std_err)
            sys.exit(exitcode)
            return
    cmd_map[command](argv)
def main(argv=None):
    """
    CLI entry point: parse global pcs options and dispatch to a sub-command.

    argv -- command line arguments without the program name; falls back to
        sys.argv[1:] when None or empty

    Side effects: populates utils.pcs_options, may set the module globals
    usefile / filename (mirrored into utils), may override settings.*_conf_file,
    prints to stdout/stderr, and may terminate the process via sys.exit() —
    either directly or by forwarding the command to the local pcsd.
    """
    # Bash-completion mode: if the completion environment variables are set,
    # print suggestions and exit instead of running a command.
    if completion.has_applicable_environment(os.environ):
        print(completion.make_suggestions(
            os.environ,
            usage.generate_completion_tree_from_usage()
        ))
        sys.exit()

    argv = argv if argv else sys.argv[1:]
    utils.subprocess_setup()
    global filename, usefile
    # Untouched copy of the arguments; it is forwarded verbatim (plus possibly
    # -u/-p credentials) to the local pcsd when the command needs root (below).
    orig_argv = argv[:]
    utils.pcs_options = {}
    # modified_argv: what getopt actually parses (negative numbers removed,
    # since getopt would treat them as unknown options).
    # real_argv: the positional arguments in original order, negative numbers
    # included; becomes the post-getopt argv.
    modified_argv = []
    real_argv = []
    try:
        # we change --cloneopt to "clone" for backwards compatibility
        new_argv = []
        for arg in argv:
            if arg == "--cloneopt" or arg == "--clone":
                new_argv.append("clone")
            elif arg.startswith("--cloneopt="):
                # split "--cloneopt=value" into the "clone" keyword + its value
                new_argv.append("clone")
                new_argv.append(arg.split('=',1)[1])
            else:
                new_argv.append(arg)
        argv = new_argv

        # we want to support optional arguments for --wait, so if an argument
        # is specified with --wait (ie. --wait=30) then we use them;
        # the captured value is stored into utils.pcs_options["--wait"] later
        waitsecs = None
        new_argv = []
        for arg in argv:
            if arg.startswith("--wait="):
                tempsecs = arg.replace("--wait=","")
                if len(tempsecs) > 0:
                    waitsecs = tempsecs
                    arg = "--wait"
            new_argv.append(arg)
        argv = new_argv

        # h = help, f = file,
        # p = password (cluster auth), u = user (cluster auth),
        # V = verbose (cluster verify)
        pcs_short_options = "hf:p:u:V"
        pcs_long_options = [
            "debug", "version", "help", "fullhelp",
            "force", "skip-offline", "autocorrect", "interactive",
            "autodelete",
            "all", "full", "groups", "local", "wait", "config",
            "start", "enable", "disabled", "off",
            "pacemaker", "corosync",
            "no-default-ops", "defaults", "nodesc",
            "clone", "master", "name=", "group=", "node=",
            "from=", "to=", "after=", "before=",
            "transport=", "rrpmode=", "ipv6",
            "addr0=", "bcast0=", "mcast0=", "mcastport0=", "ttl0=",
            "broadcast0",
            "addr1=", "bcast1=", "mcast1=", "mcastport1=", "ttl1=",
            "broadcast1",
            "wait_for_all=", "auto_tie_breaker=", "last_man_standing=",
            "last_man_standing_window=",
            "token=", "token_coefficient=", "consensus=", "join=",
            "miss_count_const=", "fail_recv_const=",
            "corosync_conf=", "cluster_conf=",
            "remote", "watchdog=",
            # in pcs status - do not display resource status on inactive node
            "hide-inactive",
        ]
        # pull out negative number arguments and add them back after getopt
        prev_arg = ""
        for arg in argv:
            if len(arg) > 0 and arg[0] == "-":
                # "-5" / "-INFINITY" are positional values, not options
                if arg[1:].isdigit() or arg[1:].startswith("INFINITY"):
                    real_argv.append(arg)
                else:
                    modified_argv.append(arg)
            else:
                # If previous argument required an argument, then this arg
                # should not be added back in
                if not prev_arg or (
                    not (
                        prev_arg[0] == "-"
                        and prev_arg[1:] in pcs_short_options
                    )
                    and not (
                        prev_arg[0:2] == "--"
                        and (prev_arg[2:] + "=") in pcs_long_options
                    )
                ):
                    real_argv.append(arg)
                modified_argv.append(arg)
            prev_arg = arg

        pcs_options, argv = getopt.gnu_getopt(
            modified_argv, pcs_short_options, pcs_long_options
        )
    except getopt.GetoptError as err:
        print(err)
        usage.main()
        sys.exit(1)
    # from here on, argv holds only the positional arguments
    argv = real_argv
    for o, a in pcs_options:
        if not o in utils.pcs_options:
            # --watchdog is the only repeatable option; keep its values
            # in a list so repeats can be appended below
            if o == "--watchdog":
                a = [a]
            utils.pcs_options[o] = a
        else:
            # If any options are a list then they've been entered twice which isn't valid
            if o != "--watchdog":
                utils.err("%s can only be used once" % o)
            else:
                utils.pcs_options[o].append(a)
        if o == "-h" or o == "--help":
            if len(argv) == 0:
                usage.main()
                sys.exit()
            else:
                # "pcs resource --help ..." becomes "pcs resource help ..."
                argv = [argv[0], "help" ] + argv[1:]
        elif o == "-f":
            # work on a local CIB file instead of the live cluster
            usefile = True
            filename = a
            utils.usefile = usefile
            utils.filename = filename
        elif o == "--corosync_conf":
            settings.corosync_conf_file = a
        elif o == "--cluster_conf":
            settings.cluster_conf_file = a
        elif o == "--version":
            print(settings.pcs_version)
            sys.exit()
        elif o == "--fullhelp":
            usage.full_usage()
            sys.exit()
        elif o == "--wait":
            # store the timeout captured from --wait=<secs> earlier
            # (None when --wait was given without a value)
            utils.pcs_options[o] = waitsecs

    if len(argv) == 0:
        usage.main()
        sys.exit(1)

    # create a dummy logger
    # we do not have a log file for cli (yet), but library requires a logger
    logger = logging.getLogger("old_cli")
    logger.propagate = 0
    logger.handlers = []

    command = argv.pop(0)
    if (command == "-h" or command == "help"):
        usage.main()
        return
    # dispatch table: first positional argument -> sub-command handler;
    # handlers ported to the new library API are wrapped in lambdas that
    # supply the library wrapper and modifiers
    cmd_map = {
        "resource": resource.resource_cmd,
        "cluster": cluster.cluster_cmd,
        "stonith": stonith.stonith_cmd,
        "property": prop.property_cmd,
        "constraint": constraint.constraint_cmd,
        "acl": acl.acl_cmd,
        "status": status.status_cmd,
        "config": config.config_cmd,
        "pcsd": pcsd.pcsd_cmd,
        "node": node.node_cmd,
        "quorum": lambda argv: quorum.quorum_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "qdevice": lambda argv: qdevice.qdevice_cmd(
            utils.get_library_wrapper(), argv, utils.get_modificators()
        ),
        "alert": lambda args: alert.alert_cmd(
            utils.get_library_wrapper(), args, utils.get_modificators()
        ),
    }
    if command not in cmd_map:
        usage.main()
        sys.exit(1)
    # root can run everything directly, also help can be displayed,
    # working on a local file also do not need to run under root
    if (os.getuid() == 0) or (argv and argv[0] == "help") or usefile:
        cmd_map[command](argv)
        return
    # specific commands need to be run under root account, pass them to pcsd
    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
    # a trailing '...' entry matches any arguments after the listed prefix
    root_command_list = [
        ['cluster', 'auth', '...'],
        ['cluster', 'corosync', '...'],
        ['cluster', 'destroy', '...'],
        ['cluster', 'disable', '...'],
        ['cluster', 'enable', '...'],
        ['cluster', 'node', '...'],
        ['cluster', 'pcsd-status', '...'],
        ['cluster', 'setup', '...'],
        ['cluster', 'start', '...'],
        ['cluster', 'stop', '...'],
        ['cluster', 'sync', '...'],
        # ['config', 'restore', '...'], # handled in config.config_restore
        ['pcsd', 'sync-certificates'],
        ['status', 'nodes', 'corosync-id'],
        ['status', 'nodes', 'pacemaker-id'],
        ['status', 'pcsd', '...'],
    ]
    argv_cmd = argv[:]
    argv_cmd.insert(0, command)
    for root_cmd in root_command_list:
        if (
            (argv_cmd == root_cmd)
            or
            (
                root_cmd[-1] == "..."
                and
                argv_cmd[:len(root_cmd)-1] == root_cmd[:-1]
            )
        ):
            # handle interactivity of 'pcs cluster auth'
            if argv_cmd[0:2] == ["cluster", "auth"]:
                if "-u" not in utils.pcs_options:
                    username = utils.get_terminal_input('Username: ')
                    orig_argv.extend(["-u", username])
                if "-p" not in utils.pcs_options:
                    password = utils.get_terminal_password()
                    orig_argv.extend(["-p", password])
            # call the local pcsd
            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
                orig_argv, True
            )
            if err_msgs:
                for msg in err_msgs:
                    utils.err(msg, False)
                sys.exit(1)
            if std_out.strip():
                print(std_out)
            if std_err.strip():
                sys.stderr.write(std_err)
            sys.exit(exitcode)
    return cmd_map[command](argv)