def cluster_localnode(argv):
    # Add or remove a node in the LOCAL cluster configuration only:
    # corosync.conf on non-RHEL6 stacks, cluster.conf (via ccs helpers)
    # on RHEL6.  argv is [<add|remove|delete>, <node>].
    if len(argv) != 2:
        usage.cluster()
        exit(1)
    elif argv[0] == "add":
        node = argv[1]
        if not utils.is_rhel6():
            success = utils.addNodeToCorosync(node)
        else:
            success = utils.addNodeToClusterConf(node)
        if success:
            print "%s: successfully added!" % node
        else:
            # utils.err prints and exits
            utils.err("unable to add %s" % node)
    elif argv[0] in ["remove","delete"]:
        node = argv[1]
        if not utils.is_rhel6():
            success = utils.removeNodeFromCorosync(node)
        else:
            success = utils.removeNodeFromClusterConf(node)
        if success:
            print "%s: successfully removed!" % node
        else:
            utils.err("unable to remove %s" % node)
    else:
        usage.cluster()
        exit(1)
def node_standby(argv,standby=True):
    # Put a node into standby (standby=True) or take it out (False) via
    # crm_standby.  With --all in pcs_options, every node known to
    # pacemaker is toggled; otherwise argv[0] names the single target.
    if len(argv) == 0 and "--all" not in utils.pcs_options:
        if standby:
            usage.cluster(["standby"])
        else:
            usage.cluster(["unstandby"])
        sys.exit(1)
    nodes = utils.getNodesFromPacemaker()
    if "--all" not in utils.pcs_options:
        # Validate the requested node against pacemaker's node list; after
        # the loop 'node' holds the matched name used for the crm_standby call.
        nodeFound = False
        for node in nodes:
            if node == argv[0]:
                nodeFound = True
                break
        if not nodeFound:
            utils.err("node '%s' does not appear to exist in configuration" % argv[0])
        if standby:
            utils.run(["crm_standby", "-v", "on", "-N", node])
        else:
            utils.run(["crm_standby", "-D", "-N", node])
    else:
        for node in nodes:
            if standby:
                utils.run(["crm_standby", "-v", "on", "-N", node])
            else:
                utils.run(["crm_standby", "-D", "-N", node])
def cluster_get_corosync_conf(argv): if len(argv) != 1: usage.cluster() exit(1) node = argv[0] print utils.getCorosyncConfig(node)
def node_standby(argv, standby=True):
    # Variant of node_standby that reads the node list from corosync.conf
    # rather than from pacemaker.  Semantics otherwise identical: toggle
    # standby mode via crm_standby for one node (argv[0]) or all (--all).
    if len(argv) == 0 and "--all" not in utils.pcs_options:
        if standby:
            usage.cluster(["standby"])
        else:
            usage.cluster(["unstandby"])
        sys.exit(1)
    nodes = utils.getNodesFromCorosyncConf()
    if "--all" not in utils.pcs_options:
        nodeFound = False
        for node in nodes:
            if node == argv[0]:
                nodeFound = True
                break
        if not nodeFound:
            utils.err("node '%s' does not appear to exist in configuration" % argv[0])
        # 'node' is the loop variable that matched argv[0]
        if standby:
            utils.run(["crm_standby", "-v", "on", "-N", node])
        else:
            utils.run(["crm_standby", "-D", "-N", node])
    else:
        for node in nodes:
            if standby:
                utils.run(["crm_standby", "-v", "on", "-N", node])
            else:
                utils.run(["crm_standby", "-D", "-N", node])
def cluster_get_corosync_conf(argv):
    # Fetch and print corosync.conf from the node named in argv[0].
    # NOTE(review): retval is ignored here, so on failure the error text is
    # printed as if it were the config — the later variant checks retval.
    if len(argv) != 1:
        usage.cluster()
        exit(1)
    node = argv[0]
    retval, output = utils.getCorosyncConfig(node)
    print output
def corosync_setup(argv,returnConfig=False):
    # Create the cluster configuration for argv = [cluster_name, node...].
    # Non-RHEL6: render corosync.conf from the template (or hand off to
    # sync()/sync_start() to push it to all nodes unless --local).
    # RHEL6: create cluster.conf through the ccs tool.
    # With returnConfig=True the rendered corosync.conf text is returned
    # instead of being written.
    fedora_config = not utils.is_rhel6()
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options and fedora_config:
        sync_start(argv)
        return
    elif not returnConfig and not "--local" in utils.pcs_options and fedora_config:
        sync(argv)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]

    # Verify that all nodes are resolvable otherwise problems may occur
    for node in nodes:
        try:
            socket.gethostbyname(node)
        except socket.error:
            print "Warning: Unable to resolve hostname: %s" % node

    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
        corosync_config = f.read()
        f.close()
        # Build the nodelist section, assigning sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
        if returnConfig:
            return corosync_config
        utils.setCorosyncConf(corosync_config)
    else:
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--createcluster", cluster_name])
        if retval != 0:
            print output
            print "Error creating cluster:", cluster_name
            sys.exit(1)
        for node in nodes:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--addnode", node])
            if retval != 0:
                print output
                print "Error adding node:", node
                sys.exit(1)
    if "--start" in utils.pcs_options:
        start_cluster([])
def cluster_node(argv): if len(argv) != 2: usage.cluster() sys.exit(1) if argv[0] == "add": add_node = True elif argv[0] == "remove": add_node = False else: usage.cluster() sys.exit(1) node = argv[1] status, output = utils.checkStatus(node) if status == 2: print "Error: pcsd is not running on %s" % node sys.exit(1) elif status == 3: print "Error: %s is not yet authenticated (try pcs cluster auth %s)" % ( node, node) sys.exit(1) if add_node == True: corosync_conf = None for my_node in utils.getNodesFromCorosyncConf(): retval, output = utils.addLocalNode(my_node, node) if retval != 0: print "Error: unable to add %s on %s - %s" % (node, my_node, output.strip()) else: print "%s: Corosync updated" % my_node corosync_conf = output if corosync_conf != None: utils.setCorosyncConfig(node, corosync_conf) utils.startCluster(node) else: print "Error: Unable to update any nodes" sys.exit(1) else: nodesRemoved = False output, retval = utils.run(["crm_node", "--force", "-R", node]) for my_node in utils.getNodesFromCorosyncConf(): retval, output = utils.removeLocalNode(my_node, node) if retval != 0: print "Error: unable to remove %s on %s - %s" % ( node, my_node, output.strip()) else: if output[0] == 0: print "%s: Corosync updated" % my_node nodesRemoved = True else: print "%s: Error executing command occured: %s" % ( my_node, "".join(output[1])) if nodesRemoved == False: print "Error: Unable to update any nodes" sys.exit(1)
def cluster_reload(argv): if len(argv) != 1 or argv[0] != "corosync": usage.cluster(["reload"]) exit(1) output, retval = utils.reloadCorosync() if retval != 0 or "invalid option" in output: utils.err(output.rstrip()) print "Corosync reloaded"
def node_standby(argv,standby=True):
    """Set (standby=True) or clear (standby=False) standby mode for argv[0]."""
    if not argv:
        usage.cluster()
        sys.exit(1)
    target = argv[0]
    if standby:
        cmd = ["crm_standby", "-v", "on", "-N", target]
    else:
        cmd = ["crm_standby", "-D", "-N", target]
    utils.run(cmd)
def node_standby(argv, standby=True):
    """Toggle pacemaker standby mode for the node named in argv[0]."""
    if len(argv) < 1:
        usage.cluster()
        sys.exit(1)
    # -v on enables standby; -D deletes the standby attribute
    mode_args = ["-v", "on"] if standby else ["-D"]
    utils.run(["crm_standby"] + mode_args + ["-N", argv[0]])
def cluster_push(argv): if len(argv) == 1: filename = argv[0] else: usage.cluster() sys.exit(1) output, retval = utils.run(["cibadmin", "--replace", "--xml-file", filename]) if retval != 0: utils.err("unable to push cib\n" + output) else: print "CIB updated"
def cluster_node(argv): if len(argv) != 2: usage.cluster(); sys.exit(1) if argv[0] == "add": add_node = True elif argv[0] == "remove": add_node = False else: usage.cluster(); sys.exit(1) node = argv[1] status,output = utils.checkStatus(node) if status == 2: print "Error: pcsd is not running on %s" % node sys.exit(1) elif status == 3: print "Error: %s is not yet authenticated (try pcs cluster auth %s)" % (node, node) sys.exit(1) if add_node == True: corosync_conf = None for my_node in utils.getNodesFromCorosyncConf(): retval, output = utils.addLocalNode(my_node,node) if retval != 0: print "Error: unable to add %s on %s - %s" % (node,my_node,output.strip()) else: print "%s: Corosync updated" % my_node corosync_conf = output if corosync_conf != None: utils.setCorosyncConfig(node, corosync_conf) utils.startCluster(node) else: print "Error: Unable to update any nodes" sys.exit(1) else: nodesRemoved = False output, retval = utils.run(["crm_node", "--force","-R", node]) for my_node in utils.getNodesFromCorosyncConf(): retval, output = utils.removeLocalNode(my_node,node) if retval != 0: print "Error: unable to remove %s on %s - %s" % (node,my_node,output.strip()) else: if output[0] == 0: print "%s: Corosync updated" % my_node nodesRemoved = True else: print "%s: Error executing command occured: %s" % (my_node, "".join(output[1])) if nodesRemoved == False: print "Error: Unable to update any nodes" sys.exit(1)
def cluster_uidgid(argv, silent_list = False):
    # List, add, or remove corosync uidgid files.
    # No args: list configured uid/gid pairs.  "add"/"rm" with
    # uid=<uid> and/or gid=<gid> args: create or delete the matching file.
    # Delegates to the ccs-based variant on RHEL6.
    if utils.is_rhel6():
        cluster_uidgid_rhel6(argv, silent_list)
        return
    if len(argv) == 0:
        found = False
        uid_gid_files = os.listdir(settings.corosync_uidgid_dir)
        for ug_file in uid_gid_files:
            uid_gid_dict = utils.read_uid_gid_file(ug_file)
            if "uid" in uid_gid_dict or "gid" in uid_gid_dict:
                line = "UID/GID: uid="
                if "uid" in uid_gid_dict:
                    line += uid_gid_dict["uid"]
                line += " gid="
                if "gid" in uid_gid_dict:
                    line += uid_gid_dict["gid"]
                print line
                found = True
        if not found and not silent_list:
            # NOTE(review): message says cluster.conf although this branch
            # reads the corosync uidgid dir — confirm intended wording
            print "No uidgids configured in cluster.conf"
        return
    command = argv.pop(0)
    uid=""
    gid=""
    if (command == "add" or command == "rm") and len(argv) > 0:
        # Parse uid=<v> / gid=<v> key-value arguments
        for arg in argv:
            if arg.find('=') == -1:
                utils.err("uidgid options must be of the form uid=<uid> gid=<gid>")
            (k,v) = arg.split('=',1)
            if k != "uid" and k != "gid":
                utils.err("%s is not a valid key, you must use uid or gid" %k)
            if k == "uid":
                uid = v
            if k == "gid":
                gid = v
        if uid == "" and gid == "":
            utils.err("you must set either uid or gid")
        if command == "add":
            utils.write_uid_gid_file(uid,gid)
        elif command == "rm":
            retval = utils.remove_uid_gid_file(uid,gid)
            if retval == False:
                utils.err("no uidgid files with uid=%s and gid=%s found" % (uid,gid))
    else:
        usage.cluster(["uidgid"])
        exit(1)
def cluster_pacemaker(argv): if len(argv) < 2 or argv[0] != "remove": usage.cluster(["pacemaker"]) sys.exit(1) argv.pop(0) node = argv.pop(0) output, retval = utils.run(["crm_node", "--force","-R", node]) if retval != 0 and output != "": print output utils.err("unable to remove node from pacemaker")
def cluster_cib_rollback(argv):
    """Roll the cluster configuration back to a saved CIB snapshot file."""
    if len(argv) != 1:
        usage.cluster(["cib-rollback"])
        sys.exit(1)
    snapshot_path = os.path.join(settings.cib_dir, argv[0])
    try:
        dom = parse(snapshot_path)
    except Exception as e:
        # Covers both missing files and malformed XML
        utils.err("unable to read CIB from '%s': %s" % (snapshot_path, e))
    utils.replace_cib_configuration(dom)
def cluster_uidgid_rhel6(argv, silent_list = False):
    # RHEL6 variant of cluster_uidgid: manage UID/GID entries in
    # cluster.conf through the ccs tool instead of corosync uidgid files.
    if not os.path.isfile("/etc/cluster/cluster.conf"):
        utils.err("the /etc/cluster/cluster.conf file doesn't exist on this machine, create a cluster before running this command")
    if len(argv) == 0:
        # List mode: scrape "UID/GID: ..." lines out of ccs --lsmisc output
        found = False
        output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--lsmisc"])
        if retval != 0:
            utils.err("error running ccs\n" + output)
        lines = output.split('\n')
        for line in lines:
            if line.startswith('UID/GID: '):
                print line
                found = True
        if not found and not silent_list:
            print "No uidgids configured in cluster.conf"
        return
    command = argv.pop(0)
    uid=""
    gid=""
    if (command == "add" or command == "rm") and len(argv) > 0:
        # Parse uid=<v> / gid=<v> key-value arguments
        for arg in argv:
            if arg.find('=') == -1:
                utils.err("uidgid options must be of the form uid=<uid> gid=<gid>")
            (k,v) = arg.split('=',1)
            if k != "uid" and k != "gid":
                utils.err("%s is not a valid key, you must use uid or gid" %k)
            if k == "uid":
                uid = v
            if k == "gid":
                gid = v
        if uid == "" and gid == "":
            utils.err("you must set either uid or gid")
        if command == "add":
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--setuidgid", "uid="+uid, "gid="+gid])
            if retval != 0:
                utils.err("unable to add uidgid\n" + output.rstrip())
        elif command == "rm":
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--rmuidgid", "uid="+uid, "gid="+gid])
            if retval != 0:
                utils.err("unable to remove uidgid\n" + output.rstrip())
        # If we make a change, we sync out the changes to all nodes unless we're using -f
        if not utils.usefile:
            sync_nodes(utils.getNodesFromCorosyncConf(), utils.getCorosyncConf())
    else:
        usage.cluster(["uidgid"])
        exit(1)
def cluster_remote_node(argv):
    """Manage pacemaker_remote nodes via the remote-node resource meta attribute.

    add <hostname> <resource> [...]  — set remote-node on the resource.
    remove|delete <hostname>         — strip all remote-* nvpairs for it.

    BUGFIX: utils.err takes a single message string (see every other call
    site in this file); the original passed the format argument as a second
    parameter instead of %-formatting it into the message.
    """
    if len(argv) < 1:
        usage.cluster(["remote-node"])
        sys.exit(1)
    command = argv.pop(0)
    if command == "add":
        if len(argv) < 2:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        rsc = argv.pop(0)
        if not utils.is_resource(rsc):
            utils.err("unable to find resource '%s'" % rsc)
        resource.resource_update(rsc, ["meta", "remote-node="+hostname] + argv)
    elif command in ["remove","delete"]:
        if len(argv) < 1:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        dom = utils.get_cib_dom()
        nvpairs = dom.getElementsByTagName("nvpair")
        nvpairs_to_remove = []
        # Collect all remote-* nvpairs living alongside the matching
        # remote-node entry, then remove them in a second pass so we don't
        # mutate the DOM while scanning it.
        for nvpair in nvpairs:
            if nvpair.getAttribute("name") == "remote-node" and nvpair.getAttribute("value") == hostname:
                for np in nvpair.parentNode.getElementsByTagName("nvpair"):
                    if np.getAttribute("name").startswith("remote-"):
                        nvpairs_to_remove.append(np)
        for nvpair in nvpairs_to_remove[:]:
            nvpair.parentNode.removeChild(nvpair)
        utils.replace_cib_configuration(dom)
    else:
        usage.cluster(["remote-node"])
        sys.exit(1)
def cluster_remote_node(argv):
    """Manage pacemaker_remote nodes via the remote-node resource meta attribute.

    add <hostname> <resource> [...] — set remote-node on the resource.
    remove <hostname>               — strip all remote-* nvpairs for it.

    BUGFIX: utils.err takes a single message string (see every other call
    site in this file); the original passed the format argument as a second
    parameter instead of %-formatting it into the message.
    """
    if len(argv) < 1:
        usage.cluster(["remote-node"])
        sys.exit(1)
    command = argv.pop(0)
    if command == "add":
        if len(argv) < 2:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        rsc = argv.pop(0)
        if not utils.is_resource(rsc):
            utils.err("unable to find resource '%s'" % rsc)
        resource.resource_update(rsc, ["meta", "remote-node="+hostname] + argv)
    elif command == "remove":
        if len(argv) < 1:
            usage.cluster(["remote-node"])
            sys.exit(1)
        hostname = argv.pop(0)
        dom = utils.get_cib_dom()
        nvpairs = dom.getElementsByTagName("nvpair")
        nvpairs_to_remove = []
        # Collect first, remove second, to avoid mutating while scanning
        for nvpair in nvpairs:
            if nvpair.getAttribute("name") == "remote-node" and nvpair.getAttribute("value") == hostname:
                for np in nvpair.parentNode.getElementsByTagName("nvpair"):
                    if np.getAttribute("name").startswith("remote-"):
                        nvpairs_to_remove.append(np)
        for nvpair in nvpairs_to_remove[:]:
            nvpair.parentNode.removeChild(nvpair)
        utils.replace_cib_configuration(dom)
    else:
        usage.cluster(["remote-node"])
        sys.exit(1)
def corosync_configure(argv,returnConfig=False):
    # Render corosync.conf from the template for argv = [name, node...],
    # or dispatch to sync / sync_start sub-modes.  With returnConfig=True
    # the rendered text is returned instead of written to disk.
    fedora_config = True
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    elif argv[0] == "sync" and len(argv) > 2:
        sync(argv[1:])
        return
    elif argv[0] == "sync_start" and len(argv) > 2:
        sync_start(argv[1:])
        return
    elif len(argv) > 1:
        nodes = argv[1:]
        cluster_name = argv[0]
    else:
        usage.cluster()
        exit(1)
    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
    else:
        f = open(COROSYNC_CONFIG_TEMPLATE, 'r')
    corosync_config = f.read()
    f.close()
    if fedora_config == True:
        # Build the nodelist section with sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
    if returnConfig:
        return corosync_config
    try:
        f = open(COROSYNC_CONFIG_FILE,'w')
        f.write(corosync_config)
        f.close()
    except IOError:
        print "ERROR: Unable to write corosync configuration file, try running as root."
        exit(1)
def cluster_report(argv):
    # Prepare to generate a cluster report tarball at <outfile>.tar.bz2,
    # refusing to overwrite an existing one unless --force is given.
    # NOTE(review): the function appears truncated here — the actual report
    # generation presumably follows; confirm against the full file.
    if len(argv) != 1:
        usage.cluster(["report"])
        sys.exit(1)
    outfile = argv[0]
    dest_outfile = outfile + ".tar.bz2"
    if os.path.exists(dest_outfile):
        if "--force" not in utils.pcs_options:
            utils.err(dest_outfile + " already exists, use --force to overwrite")
        else:
            try:
                os.remove(dest_outfile)
            except OSError, e:
                utils.err("Unable to remove " + dest_outfile + ": " + e.strerror)
def cluster_get_corosync_conf(argv):
    # Print corosync.conf: the local copy with no argument, or the copy
    # fetched from the named node with one argument.  Not available on
    # RHEL6 (cman/cluster.conf stack).
    if utils.is_rhel6():
        utils.err("corosync.conf is not supported on RHEL6")
    if len(argv) > 1:
        usage.cluster()
        exit(1)
    if len(argv) == 0:
        print utils.getCorosyncConf()
        return
    node = argv[0]
    retval, output = utils.getCorosyncConfig(node)
    if retval != 0:
        utils.err(output)
    else:
        print output
def corosync_setup(argv, returnConfig=False):
    # Generate corosync.conf for argv = [cluster_name, node...].  Unless
    # --local is given, the work is delegated to sync()/sync_start() which
    # push the config to all nodes (and start them with --start).
    # returnConfig=True returns the rendered text instead of writing it.
    fedora_config = True
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:
        sync_start(argv)
        return
    elif not returnConfig and not "--local" in utils.pcs_options:
        sync(argv)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]
    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
    else:
        f = open(COROSYNC_CONFIG_TEMPLATE, 'r')
    corosync_config = f.read()
    f.close()
    if fedora_config == True:
        # Build the nodelist section with sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i + 1
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name", cluster_name)
    if returnConfig:
        return corosync_config
    utils.setCorosyncConf(corosync_config)
    if "--start" in utils.pcs_options:
        start_cluster([])
def corosync_configure(argv,returnConfig=False):
    # Render corosync.conf for argv = [name, node...], or dispatch to
    # sync / sync_start sub-modes.  Writes via utils.setCorosyncConf
    # unless returnConfig=True, in which case the text is returned.
    fedora_config = True
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    elif argv[0] == "sync" and len(argv) > 2:
        sync(argv[1:])
        return
    elif argv[0] == "sync_start" and len(argv) > 2:
        sync_start(argv[1:])
        return
    elif len(argv) > 1:
        nodes = argv[1:]
        cluster_name = argv[0]
    else:
        usage.cluster()
        exit(1)
    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
    else:
        f = open(COROSYNC_CONFIG_TEMPLATE, 'r')
    corosync_config = f.read()
    f.close()
    if fedora_config == True:
        # Build the nodelist section with sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
    if returnConfig:
        return corosync_config
    utils.setCorosyncConf(corosync_config)
def corosync_setup(argv,returnConfig=False):
    # Generate corosync.conf for argv = [cluster_name, node...]; unless
    # --local is set, delegate to sync()/sync_start() to distribute (and
    # with --start also start) the cluster on all nodes.
    fedora_config = True
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:
        sync_start(argv)
        return
    elif not returnConfig and not "--local" in utils.pcs_options:
        sync(argv)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]
    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
    else:
        f = open(COROSYNC_CONFIG_TEMPLATE, 'r')
    corosync_config = f.read()
    f.close()
    if fedora_config == True:
        # Build the nodelist section with sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
    if returnConfig:
        return corosync_config
    utils.setCorosyncConf(corosync_config)
    if "--start" in utils.pcs_options:
        start_cluster([])
def cluster_localnode(argv): if len(argv) != 2: usage.cluster() exit(1) elif argv[0] == "add": node = argv[1] success = utils.addNodeToCorosync(node) if success: print "%s: successfully added!" % node else: utils.err("unable to add %s" % node) elif argv[0] == "remove": node = argv[1] success = utils.removeNodeFromCorosync(node) if success: print "%s: successfully removed!" % node else: utils.err("unable to remove %s" % node) else: usage.cluster() exit(1)
def cluster_cmd(argv):
    # Dispatch 'pcs cluster <sub-command>' to its handler.
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    sub_cmd = argv.pop(0)
    if (sub_cmd == "help"):
        usage.cluster()
    elif (sub_cmd == "configure"):
        corosync_configure(argv)
    elif (sub_cmd == "sync"):
        sync_nodes(utils.getNodesFromCorosyncConf(),utils.getCorosyncConf())
    elif (sub_cmd == "gui-status"):
        cluster_gui_status(argv)
    elif (sub_cmd == "auth"):
        cluster_auth(argv)
    elif (sub_cmd == "token"):
        cluster_token(argv)
    elif (sub_cmd == "start"):
        start_cluster(argv)
    elif (sub_cmd == "stop"):
        stop_cluster(argv)
    elif (sub_cmd == "startall"):
        start_cluster_all()
    elif (sub_cmd == "stopall"):
        stop_cluster_all()
    else:
        usage.cluster()
def cluster_certkey(argv): if len(argv) != 2: usage.cluster(["certkey"]) exit(1) certfile = argv[0] keyfile = argv[1] try: with open(certfile, 'r') as myfile: cert = myfile.read() except IOError as e: utils.err(e) try: with open(keyfile, 'r') as myfile: key = myfile.read() except IOError as e: utils.err(e) if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)): utils.err("certificate and/or key already exists, your must use --force to overwrite") try: try: os.chmod(settings.pcsd_cert_location, 0700) except OSError: # If the file doesn't exist, we don't care pass try: os.chmod(settings.pcsd_key_location, 0700) except OSError: # If the file doesn't exist, we don't care pass with os.fdopen(os.open(settings.pcsd_cert_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0700), 'wb') as myfile: myfile.write(cert) with os.fdopen(os.open(settings.pcsd_key_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0700), 'wb') as myfile: myfile.write(key)
def cluster_cmd(argv):
    # Dispatch 'pcs cluster <sub-command>' to its handler.  start/stop/
    # enable/disable honor --all by switching to the *_all variants.
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    sub_cmd = argv.pop(0)
    if (sub_cmd == "help"):
        usage.cluster()
    elif (sub_cmd == "setup"):
        corosync_setup(argv)
    elif (sub_cmd == "sync"):
        sync_nodes(utils.getNodesFromCorosyncConf(), utils.getCorosyncConf())
    elif (sub_cmd == "status"):
        status.cluster_status(argv)
    elif (sub_cmd == "pcsd-status"):
        cluster_gui_status(argv)
    elif (sub_cmd == "auth"):
        cluster_auth(argv)
    elif (sub_cmd == "token"):
        cluster_token(argv)
    elif (sub_cmd == "start"):
        if "--all" in utils.pcs_options:
            start_cluster_all()
        else:
            start_cluster(argv)
    elif (sub_cmd == "stop"):
        if "--all" in utils.pcs_options:
            stop_cluster_all()
        else:
            stop_cluster(argv)
    elif (sub_cmd == "force_stop"):
        force_stop_cluster(argv)
    elif (sub_cmd == "standby"):
        node_standby(argv)
    elif (sub_cmd == "unstandby"):
        node_standby(argv, False)
    elif (sub_cmd == "enable"):
        if "--all" in utils.pcs_options:
            enable_cluster_all()
        else:
            enable_cluster(argv)
    elif (sub_cmd == "disable"):
        if "--all" in utils.pcs_options:
            disable_cluster_all()
        else:
            disable_cluster(argv)
    elif (sub_cmd == "cib"):
        get_cib(argv)
    elif (sub_cmd == "push"):
        cluster_push(argv)
    elif (sub_cmd == "node"):
        cluster_node(argv)
    elif (sub_cmd == "localnode"):
        cluster_localnode(argv)
    elif (sub_cmd == "corosync"):
        cluster_get_corosync_conf(argv)
    else:
        usage.cluster()
        sys.exit(1)
def cluster_cmd(argv):
    # Dispatch 'pcs cluster <sub-command>' to its handler.  start/stop/
    # enable/disable honor --all by switching to the *_all variants.
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    sub_cmd = argv.pop(0)
    if (sub_cmd == "help"):
        usage.cluster()
    elif (sub_cmd == "setup"):
        corosync_setup(argv)
    elif (sub_cmd == "sync"):
        sync_nodes(utils.getNodesFromCorosyncConf(),utils.getCorosyncConf())
    elif (sub_cmd == "status"):
        status.cluster_status(argv)
    elif (sub_cmd == "pcsd-status"):
        cluster_gui_status(argv)
    elif (sub_cmd == "auth"):
        cluster_auth(argv)
    elif (sub_cmd == "token"):
        cluster_token(argv)
    elif (sub_cmd == "start"):
        if "--all" in utils.pcs_options:
            start_cluster_all()
        else:
            start_cluster(argv)
    elif (sub_cmd == "stop"):
        if "--all" in utils.pcs_options:
            stop_cluster_all()
        else:
            stop_cluster(argv)
    elif (sub_cmd == "force_stop"):
        force_stop_cluster(argv)
    elif (sub_cmd == "standby"):
        node_standby(argv)
    elif (sub_cmd == "unstandby"):
        node_standby(argv, False)
    elif (sub_cmd == "enable"):
        if "--all" in utils.pcs_options:
            enable_cluster_all()
        else:
            enable_cluster(argv)
    elif (sub_cmd == "disable"):
        if "--all" in utils.pcs_options:
            disable_cluster_all()
        else:
            disable_cluster(argv)
    elif (sub_cmd == "cib"):
        get_cib(argv)
    elif (sub_cmd == "push"):
        cluster_push(argv)
    elif (sub_cmd == "node"):
        cluster_node(argv)
    elif (sub_cmd == "localnode"):
        cluster_localnode(argv)
    elif (sub_cmd == "corosync"):
        cluster_get_corosync_conf(argv)
    else:
        usage.cluster()
        sys.exit(1)
def cluster_verify(argv): nofilename = True if len(argv) == 1: filename = argv.pop(0) nofilename = False elif len(argv) > 1: usage.cluster("verify") options = [] if "-V" in utils.pcs_options: options.append("-V") if nofilename: options.append("--live-check") else: options.append("--xml-file") options.append(filename) output, retval = utils.run([settings.crm_verify] + options) if output != "": print output stonith.stonith_level_verify() return retval
def cluster_cmd(argv):
    # Dispatch 'pcs cluster <sub-command>' to its handler (older variant
    # with explicit startall/stopall/enableall/disableall sub-commands).
    if len(argv) == 0:
        usage.cluster()
        exit(1)
    sub_cmd = argv.pop(0)
    if (sub_cmd == "help"):
        usage.cluster()
    elif (sub_cmd == "configure"):
        corosync_configure(argv)
    elif (sub_cmd == "sync"):
        sync_nodes(utils.getNodesFromCorosyncConf(),utils.getCorosyncConf())
    elif (sub_cmd == "gui-status"):
        cluster_gui_status(argv)
    elif (sub_cmd == "auth"):
        cluster_auth(argv)
    elif (sub_cmd == "token"):
        cluster_token(argv)
    elif (sub_cmd == "start"):
        start_cluster(argv)
    elif (sub_cmd == "stop"):
        stop_cluster(argv)
    elif (sub_cmd == "enable"):
        enable_cluster()
    elif (sub_cmd == "disable"):
        disable_cluster()
    elif (sub_cmd == "startall"):
        start_cluster_all()
    elif (sub_cmd == "stopall"):
        stop_cluster_all()
    elif (sub_cmd == "enableall"):
        enable_cluster_all()
    elif (sub_cmd == "disableall"):
        disable_cluster_all()
    elif (sub_cmd == "cib"):
        get_cib()
    elif (sub_cmd == "push"):
        cluster_push(argv)
    elif (sub_cmd == "node"):
        cluster_node(argv)
    elif (sub_cmd == "localnode"):
        cluster_localnode(argv)
    elif (sub_cmd == "get_conf"):
        cluster_get_corosync_conf(argv)
    else:
        usage.cluster()
def corosync_setup(argv,returnConfig=False):
    # Create the cluster configuration for argv = [cluster_name, node...].
    # Nodes may be "addr0,addr1" pairs to configure corosync RRP.
    # Non-RHEL6: render corosync.conf (nodelist, quorum options, transport,
    # rrp_mode/interfaces) from the template, or delegate to sync()/
    # sync_start() to distribute it.  RHEL6: build cluster.conf via ccs
    # with a fence_pcmk redirect device per node.
    fedora_config = not utils.is_rhel6()
    failure = False
    primary_nodes = []

    # If node contains a ',' we only care about the first address
    for node in argv[1:]:
        if "," in node:
            primary_nodes.append(node.split(',')[0])
        else:
            primary_nodes.append(node)

    if len(argv) < 2:
        usage.cluster()
        exit(1)

    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options and fedora_config:
        sync_start(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        return
    elif not returnConfig and not "--local" in utils.pcs_options and fedora_config:
        sync(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]

    # Verify that all nodes are resolvable otherwise problems may occur
    udpu_rrp = False
    for node in nodes:
        try:
            if "," in node:
                socket.getaddrinfo(node.split(",")[0],None)
                socket.getaddrinfo(node.split(",")[1],None)
                udpu_rrp = True
            else:
                socket.getaddrinfo(node,None)
        except socket.error:
            print "Warning: Unable to resolve hostname: %s" % node
            failure = True

    if udpu_rrp:
        # RRP must be all-or-nothing across the node list
        for node in nodes:
            if "," not in node:
                utils.err("if one node is configured for RRP, all nodes must configured for RRP")

    if failure and "--force" not in utils.pcs_options:
        utils.err("Unable to resolve all hostnames (use --force to override).")

    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
        corosync_config = f.read()
        f.close()

        # Build the nodelist section with sequential nodeids (ring1_addr
        # is only emitted in udpu RRP mode)
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            if udpu_rrp:
                new_nodes_section += " ring0_addr: %s\n" % (node.split(",")[0])
                new_nodes_section += " ring1_addr: %s\n" % (node.split(",")[1])
            else:
                new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1

        two_node_section = ""
        if len(nodes) == 2:
            two_node_section = "two_node: 1"

        # Pass selected quorum tunables straight through from pcs options
        quorum_options = ""
        if "--wait_for_all" in utils.pcs_options:
            quorum_options += "wait_for_all: " + utils.pcs_options["--wait_for_all"] + "\n"
        if "--auto_tie_breaker" in utils.pcs_options:
            quorum_options += "auto_tie_breaker: " + utils.pcs_options["--auto_tie_breaker"] + "\n"
        if "--last_node_standing" in utils.pcs_options:
            quorum_options += "last_node_standing: " + utils.pcs_options["--last_node_standing"] + "\n"
        if "--last_node_standing_window" in utils.pcs_options:
            quorum_options += "last_node_standing_window: " + utils.pcs_options["--last_node_standing_window"] + "\n"

        transport = "udpu"
        if "--transport" in utils.pcs_options:
            transport = utils.pcs_options["--transport"]

        ir = ""
        if transport == "udpu" and ("--addr0" in utils.pcs_options or "--addr1" in utils.pcs_options):
            utils.err("--addr0 and --addr1 can only be used with --transport=udp")

        # rrp_mode defaults to passive whenever any RRP-style option is present
        if "--rrpmode" in utils.pcs_options or udpu_rrp or "--addr0" in utils.pcs_options:
            rrpmode = "passive"
            if "--rrpmode" in utils.pcs_options:
                rrpmode = utils.pcs_options["--rrpmode"]
            ir += "rrp_mode: " + rrpmode + "\n"

        if transport == "udp":
            if "--addr0" in utils.pcs_options:
                ir += utils.generate_rrp_corosync_config(0)
            if "--addr1" in utils.pcs_options:
                ir += utils.generate_rrp_corosync_config(1)

        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
        corosync_config = corosync_config.replace("@@quorum_options\n",quorum_options)
        corosync_config = corosync_config.replace("@@two_node",two_node_section)
        corosync_config = corosync_config.replace("@@transport",transport)
        corosync_config = corosync_config.replace("@@interfaceandrrpmode\n",ir)
        if returnConfig:
            return corosync_config
        utils.setCorosyncConf(corosync_config)
    else:
        if os.path.exists("/etc/cluster/cluster.conf") and not "--force" in utils.pcs_options:
            print "Error: /etc/cluster/cluster.conf already exists, use --force to overwrite"
            sys.exit(1)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--createcluster", cluster_name])
        if retval != 0:
            print output
            utils.err("error creating cluster: %s" % cluster_name)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"])
        if retval != 0:
            print output
            utils.err("error creating fence dev: %s" % cluster_name)
        if len(nodes) == 2:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--setcman", "two_node=1", "expected_votes=1"])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
        for node in nodes:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--addnode", node])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addmethod", "pcmk-method", node])
            if retval != 0:
                print output
                utils.err("error adding fence method: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addfenceinst", "pcmk-redirect", node, "pcmk-method", "port="+node])
            if retval != 0:
                print output
                utils.err("error adding fence instance: %s" % node)
    if "--start" in utils.pcs_options:
        start_cluster([])
    if "--enable" in utils.pcs_options:
        enable_cluster([])
def corosync_setup(argv,returnConfig=False):
    # Create the cluster configuration for argv = [cluster_name, node...].
    # Non-RHEL6: render corosync.conf from the template (two_node is set
    # automatically for 2-node clusters) or delegate to sync()/sync_start().
    # RHEL6: build cluster.conf via ccs with fence_pcmk redirection.
    fedora_config = not utils.is_rhel6()
    failure = False
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options and fedora_config:
        sync_start(argv)
        if "--enable" in utils.pcs_options:
            enable_cluster(argv[1:])
        return
    elif not returnConfig and not "--local" in utils.pcs_options and fedora_config:
        sync(argv)
        if "--enable" in utils.pcs_options:
            enable_cluster(argv[1:])
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]

    # Verify that all nodes are resolvable otherwise problems may occur
    for node in nodes:
        try:
            socket.gethostbyname(node)
        except socket.error:
            print "Warning: Unable to resolve hostname: %s" % node
            failure = True
    if failure:
        utils.err("Unable to resolve all hostnames.")

    if fedora_config == True:
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
        corosync_config = f.read()
        f.close()
        # Build the nodelist section with sequential nodeids
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1
        two_node_section = ""
        if len(nodes) == 2:
            two_node_section = "two_node: 1"
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
        corosync_config = corosync_config.replace("@@two_node",two_node_section)
        if returnConfig:
            return corosync_config
        utils.setCorosyncConf(corosync_config)
    else:
        if os.path.exists("/etc/cluster/cluster.conf") and not "--force" in utils.pcs_options:
            print "Error: /etc/cluster/cluster.conf already exists, use --force to overwrite"
            sys.exit(1)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--createcluster", cluster_name])
        if retval != 0:
            print output
            utils.err("error creating cluster: %s" % cluster_name)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"])
        if retval != 0:
            print output
            utils.err("error creating fence dev: %s" % cluster_name)
        if len(nodes) == 2:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--setcman", "two_node=1", "expected_votes=1"])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
        for node in nodes:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf", "--addnode", node])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addmethod", "pcmk-method", node])
            if retval != 0:
                print output
                utils.err("error adding fence method: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf", "--addfenceinst", "pcmk-redirect", node, "pcmk-method", "port="+node])
            if retval != 0:
                print output
                utils.err("error adding fence instance: %s" % node)
    if "--start" in utils.pcs_options:
        start_cluster([])
    if "--enable" in utils.pcs_options:
        enable_cluster([])
def corosync_setup(argv, returnConfig=False):
    """Generate and install cluster configuration for the nodes in argv.

    argv: [cluster_name, node1, node2, ...]
    returnConfig: when True, return the generated corosync.conf text
    instead of installing it locally.

    Non-RHEL6 ("fedora_config") systems get a corosync.conf built from
    COROSYNC_CONFIG_FEDORA_TEMPLATE; RHEL6 gets a cluster.conf built via
    the ccs tool.  Unless --local is given (fedora path only), the work
    is delegated to sync()/sync_start() which push config to all nodes.
    """
    fedora_config = not utils.is_rhel6()
    failure = False
    if len(argv) < 2:
        usage.cluster()
        exit(1)
    # Non-local fedora setups are pushed to all nodes; sync_start() also
    # starts the cluster when --start was given.
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options and fedora_config:
        sync_start(argv)
        return
    elif not returnConfig and not "--local" in utils.pcs_options and fedora_config:
        sync(argv)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]

    # Verify that all nodes are resolvable otherwise problems may occur
    for node in nodes:
        try:
            socket.gethostbyname(node)
        except socket.error:
            print "Warning: Unable to resolve hostname: %s" % node
            failure = True
    if failure:
        utils.err("Unable to resolve all hostnames.")

    if fedora_config == True:
        # Build corosync.conf by substituting @@-placeholders in the template.
        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
        corosync_config = f.read()
        f.close()
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i + 1
        # A two-node cluster gets the corosync two_node quorum option.
        two_node_section = ""
        if len(nodes) == 2:
            two_node_section = "two_node: 1"
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name", cluster_name)
        corosync_config = corosync_config.replace("@@two_node", two_node_section)
        if returnConfig:
            return corosync_config
        utils.setCorosyncConf(corosync_config)
    else:
        # RHEL6 path: build /etc/cluster/cluster.conf with the ccs tool.
        if os.path.exists("/etc/cluster/cluster.conf" ) and not "--force" in utils.pcs_options:
            print "Error: /etc/cluster/cluster.conf already exists, use --force to overwrite"
            sys.exit(1)
        output, retval = utils.run([
            "/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf",
            "--createcluster", cluster_name
        ])
        if retval != 0:
            print output
            utils.err("error creating cluster: %s" % cluster_name)
        output, retval = utils.run([
            "/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf",
            "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"
        ])
        if retval != 0:
            print output
            utils.err("error creating fence dev: %s" % cluster_name)
        # Register each node plus its pcmk fence method/instance.
        for node in nodes:
            output, retval = utils.run([
                "/usr/sbin/ccs", "-f", "/etc/cluster/cluster.conf",
                "--addnode", node
            ])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
            output, retval = utils.run([
                "/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf",
                "--addmethod", "pcmk-method", node
            ])
            if retval != 0:
                print output
                utils.err("error adding fence method: %s" % node)
            output, retval = utils.run([
                "/usr/sbin/ccs", "-i", "-f", "/etc/cluster/cluster.conf",
                "--addfenceinst", "pcmk-redirect", node, "pcmk-method",
                "port=" + node
            ])
            if retval != 0:
                print output
                utils.err("error adding fence instance: %s" % node)

    # Local (or rhel6) path: optionally start the local cluster.
    if "--start" in utils.pcs_options:
        start_cluster([])
def cluster_cmd(argv): if len(argv) == 0: usage.cluster() exit(1) sub_cmd = argv.pop(0) if (sub_cmd == "help"): usage.cluster(argv) elif (sub_cmd == "setup"): if "--name" in utils.pcs_options: corosync_setup([utils.pcs_options["--name"]] + argv) else: utils.err("A cluster name (--name <name>) is required to setup a cluster") elif (sub_cmd == "sync"): sync_nodes(utils.getNodesFromCorosyncConf(),utils.getCorosyncConf()) elif (sub_cmd == "status"): status.cluster_status(argv) print "" print "PCSD Status:" cluster_gui_status([],True) elif (sub_cmd == "pcsd-status"): cluster_gui_status(argv) elif (sub_cmd == "certkey"): cluster_certkey(argv) elif (sub_cmd == "auth"): cluster_auth(argv) elif (sub_cmd == "token"): cluster_token(argv) elif (sub_cmd == "start"): if "--all" in utils.pcs_options: start_cluster_all() else: start_cluster(argv) elif (sub_cmd == "stop"): if "--all" in utils.pcs_options: stop_cluster_all() else: stop_cluster(argv) elif (sub_cmd == "kill"): kill_cluster(argv) elif (sub_cmd == "standby"): node_standby(argv) elif (sub_cmd == "unstandby"): node_standby(argv, False) elif (sub_cmd == "enable"): if "--all" in utils.pcs_options: enable_cluster_all() else: enable_cluster(argv) elif (sub_cmd == "disable"): if "--all" in utils.pcs_options: disable_cluster_all() else: disable_cluster(argv) elif (sub_cmd == "remote-node"): cluster_remote_node(argv) elif (sub_cmd == "cib"): get_cib(argv) elif (sub_cmd == "cib-push"): cluster_push(argv) elif (sub_cmd == "cib-upgrade"): cluster_upgrade() elif (sub_cmd == "cib-revisions"): cluster_cib_revisions(argv) elif (sub_cmd == "cib-rollback"): cluster_cib_rollback(argv) elif (sub_cmd == "edit"): cluster_edit(argv) elif (sub_cmd == "node"): cluster_node(argv) elif (sub_cmd == "localnode"): cluster_localnode(argv) elif (sub_cmd == "uidgid"): cluster_uidgid(argv) elif (sub_cmd == "corosync"): cluster_get_corosync_conf(argv) elif (sub_cmd == "reload"): cluster_reload(argv) elif (sub_cmd == "destroy"): cluster_destroy(argv) elif 
(sub_cmd == "verify"): cluster_verify(argv) elif (sub_cmd == "report"): cluster_report(argv) else: usage.cluster() sys.exit(1)
def corosync_setup(argv,returnConfig=False):
    """Generate and install cluster configuration for the nodes in argv.

    argv: [cluster_name, node1, node2, ...]; a node given as
    "addr0,addr1" configures redundant-ring (RRP) addresses.
    returnConfig: when True, return the generated config text instead of
    installing it.

    Non-RHEL6 ("fedora_config") systems get a corosync.conf built from
    COROSYNC_CONFIG_FEDORA_TEMPLATE; RHEL6 gets a cluster.conf built via
    the ccs tool.  Unless --local is given, the work is delegated to
    sync()/sync_start() which push config to all nodes.
    """
    fedora_config = not utils.is_rhel6()
    failure = False
    primary_nodes = []

    # If node contains a ',' we only care about the first address
    for node in argv[1:]:
        if "," in node:
            primary_nodes.append(node.split(',')[0])
        else:
            primary_nodes.append(node)

    if len(argv) < 2:
        usage.cluster()
        exit(1)

    # Non-local setups are pushed to all nodes; sync_start() also starts
    # the cluster when --start was given.  (The fedora_config part of the
    # condition is commented out — both platforms take this path now.)
    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:# and fedora_config:
        sync_start(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        return
    elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config:
        sync(argv, primary_nodes)
        if "--enable" in utils.pcs_options:
            enable_cluster(primary_nodes)
        return
    else:
        nodes = argv[1:]
        cluster_name = argv[0]

    # Verify that all nodes are resolvable otherwise problems may occur
    udpu_rrp = False
    for node in nodes:
        try:
            if "," in node:
                # RRP node: both ring addresses must resolve.
                socket.getaddrinfo(node.split(",")[0],None)
                socket.getaddrinfo(node.split(",")[1],None)
                udpu_rrp = True
            else:
                socket.getaddrinfo(node,None)
        except socket.error:
            print "Warning: Unable to resolve hostname: %s" % node
            failure = True

    # RRP is all-or-nothing: every node must supply two addresses.
    if udpu_rrp:
        for node in nodes:
            if "," not in node:
                utils.err("if one node is configured for RRP, all nodes must configured for RRP")

    if failure and "--force" not in utils.pcs_options:
        utils.err("Unable to resolve all hostnames (use --force to override).")

    if fedora_config == True:
        # Refuse to clobber an existing corosync.conf or CIB unless forced.
        if os.path.exists(settings.corosync_conf_file) and not "--force" in utils.pcs_options:
            utils.err("%s already exists, use --force to overwrite" % settings.corosync_conf_file)
        if not ("--corosync_conf" in utils.pcs_options and "--local" in utils.pcs_options):
            cib_path = os.path.join(settings.cib_dir, "cib.xml")
            if os.path.exists(cib_path) and not "--force" in utils.pcs_options:
                utils.err("%s already exists, use --force to overwrite" % cib_path)
        if "--corosync_conf" not in utils.pcs_options:
            cluster_destroy([])

        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
        corosync_config = f.read()
        f.close()

        # Build the nodelist section; RRP nodes get ring0 and ring1 addrs.
        i = 1
        new_nodes_section = ""
        for node in nodes:
            new_nodes_section += " node {\n"
            if udpu_rrp:
                new_nodes_section += " ring0_addr: %s\n" % (node.split(",")[0])
                new_nodes_section += " ring1_addr: %s\n" % (node.split(",")[1])
            else:
                new_nodes_section += " ring0_addr: %s\n" % (node)
            new_nodes_section += " nodeid: %d\n" % (i)
            new_nodes_section += " }\n"
            i = i+1

        # A two-node cluster gets the corosync two_node quorum option.
        two_node_section = ""
        if len(nodes) == 2:
            two_node_section = "two_node: 1"

        # Optional quorum-section settings, taken verbatim from CLI options.
        quorum_options = ""
        if "--wait_for_all" in utils.pcs_options:
            quorum_options += "wait_for_all: " + utils.pcs_options["--wait_for_all"] + "\n"
        if "--auto_tie_breaker" in utils.pcs_options:
            quorum_options += "auto_tie_breaker: " + utils.pcs_options["--auto_tie_breaker"] + "\n"
        if "--last_man_standing" in utils.pcs_options:
            quorum_options += "last_man_standing: " + utils.pcs_options["--last_man_standing"] + "\n"
        if "--last_man_standing_window" in utils.pcs_options:
            quorum_options += "last_man_standing_window: " + utils.pcs_options["--last_man_standing_window"] + "\n"

        transport = "udpu"
        if "--transport" in utils.pcs_options:
            transport = utils.pcs_options["--transport"]

        # Interface/rrp_mode section. --addr0/--addr1 define interfaces and
        # are only valid with multicast (udp) transport.
        ir = ""
        if transport == "udpu" and ("--addr0" in utils.pcs_options or "--addr1" in utils.pcs_options):
            utils.err("--addr0 and --addr1 can only be used with --transport=udp")
        if "--rrpmode" in utils.pcs_options or udpu_rrp or "--addr0" in utils.pcs_options:
            rrpmode = "passive"
            if "--rrpmode" in utils.pcs_options:
                rrpmode = utils.pcs_options["--rrpmode"]
            if rrpmode == "active" and "--force" not in utils.pcs_options:
                utils.err("using a RRP mode of 'active' is not supported or tested, use --force to override")
            elif rrpmode != "passive" and "--force" not in utils.pcs_options:
                utils.err("%s is an unknown RRP mode, use --force to override" % rrpmode)
            ir += "rrp_mode: " + rrpmode + "\n"

        if transport == "udp":
            if "--addr0" in utils.pcs_options:
                ir += utils.generate_rrp_corosync_config(0)
                if "--addr1" in utils.pcs_options:
                    ir += utils.generate_rrp_corosync_config(1)

        if "--ipv6" in utils.pcs_options:
            ip_version = "ip_version: ipv6\n"
        else:
            ip_version = ""

        # Optional totem-section timing settings, taken verbatim.
        totem_options = ""
        if "--token" in utils.pcs_options:
            totem_options += "token: " + utils.pcs_options["--token"] + "\n"
        if "--token_coefficient" in utils.pcs_options:
            totem_options += "token_coefficient: " + utils.pcs_options["--token_coefficient"] + "\n"
        if "--join" in utils.pcs_options:
            totem_options += "join: " + utils.pcs_options["--join"] + "\n"
        if "--consensus" in utils.pcs_options:
            totem_options += "consensus: " + utils.pcs_options["--consensus"] + "\n"
        if "--miss_count_const" in utils.pcs_options:
            totem_options += "miss_count_const: " + utils.pcs_options["--miss_count_const"] + "\n"
        if "--fail_recv_const" in utils.pcs_options:
            totem_options += "fail_recv_const: " + utils.pcs_options["--fail_recv_const"] + "\n"

        # Substitute all @@-placeholders in the template.
        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
        corosync_config = corosync_config.replace("@@quorum_options\n",quorum_options)
        corosync_config = corosync_config.replace("@@two_node",two_node_section)
        corosync_config = corosync_config.replace("@@transport",transport)
        corosync_config = corosync_config.replace("@@interfaceandrrpmode\n",ir)
        corosync_config = corosync_config.replace("@@ip_version\n",ip_version)
        corosync_config = corosync_config.replace("@@totem_options\n",totem_options)
        if returnConfig:
            return corosync_config
        utils.setCorosyncConf(corosync_config)
    else:
        # RHEL6 path: build cluster.conf with the ccs tool.  When only the
        # config text is wanted, write into a temp file instead.
        cluster_conf_location = "/etc/cluster/cluster.conf"
        if returnConfig:
            cc_temp = tempfile.NamedTemporaryFile('w+b', -1, ".pcs")
            cluster_conf_location = cc_temp.name
        if os.path.exists("/etc/cluster/cluster.conf") and not "--force" in utils.pcs_options and not returnConfig:
            print "Error: /etc/cluster/cluster.conf already exists, use --force to overwrite"
            sys.exit(1)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--createcluster", cluster_name])
        if retval != 0:
            print output
            utils.err("error creating cluster: %s" % cluster_name)
        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"])
        if retval != 0:
            print output
            utils.err("error creating fence dev: %s" % cluster_name)
        if len(nodes) == 2:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", cluster_conf_location, "--setcman", "two_node=1", "expected_votes=1"])
            if retval != 0:
                print output
                # NOTE(review): 'node' is leftover from the resolve loop above;
                # the message probably should mention the two_node/cman setting.
                utils.err("error adding node: %s" % node)
        # Register each node plus its pcmk fence method/instance.
        for node in nodes:
            output, retval = utils.run(["/usr/sbin/ccs", "-f", cluster_conf_location, "--addnode", node])
            if retval != 0:
                print output
                utils.err("error adding node: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addmethod", "pcmk-method", node])
            if retval != 0:
                print output
                utils.err("error adding fence method: %s" % node)
            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfenceinst", "pcmk-redirect", node, "pcmk-method", "port="+node])
            if retval != 0:
                print output
                utils.err("error adding fence instance: %s" % node)
        if returnConfig:
            # Read the generated cluster.conf back out of the temp file.
            cc_temp.seek(0)
            cluster_conf_data = cc_temp.read()
            cc_temp.close()
            return cluster_conf_data

    # Local path: optionally start/enable the local cluster.
    if "--start" in utils.pcs_options:
        start_cluster([])
    if "--enable" in utils.pcs_options:
        enable_cluster([])
def cluster_node(argv):
    """Add a node to, or remove a node from, the running cluster.

    argv: ["add"|"remove"|"delete", node] where node may be
    "addr0,addr1" to supply a ring-1 (RRP) address.

    The corosync membership change is requested on every existing cluster
    node via utils.addLocalNode / utils.removeLocalNode; utils.err
    presumably prints the message and exits — verify against utils.
    """
    if len(argv) != 2:
        usage.cluster();
        sys.exit(1)

    if argv[0] == "add":
        add_node = True
    elif argv[0] in ["remove","delete"]:
        add_node = False
    else:
        usage.cluster();
        sys.exit(1)

    # node0 is the primary (ring0) address; node1 is the optional ring1
    # address for RRP clusters.
    node = argv[1]
    if "," in node:
        node0 = node.split(",")[0]
        node1 = node.split(",")[1]
    else:
        node0 = node
        node1 = None

    # Make sure pcsd on the target node is reachable and authenticated.
    # Status codes 2 and 3 come from utils.checkAuthorization — confirm
    # their meaning there.
    status,output = utils.checkAuthorization(node0)
    if status == 2:
        utils.err("pcsd is not running on %s" % node0)
    elif status == 3:
        utils.err(
            "%s is not yet authenticated (try pcs cluster auth %s)"
            % (node0, node0)
        )

    if add_node == True:
        # The new node's ring count must match the cluster's RRP setup.
        if node1 is None and utils.need_ring1_address(utils.getCorosyncConf()):
            utils.err(
                "cluster is configured for RRP, "
                "you have to specify ring 1 address for the node"
            )
        elif (
            node1 is not None
            and
            not utils.need_ring1_address(utils.getCorosyncConf())
        ):
            utils.err(
                "cluster is not configured for RRP, "
                "you must not specify ring 1 address for the node"
            )
        corosync_conf = None
        (canAdd, error) = utils.canAddNodeToCluster(node0)
        if not canAdd:
            utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
        # Ask every existing node to add the new member; keep the last
        # successfully updated corosync.conf to seed the new node with.
        for my_node in utils.getNodesFromCorosyncConf():
            retval, output = utils.addLocalNode(my_node, node0, node1)
            if retval != 0:
                print >> sys.stderr, "Error: unable to add %s on %s - %s" % (node0, my_node, output.strip())
            else:
                print "%s: Corosync updated" % my_node
                corosync_conf = output
        if corosync_conf != None:
            # Push the updated config to the new node, then optionally
            # enable/start cluster services on it.
            utils.setCorosyncConfig(node0, corosync_conf)
            if "--enable" in utils.pcs_options:
                utils.enableCluster(node0)
            if "--start" in utils.pcs_options:
                utils.startCluster(node0)
        else:
            utils.err("Unable to update any nodes")
    else:
        # Removal: wipe cluster config on the leaving node, then tell all
        # remaining nodes to drop it from their membership.
        nodesRemoved = False
        c_nodes = utils.getNodesFromCorosyncConf()
        destroy_cluster([node0])
        for my_node in c_nodes:
            if my_node == node0:
                continue
            retval, output = utils.removeLocalNode(my_node, node0)
            if retval != 0:
                print >> sys.stderr, "Error: unable to remove %s on %s - %s" % (node0,my_node,output.strip())
            else:
                # On success 'output' is a (status, messages) pair —
                # confirm the exact shape against utils.removeLocalNode.
                if output[0] == 0:
                    print "%s: Corosync updated" % my_node
                    nodesRemoved = True
                else:
                    print >> sys.stderr, "%s: Error executing command occured: %s" % (my_node, "".join(output[1]))
        if nodesRemoved == False:
            utils.err("Unable to update any nodes")
        # Finally purge the node from pacemaker's membership cache.
        output, retval = utils.run(["crm_node", "--force", "-R", node0])
    