def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  livelock = utils.livelock.LiveLock("bootstrap_destroy")
  cfg = config.GetConfig(None, livelock)
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)
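
# The three RPC calls above share a "best effort" pattern: during destroy we
# log failures but never abort, so the remaining teardown steps still run.
# A minimal illustrative sketch of that pattern (the helper name
# _WarnOnFailure is hypothetical, not part of the Ganeti API):
def _WarnOnFailure(result, message):
  """Log a warning built from an RPC result instead of raising."""
  if result.fail_msg:
    logging.warning(message + ": %s", result.fail_msg)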
def ActivateMasterIP():
  # activate ip
  # Create a livelock file
  livelock = utils.livelock.LiveLock("masterd_activate_ip")
  cfg = config.GetConfig(None, livelock)
  master_params = cfg.GetMasterNetworkParameters()
  ems = cfg.GetUseExternalMipScript()
  runner = rpc.BootstrapRunner()
  # we use the node name, as only the configuration can resolve it from the
  # UUID at this point
  result = runner.call_node_activate_master_ip(
             cfg.GetNodeName(master_params.uuid), master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.error("Can't activate master IP address: %s", msg)
def CheckMasterd(options, args):
  """Initial checks whether to run or exit with a failure.

  """
  if args: # masterd doesn't take any arguments
    print("Usage: %s [-f] [-d]" % sys.argv[0], file=sys.stderr)
    sys.exit(constants.EXIT_FAILURE)

  ssconf.CheckMaster(options.debug)

  try:
    options.uid = pwd.getpwnam(constants.MASTERD_USER).pw_uid
    options.gid = grp.getgrnam(constants.DAEMONS_GROUP).gr_gid
  except KeyError:
    print("User or group does not exist on system: %s:%s" %
          (constants.MASTERD_USER, constants.DAEMONS_GROUP), file=sys.stderr)
    sys.exit(constants.EXIT_FAILURE)

  # Determine static runtime architecture information
  runtime.InitArchInfo()

  # Check the configuration is sane before anything else
  try:
    livelock = utils.livelock.LiveLock("masterd_check")
    config.GetConfig(None, livelock)
  except errors.ConfigVersionMismatch as err:
    v1 = "%s.%s.%s" % version.SplitVersion(err.args[0])
    v2 = "%s.%s.%s" % version.SplitVersion(err.args[1])
    print("Configuration version mismatch. The current Ganeti software"
          " expects version %s, but the on-disk configuration file has"
          " version %s. This is likely the result of upgrading the"
          " software without running the upgrade procedure. Please contact"
          " your cluster administrator or complete the upgrade using the"
          " cfgupgrade utility, after reading the upgrade notes." %
          (v1, v2), file=sys.stderr)
    sys.exit(constants.EXIT_FAILURE)
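
# For context: the configuration version is stored as one packed integer
# (e.g. 2160000 for 2.16.0, assuming the usual 1000000 * major
# + 10000 * minor + revision encoding), and version.SplitVersion unpacks it
# into a (major, minor, revision) tuple before the "%s.%s.%s" formatting
# above. A self-contained sketch of that unpacking under this assumption
# (_split_packed_version is illustrative, not the Ganeti implementation):
def _split_packed_version(packed):
  """Unpack an int like 2160000 into (2, 16, 0)."""
  major = packed // 1000000
  minor = (packed // 10000) % 100
  revision = packed % 10000
  return (major, minor, revision)

# e.g. _split_packed_version(2160000) == (2, 16, 0)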
def GetConfig(self, ec_id):
  return config.GetConfig(ec_id, self.livelock)
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  Note: The call to MasterFailover from lib/client/gnt_cluster.py checks that
  a majority of nodes are healthy and responding before calling this. If this
  function is called from somewhere else, the caller should also verify that a
  majority of nodes are healthy.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes' agreement
                    (dangerous)

  @returns: the pair of an exit code and warnings to display
  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = _GatherMasterVotes(node_names)
    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # instantiate a real config writer, as we now know we have the
    # configuration data
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # if cfg.Update worked, then it means the old master daemon won't be
    # able now to write its own config file (we rely on locking in both
    # backend.UploadFile() and ConfigWriter._Write()); hence the next
    # step is to kill the old master

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError as err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1, warnings
  finally:
    # stop WConfd again:
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      warning = ("Could not stop the configuration daemon,"
                 " command %s had exitcode %s and error %s" %
                 (result.cmd, result.exit_code, result.output))
      logging.error("%s", warning)
      rcode = 1

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally verify that the new master managed to set up the master IP
  # and warn if it didn't.
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address" %
               (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings
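
# The _check_ip helper above polls the master IP on the noded port and relies
# on utils.Retry with an increasing delay schedule to wait for the expected
# state. A rough, self-contained sketch of such a reachability probe using
# only the standard library (netutils.TcpPing itself may behave differently,
# e.g. regarding source addresses; this sketch is an assumption for
# illustration only):
import socket

def _tcp_reachable(host, port, timeout=5.0):
  """Return True if a TCP connection to host:port succeeds."""
  try:
    sock = socket.create_connection((host, port), timeout=timeout)
  except OSError:
    return False
  sock.close()
  return True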
def CheckAgreement():
  """Check the agreement on who is the master.

  The function uses a very simple algorithm: we must get more positive
  than negative answers. Since in most of the cases we are the master,
  we'll use our own config file for getting the node list. In the
  future we could collect the current node list from our (possibly
  obsolete) known nodes.

  In order to account for cold-start of all nodes, we retry for up to
  a minute until we get a real answer as the top-voted one. If the
  nodes are more out of sync, for now manual startup of the master
  should be attempted.

  Note that for a cluster with an even number of nodes, we need at
  least half of the nodes (beside ourselves) to vote for us. This
  creates a problem on two-node clusters, since in this case we
  require the other node to be up too to confirm our status.

  """
  myself = netutils.Hostname.GetSysName()
  # Create a livelock file
  livelock = utils.livelock.LiveLock("masterd_check_agreement")
  # temporary instantiation of a config writer, used only to get the node list
  cfg = config.GetConfig(None, livelock)
  node_names = cfg.GetNodeNames(cfg.GetNodeList())
  del cfg

  retries = 6
  while retries > 0:
    votes = bootstrap.GatherMasterVotes(node_names)
    if not votes:
      # empty node list, this is a one node cluster
      return True
    if votes[0][0] is None:
      retries -= 1
      time.sleep(10)
      continue
    break
  if retries == 0:
    logging.critical("Cluster inconsistent, most of the nodes didn't answer"
                     " after multiple retries. Aborting startup")
    logging.critical("Use the --no-voting option if you understand what"
                     " effects it has on the cluster state")
    return False
  # here a real node is at the top of the list
  all_votes = sum(item[1] for item in votes)
  top_node, top_votes = votes[0]

  result = False
  if top_node != myself:
    logging.critical("It seems we are not the master (top-voted node"
                     " is %s with %d out of %d votes)", top_node, top_votes,
                     all_votes)
  elif top_votes < all_votes - top_votes:
    logging.critical("It seems we are not the master (%d votes for,"
                     " %d votes against)", top_votes,
                     all_votes - top_votes)
  else:
    result = True

  return result
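
# The decision rule in CheckAgreement reduces to: of all answers received,
# the votes naming us must be at least as many as all other votes combined.
# A small standalone sketch of that rule, given the sorted (node, votes)
# list that GatherMasterVotes is expected to return (most-voted node first;
# _we_win_the_vote is illustrative, not part of the Ganeti API):
def _we_win_the_vote(votes, myself):
  """Apply CheckAgreement's majority rule to a sorted vote list."""
  all_votes = sum(count for (_, count) in votes)
  top_node, top_votes = votes[0]
  return top_node == myself and top_votes >= all_votes - top_votes

# Example: on a three-node cluster where two nodes name us and one names
# another node, _we_win_the_vote([("node1", 2), ("node2", 1)], "node1")
# returns True; with the counts swapped it returns False.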