def _Connect(sock, address, timeout, allow_non_master):
  sock.settimeout(timeout)
  try:
    sock.connect(address)
  except socket.timeout as err:
    raise errors.TimeoutError("Connect timed out: %s" % str(err))
  except socket.error as err:
    error_code = err.args[0]
    if error_code in (errno.ENOENT, errno.ECONNREFUSED):
      if not allow_non_master:
        # Verify if we're actually on the master node before trying
        # again.
        ss = ssconf.SimpleStore()
        try:
          master, myself = ssconf.GetMasterAndMyself(ss=ss)
        except ganeti.errors.ConfigurationError:
          raise errors.NoMasterError(address)
        if master != myself:
          raise errors.NoMasterError(address)
      raise utils.RetryAgain()
    elif error_code in (errno.EPERM, errno.EACCES):
      raise errors.PermissionError(address)
    elif error_code == errno.EAGAIN:
      # Server's socket backlog is full at the moment
      raise utils.RetryAgain()
    raise
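# Usage sketch (not from the original source): _Connect is designed to be
# driven by utils.Retry, where utils.RetryAgain marks retryable failures and
# the other exceptions abort immediately. The helper below is a hypothetical
# illustration of that wiring; the one-second interval mirrors how
# Transport.__init__ calls utils.Retry further down.
import socket

from ganeti import errors, utils

def _ConnectWithRetry(address, ctimeout):
  # Retry roughly once per second until the connect timeout expires.
  sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
  try:
    utils.Retry(_Connect, 1.0, ctimeout,
                args=(sock, address, ctimeout, False))
  except utils.RetryTimeout:
    sock.close()
    raise errors.TimeoutError("Connect timed out")
  return sock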
def GetClient():
  """Connects to a luxi socket and returns a client.

  """
  try:
    client = luxi.Client(address=pathutils.QUERY_SOCKET)
  except NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client
def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better to
  abstract it in bootstrap, where we do use ssconf in other functions
  too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master
def GetClient(query=True):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be used for
      queries; if the build-time parameter enable-split-queries is
      enabled, then the client will be connected to the query socket
      instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query:
    address = pathutils.QUERY_SOCKET
  else:
    address = None

  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client
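# Usage sketch (illustrative, not from the original source): the environment
# override above is read at call time, so a process can force the socket
# without code changes. constants.LUXI_OVERRIDE names the environment
# variable; the special values LUXI_OVERRIDE_MASTER / LUXI_OVERRIDE_QUERY
# select the fixed sockets, and any other value is used verbatim as a path.
import os

from ganeti import constants

os.environ[constants.LUXI_OVERRIDE] = constants.LUXI_OVERRIDE_MASTER
client = GetClient(query=True)  # still connects to pathutils.MASTER_SOCKET

os.environ[constants.LUXI_OVERRIDE] = "/tmp/test-luxi.sock"  # hypothetical
client = GetClient()  # connects to the given socket path directly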
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @returns: the pair of an exit code and warnings to display

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)
    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # instantiate a real config writer, as we now know we have the
    # configuration data
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # if cfg.Update worked, then it means the old master daemon won't be
    # able now to write its own config file (we rely on locking in both
    # backend.UploadFile() and ConfigWriter._Write(); hence the next
    # step is to kill the old master

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError as err:
    logging.error("Error while trying to set the new master: %s", str(err))
    return 1, warnings
class Transport:
  """Low-level transport class.

  This is used on the client side.

  This could be replaced by any other class that provides the same
  semantics to the Client. This means:

    - can send messages and receive messages
    - safe for multithreading

  """

  def __init__(self, address, timeouts=None):
    """Constructor for the Client class.

    Arguments:
      - address: a valid address to the used transport class
      - timeout: a list of timeouts, to be used on connect and read/write

    There are two timeouts used since we might want to wait for a long
    time for a response, but the connect timeout should be lower.

    If not passed, we use a default of 10 and respectively 60 seconds.

    Note that on reading data, since the timeout applies to an
    individual receive, it might be that the total duration is longer
    than the timeout value passed (we make a hard limit at twice the
    read timeout).

    """
    self.address = address
    if timeouts is None:
      self._ctimeout, self._rwtimeout = DEF_CTMO, DEF_RWTO
    else:
      self._ctimeout, self._rwtimeout = timeouts

    self.socket = None
    self._buffer = ""
    self._msgs = collections.deque()

    try:
      self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

      # Try to connect
      try:
        utils.Retry(self._Connect, 1.0, self._ctimeout,
                    args=(self.socket, address, self._ctimeout))
      except utils.RetryTimeout:
        raise errors.TimeoutError("Connect timed out")

      self.socket.settimeout(self._rwtimeout)
    except (socket.error, errors.NoMasterError):
      if self.socket is not None:
        self.socket.close()
      self.socket = None
      raise

  @staticmethod
  def _Connect(sock, address, timeout):
    sock.settimeout(timeout)
    try:
      sock.connect(address)
    except socket.timeout as err:
      raise errors.TimeoutError("Connect timed out: %s" % str(err))
    except socket.error as err:
      error_code = err.args[0]
      if error_code in (errno.ENOENT, errno.ECONNREFUSED):
        # Verify if we're actually on the master node before trying
        # again.
        ss = ssconf.SimpleStore()
        try:
          master, myself = ssconf.GetMasterAndMyself(ss=ss)
        except ganeti.errors.ConfigurationError:
          raise errors.NoMasterError(address)
        if master != myself:
          raise errors.NoMasterError(address)
        raise utils.RetryAgain()
      elif error_code in (errno.EPERM, errno.EACCES):
        raise errors.PermissionError(address)
      elif error_code == errno.EAGAIN:
        # Server's socket backlog is full at the moment
        raise utils.RetryAgain()
      raise
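# Usage sketch (hypothetical): constructing a Transport with explicit
# timeouts. The (5, 60) pair overrides the module defaults DEF_CTMO and
# DEF_RWTO described in the constructor docstring; pathutils.MASTER_SOCKET
# is one plausible address for the unix socket.
from ganeti import errors, pathutils

try:
  # 5 seconds to connect, 60 seconds per read/write.
  transport = Transport(pathutils.MASTER_SOCKET, timeouts=(5, 60))
except errors.NoMasterError:
  # The socket was missing or refused and this node is not the master
  # (see Transport._Connect above).
  raise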
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)
    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError as err:
    logging.error("Error while trying to set the new master: %s", str(err))
    return 1
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  Note: The call to MasterFailover from lib/client/gnt_cluster.py checks
  that a majority of nodes are healthy and responding before calling this.
  If this function is called from somewhere else, the caller should also
  verify that a majority of nodes are healthy.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @returns: the pair of an exit code and warnings to display

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = _GatherMasterVotes(node_names)
    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # instantiate a real config writer, as we now know we have the
    # configuration data
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # if cfg.Update worked, then it means the old master daemon won't be
    # able now to write its own config file (we rely on locking in both
    # backend.UploadFile() and ConfigWriter._Write(); hence the next
    # step is to kill the old master

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError as err:
    logging.error("Error while trying to set the new master: %s", str(err))
    return 1, warnings
  finally:
    # stop WConfd again:
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      warning = ("Could not stop the configuration daemon,"
                 " command %s had exitcode %s and error %s" %
                 (result.cmd, result.exit_code, result.output))
      logging.error("%s", warning)
      rcode = 1

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally verify that the new master managed to set up the master IP
  # and warn if it didn't.
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address"
               % (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings
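# Usage sketch (hypothetical CLI glue, not from the original source): the
# caller is expected to print the returned warnings and propagate the exit
# code and, per the docstring note above, to verify that a majority of
# nodes are healthy before invoking the failover.
def RunMasterFailover(no_voting=False):
  rcode, warnings = MasterFailover(no_voting=no_voting)
  for warning in warnings:
    print("WARNING: %s" % warning)
  return rcode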