def _GroupWatcher(opts):
  """Main function for per-group watcher process.

  """
  group_uuid = opts.nodegroup.lower()

  if not utils.UUID_RE.match(group_uuid):
    raise errors.GenericError(
      "Node group parameter (%s) must be given a UUID,"
      " got '%s'" % (cli.NODEGROUP_OPT_NAME, group_uuid))

  logging.info("Watcher for node group '%s'", group_uuid)

  known_groups = _LoadKnownGroups()

  # Check if node group is known
  if group_uuid not in known_groups:
    raise errors.GenericError("Node group '%s' is not known by ssconf" %
                              group_uuid)

  # Group UUID has been verified and should not contain any dangerous
  # characters
  state_path = pathutils.WATCHER_GROUP_STATE_FILE % group_uuid
  inst_status_path = pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE % group_uuid

  logging.debug("Using state file %s", state_path)

  # Global watcher
  statefile = state.OpenStateFile(state_path)  # pylint: disable=E0602
  if not statefile:
    return constants.EXIT_FAILURE

  notepad = state.WatcherState(statefile)  # pylint: disable=E0602
  try:
    # Connect to master daemon
    client = GetLuxiClient(False)

    _CheckMaster(client)

    (nodes, instances, locks) = _GetGroupData(client, group_uuid)

    # Update per-group instance status file
    _UpdateInstanceStatus(inst_status_path, instances.values())

    _MergeInstanceStatus(pathutils.INSTANCE_STATUS_FILE,
                         pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE,
                         known_groups)

    restart_needed, started = _CheckInstances(client, notepad, instances, locks)
    _CheckDisks(client, notepad, nodes, instances, started)

    if not opts.no_verify_disks:
      _VerifyDisks(client, group_uuid, nodes, instances)
  except Exception as err:
    logging.info("Not updating status file due to failure: %s", err)
    raise
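# A minimal sketch (not part of the watcher code above): the only attributes
# of "opts" the snippet reads are "nodegroup" and "no_verify_disks", so a test
# or manual invocation could build them like this.  optparse.Values and the
# idea of calling _GroupWatcher directly are assumptions for illustration.
import optparse

def _BuildWatcherOpts(nodegroup_uuid, verify_disks=True):
  """Builds a minimal options object with the fields _GroupWatcher uses."""
  opts = optparse.Values()
  opts.nodegroup = nodegroup_uuid          # lower-cased and UUID-checked by the callee
  opts.no_verify_disks = not verify_disks  # True skips _VerifyDisks
  return opts

# Example (hypothetical group UUID):
#   _GroupWatcher(_BuildWatcherOpts("6f2bdc43-1c6e-4c7e-9f42-0c0c0c0c0c0c"))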
def testEncodeException(self):
  self.assertEqualValues(errors.EncodeException(Exception("Foobar")),
                         ("Exception", ("Foobar", )))
  err = errors.GenericError(True, 100, "foo", ["x", "y"])
  self.assertEqualValues(errors.EncodeException(err),
                         ("GenericError", (True, 100, "foo", ["x", "y"])))
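# A hedged sketch of the reverse direction: turning the (class-name, args)
# tuple produced by errors.EncodeException back into an exception instance.
# Ganeti ships its own helpers for this; the getattr-based lookup below only
# illustrates the round-trip and is not the library's actual API.
def _DecodeException(errors_module, encoded):
  (name, args) = encoded
  cls = getattr(errors_module, name, None)
  if not (isinstance(cls, type) and issubclass(cls, Exception)):
    # Unknown or non-exception name: fall back to a plain Exception
    return Exception(*args)
  return cls(*args)

# Round trip:
#   err = errors.GenericError(True, 100, "foo", ["x", "y"])
#   _DecodeException(errors, errors.EncodeException(err)).args
#   => (True, 100, "foo", ["x", "y"])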
def handle_datagram(self, payload, ip, port):
  payload = payload.decode("utf-8")
  self.received.append(payload)
  if payload == "terminate":
    os.kill(os.getpid(), signal.SIGTERM)
  elif payload == "error":
    raise errors.GenericError("error")
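# A small usage sketch, assuming the handler above sits behind a UDP server
# bound to (host, port): a plain-stdlib client that sends a few datagrams,
# ending with "terminate" so the server shuts itself down.  Host and port are
# placeholders; the real unit test binds its own ephemeral port.
import socket

def _SendTestDatagrams(host, port, payloads=("hello", "terminate")):
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  try:
    for payload in payloads:
      # handle_datagram() decodes UTF-8, so encode before sending
      sock.sendto(payload.encode("utf-8"), (host, port))
  finally:
    sock.close()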
def handle_message(self, handler, message, message_id):
  self.messages.setdefault(handler.client_id, [])
  # We should just check that the message_ids are monotonically increasing.
  # If in the unit tests we never remove messages from the received queue,
  # though, we can just require that the queue length is the same as the
  # message id, before pushing the message to it. This forces a more
  # restrictive check, but we can live with this for now.
  self.assertEquals(len(self.messages[handler.client_id]), message_id)
  self.messages[handler.client_id].append(message)
  if message == "error":
    raise errors.GenericError("error")
  self.countTerminate("message_terminate_count")
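# The same "queue length == next message id" invariant, extracted into a
# standalone helper as an illustration only (names are hypothetical).  Because
# ids start at 0 and nothing is ever removed from the queue, the current
# length is exactly the id expected next.
def _AppendChecked(queues, client_id, message, message_id):
  queue = queues.setdefault(client_id, [])
  if len(queue) != message_id:
    raise AssertionError("Out-of-order message id %s (expected %s)" %
                         (message_id, len(queue)))
  queue.append(message)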
def CheckNodeCertificate(cert, _noded_cert_file=pathutils.NODED_CERT_FILE):
  """Checks the local node daemon certificate against given certificate.

  Both certificates must be signed with the same key (as stored in the local
  L{pathutils.NODED_CERT_FILE} file). No error is raised if no local
  certificate can be found.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @raise errors.X509CertError: When an error related to X509 occurred
  @raise errors.GenericError: When the verification failed

  """
  try:
    noded_pem = utils_io.ReadFile(_noded_cert_file)
  except EnvironmentError as err:
    if err.errno != errno.ENOENT:
      raise

    logging.debug("Node certificate file '%s' was not found", _noded_cert_file)
    return

  try:
    noded_cert = \
      OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, noded_pem)
  except Exception as err:
    raise errors.X509CertError(_noded_cert_file,
                               "Unable to load certificate: %s" % err)

  try:
    noded_key = \
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, noded_pem)
  except Exception as err:
    raise errors.X509CertError(_noded_cert_file,
                               "Unable to load private key: %s" % err)

  # Check consistency of server.pem file
  try:
    X509CertKeyCheck(noded_cert, noded_key)
  except OpenSSL.SSL.Error:
    # This should never happen as it would mean the certificate in server.pem
    # is out of sync with the private key stored in the same file
    raise errors.X509CertError(_noded_cert_file,
                               "Certificate does not match with private key")

  # Check the supplied certificate against the local key
  try:
    X509CertKeyCheck(cert, noded_key)
  except OpenSSL.SSL.Error:
    raise errors.GenericError("Given cluster certificate does not match"
                              " local key")
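# A hedged usage sketch: load a candidate cluster certificate from a PEM file
# and check it against the local node daemon key.  The path is a placeholder;
# only OpenSSL.crypto.load_certificate and CheckNodeCertificate itself are
# taken from the code above.
import OpenSSL.crypto

def _VerifyClusterCertFile(path):
  with open(path, "rb") as fd:
    pem = fd.read()
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
  # Raises errors.X509CertError or errors.GenericError on mismatch, as
  # documented in CheckNodeCertificate's docstring
  CheckNodeCertificate(cert)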
def testRaiseInnerWithExc(self):
  retry_arg = "my_important_debugging_message"
  try:
    try:
      utils.Retry(self._RaiseRetryAgainWithArg, 0.01, 0.02,
                  args=[[errors.GenericError(retry_arg, retry_arg)]],
                  wait_fn=self._wait_fn, _time_fn=self._time_fn)
    except utils.RetryTimeout as err:
      err.RaiseInner()
    else:
      self.fail("Expected timeout didn't happen")
  except errors.GenericError as err:
    self.assertEqual(err.args, (retry_arg, retry_arg))
  else:
    self.fail("Expected GenericError didn't happen")
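# A sketch of the calling pattern this test exercises, assuming a hypothetical
# poll function: run it under utils.Retry and, on timeout, surface the last
# real error via RaiseInner() instead of the generic RetryTimeout.  Only the
# Retry/RetryTimeout/RaiseInner names come from the snippet above.
def _RunWithRetry(poll_fn, delay=0.1, timeout=5.0):
  try:
    return utils.Retry(poll_fn, delay, timeout)
  except utils.RetryTimeout as err:
    # Re-raises the exception handed to the last RetryAgain, if there was
    # one; otherwise the timeout error itself propagates
    err.RaiseInner()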
def CheckRemoteExportDiskInfo(cds, disk_index, disk_info):
  """Verifies received disk information for an export.

  @type cds: string
  @param cds: Cluster domain secret
  @type disk_index: number
  @param disk_index: Index of disk (included in hash)
  @type disk_info: sequence
  @param disk_info: Disk information sent by remote peer

  """
  try:
    (host, port, magic, hmac_digest, hmac_salt) = disk_info
  except (TypeError, ValueError) as err:
    raise errors.GenericError("Invalid data: %s" % err)
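# A hedged sketch of the five-element tuple the function above unpacks.  The
# HMAC below uses the standard library and a made-up message layout purely to
# illustrate the salt-plus-digest idea; it is NOT Ganeti's actual signing
# scheme for remote exports.
import hashlib
import hmac

def _BuildExampleDiskInfo(cds, host, port, magic, salt):
  msg = "%s:%s:%s:%s" % (host, port, magic, salt)  # illustrative layout only
  digest = hmac.new(cds.encode("utf-8"), msg.encode("utf-8"),
                    hashlib.sha1).hexdigest()
  return (host, port, magic, digest, salt)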