def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
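# Usage sketch: ssh_execute() expects a client whose exec_command() returns
# the usual (stdin, stdout, stderr) triple, as paramiko's SSHClient does.
# The host and username below are hypothetical; authentication is assumed
# to be handled by an SSH agent or default key.
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('node1.example.com', username='stack')  # hypothetical host

stdout, stderr = ssh_execute(client, 'uptime')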
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
             description=None):
    self.exit_code = exit_code
    self.stderr = stderr
    self.stdout = stdout
    self.cmd = cmd
    self.description = description

    if description is None:
        description = _("Unexpected error while running command.")
    if exit_code is None:
        exit_code = '-'
    message = _('%(description)s\n'
                'Command: %(cmd)s\n'
                'Exit code: %(exit_code)s\n'
                'Stdout: %(stdout)r\n'
                'Stderr: %(stderr)r') % {'description': description,
                                         'cmd': cmd,
                                         'exit_code': exit_code,
                                         'stdout': stdout,
                                         'stderr': stderr}
    super(ProcessExecutionError, self).__init__(message)
def __call__(self, message_data):
    """Consumer callback to call a method on a proxy object.

    Parses the message for validity and fires off a thread to call the
    proxy object method.

    Message data should be a dictionary with two keys:
        method: string representing the method to call
        args: dictionary of arg: value

    Example: {'method': 'echo', 'args': {'value': 42}}
    """
    # It is important to clear the context here, because at this point
    # the previous context is stored in local.store.context
    if hasattr(local.store, 'context'):
        del local.store.context
    rpc_common._safe_log(LOG.debug, 'received %s', message_data)
    self.msg_id_cache.check_duplicate_message(message_data)
    ctxt = unpack_context(self.conf, message_data)
    method = message_data.get('method')
    args = message_data.get('args', {})
    version = message_data.get('version')
    namespace = message_data.get('namespace')
    if not method:
        LOG.warn(_('no method for message: %s') % message_data)
        ctxt.reply(_('No method for message: %s') % message_data,
                   connection_pool=self.connection_pool)
        return
    self.pool.spawn_n(self._process_data, ctxt, version, method,
                      namespace, args)
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug("Sending message(s) to: %s", queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope, _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout, envelope)
def _process_data(self, message_data):
    msg_id = message_data.pop('_msg_id', None)
    waiter = self._call_waiters.get(msg_id)
    if not waiter:
        LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                   ', message : %(data)s'), {'msg_id': msg_id,
                                             'data': message_data})
        LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
    else:
        waiter.put(message_data)
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
def acquire(self):
    basedir = os.path.dirname(self.fname)

    if not os.path.exists(basedir):
        fileutils.ensure_tree(basedir)
        LOG.info(_LI('Created lock path: %s'), basedir)

    self.lockfile = open(self.fname, 'w')

    while True:
        try:
            # Using non-blocking locks since green threads are not
            # patched to deal with blocking locking calls.
            # Also upon reading the MSDN docs for locking(), it seems
            # to have a laughable 10 attempts "blocking" mechanism.
            self.trylock()
            LOG.debug('Got file lock "%s"', self.fname)
            return True
        except IOError as e:
            if e.errno in (errno.EACCES, errno.EAGAIN):
                # external locks synchronise things like iptables
                # updates - give it some time to prevent busy spinning
                time.sleep(0.01)
            else:
                raise threading.ThreadError(_("Unable to acquire lock on"
                                              " `%(filename)s` due to"
                                              " %(exception)s") %
                                            {'filename': self.fname,
                                             'exception': e})
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    msg_fmt = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initializes a Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _('<unknown>'),
            topic=topic or _('<unknown>'),
            method=method or _('<unknown>'))
def publisher(waiter):
    LOG.info(_LI("Creating proxy for topic: %s"), topic)

    try:
        # The topic is received over the network,
        # don't trust this input.
        if self.badchars.search(topic) is not None:
            emsg = _("Topic contained dangerous characters.")
            LOG.warn(emsg)
            raise RPCException(emsg)

        out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                             sock_type, bind=True)
    except RPCException:
        waiter.send_exception(*sys.exc_info())
        return

    self.topic_proxy[topic] = eventlet.queue.LightQueue(
        CONF.rpc_zmq_topic_backlog)
    self.sockets.append(out_sock)

    # It takes some time for a pub socket to open,
    # before we can have any faith in doing a send() to it.
    if sock_type == zmq.PUB:
        eventlet.sleep(.5)

    waiter.send(True)

    while True:
        data = self.topic_proxy[topic].get()
        out_sock.send(data, copy=False)
def deprecated(self, msg, *args, **kwargs):
    """Call this method when a deprecated feature is used.

    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.

    Otherwise, the message will be logged (once) at the 'warn' level.

    :raises: :class:`DeprecatedConfig` if the system is configured for
             fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)

    # Using a list because a tuple with dict can't be stored in a set.
    sent_args = self._deprecated_messages_sent.setdefault(msg, list())

    if args in sent_args:
        # Already logged this message, so don't log it again.
        return

    sent_args.append(args)
    self.warn(stdmsg, *args, **kwargs)
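# Usage sketch, assuming a logger adapter instance (here LOG) that exposes
# this method; the option names are illustrative only.
# Logged once at 'warn' level; repeat calls with identical args are dropped.
LOG.deprecated('The %s option is deprecated, use "workers" instead',
               'worker_count')

# With CONF.fatal_deprecations set, the same call logs at 'critical'
# and raises DeprecatedConfig instead of returning.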
def db_sync(engine, abs_path, version=None, init_version=0,
            sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """

    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository, version)
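# Usage sketch, assuming a SQLAlchemy engine and a hypothetical migrate
# repository path; both values below are illustrative.
from sqlalchemy import create_engine

engine = create_engine('sqlite:////tmp/example.db')  # hypothetical URL

# Upgrade to the latest available version...
db_sync(engine, '/opt/app/db/migrate_repo')

# ...or pin to a specific version (downgrades if below the current one).
db_sync(engine, '/opt/app/db/migrate_repo', version=42)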
def read(self, i=None):
    result = self.data.read(i)
    self.bytes_read += len(result)
    if self.bytes_read > self.limit:
        msg = _("Request is too large.")
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
    return result
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.sock = _get_ctxt().socket(zmq_type)
    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB, )

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {'addr': addr, 'type': self.socket_s(),
                'subscribe': subscribe, 'bind': bind}

    LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
    LOG.debug("-> Subscribed to %(subscribe)s", str_data)
    LOG.debug("-> bind: %(bind)s", str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API
    call. Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default
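# Illustrative calls, following the behavior described in the docstring
# above (the exact accepted strings come from the module's TRUE_STRINGS
# and FALSE_STRINGS constants):
bool_from_string('YES')                  # True  (case-insensitive match)
bool_from_string('maybe')                # False (falls back to default)
bool_from_string('maybe', default=True)  # True
bool_from_string('maybe', strict=True)   # raises ValueError
bool_from_string(None, strict=True)      # raises ValueError ('none' unmatched)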
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context:      context to query under

    :param model:        Model to query. Must be a subclass of ModelBase.
    :type model:         models.ModelBase

    :param session:      The session to use.
    :type session:       sqlalchemy.orm.session.Session

    :param args:         Arguments to query. If None - model is used.
    :type args:          tuple

    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only:  bool

    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted:  bool

    Usage:

    .. code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)
    """

    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)

    return query
def add_call_waiter(self, waiter, msg_id):
    self._num_call_waiters += 1
    if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
        LOG.warn(_('Number of call waiters is greater than warning '
                   'threshold: %d. There could be a MulticallProxyWaiter '
                   'leak.') % self._num_call_waiters_wrn_threshold)
        self._num_call_waiters_wrn_threshold *= 2
    self._call_waiters[msg_id] = waiter
def test_underscore_lazy(self):
    # set lazy off
    gettextutils.USE_LAZY = False

    gettextutils.enable_lazy()
    result = gettextutils._('blah')

    self.assertIsInstance(result, gettextutils.Message)
    self.assertEqual('essential', result.domain)
def __iter__(self):
    for chunk in self.data:
        self.bytes_read += len(chunk)
        if self.bytes_read > self.limit:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        else:
            yield chunk
def _read_deleted_filter(query, db_model, read_deleted):
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(db_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(db_model.deleted != default_deleted_value)
    else:
        raise ValueError(_("Unrecognized read_deleted value '%s'")
                         % read_deleted)
    return query
def __call__(self, req):
    if req.content_length > CONF.max_request_body_size:
        msg = _("Request is too large.")
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
    if req.content_length is None and req.is_body_readable:
        limiter = LimitingReader(req.body_file,
                                 CONF.max_request_body_size)
        req.body_file = limiter
    return self.application
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
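# Illustrative conversions, assuming the conventional bases behind the
# UNIT_SYSTEM_INFO table (1024 for IEC, 1000 for SI); bit units divide
# the magnitude by 8 before the prefix is applied:
string_to_bytes('1KiB')                     # 1024.0  (IEC, binary prefix)
string_to_bytes('1kB', unit_system='SI')    # 1000.0  (SI, decimal prefix)
string_to_bytes('1Kb')                      # 128.0   (1024 bits / 8)
string_to_bytes('1.5MiB', return_int=True)  # 1572864
string_to_bytes('10 lbs')                   # raises ValueError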
def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # str.replace() returns a new string; keep the result so the
            # deprecated module path is actually rewritten.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR,
                         CRITICAL)
    :param payload:      A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_LE("Problem '%(e)s' attempting to "
                              "send to notification system. "
                              "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
def _run(self, name, method_type, args, kwargs, func):
    if method_type not in ('pre', 'post'):
        msg = _("Wrong type of hook method. "
                "Only 'pre' and 'post' type allowed")
        raise ValueError(msg)

    for extension in self.api.extensions:
        obj = extension.obj
        hook_method = getattr(obj, method_type, None)
        if hook_method:
            msg = (_("Running %(name)s %(type)s-hook: %(obj)s") %
                   {'name': name, 'type': method_type, 'obj': obj})
            LOG.debug(msg)
            try:
                if func:
                    hook_method(func, *args, **kwargs)
                else:
                    hook_method(*args, **kwargs)
            except Exception:
                LOG.exception(_LE('Error during %s-hook') % method_type)
class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
def reconnect(self):
    """Handles reconnecting and re-establishing queues.

    Will retry up to self.max_retries number of times.
    self.max_retries = 0 means to retry forever.
    Sleep between tries, starting at self.interval_start
    seconds, backing off self.interval_stepping number of seconds
    each attempt.
    """

    attempt = 0
    while True:
        params = self.params_list[next(self.next_broker_indices)]
        attempt += 1
        try:
            self._connect(params)
            return
        except (IOError, self.connection_errors) as e:
            # Keep a reference: in Python 3 the `as e` binding is
            # cleared when the except block exits.
            last_exc = e
        except Exception as e:
            # NOTE(comstud): Unfortunately it's possible for amqplib
            # to return an error not covered by its transport
            # connection_errors in the case of a timeout waiting for
            # a protocol response.  (See paste link in LP888621)
            # So, we check all exceptions for 'timeout' in them
            # and try to reconnect in this case.
            if 'timeout' not in str(e):
                raise
            last_exc = e

        log_info = {}
        log_info['err_str'] = str(last_exc)
        log_info['max_retries'] = self.max_retries
        log_info.update(params)

        if self.max_retries and attempt == self.max_retries:
            msg = _('Unable to connect to AMQP server on '
                    '%(hostname)s:%(port)d after %(max_retries)d '
                    'tries: %(err_str)s') % log_info
            LOG.error(msg)
            raise rpc_common.RPCException(msg)

        if attempt == 1:
            sleep_time = self.interval_start or 1
        elif attempt > 1:
            sleep_time += self.interval_stepping
        if self.interval_max:
            sleep_time = min(sleep_time, self.interval_max)

        log_info['sleep_time'] = sleep_time
        LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
                      'unreachable: %(err_str)s. Trying again in '
                      '%(sleep_time)d seconds.') % log_info)
        time.sleep(sleep_time)
def is_enabled():
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file
    use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl
def _extract_bytes(self, details):
    # Replace it with the byte amount
    real_size = self.SIZE_RE.search(details)
    if not real_size:
        raise ValueError(_('Invalid input value "%s".') % details)

    magnitude = real_size.group(1)
    unit_of_measure = real_size.group(2)
    bytes_info = real_size.group(3)
    if bytes_info:
        return int(real_size.group(4))
    elif not unit_of_measure:
        return int(magnitude)
    return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
                                    return_int=True)
def test_underscore_non_lazy(self):
    # set lazy off
    gettextutils.USE_LAZY = False

    if six.PY3:
        self.mox.StubOutWithMock(gettextutils._t, 'gettext')
        gettextutils._t.gettext('blah').AndReturn('translated blah')
    else:
        self.mox.StubOutWithMock(gettextutils._t, 'ugettext')
        gettextutils._t.ugettext('blah').AndReturn('translated blah')
    self.mox.ReplayAll()

    result = gettextutils._('blah')
    self.assertEqual('translated blah', result)
def _project_filter(query, db_model, context, project_only):
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(
                or_(db_model.project_id == context.project_id,
                    db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
def set_rules(self, rules, overwrite=True, use_conf=False):
    """Create a new Rules object based on the provided dict of rules.

    :param rules: New rules to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
                      with the new rules.
    :param use_conf: Whether to reload rules from cache or config file.
    """

    if not isinstance(rules, dict):
        raise TypeError(_("Rules must be an instance of dict or Rules, "
                          "got %s instead") % type(rules))
    self.use_conf = use_conf
    if overwrite:
        self.rules = Rules(rules, self.default_rule)
    else:
        self.rules.update(rules)
def _extract_details(self, root_cmd, root_details, lines_after):
    real_details = root_details
    if root_cmd == 'backing_file':
        # Replace it with the real backing file
        backing_match = self.BACKING_FILE_RE.match(root_details)
        if backing_match:
            real_details = backing_match.group(2).strip()
    elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
        # Replace it with the byte amount (if we can convert it)
        if root_details == 'None':
            real_details = 0
        else:
            real_details = self._extract_bytes(root_details)
    elif root_cmd == 'file_format':
        real_details = real_details.strip().lower()
    elif root_cmd == 'snapshot_list':
        # Next line should be a header, starting with 'ID'
        if not lines_after or not lines_after[0].startswith("ID"):
            msg = _("Snapshot list encountered but no header found!")
            raise ValueError(msg)
        del lines_after[0]
        real_details = []
        # This is the sprintf pattern we will try to match
        # "%-10s%-20s%7s%20s%15s"
        # ID TAG VM SIZE DATE VM CLOCK (current header)
        while lines_after:
            line = lines_after[0]
            line_pieces = line.split()
            if len(line_pieces) != 6:
                break
            # Check against this pattern in the final position
            # "%02d:%02d:%02d.%03d"
            date_pieces = line_pieces[5].split(":")
            if len(date_pieces) != 3:
                break
            real_details.append({
                'id': line_pieces[0],
                'tag': line_pieces[1],
                'vm_size': line_pieces[2],
                'date': line_pieces[3],
                'vm_clock': line_pieces[4] + " " + line_pieces[5],
            })
            del lines_after[0]
    return real_details
def __init__(self, requested, permitted):
    msg = _("Length of %(given)d is too long, max = %(maximum)d")
    message = msg % {'given': requested, 'maximum': permitted}
    super(CryptoutilsException, self).__init__(message)
def raise_invalid_topology_version(conf):
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id column type"))
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to
                      sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
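# Usage sketch, assuming a hypothetical `Instance` model with `created_at`
# and `id` columns; the marker is the last row of the previous page,
# re-fetched by the client-supplied id.
marker_row = session.query(Instance).get(last_seen_id)

page = paginate_query(session.query(Instance), Instance, limit=50,
                      sort_keys=['created_at', 'id'],
                      marker=marker_row,
                      sort_dir='asc').all()
# Adds the docstring's compound criteria, roughly:
#   WHERE created_at > :m1 OR (created_at = :m1 AND id > :m2)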
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these codes.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be stdlib_logging.DEBUG or
                            stdlib_logging.INFO)
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    # Use a list, not map(): in Python 3 map() returns a one-shot iterator,
    # which ' '.join() below would exhaust before Popen ever saw it.
    cmd = [str(c) for c in cmd]

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, 'Running cmd (subprocess): %s', ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, 'Result was %s' % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, '%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
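# Usage sketch; the commands and retry settings below are illustrative only.
# Retry a flaky command up to three times, accepting exit codes 0 and 1.
stdout, stderr = execute('ls', '/tmp', attempts=3, check_exit_code=[0, 1])

# Run under a root helper (e.g. sudo) when not already running as root.
stdout, stderr = execute('ovs-vsctl', 'show',
                         run_as_root=True, root_helper='sudo')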