def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            idle = self.f(*self.args, **self.kw)
            if not self._running:
                break

            if periodic_interval_max is not None:
                idle = min(idle, periodic_interval_max)
            LOG.debug(_('Dynamic looping call sleeping for %.02f '
                        'seconds'), idle)
            greenthread.sleep(idle)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in dynamic looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.
    """
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        msg = _('Invalid string format: %s') % text
        raise TypeError(msg)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        msg = _('Unknown byte multiplier: %s') % mult_key_org
        raise TypeError(msg)
    return magnitude * multiplier
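# A minimal usage sketch of to_bytes(), assuming the module-level
# BYTE_REGEX and BYTE_MULTIPLIERS constants behave as the docstring
# describes (binary multipliers, case-insensitive suffixes):
#
#     to_bytes('1024')           # -> 1024 (bare number, no multiplier)
#     to_bytes('1KB')            # -> 1024, via BYTE_MULTIPLIERS['k']
#     to_bytes('2M')             # -> 2097152
#     to_bytes('', default=-1)   # -> -1 (blank input returns the default)
#     to_bytes('bogus')          # raises TypeError (no match, non-blank)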
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') %
                         -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
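# Illustrative calls, assuming `client` is an already-connected
# paramiko.SSHClient (whose exec_command() returns the three streams
# used above):
#
#     stdout, stderr = ssh_execute(client, 'uptime')
#     stdout, stderr = ssh_execute(client, 'might-fail',
#                                  check_exit_code=False)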
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval."""
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # If a periodic task is _nearly_ due, then we'll run it early
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": full_task_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": full_task_name, "e": e})
        time.sleep(0)

    return idle_for
def wrapper(*args, **kwargs):
    try:
        path = lock_path or os.environ.get("MISTRAL_LOCK_PATH")
        lock = lockfile.FileLock(os.path.join(path, lock_prefix))
        with lock:
            LOG.debug(_('Got lock "%s"') % f.__name__)
            return f(*args, **kwargs)
    finally:
        LOG.debug(_('Lock released "%s"') % f.__name__)
def inner(*args, **kwargs):
    try:
        with lock(name, lock_file_prefix, external, lock_path):
            LOG.debug(_('Got semaphore / lock "%(function)s"'),
                      {'function': f.__name__})
            return f(*args, **kwargs)
    finally:
        LOG.debug(_('Semaphore / lock released "%(function)s"'),
                  {'function': f.__name__})
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return False
def acquire(self):
    basedir = os.path.dirname(self.fname)

    if not os.path.exists(basedir):
        fileutils.ensure_tree(basedir)
        LOG.info(_LI('Created lock path: %s'), basedir)

    self.lockfile = open(self.fname, 'w')

    while True:
        try:
            # Using non-blocking locks since green threads are not
            # patched to deal with blocking locking calls.
            # Also upon reading the MSDN docs for locking(), it seems
            # to have a laughable 10 attempts "blocking" mechanism.
            self.trylock()
            LOG.debug('Got file lock "%s"', self.fname)
            return True
        except IOError as e:
            if e.errno in (errno.EACCES, errno.EAGAIN):
                # external locks synchronise things like iptables
                # updates - give it some time to prevent busy spinning
                time.sleep(0.01)
            else:
                raise threading.ThreadError(_("Unable to acquire lock on"
                                              " `%(filename)s` due to"
                                              " %(exception)s") %
                                            {'filename': self.fname,
                                             'exception': e})
def inner_func(*args, **kwargs):
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            this_exc_message = six.u(str(exc))
            if this_exc_message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if (cur_time - last_log_time > 60 or
                    this_exc_message != last_exc_message):
                logging.exception(
                    _('Unexpected exception occurred %d time(s)... '
                      'retrying.') % exc_count)
                last_log_time = cur_time
                last_exc_message = this_exc_message
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)
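# inner_func closes over `infunc`, so it is evidently the body of a
# retry decorator (forever_retry_uncaught_exceptions in the oslo
# original). A hedged usage sketch, assuming that decorator name:
#
#     @forever_retry_uncaught_exceptions
#     def consume_queue():
#         ...  # restarted with a 1s pause on any uncaught exception,
#              # with rate-limited logging of repeated failures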
def db_sync(engine, abs_path, version=None, init_version=0,
            sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository, version)
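# Hypothetical invocation; the engine source and repository path are
# illustrative, not part of the original module:
#
#     engine = create_engine(CONF.database.connection)
#     db_sync(engine, '/opt/app/db/migrate_repo')              # to latest
#     db_sync(engine, '/opt/app/db/migrate_repo', version=42)  # up or down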
def deprecated(self, msg, *args, **kwargs):
    """Call this method when a deprecated feature is used.

    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.

    Otherwise, the message will be logged (once) at the 'warn' level.

    :raises: :class:`DeprecatedConfig` if the system is configured for
             fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)

    # Using a list because a tuple with dict can't be stored in a set.
    sent_args = self._deprecated_messages_sent.setdefault(msg, list())

    if args in sent_args:
        # Already logged this message, so don't log it again.
        return

    sent_args.append(args)
    self.warn(stdmsg, *args, **kwargs)
def __exit__(self, exc_type, exc_val, exc_tb):
    try:
        self.unlock()
        self.lockfile.close()
    except IOError:
        LOG.exception(_("Could not release the acquired lock `%s`"),
                      self.fname)
def db_sync(abs_path, version=None, init_version=0):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository, version)
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine: SQLAlchemy engine instance for a given database
    """
    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
        # versioning tables from the tables we need to verify utf8 status on.
        # Non-standard table names are not supported.
        EXCLUDED_TABLES = ['migrate_version', 'alembic_version']

        table_names = [res[0] for res in
                       engine.execute(onlyutf8_sql, engine.url.database)
                       if res[0].lower() not in EXCLUDED_TABLES]

        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))
def deprecated(self, msg, *args, **kwargs):
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)
    else:
        self.warn(stdmsg, *args, **kwargs)
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default
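# Behaviour sketch for bool_from_string(), assuming TRUE_STRINGS and
# FALSE_STRINGS contain exactly the values listed in the docstring:
#
#     bool_from_string('YES')                  # -> True
#     bool_from_string('0')                    # -> False
#     bool_from_string('maybe')                # -> False (the default)
#     bool_from_string('maybe', default=True)  # -> True
#     bool_from_string('maybe', strict=True)   # raises ValueError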
def __init__(cls, names, bases, dict_):
    """Metaclass that allows us to collect decorated periodic tasks."""
    super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

    # NOTE(sirp): if the attribute is not present then we must be the base
    # class, so, go ahead and initialize it. If the attribute is present,
    # then we're a subclass so make a copy of it so we don't step on our
    # parent's toes.
    try:
        cls._periodic_tasks = cls._periodic_tasks[:]
    except AttributeError:
        cls._periodic_tasks = []

    try:
        cls._periodic_last_run = cls._periodic_last_run.copy()
    except AttributeError:
        cls._periodic_last_run = {}

    try:
        cls._periodic_spacing = cls._periodic_spacing.copy()
    except AttributeError:
        cls._periodic_spacing = {}

    for value in cls.__dict__.values():
        if getattr(value, '_periodic_task', False):
            task = value
            name = task.__name__

            if task._periodic_spacing < 0:
                LOG.info(_('Skipping periodic task %(task)s because '
                           'its interval is negative'),
                         {'task': name})
                continue
            if not task._periodic_enabled:
                LOG.info(_('Skipping periodic task %(task)s because '
                           'it is disabled'),
                         {'task': name})
                continue

            # A periodic spacing of zero indicates that this task should
            # be run every pass
            if task._periodic_spacing == 0:
                task._periodic_spacing = None

            cls._periodic_tasks.append((name, task))
            cls._periodic_spacing[name] = task._periodic_spacing
            cls._periodic_last_run[name] = task._periodic_last_run
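# Hedged consumer sketch: the metaclass scans class attributes for
# _periodic_task / _periodic_spacing / _periodic_enabled markers, which
# in the oslo original are set by a @periodic_task decorator. Assuming
# that decorator and a PeriodicTasks base class built on this metaclass:
#
#     class MyManager(PeriodicTasks):
#
#         @periodic_task(spacing=60)
#         def _report_state(self, context):
#             ...  # run at most once per 60 seconds
#
#         @periodic_task  # spacing defaults to 0 -> run every pass
#         def _cleanup(self, context):
#             ...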
def _wrap(*args, **kw):
    try:
        return f(*args, **kw)
    except Exception as e:
        if not isinstance(e, Error):
            logging.exception(_('Uncaught exception'))
            raise Error(str(e))
        raise
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        logging.error(_('Original exception being dropped: %s'),
                      traceback.format_exception(self.type_,
                                                 self.value,
                                                 self.tb))
        return False
    if self.reraise:
        six.reraise(self.type_, self.value, self.tb)
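# Hedged usage sketch, assuming this __exit__ belongs to a context
# manager like oslo's save_and_reraise_exception, which re-raises the
# captured exception after cleanup unless `reraise` is cleared:
#
#     try:
#         do_work()
#     except Exception:
#         with save_and_reraise_exception() as ctxt:
#             rollback()                 # runs before the re-raise
#             if error_is_benign:
#                 ctxt.reraise = False   # swallow instead of re-raising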
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
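# Worked examples for string_to_bytes(), assuming UNIT_SYSTEM_INFO maps
# 'IEC' to base 1024 and 'SI' to base 1000, as the docstring implies:
#
#     string_to_bytes('1KiB')                    # -> 1024.0
#     string_to_bytes('1kb', unit_system='SI')   # -> 125.0 (kilobits)
#     string_to_bytes('1.5MB', return_int=True)  # -> 1572864 (ceil'd)
#     string_to_bytes('banana')                  # raises ValueError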
class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True)
    old duplicate rows from the table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
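# Illustrative call (the table and column names are hypothetical):
# purge rows that collide on a (name, flavorid) unique constraint,
# keeping the row with the highest id and soft-deleting the rest:
#
#     drop_old_duplicate_entries_from_table(migrate_engine,
#                                           'instance_types',
#                                           True,  # use_soft_delete
#                                           'name', 'flavorid')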
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_('Got mysql server has gone away: %s'), ex)
            raise sqla_exc.DisconnectionError("Database server went away")
        else:
            raise
def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
def db_version(abs_path, init_version):
    """Show the current version of the repository.

    :param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(abs_path, init_version)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.DbMigrationError(
                message=_("Upgrade DB using Essex release first."))
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not, and the file contents.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))
def _wrap(*args, **kwargs):
    try:
        return f(*args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.OperationalError as e:
        _raise_if_deadlock_error(e, get_engine().name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, get_engine().name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_('DB exception wrapped.'))
        raise exception.DBError(e)
def create_engine(sql_connection, sqlite_fk=False):
    """Return a new SQLAlchemy engine."""
    # NOTE(geekinutah): At this point we could be connecting to the normal
    #                   db handle or the slave db handle. Things like
    #                   _wrap_db_error aren't going to work well if their
    #                   backends don't match. Let's check.
    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if CONF.database.max_pool_size is not None:
            engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
        if CONF.database.pool_timeout is not None:
            engine_args['pool_timeout'] = CONF.database.pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if 'mysql' in connection_dict.drivername:
        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warn(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem
        finally:
            local.strong_store.locks_held.remove(name)
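# Hedged usage sketch, assuming lock() is decorated with
# @contextlib.contextmanager as in the oslo original (the bare `yield`s
# above only make sense under that decorator):
#
#     with lock('state-file'):
#         ...  # serialised against other green threads in this process
#
#     with lock('state-file', lock_file_prefix='svc-', external=True):
#         ...  # additionally serialised across processes via a file
#              # lock under lock_path / CONF.lock_path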
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these codes.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be stdlib_logging.DEBUG or
                            stdlib_logging.INFO)
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=('Command requested root, but did not specify a root '
                         'helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, _('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
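# Illustrative calls (command names are placeholders):
#
#     out, err = execute('cat', '/etc/hostname')
#     out, err = execute('rm', '-f', '/var/run/foo.pid',
#                        run_as_root=True, root_helper='sudo')
#     out, err = execute('flaky-tool', attempts=3)  # retried on failure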
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id column type"))
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to
                      sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
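# Hedged usage sketch; `Instance`, `session`, and `marker_id` are
# hypothetical SQLAlchemy model/session names, not part of the module:
#
#     marker_row = session.query(Instance).get(marker_id)
#     page = paginate_query(session.query(Instance), Instance, limit=50,
#                           sort_keys=['created_at', 'id'],
#                           marker=marker_row,
#                           sort_dirs=['desc', 'asc'])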
class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
class InvalidPeriodicTaskArg(Exception):
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")