def _migrate_up(self, version, with_data=False):
    """Migrate up to a new version of the db.

    :param version: id of revision to upgrade.
    :type version: str
    :keyword with_data: Whether to verify the applied changes with data,
        see :ref:`auxiliary-dynamic-methods`.
    :type with_data: Bool
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(self.migrate_engine)

        self.migration_api.upgrade(self.migrate_engine,
                                   self.REPOSITORY, version)
        self.assertEqual(version,
                         self.migration_api.db_version(self.migrate_engine,
                                                       self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(self.migrate_engine, data)
    except exc.DbMigrationError:
        LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                  (version, self.migrate_engine))
        raise
def _wrap(self, *args, **kwargs):
    try:
        assert issubclass(
            self.__class__, sqlalchemy.orm.session.Session), (
            '_wrap_db_error() can only be applied to methods of '
            'subclasses of sqlalchemy.orm.session.Session.')

        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as e:
        _raise_if_db_connection_lost(e, self.bind)
        _raise_if_deadlock_error(e, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(e)
def _migrate_up(self, version, with_data=False):
    """Migrate up to a new version of the db.

    :param version: id of revision to upgrade.
    :type version: str
    :keyword with_data: Whether to verify the applied changes with data,
        see :ref:`auxiliary-dynamic-methods`.
    :type with_data: Bool
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(self.migrate_engine)

        self.migration_api.upgrade(self.migrate_engine,
                                   self.REPOSITORY, version)
        self.assertEqual(
            version,
            self.migration_api.db_version(self.migrate_engine,
                                          self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(self.migrate_engine, data)
    except exc.DbMigrationError:
        msg = _LE("Failed to migrate to version %(ver)s on engine %(eng)s")
        LOG.error(msg, {"ver": version, "eng": self.migrate_engine})
        raise
def _migrate_up(self, engine, version, with_data=False):
    """migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(
            version,
            self.migration_api.db_version(engine, self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        LOG.error(
            _LE("Failed to migrate to version %s on engine %s") %
            (version, engine))
        raise
def _wrap(self, *args, **kwargs):
    try:
        assert issubclass(
            self.__class__, sqlalchemy.orm.session.Session
        ), ('_wrap_db_error() can only be applied to methods of '
            'subclasses of sqlalchemy.orm.session.Session.')

        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as e:
        _raise_if_db_connection_lost(e, self.bind)
        _raise_if_deadlock_error(e, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(e)
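For context, a hedged caller-side sketch of what the IntegrityError translation above buys: callers catch oslo.db's DBDuplicateEntry instead of parsing backend-specific error messages. The User model, table, and the oslo_db import path are illustrative assumptions, not part of the original code.

# Hedged sketch; the User model and session handling are hypothetical.
# Assumes the modern "oslo_db" namespace for the exception module.
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

from oslo_db import exception as db_exc

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(255), unique=True)


def create_user(session, name):
    try:
        session.add(User(name=name))
        session.flush()
    except db_exc.DBDuplicateEntry:
        # The wrapped session translated the backend's IntegrityError
        # for us, whatever database is actually in use.
        raise ValueError("user %r already exists" % name)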
def _migrate_up(self, engine, version, with_data=False):
    """migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(version,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                  (version, engine))
        raise
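A hedged sketch of the optional per-version hooks that _migrate_up resolves with getattr(); migration number 042, the volumes table, and the db_utils alias are hypothetical, chosen only to show the expected signatures and the data hand-off between the two hooks.

# Hypothetical hooks for a migration test class; assumes
# "from oslo_db.sqlalchemy import utils as db_utils" and
# pre-2.0 SQLAlchemy engine.execute().
def _pre_upgrade_042(self, engine):
    # Seed a row before migration 042 runs; the returned dict is passed
    # to _check_042 as its "data" argument.
    volumes = db_utils.get_table(engine, 'volumes')
    data = {'id': 'vol-042', 'display_name': 'before-upgrade'}
    engine.execute(volumes.insert(), data)
    return data


def _check_042(self, engine, data):
    # Verify the seeded row survived the schema change.
    volumes = db_utils.get_table(engine, 'volumes')
    row = engine.execute(
        volumes.select().where(volumes.c.id == data['id'])).first()
    self.assertEqual(data['display_name'], row['display_name'])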
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
    """Filter for remaining DBAPIErrors and wrap if they represent
    a disconnect error.
    """
    if is_disconnect:
        raise exception.DBConnectionError(error)
    else:
        LOG.exception(
            _LE('DBAPIError exception wrapped from %s') % error)
        raise exception.DBError(error)
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
    """Filter for remaining DBAPIErrors.

    Filter for remaining DBAPIErrors and wrap if they represent
    a disconnect error.
    """
    if is_disconnect:
        raise exception.DBConnectionError(error)
    else:
        LOG.exception(_LE('DBAPIError exception wrapped from %s') % error)
        raise exception.DBError(error)
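One hedged way a filter with this signature can be invoked is from SQLAlchemy's handle_error engine event; the in-memory engine and the direct call below are illustrative and not how oslo.db actually assembles its filter chain.

# Hedged wiring sketch; the sqlite URL is a placeholder, and the real
# filter chain also matches exception types and regexes before falling
# through to this remaining-errors filter.
import sqlalchemy
from sqlalchemy import event

engine = sqlalchemy.create_engine('sqlite://')


@event.listens_for(engine, 'handle_error')
def _translate_dbapi_error(context):
    # context is a sqlalchemy.engine.ExceptionContext.
    _raise_for_remaining_DBAPIError(context.original_exception,
                                    None,
                                    engine.dialect.name,
                                    context.is_disconnect)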
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as e:
            if remaining == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(e)
            if remaining != -1:
                remaining -= 1
                LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(next_interval * 2,
                                    self.max_retry_interval)
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as e:
            if remaining == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(e)
            if remaining != -1:
                remaining -= 1
                LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(
                    next_interval * 2,
                    self.max_retry_interval
                )
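For orientation, a hedged, self-contained sketch of the kind of retry decorator this wrapper is the closure of; the decorator name, defaults, and the oslo_db import path are assumptions, while the backoff loop mirrors the logic above.

# Hedged sketch of a retry decorator with the same backoff loop; names
# and defaults are illustrative, not oslo.db's exact wrap_db_retry API.
import functools
import time

from oslo_db import exception


def retry_on_db_connection_error(retry_interval=1, max_retries=20,
                                 inc_retry_interval=True,
                                 max_retry_interval=10):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            next_interval = retry_interval
            remaining = max_retries  # -1 means retry forever
            while True:
                try:
                    return f(*args, **kwargs)
                except exception.DBConnectionError as e:
                    if remaining == 0:
                        raise exception.DBError(e)
                    if remaining != -1:
                        remaining -= 1
                    time.sleep(next_interval)
                    if inc_retry_interval:
                        # Exponential backoff, capped at
                        # max_retry_interval seconds.
                        next_interval = min(next_interval * 2,
                                            max_retry_interval)
        return wrapper
    return decorator


# Illustrative usage:
# @retry_on_db_connection_error(max_retries=5)
# def instance_get(context, instance_id):
#     ...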
def downgrade(self, version):
    try:
        #version for migrate should be valid int - else skip
        if version in ('base', None):
            version = self.init_version
        version = int(version)
        return migration.db_sync(self.engine, self.repository, version,
                                 init_version=self.init_version)
    except ValueError:
        LOG.error(
            _LE('Migration number for migrate plugin must be valid '
                'integer or empty, if you want to downgrade '
                'to initial state'))
        raise
def downgrade(self, version):
    try:
        # version for migrate should be valid int - else skip
        if version in ('base', None):
            version = self.init_version
        version = int(version)
        return migration.db_sync(
            self.engine, self.repository, version,
            init_version=self.init_version)
    except ValueError:
        LOG.error(
            _LE('Migration number for migrate plugin must be valid '
                'integer or empty, if you want to downgrade '
                'to initial state')
        )
        raise
def _api(self):
    if not self._db_api:
        with self._lock:
            # Double-checked locking: re-test under the lock so only one
            # thread builds the DB API object.
            if not self._db_api:
                db_api = api.DBAPI.from_config(
                    conf=self._conf, backend_mapping=self._backend_mapping)
                if self._conf.database.use_tpool:
                    try:
                        from eventlet import tpool
                    except ImportError:
                        LOG.exception(_LE("'eventlet' is required for "
                                          "TpoolDbapiWrapper."))
                        raise
                    self._db_api = tpool.Proxy(db_api)
                else:
                    self._db_api = db_api
    return self._db_api
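A hedged usage sketch for the wrapper this lazily initialized attribute belongs to; the backend module path and the instance_get call are placeholders.

# Hedged usage sketch; 'myservice.db.sqlalchemy.api' and instance_get
# are placeholders. Attribute access on IMPL triggers the lazy,
# lock-protected initialization shown in _api above.
from oslo_config import cfg
from oslo_db import concurrency

CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'myservice.db.sqlalchemy.api'}

IMPL = concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)


def instance_get(context, instance_id):
    # Calls are proxied to the real DB API module, optionally wrapped in
    # an eventlet tpool.Proxy when [database]/use_tpool is enabled.
    return IMPL.instance_get(context, instance_id)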
def _raise_for_all_others(error, match, engine_name, is_disconnect):
    """Catch-all filter: log and wrap any remaining exception in DBError."""
    LOG.exception(_LE('DB exception wrapped.'))
    raise exception.DBError(error)