def _migrate_up(self, engine, version, with_data=False):
    """Migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(
            version,
            self.migration_api.db_version(engine, self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        # Pass the arguments to the logger instead of %-interpolating
        # eagerly, so formatting only happens if the record is emitted.
        LOG.error(_LE("Failed to migrate to version %s on engine %s"),
                  version, engine)
        raise
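# NOTE: The hooks that _migrate_up looks up by name are defined on the
# same test class. The pair below is an illustrative sketch only: the
# version number 004, the "instances" table, and its columns are
# assumptions made up for the example, not taken from the source. It
# assumes the SQLAlchemy 1.x API this code base targets
# (engine.execute() was removed in SQLAlchemy 2.0).
import sqlalchemy


def _pre_upgrade_004(self, engine):
    # Runs against the version-003 schema; whatever this returns is
    # passed back into _check_004 as ``data``.
    meta = sqlalchemy.MetaData()
    instances = sqlalchemy.Table('instances', meta,
                                 autoload=True, autoload_with=engine)
    row = {'id': 1, 'hostname': 'test-host'}
    engine.execute(instances.insert(), row)
    return row


def _check_004(self, engine, data):
    # Runs after the upgrade to 004; verify the seeded row survived and
    # that the (hypothetical) new ``deleted`` column got its default.
    meta = sqlalchemy.MetaData()
    instances = sqlalchemy.Table('instances', meta,
                                 autoload=True, autoload_with=engine)
    result = engine.execute(
        instances.select().where(
            instances.c.id == data['id'])).fetchone()
    self.assertEqual(data['hostname'], result['hostname'])
    self.assertEqual(0, result['deleted'])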
def _wrap(self, *args, **kwargs):
    try:
        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as e:
        _raise_if_db_connection_lost(e, self.bind)
        _raise_if_deadlock_error(e, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # NOTE(boris-42): We should catch unique constraint violations and
    # wrap them in our own DBDuplicateEntry exception. A unique
    # constraint violation surfaces as an IntegrityError.
    except sqla_exc.IntegrityError as e:
        # NOTE(boris-42): SQLAlchemy doesn't unify errors from different
        # DBs, so we must do this ourselves. Also, some tables (for
        # example instance_types) have more than one unique constraint,
        # so we have to parse the names of the columns whose values
        # violated the constraint out of the error message.
        _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(e)
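# NOTE: For context, _wrap above is the inner function of a decorator,
# so every decorated Session method gets the same error translation.
# The sketch below mirrors that pattern rather than quoting the source;
# ``exception`` is the same module referenced in the body above, and
# only the catch-all handler is repeated for brevity.
import functools

import sqlalchemy.orm


def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        # The full chain of handlers from the body above goes here.
        except Exception as e:
            raise exception.DBError(e)
    return _wrap


class Session(sqlalchemy.orm.session.Session):
    # Subclassing Session lets the wrapper be applied per method
    # instead of monkey patching SQLAlchemy.

    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)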
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as e:
            if remaining == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(e)
            if remaining != -1:
                remaining -= 1
                LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(
                    next_interval * 2,
                    self.max_retry_interval)
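# NOTE: For context, this wrapper is the inner function of a retry
# decorator: max_retries == -1 means retry forever, and the sleep
# interval doubles on each attempt, capped at max_retry_interval. Below
# is a condensed sketch of the enclosing callable class and a usage
# example; the default values, and the ``IMPL`` backend in the usage
# example, are illustrative assumptions, not taken from the source.
import functools


class wrap_db_retry(object):
    def __init__(self, retry_interval=1, max_retries=10,
                 inc_retry_interval=True, max_retry_interval=10):
        # These attributes are exactly the ones the wrapper body
        # above reads.
        self.retry_interval = retry_interval
        self.max_retries = max_retries
        self.inc_retry_interval = inc_retry_interval
        self.max_retry_interval = max_retry_interval

    def __call__(self, f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            pass  # the retry loop shown above goes here
        return wrapper


# Usage: a DB API call that may hit a transient connection error opts
# in to retries with backoff (sleeping 1s, 2s, 4s, 5s, 5s here).
@wrap_db_retry(max_retries=5, retry_interval=1, max_retry_interval=5)
def get_instance(context, instance_id):
    return IMPL.get_instance(context, instance_id)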