def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session
            ), ('_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')

            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # NOTE(boris-42): We should catch unique constraint violations and
        # wrap them in our own DBDuplicateEntry exception. A unique
        # constraint violation surfaces as an IntegrityError.
        except sqla_exc.IntegrityError as e:
            # NOTE(boris-42): SQLAlchemy doesn't unify errors from different
            # DBs, so we must do this ourselves. Also, some tables (for
            # example instance_types) have more than one unique constraint,
            # so the names of the columns whose values violate the
            # constraint have to be parsed out of the error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
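# Hedged sketch of how the decorator above is applied, following the
# Session-subclass pattern used in oslo's sqlalchemy session module;
# the wrapped method shown here is illustrative.
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class whose methods raise wrapped DB errors."""

    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)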
def _migrate_up(self, engine, version, with_data=False):
    """Migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(version,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                  (version, engine))
        raise
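# Hedged sketch of the hook pair that _migrate_up() discovers via
# getattr(). The 'instances' table and the seeded values are hypothetical;
# get_table() is the reflection helper from oslo.db's sqlalchemy utils.
# Whatever _pre_upgrade_002() returns is handed to _check_002() as `data`
# once migration 002 has been applied.
def _pre_upgrade_002(self, engine):
    data = {'id': 1, 'name': 'example'}
    instances = utils.get_table(engine, 'instances')
    engine.execute(instances.insert(), data)
    return data

def _check_002(self, engine, data):
    instances = utils.get_table(engine, 'instances')
    row = engine.execute(instances.select()).first()
    self.assertEqual(data['name'], row['name'])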
def forever_retry_uncaught_exceptions(infunc):
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes.
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _LE('Unexpected exception occurred %d time(s)... '
                            'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
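# Hedged usage sketch: applied as a decorator, the loop above keeps a
# long-running worker alive across uncaught exceptions while rate-limiting
# the log noise. The worker function and its callees are hypothetical.
@forever_retry_uncaught_exceptions
def _consume_messages(queue):
    msg = queue.get()  # may raise on transient broker failures
    _process(msg)      # hypothetical handler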
def release(self):
    try:
        self.unlock()
        self.lockfile.close()
        LOG.debug('Released file lock "%s"', self.fname)
    except IOError:
        LOG.exception(_LE("Could not release the acquired lock `%s`"),
                      self.fname)
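# Hedged sketch of the acquire/release pairing this method completes,
# assuming the enclosing class is oslo lockutils' InterProcessLock and
# exposes a matching acquire(); the lock path and the critical-section
# call are illustrative.
lock = InterProcessLock('/var/run/myapp/resource.lock')
lock.acquire()
try:
    do_critical_work()  # hypothetical
finally:
    lock.release()      # logs, rather than raises, on IOError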
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        logging.error(_LE('Original exception being dropped: %s'),
                      traceback.format_exception(self.type_,
                                                 self.value,
                                                 self.tb))
        return False
    if self.reraise:
        six.reraise(self.type_, self.value, self.tb)
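# Hedged usage sketch of the enclosing save_and_reraise_exception()
# context manager from oslo's excutils: cleanup runs while the original
# traceback is preserved, and setting reraise = False drops the exception
# deliberately. cleanup() and can_recover() are hypothetical.
try:
    do_work()
except Exception:
    with save_and_reraise_exception() as ctxt:
        cleanup()
        if can_recover():
            ctxt.reraise = False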
def __call__(self, f):
    @six.wraps(f)
    def wrapper(*args, **kwargs):
        next_interval = self.retry_interval
        remaining = self.max_retries

        while True:
            try:
                return f(*args, **kwargs)
            except exception.DBConnectionError as e:
                if remaining == 0:
                    LOG.exception(_LE('DB exceeded retry limit.'))
                    raise exception.DBError(e)
                if remaining != -1:
                    remaining -= 1
                    LOG.exception(_LE('DB connection error.'))
                # NOTE(vsergeyev): We are using patched time module, so
                #                  this effectively yields the execution
                #                  context to another green thread.
                time.sleep(next_interval)
                if self.inc_retry_interval:
                    next_interval = min(next_interval * 2,
                                        self.max_retry_interval)
    return wrapper
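# Hedged usage sketch: in oslo.db this retry logic is exposed as the
# wrap_db_retry decorator; the keyword names mirror the attributes read
# above (max_retries, retry_interval, inc_retry_interval,
# max_retry_interval), though the exact signature may vary by release.
# The decorated function and IMPL call are hypothetical.
@wrap_db_retry(max_retries=5, retry_interval=1,
               inc_retry_interval=True, max_retry_interval=10)
def create_record(values):
    return IMPL.record_create(values)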