def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_LW('task run outlasted interval by %s sec') %
                         -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
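For context, a hedged sketch of how a fixed-interval loop like this is typically started via the oslo-incubator loopingcall module; the import path and the _report_state callback are illustrative assumptions, not part of the code above.

# Hypothetical usage sketch; module path and callback are illustrative.
from openstack.common import loopingcall

def _report_state():
    pass  # periodic work goes here

timer = loopingcall.FixedIntervalLoopingCall(_report_state)
timer.start(interval=60, initial_delay=10)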
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval."""
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # Check if due, if not skip
        idle_for = min(idle_for, spacing)
        if last_run is not None:
            delta = last_run + spacing - time.time()
            if delta > 0:
                idle_for = min(idle_for, delta)
                continue

        LOG.debug("Running periodic task %(full_task_name)s",
                  {"full_task_name": full_task_name})
        self._periodic_last_run[task_name] = _nearest_boundary(
            last_run, spacing)

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": full_task_name, "e": e})
        time.sleep(0)

    return idle_for
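A minimal sketch of how entries get into self._periodic_tasks, assuming the periodic_task decorator and PeriodicTasks base class from the same oslo-incubator module; the Manager class, task name, and spacing value are illustrative.

from openstack.common import periodic_task

class Manager(periodic_task.PeriodicTasks):

    @periodic_task.periodic_task(spacing=60)
    def _sync_power_states(self, context):
        pass  # runs at most once every 60 seconds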
def _wait_for_exit_or_signal(self, ready_callback=None):
    status = None
    signo = 0

    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    try:
        if ready_callback:
            ready_callback()
        super(ServiceLauncher, self).wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    finally:
        self.stop()
        if rpc:
            try:
                rpc.cleanup()
            except Exception:
                # We're shutting down, so it doesn't matter at this point.
                LOG.exception(_LE('Exception during rpc cleanup.'))

    return status, signo
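A hedged reconstruction of the enclosing wait() loop that calls this helper; handle_signal, _is_sighup_and_daemon, and restart follow the usual oslo-incubator ServiceLauncher pattern and are assumptions here.

def wait(self, ready_callback=None):
    # Re-enter the wait after SIGHUP so the service can reload its config.
    while True:
        self.handle_signal()
        status, signo = self._wait_for_exit_or_signal(ready_callback)
        if not _is_sighup_and_daemon(signo):
            return status
        self.restart()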
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            idle = self.f(*self.args, **self.kw)
            if not self._running:
                break

            if periodic_interval_max is not None:
                idle = min(idle, periodic_interval_max)
            LOG.debug('Dynamic looping call sleeping for %.02f '
                      'seconds', idle)
            greenthread.sleep(idle)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in dynamic looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
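Usage sketch for the dynamic variant: here the callback itself returns how long to idle before the next run. The module path and the _run_tasks callback are illustrative assumptions.

from openstack.common import loopingcall

def _run_tasks():
    # Return the number of seconds to sleep before the next call.
    return 5.0

timer = loopingcall.DynamicLoopingCall(_run_tasks)
timer.start(initial_delay=None, periodic_interval_max=30).wait()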
def _wrap(self, *args, **kwargs):
    try:
        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as e:
        _raise_if_db_connection_lost(e, self.bind)
        _raise_if_deadlock_error(e, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(e)
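A sketch of the decorator shell that produces _wrap, assuming the standard functools.wraps pattern used around it in the session module.

import functools

def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        pass  # the try/except body shown above goes here
    return _wrap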
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = _ts()
            self.f(*self.args, **self.kw)
            end = _ts()
            if not self._running:
                break
            delay = end - start - interval
            if delay > 0:
                LOG.warn(_LW('task %(func_name)s run outlasted '
                             'interval by %(delay).2f sec'),
                         {'func_name': repr(self.f), 'delay': delay})
            greenthread.sleep(-delay if delay < 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def _migrate_up(self, engine, version, with_data=False):
    """migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(
            version,
            self.migration_api.db_version(engine, self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                  (version, engine))
        raise
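To illustrate the hook convention, a hypothetical pair of per-version hooks that _migrate_up discovers via getattr; the version number and fixture data are made up.

def _pre_upgrade_006(self, engine):
    # Seed fixture rows that migration 006 must transform, and hand
    # them to the matching _check_ hook via the return value.
    return {'expected_id': 1}

def _check_006(self, engine, data):
    # Verify migration 006 preserved the seeded data.
    self.assertEqual(1, data['expected_id'])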
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            idle = self.f(*self.args, **self.kw)
            if not self._running:
                break

            if periodic_interval_max is not None:
                idle = min(idle, periodic_interval_max)
            LOG.debug('Dynamic looping call %(func_name)s sleeping '
                      'for %(idle).02f seconds',
                      {'func_name': repr(self.f), 'idle': idle})
            greenthread.sleep(idle)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in dynamic looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def inner_func(*args, **kwargs):
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            this_exc_message = six.u(str(exc))
            if this_exc_message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if (cur_time - last_log_time > 60 or
                    this_exc_message != last_exc_message):
                logging.exception(
                    _LE('Unexpected exception occurred %d time(s)... '
                        'retrying.') % exc_count)
                last_log_time = cur_time
                last_exc_message = this_exc_message
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)
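The decorator shell around inner_func, as a minimal sketch following the usual oslo excutils pattern.

def forever_retry_uncaught_exceptions(infunc):
    def inner_func(*args, **kwargs):
        pass  # the retry loop shown above goes here
    return inner_func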
def release(self):
    try:
        self.unlock()
        self.lockfile.close()
        LOG.debug('Released file lock "%s"', self.fname)
    except IOError:
        LOG.exception(_LE("Could not release the acquired lock `%s`"),
                      self.fname)
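Hedged usage sketch: file-lock objects like this are usually driven as context managers so release() runs even when the critical section raises. The class name, lock path, and helper are illustrative assumptions.

lock = InterProcessLock('/var/lock/myapp/state.lock')
with lock:
    update_shared_state()  # illustrative; release() runs on exit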
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        if self.reraise:
            logging.error(_LE('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
        return False
    if self.reraise:
        six.reraise(self.type_, self.value, self.tb)
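For reference, the usage pattern for this context manager (save_and_reraise_exception in oslo excutils): reraise defaults to True, and callers flip it off when they handle the error locally. The do_work, can_handle_locally, and handle_locally helpers are illustrative.

from openstack.common import excutils

try:
    do_work()
except Exception:
    with excutils.save_and_reraise_exception() as ctxt:
        if can_handle_locally():
            ctxt.reraise = False
            handle_locally()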
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as e:
            if remaining == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(e)
            if remaining != -1:
                remaining -= 1
            LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(
                    next_interval * 2,
                    self.max_retry_interval
                )
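A hedged sketch of applying the class-based retry decorator this wrapper belongs to; the parameter names mirror the attributes read above (retry_interval, max_retries, inc_retry_interval, max_retry_interval), while the decorated function and its helper are illustrative.

@wrap_db_retry(max_retries=5, retry_interval=1,
               inc_retry_interval=True, max_retry_interval=10)
def instance_get(context, instance_id):
    return _query_instance(context, instance_id)  # illustrative helper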
def downgrade(self, version):
    try:
        # version for migrate should be valid int - else skip
        if version in ('base', None):
            version = self.init_version
        version = int(version)
        return migration.db_sync(
            self.engine, self.repository, version,
            init_version=self.init_version)
    except ValueError:
        LOG.error(
            _LE('Migration number for migrate plugin must be valid '
                'integer or empty, if you want to downgrade '
                'to initial state'))
        raise
def _child_wait_for_exit_or_signal(self, launcher):
    status = 0
    signo = 0

    # NOTE(johannes): All exceptions are caught to ensure this
    # doesn't fallback into the loop spawning children. It would
    # be bad for a child to spawn more children.
    try:
        launcher.wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Child caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    except BaseException:
        LOG.exception(_LE('Unhandled exception'))
        status = 2
    finally:
        launcher.stop()

    return status, signo