def _inner():
    # Give the caller a chance to delay the first run.
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            # Sleep only for whatever is left of the interval; warn if the
            # task itself ran longer than the interval.
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
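# Usage sketch (assumption): `_inner` above is the greenthread body spawned by
# an oslo-style FixedIntervalLoopingCall.start(); the import path, the
# report_state task and the interval values below are illustrative, not taken
# from this module.
from openstack.common import loopingcall  # assumed module location


def report_state():
    print('heartbeat')


timer = loopingcall.FixedIntervalLoopingCall(report_state)
timer.start(interval=10, initial_delay=5)  # spawns the loop shown above
timer.wait()  # blocks until stop() is called or the task raises LoopingCallDone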
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval."""
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # If a periodic task is _nearly_ due, then we'll run it early
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": full_task_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": full_task_name, "e": e})
        time.sleep(0)

    return idle_for
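# Driver-loop sketch (assumption): run_periodic_tasks() is normally called
# from a service loop that sleeps for the returned idle_for; the
# periodic_tasks_loop helper below is hypothetical and only illustrates that
# contract.
import time


def periodic_tasks_loop(manager, context):
    while True:
        idle_for = manager.run_periodic_tasks(context, raise_on_error=False)
        # Sleep until the next task is (nearly) due, as reported above.
        time.sleep(idle_for)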
def soft_delete(self, synchronize_session='evaluate'):
    return self.update({'deleted': literal_column('id'),
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()},
                       synchronize_session=synchronize_session)
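# Usage sketch (assumption): this soft_delete() extends a SQLAlchemy Query
# subclass, so it is normally chained onto a filtered query; model_query()
# and the Volume model are illustrative names.
count = model_query(context, Volume).\
    filter_by(id=volume_id).\
    soft_delete(synchronize_session=False)
# `count` is the number of rows marked deleted, as returned by Query.update().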
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, instance_lock_checked=False, **kwargs):
    """Initialize this RequestContext.

    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s') %
                 str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token

    if service_catalog:
        # Only include required parts of service_catalog
        self.service_catalog = [s for s in service_catalog
                                if s.get('type') in ('volume',)]
    else:
        # if list is empty or none
        self.service_catalog = []

    self.instance_lock_checked = instance_lock_checked
    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = policy.check_is_admin(self)
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
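# Construction sketch: RequestContext is the class this __init__ belongs to,
# per its docstring; the argument values below are illustrative.
ctxt = RequestContext(user_id='fake-user',
                      project_id='fake-project',
                      roles=['member'],
                      read_deleted='no',
                      overwrite=False)  # leave the greenthread-local context alone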
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s") %
                          dict(e=e, payload=payload))
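# Call sketch: the field values mirror the docstring's message example; the
# notifier_api module alias is an assumed import name, and `context` stands
# for the caller's RequestContext.
notifier_api.notify(context,
                    publisher_id='compute.host1',
                    event_type='compute.create_instance',
                    priority='INFO',
                    payload={'instance_id': 12})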
def decorator(f):
    # Test for old style invocation
    if 'ticks_between_runs' in kwargs:
        raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

    # Control if run at all
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop('external_process_ok', False)
    if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
        f._periodic_enabled = False
    else:
        f._periodic_enabled = kwargs.pop('enabled', True)

    # Control frequency
    f._periodic_spacing = kwargs.pop('spacing', 0)
    f._periodic_immediate = kwargs.pop('run_immediately', False)
    if f._periodic_immediate:
        f._periodic_last_run = None
    else:
        f._periodic_last_run = timeutils.utcnow()
    return f
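# Decorator usage sketch (assumption): `decorator` above is returned by the
# usual periodic_task(**kwargs) factory, so tasks are declared as below; the
# manager class, task name and spacing value are illustrative.
class ExampleManager(object):
    @periodic_task.periodic_task(spacing=60, run_immediately=True)
    def _report_usage(self, context):
        pass  # periodic work goes here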
def soft_delete(self, session=None):
    """Mark this object as deleted."""
    self.deleted = self.id
    self.deleted_at = timeutils.utcnow()
    self.save(session=session)
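# Usage sketch (assumption): get_session() and the Volume model are
# illustrative names for the usual SQLAlchemy plumbing around these models.
session = get_session()
volume = session.query(Volume).filter_by(id=volume_id).first()
volume.soft_delete(session=session)  # sets deleted/deleted_at, then save()s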