def run_periodic_tasks(self, context, raise_on_error=False):
    """Run every due periodic task once; return seconds to idle before
    the next run is needed."""
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        # NOTE: the log format strings below pull values out of locals(),
        # so the names `full_task_name` and `e` are load-bearing.
        full_task_name = '.'.join([self.__class__.__name__, task_name])
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]
        now = timeutils.utcnow()

        if spacing is not None and last_run is not None:
            # Run a task early when it is "nearly" due (within 0.2s);
            # otherwise just shrink the idle window toward its due time.
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          locals())
        # Yield to other greenthreads between tasks.
        time.sleep(0)

    return idle_for
def _inner():
    # Repeatedly invoke the wrapped callable on a fixed cadence until
    # self._running is cleared or the callable raises LoopingCallDone.
    if initial_delay:
        greenthread.sleep(initial_delay)
    try:
        while self._running:
            began = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            finished = timeutils.utcnow()
            if not self._running:
                break
            pause = interval - timeutils.delta_seconds(began, finished)
            if pause <= 0:
                LOG.warn(
                    _('task run outlasted interval by %s sec') % -pause)
            greenthread.sleep(max(pause, 0))
    except LoopingCallDone as e:
        # Graceful termination requested by the callable itself.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        # The loop ended because _running was cleared externally.
        done.send(True)
def _model_query(context, model, filters, fields=None):
    """Translate an API filter dict into a list of SQLAlchemy criteria
    against `model`. Returns the (possibly empty) list of criteria."""
    filters = filters or {}
    model_filters = []

    # Shared resources are visible to everyone: no narrowing at all.
    if "shared" in filters and True in filters["shared"]:
        return model_filters

    # Inject the tenant id if none is set. We don't need unqualified queries
    if not (context.is_admin or "tenant_id" in filters):
        filters["tenant_id"] = [context.tenant_id]

    # Membership (IN) filters keyed directly off columns of `model`.
    for key in ("name", "network_id", "mac_address", "tenant_id", "id"):
        if filters.get(key):
            model_filters.append(getattr(model, key).in_(filters[key]))

    if filters.get("reuse_after"):
        # Only rows deallocated at least `reuse_after` seconds ago qualify.
        reuse = (timeutils.utcnow() -
                 datetime.timedelta(seconds=filters["reuse_after"]))
        model_filters.append(model.deallocated_at <= reuse)

    if filters.get("subnet_id"):
        model_filters.append(model.subnet_id == filters["subnet_id"])

    if filters.get("deallocated"):
        model_filters.append(model.deallocated == filters["deallocated"])

    if filters.get("device_id"):
        # NOTE(review): device_id always matches against models.Port even
        # when `model` is something else -- confirm this is intentional.
        model_filters.append(
            models.Port.device_id.in_(filters["device_id"]))

    if filters.get("address"):
        model_filters.append(model.address == filters["address"])

    if filters.get("version"):
        model_filters.append(model.ip_version == filters["version"])

    if filters.get("ip_address"):
        # Addresses are stored as integers on the model.
        model_filters.append(model.address == int(filters["ip_address"]))

    if filters.get("mac_address_range_id"):
        model_filters.append(
            model.mac_address_range_id == filters["mac_address_range_id"])

    if filters.get("cidr"):
        model_filters.append(model.cidr == filters["cidr"])

    return model_filters
def deallocate_mac_address(self, context, address):
    """Soft-deallocate a MAC address so it can later be reused.

    :raises exceptions.NotFound: when no row matches `address`.
    """
    mac = db_api.mac_address_find(
        context, address=address, scope=db_api.ONE)
    if not mac:
        # Render the address in EUI form for a readable error message.
        raise exceptions.NotFound(
            message="No MAC address %s found" % netaddr.EUI(address))
    # Stamp the deallocation time; the row is kept rather than deleted.
    db_api.mac_address_update(
        context, mac, deallocated=True,
        deallocated_at=timeutils.utcnow())
def _inner():
    # Repeatedly invoke self.f on a fixed cadence until self._running is
    # cleared or the callable raises LoopingCallDone.
    if initial_delay:
        greenthread.sleep(initial_delay)
    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_("task run outlasted interval by %s sec") % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    # BUG FIX: 'except LoopingCallDone, e:' is Python 2-only syntax and a
    # SyntaxError on Python 3; use the 'as' form (valid on 2.6+ and 3.x).
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
def notify(context, publisher_id, event_type, priority, payload):
    """Dispatch a notification to every configured driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
        (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: a python dictionary of attributes
    :raises BadPriorityException: if `priority` is not a known level

    A message_id (UUID) and a GMT timestamp are appended to the outgoing
    message before it is handed to each driver from _get_drivers().
    Driver failures are logged and swallowed so that one failing driver
    cannot prevent delivery through the others.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}
    """
    if priority not in log_levels:
        raise BadPriorityException(_("%s not in valid priorities") % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
def notify(context, publisher_id, event_type, priority, payload):
    """Build a notification message and send it via each driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
        (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: a python dictionary of attributes
    :raises BadPriorityException: if `priority` is not a known level

    The outgoing message carries the parameters above plus a generated
    message_id (UUID) and a GMT timestamp, and is delivered through every
    driver returned by _get_drivers(). A driver that raises is logged and
    skipped so the remaining drivers still receive the message.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s") %
                          dict(e=e, payload=payload))
def _inner():
    # Repeatedly invoke self.f on a fixed cadence until self._running is
    # cleared or the callable raises LoopingCallDone.
    if initial_delay:
        greenthread.sleep(initial_delay)
    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(
                    _('task run outlasted interval by %s sec') % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    # BUG FIX: 'except LoopingCallDone, e:' is Python 2-only syntax and a
    # SyntaxError on Python 3; use the 'as' form (valid on 2.6+ and 3.x).
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
def update_fixed_ip_lease_expiration(self, context, network_id,
                                     ip_address, lease_remaining):
    """Extend the expiration timestamp of a fixed IP allocation.

    :param network_id: network the allocation belongs to.
    :param ip_address: the allocated IP address.
    :param lease_remaining: seconds left on the DHCP lease.
    """
    # BUG FIX: timedelta's first positional argument is *days*, not
    # seconds. lease_remaining is a number of seconds (cf. how
    # dhcp_lease_duration is used elsewhere), so pass it explicitly.
    expiration = timeutils.utcnow() + datetime.timedelta(
        seconds=lease_remaining)
    query = context.session.query(models_v2.IPAllocation)
    query = query.filter_by(network_id=network_id, ip_address=ip_address)
    try:
        fixed_ip = query.one()
    except exc.NoResultFound:
        LOG.debug("No fixed IP found that matches the network %s and "
                  "ip address %s.", network_id, ip_address)
    else:
        fixed_ip.expiration = expiration
class AgentExtRpcCallback(object):
    """Processes the rpc report in plugin implementations."""

    RPC_API_VERSION = '1.0'
    # Captured once at class definition time; used to reject reports
    # stamped before this server process came up.
    START_TIME = timeutils.utcnow()

    def report_state(self, context, **kwargs):
        """Report state from agent to server. """
        time = timeutils.parse_strtime(kwargs['time'])
        if self.START_TIME > time:
            # The report predates this server process: drop it as stale.
            LOG.debug(_("Message with invalid timestamp received"))
            return
        agent_state = kwargs['agent_state']['agent_state']
        plugin = manager.QuantumManager.get_plugin()
        plugin.create_or_update_agent(context, agent_state)
def _inner():
    # Drive the wrapped callable on a fixed interval until stopped or
    # until the callable signals completion via LoopingCallDone.
    if initial_delay:
        greenthread.sleep(initial_delay)
    try:
        while self._running:
            began = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            finished = timeutils.utcnow()
            if not self._running:
                break
            pause = interval - timeutils.delta_seconds(began, finished)
            if pause <= 0:
                LOG.warn(_("task run outlasted interval by %s sec") % -pause)
            greenthread.sleep(max(pause, 0))
    except LoopingCallDone as e:
        # The callable asked to stop; hand its return value back.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_("in fixed duration looping call"))
        done.send_exception(*sys.exc_info())
        return
    else:
        # _running was cleared externally; report a clean finish.
        done.send(True)
def decorator(f):
    """Tag `f` with the attributes the periodic-task runner inspects."""
    # Reject the old 'ticks_between_runs' calling convention outright.
    if 'ticks_between_runs' in kwargs:
        raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

    # --- whether the task runs at all ---
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop('external_process_ok', False)
    if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
        f._periodic_enabled = False
    else:
        # NOTE: 'enabled' is only consumed from kwargs on this branch.
        f._periodic_enabled = kwargs.pop('enabled', True)

    # --- how often the task runs ---
    f._periodic_spacing = kwargs.pop('spacing', 0)
    f._periodic_immediate = kwargs.pop('run_immediately', False)
    # A None last-run time makes the task due on the first pass.
    f._periodic_last_run = (None if f._periodic_immediate
                            else timeutils.utcnow())
    return f
def create_or_update_agent(self, context, agent):
    """Persist an agent status report, inserting the row if it is new."""
    with context.session.begin(subtransactions=True):
        res = {key: agent[key]
               for key in ['agent_type', 'binary', 'host', 'topic']}
        # Configurations are stored serialized as JSON.
        res['configurations'] = jsonutils.dumps(
            agent.get('configurations', {}))
        now = timeutils.utcnow()
        try:
            agent_db = self._get_agent_by_type_and_host(
                context, agent['agent_type'], agent['host'])
        except ext_agent.AgentNotFoundByTypeHost:
            # First report from this (type, host) pair: create the row.
            res['created_at'] = now
            res['started_at'] = now
            res['heartbeat_timestamp'] = now
            res['admin_state_up'] = True
            context.session.add(Agent(**res))
        else:
            # Known agent: refresh the heartbeat, and the start time too
            # when the agent reports it has (re)started.
            res['heartbeat_timestamp'] = now
            if agent.get('start_flag'):
                res['started_at'] = now
            agent_db.update(res)
def deallocated(self, val):
    # Record the deallocation time only when entering the deallocated
    # state; clearing the flag clears the timestamp as well.
    self._deallocated = val
    self.deallocated_at = timeutils.utcnow() if val else None
def soft_delete(self, session=None):
    """Flag this row as deleted without physically removing it."""
    self.deleted_at = timeutils.utcnow()
    # NOTE(review): 'deleted' holds the row id rather than a boolean --
    # presumably so unique constraints spanning 'deleted' keep working;
    # confirm against the schema conventions.
    self.deleted = self.id
    self.save(session=session)
def soft_delete(self, synchronize_session='evaluate'):
    """Soft-delete every row matched by this query in a single UPDATE."""
    values = {
        # Mirror of the instance-level soft_delete: 'deleted' gets the id.
        'deleted': literal_column('id'),
        # NOTE(review): assigning updated_at to itself is a no-op; confirm
        # whether timeutils.utcnow() was intended here instead.
        'updated_at': literal_column('updated_at'),
        'deleted_at': timeutils.utcnow(),
    }
    return self.update(values, synchronize_session=synchronize_session)
def _default_allocation_expiration():
    # A fresh allocation expires one DHCP lease duration from now.
    lease = datetime.timedelta(seconds=cfg.CONF.dhcp_lease_duration)
    return timeutils.utcnow() + lease