def _backup_will_satisfy(self, most_recent, last_backup, now, frequency,
                         fudge_factor=0.04):
    """Decide whether ``most_recent`` can satisfy a schedule.

    :param most_recent: most recent backup candidate (falsy if none)
    :param last_backup: previous backup recorded for this schedule, if any
    :param now: current timestamp used for age calculations
    :param frequency: schedule period in seconds
    :param fudge_factor: fraction of ``frequency`` by which the spacing
        check is relaxed, so schedules don't drift out of sync
    :returns: True when the backup is recent enough and sufficiently
        spaced from the last backup for this schedule
    """
    # No backup at all can never satisfy the schedule.
    if not most_recent:
        return False

    (mr_ts, __, __) = self._backup_metadata_get(most_recent)
    age = timeutils.delta_seconds(mr_ts, now)

    # Too old to count against this schedule's period.
    if age >= frequency:
        return False

    # Require adequate spacing from the previous backup for this
    # schedule, allowing a small fudge factor so backup schedules are
    # encouraged to stay in sync.
    if last_backup:
        (lb_ts, __, __) = self._backup_metadata_get(last_backup)
        gap = timeutils.delta_seconds(lb_ts, mr_ts)
        if gap < frequency * (1.0 - fudge_factor):
            return False

    # Everything looks OK
    return True
def _inner():
    # Loop body for a fixed-interval looping call: run self.f once per
    # `interval` seconds until stopped, reporting the outcome on `done`.
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            began = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            finished = timeutils.utcnow()
            if not self._running:
                break
            # Sleep only for whatever is left of the interval; warn when
            # the task itself took longer than the interval.
            remaining = interval - timeutils.delta_seconds(began, finished)
            if remaining <= 0:
                LOG.warn(_('task run outlasted interval by %s sec')
                         % -remaining)
            greenthread.sleep(max(remaining, 0))
    except LoopingCallDone as e:
        # The task signalled a deliberate stop with a return value.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def apply(self, context, resource, provision_doc):
    """Poll until every provisioned instance reaches ACTIVE, or time out.

    Repeatedly queries the conductor for instances matching the
    provisioned UUIDs in vm_state ACTIVE; when all are ACTIVE the
    resource tracker is marked COMPLETED via the backend driver.

    :param context: request context passed to the conductor API
    :param resource: tracked resource; its tracking_id status may be
        updated to ``states.COMPLETED``
    :param provision_doc: document whose ``instances.instance_host_map``
        maps instance uuid -> host
    :returns: an empty ``orc_utils.DictableObject``
    """
    timeout = CONF.orchestration.validate_boot_timeout
    check_interval = CONF.orchestration.validate_boot_check_interval

    provisioning_result = provision_doc.instances.instance_host_map
    # Fix: the identity comprehension `[uuid for uuid in keys]` (via an
    # unused intermediate) was redundant; list() copies the keys and also
    # behaves correctly with Python 3 dict views.
    filters = {'uuid': list(provisioning_result.keys()),
               'vm_state': vm_states.ACTIVE}

    start = datetime.datetime.now()
    all_instances_active = False
    while timeutils.delta_seconds(start, datetime.datetime.now()) \
            <= timeout:
        active_instances = self.conductor_api.instance_get_all_by_filters(
            context, filters)
        if len(active_instances) == len(provisioning_result):
            all_instances_active = True
            LOG.debug("All instances ACTIVE. Request tracking %s complete",
                      resource.tracking_id)
            self.backend_driver.resource_tracker_update(
                context, resource.tracking_id,
                {'status': states.COMPLETED})
            break
        time.sleep(check_interval)

    if not all_instances_active:
        # NOTE(review): timeout path is currently a no-op — instances
        # that never go ACTIVE are silently ignored. Intended follow-up:
        # ack the hypervisor directly and/or revert.
        # Check hypervisor ack
        # revert
        pass
    return orc_utils.DictableObject()
def apply(self, context, resource, provision_doc, **kwargs):
    """Poll until every provisioned instance reaches ACTIVE, or time out.

    Repeatedly queries the conductor for instances matching the
    provisioned UUIDs in vm_state ACTIVE; when all are ACTIVE the
    resource tracker is marked COMPLETED via ``self.db``.

    :param context: request context passed to the conductor API
    :param resource: tracked resource; its tracking_id status may be
        updated to ``states.COMPLETED``
    :param provision_doc: document whose ``instances.instance_host_map``
        maps instance uuid -> host
    :param kwargs: unused; accepted for interface compatibility
    :returns: an empty ``orc_utils.DictableObject``
    """
    # TODO: Wait a given amount of time, periodically checking the database
    # to see if the instance has came online, if after X amount of time
    # it has not came online then ack the hypervisor directly to check
    # if its online, if that doesn't work, bail out by performing different
    # types of reconciliation in the revert method here...
    timeout = CONF.orchestration.validate_boot_timeout
    check_interval = CONF.orchestration.validate_boot_check_interval

    provisioning_result = provision_doc.instances.instance_host_map
    # Fix: the identity comprehension `[uuid for uuid in keys]` (via an
    # unused intermediate) was redundant; list() copies the keys and also
    # behaves correctly with Python 3 dict views.
    filters = {'uuid': list(provisioning_result.keys()),
               'vm_state': vm_states.ACTIVE}

    start = datetime.datetime.now()
    all_instances_active = False
    while timeutils.delta_seconds(start, datetime.datetime.now()) \
            <= timeout:
        active_instances = self.conductor_api.instance_get_all_by_filters(
            context, filters)
        if len(active_instances) == len(provisioning_result):
            all_instances_active = True
            LOG.debug("All instances ACTIVE. Request tracking %s complete",
                      resource.tracking_id)
            self.db.resource_tracker_update(
                context, resource.tracking_id,
                {'status': states.COMPLETED})
            break
        time.sleep(check_interval)

    if not all_instances_active:
        # NOTE(review): timeout path is currently a no-op — instances
        # that never go ACTIVE are silently ignored. Intended follow-up:
        # ack the hypervisor directly and/or revert.
        # Check hypervisor ack
        # revert
        pass
    return orc_utils.DictableObject()
def periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval.

    :param context: context passed through to each task
    :param raise_on_error: if True, re-raise task exceptions instead of
        logging them
    :returns: number of seconds the caller should idle before invoking
        this again
    """
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # If a periodic task is _nearly_ due, then we'll run it early
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        # Fix: pass explicit dicts to LOG instead of locals() — locals()
        # leaks unrelated names into the formatting and is banned by the
        # OpenStack hacking rules (H501).
        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {'full_task_name': full_task_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {'full_task_name': full_task_name, 'e': e})
        # Yield to other greenthreads between tasks.
        eventlet.sleep(0)
    return idle_for
def _inner():
    # Drive self.f on a fixed cadence until the call is stopped; the
    # final outcome (value or exception) is delivered through `done`.
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            elapsed = timeutils.delta_seconds(start, end)
            delay = interval - elapsed
            # A non-positive delay means the task ran longer than the
            # interval itself; log it and skip sleeping.
            if delay <= 0:
                LOG.warn(
                    _('task run outlasted interval by %s sec') % -delay)
            if delay > 0:
                greenthread.sleep(delay)
            else:
                greenthread.sleep(0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval.

    :param context: context passed through to each task
    :param raise_on_error: if True, re-raise task exceptions instead of
        logging them
    :returns: number of seconds the caller should idle before invoking
        this again
    """
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # If a periodic task is _nearly_ due, then we'll run it early
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        # Fix: pass explicit dicts to LOG instead of locals() — locals()
        # leaks unrelated names into the formatting and is banned by the
        # OpenStack hacking rules (H501).
        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {'full_task_name': full_task_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {'full_task_name': full_task_name, 'e': e})
        # Yield the CPU briefly between tasks.
        time.sleep(0)
    return idle_for
def _backup_is_current(self, backup, now, frequency):
    """Return True when ``backup`` exists and is newer than ``frequency``.

    :param backup: backup object to inspect (falsy if absent)
    :param now: reference timestamp
    :param frequency: freshness window in seconds
    """
    # A missing backup is never current.
    if not backup:
        return False
    ts, __, __ = self._backup_metadata_get(backup)
    # Current means the backup's age is strictly inside the window.
    age = timeutils.delta_seconds(ts, now)
    return age < frequency
def _inner():
    # Run self.f every `interval` seconds until the task raises
    # LoopingCallDone; its retvalue is then delivered through `done`.
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            # Sleep for whatever remains of the interval; warn when the
            # task itself took longer than the interval.
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec')
                         % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    # Fix: replace the Python 2-only "except X, e" syntax with the
    # "as e" form used by the other looping-call variants in this file
    # (required for Python 3 compatibility).
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
def is_up(self, service_ref):
    """Moved from nova.utils

    Check whether a service is up based on last heartbeat.

    :param service_ref: service record providing 'updated_at' and
        'created_at' timestamps
    :returns: True when the last heartbeat is within
        CONF.service_down_time seconds of now
    """
    heartbeat = service_ref['updated_at'] or service_ref['created_at']
    if isinstance(heartbeat, basestring):
        # NOTE(russellb) If this service_ref came in over rpc via
        # conductor, then the timestamp will be a string and needs to be
        # converted back to a datetime.
        heartbeat = timeutils.parse_strtime(heartbeat)
    else:
        # Objects have proper UTC timezones, but the timeutils comparison
        # below does not (and will fail) — strip the tzinfo first.
        heartbeat = heartbeat.replace(tzinfo=None)
    # Timestamps in DB are UTC.
    elapsed = timeutils.delta_seconds(heartbeat, timeutils.utcnow())
    LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
              {'lhb': str(heartbeat), 'el': str(elapsed)})
    return abs(elapsed) <= CONF.service_down_time
def _backup_needed_by(self, backup, schedules, now):
    """Return the ids of schedules that still need ``backup``.

    A schedule needs the backup when the backup was made for it
    (its id is in the backup's ``satisfies`` set) and either the
    schedule is inactive or the backup is still inside the schedule's
    retention window.

    :param backup: backup object to inspect
    :param schedules: iterable of schedule objects to check against
    :param now: reference timestamp for the retention calculation
    :returns: list of schedule ids needing this backup
    """
    needed_by = []
    (ts, __, satisfies) = self._backup_metadata_get(backup)
    # Hoisted out of the loop: the backup's age does not depend on the
    # schedule being examined.
    delta = timeutils.delta_seconds(ts, now)
    for schedule in schedules:
        (schedule_id, __, retention,
         active) = self._schedule_metadata_get(schedule)
        # Only schedules this backup was made for are candidates.
        if schedule_id in satisfies:
            # Fix: "active == False" -> "not active" (PEP 8 boolean
            # comparison idiom). Assumes ``active`` is a real boolean —
            # TODO(review) confirm upstream.
            if not active or delta < retention:
                needed_by.append(schedule_id)
    return needed_by