def _inner():
    # Green thread body for a fixed-interval looping call.
    # NOTE(review): closes over self, initial_delay, interval and done
    # from the enclosing start() method -- presumably a
    # FixedIntervalLoopingCall; confirm against the enclosing class.
    if initial_delay:
        greenthread.sleep(initial_delay)
    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            # Sleep only for the remainder of the interval; warn when
            # the task itself ran longer than the interval allows.
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        # Task requested a clean stop; hand its value to waiters.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        # Loop exited because self._running was cleared externally.
        done.send(True)
def run_periodic_tasks(self, context, raise_on_error=False):
    """Execute every periodic task that is currently due.

    :param context: request context passed through to each task
    :param raise_on_error: re-raise task exceptions instead of logging
    :returns: seconds the caller may sleep before the next task is due
    """
    sleep_hint = DEFAULT_INTERVAL
    for name, task in self._periodic_tasks:
        label = ".".join([self.__class__.__name__, name])
        now = timeutils.utcnow()
        spacing = self._periodic_spacing[name]
        previous = self._periodic_last_run[name]

        # Nearly-due tasks (within 0.2s) run early; anything further
        # out only tightens the returned sleep hint.
        if spacing is not None and previous is not None:
            due = previous + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                sleep_hint = min(sleep_hint,
                                 timeutils.delta_seconds(now, due))
                continue
        if spacing is not None:
            sleep_hint = min(sleep_hint, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": label})
        self._periodic_last_run[name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": label, "e": e})
        # Cooperative yield between tasks.
        time.sleep(0)

    return sleep_hint
def test_service_is_up(self):
    """Memcached driver: service_is_up flips at service_down_time.

    utcnow_ts is mocked so the elapsed time since the stored heartbeat
    is exactly controlled: strictly less than down_time reads "up",
    down_time or more reads "down".
    """
    serv = self.useFixture(
        ServiceFixture(self._host, self._binary, self._topic)).serv
    serv.start()
    service_ref = db.service_get_by_args(self._ctx,
                                         self._host,
                                         self._binary)
    fake_now = 1000
    down_time = 15
    self.flags(service_down_time=down_time)
    self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
    self.servicegroup_api = servicegroup.API()
    hostkey = str("%s:%s" % (self._topic, self._host))

    # Up (equal)
    timeutils.utcnow_ts().AndReturn(fake_now)
    timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
    self.mox.ReplayAll()
    self.servicegroup_api._driver.mc.set(hostkey,
                                         timeutils.utcnow(),
                                         time=down_time)
    result = self.servicegroup_api.service_is_up(service_ref)
    self.assertTrue(result)
    self.mox.ResetAll()

    # Up
    timeutils.utcnow_ts().AndReturn(fake_now)
    timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
    self.mox.ReplayAll()
    self.servicegroup_api._driver.mc.set(hostkey,
                                         timeutils.utcnow(),
                                         time=down_time)
    result = self.servicegroup_api.service_is_up(service_ref)
    self.assertTrue(result)
    self.mox.ResetAll()

    # Down
    timeutils.utcnow_ts().AndReturn(fake_now)
    timeutils.utcnow_ts().AndReturn(fake_now + down_time)
    self.mox.ReplayAll()
    self.servicegroup_api._driver.mc.set(hostkey,
                                         timeutils.utcnow(),
                                         time=down_time)
    result = self.servicegroup_api.service_is_up(service_ref)
    self.assertFalse(result)
    self.mox.ResetAll()

    # Down
    timeutils.utcnow_ts().AndReturn(fake_now)
    timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
    self.mox.ReplayAll()
    self.servicegroup_api._driver.mc.set(hostkey,
                                         timeutils.utcnow(),
                                         time=down_time)
    result = self.servicegroup_api.service_is_up(service_ref)
    self.assertFalse(result)
    self.mox.ResetAll()
def test_memcached_driver(self):
    """End-to-end check of the memcached servicegroup driver.

    A running service stays "up" as long as _report_state refreshes
    its heartbeat key; once the service stops and down_time passes,
    service_is_up reads "down".
    """
    serv = self.useFixture(
        ServiceFixture(self._host, self._binary, self._topic)).serv
    serv.start()
    service_ref = db.service_get_by_args(self._ctx,
                                         self._host,
                                         self._binary)
    hostkey = str("%s:%s" % (self._topic, self._host))
    self.servicegroup_api._driver.mc.set(hostkey,
                                         timeutils.utcnow(),
                                         time=self.down_time)
    self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
    self.useFixture(test.TimeOverride())
    timeutils.advance_time_seconds(self.down_time + 1)
    # _report_state refreshes the heartbeat, so the service is still
    # "up" despite the simulated time jump.
    self.servicegroup_api._driver._report_state(serv)
    service_ref = db.service_get_by_args(self._ctx,
                                         self._host,
                                         self._binary)
    self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
    serv.stop()
    timeutils.advance_time_seconds(self.down_time + 1)
    service_ref = db.service_get_by_args(self._ctx,
                                         self._host,
                                         self._binary)
    self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
def soft_delete(self, synchronize_session='evaluate'):
    """Soft-delete every row matched by this query.

    Sets ``deleted`` to each row's own id, stamps ``deleted_at`` with
    the current UTC time, and keeps ``updated_at`` unchanged.
    """
    values = {
        'deleted': literal_column('id'),
        'updated_at': literal_column('updated_at'),
        'deleted_at': timeutils.utcnow(),
    }
    return self.update(values, synchronize_session=synchronize_session)
def test_get_all(self):
    """get_all returns only hosts with a live memcached heartbeat."""
    host1 = self._host + '_1'
    host2 = self._host + '_2'
    host3 = self._host + '_3'
    serv1 = self.useFixture(
        ServiceFixture(host1, self._binary, self._topic)).serv
    serv1.start()
    serv2 = self.useFixture(
        ServiceFixture(host2, self._binary, self._topic)).serv
    serv2.start()
    serv3 = self.useFixture(
        ServiceFixture(host3, self._binary, self._topic)).serv
    serv3.start()
    db.service_get_by_args(self._ctx, host1, self._binary)
    db.service_get_by_args(self._ctx, host2, self._binary)
    db.service_get_by_args(self._ctx, host3, self._binary)
    host1key = str("%s:%s" % (self._topic, host1))
    host2key = str("%s:%s" % (self._topic, host2))
    host3key = str("%s:%s" % (self._topic, host3))
    self.servicegroup_api._driver.mc.set(host1key,
                                         timeutils.utcnow(),
                                         time=self.down_time)
    self.servicegroup_api._driver.mc.set(host2key,
                                         timeutils.utcnow(),
                                         time=self.down_time)
    # time=-1 makes host3's heartbeat key expire immediately, so it
    # must be absent from the result.
    self.servicegroup_api._driver.mc.set(host3key,
                                         timeutils.utcnow(),
                                         time=-1)
    services = self.servicegroup_api.get_all(self._topic)
    self.assertIn(host1, services)
    self.assertIn(host2, services)
    self.assertNotIn(host3, services)
    service_id = self.servicegroup_api.get_one(self._topic)
    self.assertIn(service_id, services)
def test_service_is_up(self):
    """DB driver: service_is_up compares updated_at against now.

    NOTE(review): the heartbeat ages below use self.down_time while
    the configured threshold is the local down_time (15); the cases
    are only meaningful if both agree -- confirm the fixture sets
    self.down_time == 15.
    """
    fts_func = datetime.datetime.fromtimestamp
    fake_now = 1000
    down_time = 15
    self.flags(service_down_time=down_time)
    self.mox.StubOutWithMock(timeutils, 'utcnow')
    self.servicegroup_api = servicegroup.API()

    # Up (equal)
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time),
               'created_at': fts_func(fake_now - self.down_time)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertTrue(result)
    self.mox.ResetAll()

    # Up
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time + 1),
               'created_at': fts_func(fake_now - self.down_time + 1)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertTrue(result)
    self.mox.ResetAll()

    # Down
    timeutils.utcnow().AndReturn(fts_func(fake_now))
    service = {'updated_at': fts_func(fake_now - self.down_time - 3),
               'created_at': fts_func(fake_now - self.down_time - 3)}
    self.mox.ReplayAll()
    result = self.servicegroup_api.service_is_up(service)
    self.assertFalse(result)
def network_delete(context, gid, network_id):
    """Soft-delete a network and mark it DELETING.

    :param context: request context (unused here)
    :param gid: group id the network belongs to
    :param network_id: id of the network to delete
    :returns: dict view of the updated network row
    :raises: exception.NetworkNotFound if no live network matches
    """
    session = get_session()
    network_ref = session.query(models.Network)\
        .filter(models.Network.deleted == 0)\
        .filter(models.Network.gid == gid)\
        .filter(models.Network.network_id == network_id)\
        .first()
    # Fix: the original dereferenced network_ref unconditionally and
    # crashed with AttributeError when no row matched. Raise NotFound
    # instead, consistent with process_delete/securitygroup_delete/
    # group_delete/keypair_delete in this module.
    if network_ref is None:
        raise exception.NetworkNotFound(network_id=network_id)

    values = {}
    values["deleted"] = 1
    values["deleted_at"] = timeutils.utcnow()
    values["status"] = "DELETING"
    network_ref.update(values)
    network_ref.save(session)
    return dict(network_ref)
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete,
                                          *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True)
    old duplicate rows from the table named `table_name`.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # Each group keeps its newest row (max id); everything else goes.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        # Only live rows (deleted_at IS NULL) are candidates.
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            # Soft delete: deleted = row's own id keeps unique
            # constraints usable for future live rows.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval.

    Runs every registered task that is due (or nearly due, within
    0.2s) and returns how long the caller may sleep before the next
    task becomes due.

    :param context: request context passed through to each task
    :param raise_on_error: re-raise task exceptions instead of logging
    :returns: seconds until the next task is due, capped at
              DEFAULT_INTERVAL
    """
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # If a periodic task is _nearly_ due, then we'll run it early
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": full_task_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"), {
                "full_task_name": full_task_name,
                "e": e
            })
        # Cooperative yield so other green threads can run between
        # tasks.
        time.sleep(0)

    return idle_for
def process_delete(context, gid, pid):
    """Soft-delete a process and mark it DELETING.

    :raises: exception.ProcessNotFound when no live process matches.
    """
    session = get_session()
    query = (session.query(models.Process)
             .filter_by(deleted=0)
             .filter_by(gid=gid)
             .filter_by(pid=pid))
    process_ref = query.first()
    if process_ref is None:
        raise exception.ProcessNotFound(pid=pid)
    process_ref.update({"deleted": 1,
                        'deleted_at': timeutils.utcnow(),
                        "status": "DELETING"})
    process_ref.save(session)
    return _get_process_dict(process_ref)
def is_up(self, service_ref):
    """Moved from rack.utils

    Check whether a service is up based on last heartbeat.
    """
    heartbeat = service_ref['updated_at'] or service_ref['created_at']
    # Heartbeats arriving over RPC may be serialized strings; local
    # datetimes are normalized to naive UTC.
    if isinstance(heartbeat, six.string_types):
        heartbeat = timeutils.parse_strtime(heartbeat)
    else:
        heartbeat = heartbeat.replace(tzinfo=None)
    elapsed = timeutils.delta_seconds(heartbeat, timeutils.utcnow())
    alive = abs(elapsed) <= self.service_down_time
    if not alive:
        LOG.debug(_('Seems service is down. Last heartbeat was %(lhb)s. '
                    'Elapsed time is %(el)s'),
                  {'lhb': str(heartbeat), 'el': str(elapsed)})
    return alive
def securitygroup_delete(context, gid, securitygroup_id):
    """Soft-delete a security group and mark it DELETING.

    :raises: exception.SecuritygroupNotFound when no live row matches.
    """
    session = get_session()
    query = (session.query(models.Securitygroup)
             .filter_by(deleted=0)
             .filter_by(gid=gid)
             .filter_by(securitygroup_id=securitygroup_id))
    securitygroup_ref = query.first()
    if securitygroup_ref is None:
        raise exception.SecuritygroupNotFound(
            securitygroup_id=securitygroup_id)
    securitygroup_ref.update({"deleted": 1,
                              'deleted_at': timeutils.utcnow(),
                              "status": "DELETING"})
    securitygroup_ref.save(session)
    return dict(securitygroup_ref)
def group_delete(context, gid):
    """Soft-delete the group identified by gid.

    :raises: exception.GroupNotFound when no live group exists.
    """
    session = get_session()
    group_ref = (session.query(models.Group)
                 .filter_by(deleted=0)
                 .filter_by(gid=gid)
                 .first())
    if group_ref is None:
        raise exception.GroupNotFound(gid=gid)
    group_ref.update({"status": "DELETING",
                      "deleted": 1,
                      "deleted_at": timeutils.utcnow()})
    group_ref.save(session)
    return dict(group_ref)
def process_delete(context, gid, pid):
    """Soft-delete a process row and flag it DELETING.

    :param context: request context (unused here)
    :param gid: group id the process belongs to
    :param pid: process id
    :returns: dict view of the updated process row
    :raises: exception.ProcessNotFound if no live row matches
    """
    session = get_session()
    process_ref = session.query(models.Process). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(pid=pid). \
        first()
    if process_ref is None:
        raise exception.ProcessNotFound(pid=pid)

    process_ref.update({
        "deleted": 1,
        'deleted_at': timeutils.utcnow(),
        "status": "DELETING"
    })
    process_ref.save(session)
    return _get_process_dict(process_ref)
def keypair_delete(context, gid, keypair_id):
    """Soft-delete a keypair and mark it DELETING.

    :raises: exception.KeypairNotFound when no live keypair matches.
    """
    session = get_session()
    keypair_ref = (session.query(models.Keypair)
                   .filter_by(gid=gid)
                   .filter_by(keypair_id=keypair_id)
                   .filter_by(deleted=0)
                   .first())
    if keypair_ref is None:
        raise exception.KeypairNotFound(keypair_id=keypair_id)
    keypair_ref.update({"status": "DELETING",
                        "deleted": 1,
                        "deleted_at": timeutils.utcnow()})
    keypair_ref.save(session)
    return dict(keypair_ref)
def securitygroup_delete(context, gid, securitygroup_id):
    """Soft-delete a security group row and flag it DELETING.

    :param context: request context (unused here)
    :param gid: group id the security group belongs to
    :param securitygroup_id: id of the security group
    :returns: dict view of the updated row
    :raises: exception.SecuritygroupNotFound if no live row matches
    """
    session = get_session()
    securitygroup_ref = session.query(models.Securitygroup). \
        filter_by(deleted=0). \
        filter_by(gid=gid). \
        filter_by(securitygroup_id=securitygroup_id). \
        first()
    if securitygroup_ref is None:
        raise exception.SecuritygroupNotFound(
            securitygroup_id=securitygroup_id)

    securitygroup_ref.update({
        "deleted": 1,
        'deleted_at': timeutils.utcnow(),
        "status": "DELETING"
    })
    securitygroup_ref.save(session)
    return dict(securitygroup_ref)
def decorator(f):
    """Tag f as a periodic task and record its scheduling options."""
    # The removed old-style argument is rejected outright.
    if "ticks_between_runs" in kwargs:
        raise InvalidPeriodicTaskArg(arg="ticks_between_runs")

    # Whether the task runs at all.
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop("external_process_ok", False)
    if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
        f._periodic_enabled = False
    else:
        f._periodic_enabled = kwargs.pop("enabled", True)

    # How often the task runs; last_run=None forces an immediate
    # first run.
    f._periodic_spacing = kwargs.pop("spacing", 0)
    f._periodic_immediate = kwargs.pop("run_immediately", False)
    f._periodic_last_run = (None if f._periodic_immediate
                            else timeutils.utcnow())
    return f
def decorator(f):
    """Tag the wrapped function as a periodic task.

    Reads options from the enclosing call's kwargs:
    external_process_ok, enabled, spacing, run_immediately.
    """
    # Test for old style invocation
    if 'ticks_between_runs' in kwargs:
        raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

    # Control if run at all
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop('external_process_ok', False)
    if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
        f._periodic_enabled = False
    else:
        f._periodic_enabled = kwargs.pop('enabled', True)

    # Control frequency
    f._periodic_spacing = kwargs.pop('spacing', 0)
    f._periodic_immediate = kwargs.pop('run_immediately', False)
    if f._periodic_immediate:
        # None makes the task due on the very first pass.
        f._periodic_last_run = None
    else:
        f._periodic_last_run = timeutils.utcnow()
    return f
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete,
                                          *uc_column_names):
    """This method is used to drop all old rows that have the same values
    for columns in uc_columns.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: table to scan for duplicates
    :param use_soft_delete: soft-delete rows instead of removing them
    :param uc_column_names: unique constraint columns

    NOTE(review): unlike newer revisions of this helper, soft-deleted
    rows are not excluded from the duplicate scan (no deleted_at
    condition) -- already-deleted rows may be re-processed; confirm
    this is intended.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # Each group keeps its newest row (max id); everything else goes.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(list(columns_for_group_by))

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def _report_state(self, service):
    """Update the state of this service in the datastore."""
    # NOTE(review): the admin context is created but its return value
    # is discarded -- looks vestigial; confirm nothing depends on side
    # effects of get_admin_context().
    context.get_admin_context()
    try:
        key = "%(topic)s:%(host)s" % service.service_ref
        # memcached has data expiration time capability.
        # set(..., time=CONF.service_down_time) uses it and
        # reduces key-deleting code.
        self.mc.set(str(key),
                    timeutils.utcnow(),
                    time=CONF.service_down_time)

        # TODO(termie): make this pattern be more elegant.
        if getattr(service, 'model_disconnected', False):
            service.model_disconnected = False
            LOG.error(_('Recovered model server connection!'))

    # TODO(vish): this should probably only catch connection errors
    except Exception:  # pylint: disable=W0702
        # Log the disconnect once, not on every failed heartbeat.
        if not getattr(service, 'model_disconnected', False):
            service.model_disconnected = True
            LOG.exception(_('model server went away'))
def last_completed_audit_period(unit=None, before=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
        units: string, one of 'hour', 'day', 'month', 'year'
            Periods normally begin at the beginning (UTC) of the
            period unit (So a 'day' period begins at midnight UTC,
            a 'month' unit on the 1st, a 'year' on Jan, 1)
            unit string may be appended with an optional offset
            like so: 'day@18'  This will begin the period at 18:00
            UTC.  'month@15' starts a monthly period on the 15th,
            and year@3 begins a yearly one on March 1st.
        before: Give the audit period most recently completed before
            <timestamp>. Defaults to now.

    returns: 2 tuple of datetimes (begin, end)
        The begin timestamp of this audit period is the same as the
        end of the previous.
    """
    if not unit:
        unit = CONF.instance_usage_audit_period

    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    if before is not None:
        rightnow = before
    else:
        rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            # That start day hasn't arrived this month yet; step the
            # period end back one month (wrapping the year if needed).
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset, month=month, year=year)
        # The period begin is one month before its end.
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)
    elif unit == 'year':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)
    elif unit == 'day':
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)
    elif unit == 'hour':
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
def soft_delete(self, session):
    """Mark this object as deleted.

    Sets deleted to the row's own id (keeps unique constraints usable
    for future live rows), stamps deleted_at with the current UTC
    time, and flushes via save().
    """
    self.deleted = self.id
    self.deleted_at = timeutils.utcnow()
    self.save(session=session)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, instance_lock_checked=False, **kwargs):
    """:param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(
            _('Arguments dropped when creating context: %s') % str(kwargs))
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    # Accept either a datetime or an RPC-serialized string timestamp.
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token

    if service_catalog:
        # Only include required parts of service_catalog
        self.service_catalog = [
            s for s in service_catalog
            if s.get('type') in ('identity', 'image', 'network', 'compute')
        ]
    else:
        # if list is empty or none
        self.service_catalog = []

    self.instance_lock_checked = instance_lock_checked

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name
    self.is_admin = is_admin
    if self.is_admin is None:
        # Derive admin-ness from policy when not given explicitly.
        self.is_admin = policy.check_is_admin(self)
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def soft_delete(self, synchronize_session='evaluate'):
    """Soft-delete every row matched by this query.

    Sets ``deleted`` to each row's own id, stamps ``deleted_at`` with
    the current UTC time, and leaves ``updated_at`` unchanged.

    :param synchronize_session: passed through to Query.update()
    :returns: number of rows matched, as returned by Query.update()
    """
    return self.update({'deleted': literal_column('id'),
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()},
                       synchronize_session=synchronize_session)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, user_name=None, project_name=None,
             service_catalog=None, instance_lock_checked=False, **kwargs):
    """:param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible,
        'only' indicates that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_("Arguments dropped when creating context: %s")
                 % str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.read_deleted = read_deleted
    self.remote_address = remote_address

    # Normalize the timestamp: default to now, and accept the
    # RPC-serialized string form as well as a datetime.
    ts = timestamp or timeutils.utcnow()
    if isinstance(ts, six.string_types):
        ts = timeutils.parse_strtime(ts)
    self.timestamp = ts

    self.request_id = request_id or generate_request_id()
    self.auth_token = auth_token

    if service_catalog:
        # Keep only the catalog entries this service actually needs.
        wanted = ("identity", "image", "network", "compute")
        self.service_catalog = [entry for entry in service_catalog
                                if entry.get("type") in wanted]
    else:
        self.service_catalog = []

    self.instance_lock_checked = instance_lock_checked

    # NOTE(markmc): this attribute is currently only used by the
    # rs_limits turnstile pre-processor.
    # See https://lists.launchpad.net/openstack/msg12200.html
    self.quota_class = quota_class
    self.user_name = user_name
    self.project_name = project_name

    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = policy.check_is_admin(self)
    if overwrite or not hasattr(local.store, "context"):
        self.update_store()
class TimestampMixin(object):
    """SQLAlchemy mixin adding created_at/updated_at audit columns."""
    # The lambdas defer the utcnow() call to INSERT/UPDATE time;
    # writing timeutils.utcnow() directly would freeze the value at
    # import time.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())