def _inner():
    """Greenthread body: call self.f repeatedly on a fixed interval.

    Sleeps off any ``initial_delay`` first, then loops until the
    looping call is stopped or the callback raises.
    """
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            started_at = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            finished_at = timeutils.utcnow()
            if not self._running:
                break
            remaining = interval - timeutils.delta_seconds(started_at,
                                                           finished_at)
            if remaining <= 0:
                # The callback ran longer than the interval itself.
                LOG.warn(_('task run outlasted interval by %s sec')
                         % -remaining)
            greenthread.sleep(max(remaining, 0))
    except LoopingCallDone as e:
        # Deliberate termination: report the callback's return value.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def run_periodic_tasks(self, context, raise_on_error=False):
    """Run every registered periodic task that is currently due.

    :param context: request context handed to each task
    :param raise_on_error: re-raise task exceptions instead of logging
    :returns: seconds to idle before tasks should be checked again
    """
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        qualified = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # A task that is *nearly* due (within 0.2s) runs early;
        # otherwise remember how long until it is due and skip it.
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": qualified})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": qualified, "e": e})
        # Yield to other greenthreads between tasks.
        time.sleep(0)

    return idle_for
def _inner():
    """Fixed-interval loop: invoke self.f until stopped or it raises."""
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            begin = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            finish = timeutils.utcnow()
            if not self._running:
                break
            sleep_for = interval - timeutils.delta_seconds(begin, finish)
            if sleep_for <= 0:
                # The task overran its own interval; warn and loop again
                # immediately.
                LOG.warn(_('task run outlasted interval by %s sec')
                         % -sleep_for)
            greenthread.sleep(sleep_for if sleep_for > 0 else 0)
    except LoopingCallDone as e:
        # Normal shutdown requested by the callback itself.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def restore_resource(self, data):
    """Notify the master service that this order's resource was restored."""
    stamp = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(stamp)
    remarks = '%s Has Been Restored' % data.resource_type.capitalize()
    self.master_api.resource_restore(
        request.context, self._id, action_time, remarks)
def delete_resource(self, data):
    """Update the order when its resource is deleted."""
    stamp = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(stamp)
    remarks = '%s Has Been Deleted' % data.resource_type.capitalize()
    self.master_api.resource_deleted(
        request.context, self._id, action_time, remarks)
def delete_resource(self, data):
    """Tell the master API this order's resource has been deleted."""
    now_str = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(now_str)
    remarks = '%s Has Been Deleted' % data.resource_type.capitalize()
    self.master_api.resource_deleted(request.context, self._id,
                                     action_time, remarks)
def stop_resource(self, data):
    """Notify the master service that this order's resource was stopped."""
    stamp = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(stamp)

    if data.resource_type == 'instance':
        # Instances have a dedicated stop notification without remarks.
        self.master_api.instance_stopped(
            request.context, self._id, action_time)
    else:
        remarks = '%s Has Been Stopped' % data.resource_type.capitalize()
        self.master_api.resource_stopped(
            request.context, self._id, action_time, remarks)
def stop_resource(self, data):
    """Report a stopped resource to the master API for this order."""
    now_str = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(now_str)

    if data.resource_type != 'instance':
        remarks = '%s Has Been Stopped' % data.resource_type.capitalize()
        self.master_api.resource_stopped(request.context, self._id,
                                         action_time, remarks)
    else:
        # Instances use their own stop notification (no remarks field).
        self.master_api.instance_stopped(request.context, self._id,
                                         action_time)
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True)
    old duplicate rows from the table named `table_name`, keeping only
    the row with the highest id in each duplicate group.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    # For every duplicate group select the max id — that is the row we keep.
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(
        columns_for_select,
        group_by=columns_for_group_by,
        having=func.count(table.c.id) > 1
    )

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        # Skip rows that are already soft-deleted.
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        # Log each row that is about to be removed / soft-deleted.
        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(
                _("Deleting duplicated row with id: %(id)s from table: "
                  "%(table)s") % dict(id=row[0], table=table_name)
            )

        if use_soft_delete:
            # Soft delete: set deleted = id and stamp deleted_at;
            # updated_at is assigned to itself (left unchanged).
            delete_statement = (
                table.update()
                .where(delete_condition)
                .values(
                    {
                        "deleted": literal_column("id"),
                        "updated_at": literal_column("updated_at"),
                        "deleted_at": timeutils.utcnow(),
                    }
                )
            )
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def will_expire_soon(expires, stale_duration=None):
    """Determine whether an expiration time falls inside the stale window.

    :param expires: the expiration datetime to test
    :param stale_duration: window in seconds; defaults to
        STALE_TOKEN_DURATION when None
    :return: boolean : true if expiration is within the given duration
    """
    if stale_duration is None:
        window = STALE_TOKEN_DURATION
    else:
        window = int(stale_duration)
    cutoff = timeutils.utcnow() + datetime.timedelta(seconds=window)
    return timeutils.normalize_time(expires) < cutoff
def notify(context, publisher_id, event_type, priority, payload):
    """Build a notification message and hand it to every configured driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
                     (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    The outgoing message carries the parameters above plus a generated
    ``message_id`` (a UUID) and a ``timestamp`` (GMT time of sending).
    A failure in one driver is logged and does not stop delivery to the
    remaining drivers.
    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = {
        'message_id': str(uuid.uuid4()),
        'publisher_id': publisher_id,
        'event_type': event_type,
        'priority': priority,
        'payload': payload,
        'timestamp': str(timeutils.utcnow()),
    }

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
def notify(context, publisher_id, event_type, priority, payload):
    """Send a notification through every registered driver.

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: one of the Python logging level names in the set
                     (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    The message sent to the transport is the dictionary of the above
    attributes extended with a fresh ``message_id`` UUID and a GMT
    ``timestamp``.  Per-driver errors are logged and swallowed so the
    other drivers still receive the message.
    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') %
                                   priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
def _pre_deduct(self, order_id):
    """Create an hourly bill for an order and react to owed results.

    Under a per-order lock: fetch the order, skip ignored tenants and
    already-deleted resources, then ask gclient to create a bill.  If
    the billing result says the order is owed, stop the resource and
    schedule a dated job; if the account was charged while the order is
    still owed, remove the previously scheduled job.

    All exceptions are caught and logged so a failed deduction never
    propagates to the caller.
    """
    LOG.warn("Prededucting order: %s", order_id)
    try:
        with self._get_lock(order_id):
            # check resource and order before deduct
            order = self.gclient.get_order(order_id)

            # do not deduct doctor project for now
            if order['project_id'] in cfg.CONF.ignore_tenants:
                return

            method = self.RESOURCE_GET_MAP[order['type']]
            resource = method(order['resource_id'], order['region_id'])
            if not resource:
                LOG.warn("The resource(%s|%s) has been deleted",
                         order['type'], order['resource_id'])
                return

            # cron_time may arrive as a string or a datetime; normalize
            # to a datetime before comparing against now.
            if isinstance(order['cron_time'], basestring):
                cron_time = timeutils.parse_strtime(
                    order['cron_time'], fmt=ISO8601_UTC_TIME_FORMAT)
            else:
                cron_time = order['cron_time']

            remarks = 'Hourly Billing'
            now = timeutils.utcnow()
            # If billing is an hour or more late, stamp the bill with
            # the current time instead of the scheduled cron time.
            if now - cron_time >= datetime.timedelta(hours=1):
                result = self.gclient.create_bill(order_id, action_time=now,
                                                  remarks=remarks)
            else:
                result = self.gclient.create_bill(order_id, remarks=remarks)

            # Order is owed
            if result['type'] == const.BILL_ORDER_OWED:
                self._stop_owed_resource(result['resource_type'],
                                         result['resource_id'],
                                         result['region_id'])
                self._create_date_job(order_id,
                                      result['resource_type'],
                                      result['resource_id'],
                                      result['region_id'],
                                      result['date_time'])
            # Account is charged but order is still owed
            elif result['type'] == const.BILL_OWED_ACCOUNT_CHARGED:
                self._delete_date_job(order_id)
    except Exception as e:
        LOG.warn("Some exceptions happen when deducting order: %s, "
                 "for reason: %s", order_id, e)
def _pre_deduct(self, order_id):
    """Bill one order for the hour, handling owed-order outcomes.

    Runs under a per-order lock.  Skips ignored tenants and resources
    that no longer exist.  Depending on the billing result, stops the
    owed resource and creates a dated job, or deletes the existing
    dated job once the account has been charged.

    Never raises: any failure is logged and swallowed.
    """
    LOG.warn("Prededucting order: %s", order_id)
    try:
        with self._get_lock(order_id):
            # check resource and order before deduct
            order = self.gclient.get_order(order_id)

            # do not deduct doctor project for now
            if order['project_id'] in cfg.CONF.ignore_tenants:
                return

            method = self.RESOURCE_GET_MAP[order['type']]
            resource = method(order['resource_id'], order['region_id'])
            if not resource:
                LOG.warn("The resource(%s|%s) has been deleted",
                         order['type'], order['resource_id'])
                return

            # cron_time may be serialized as a string; parse it if so.
            if isinstance(order['cron_time'], basestring):
                cron_time = timeutils.parse_strtime(
                    order['cron_time'], fmt=ISO8601_UTC_TIME_FORMAT)
            else:
                cron_time = order['cron_time']

            remarks = 'Hourly Billing'
            now = timeutils.utcnow()
            # When an hour or more overdue, bill with the actual time
            # rather than the scheduled cron time.
            if now - cron_time >= datetime.timedelta(hours=1):
                result = self.gclient.create_bill(order_id, action_time=now,
                                                  remarks=remarks)
            else:
                result = self.gclient.create_bill(order_id, remarks=remarks)

            # Order is owed
            if result['type'] == const.BILL_ORDER_OWED:
                self._stop_owed_resource(result['resource_type'],
                                         result['resource_id'],
                                         result['region_id'])
                self._create_date_job(order_id,
                                      result['resource_type'],
                                      result['resource_id'],
                                      result['region_id'],
                                      result['date_time'])
            # Account is charged but order is still owed
            elif result['type'] == const.BILL_OWED_ACCOUNT_CHARGED:
                self._delete_date_job(order_id)
    except Exception as e:
        LOG.warn(
            "Some exceptions happen when deducting order: %s, "
            "for reason: %s", order_id, e)
def run_periodic_tasks(self, context, raise_on_error=False):
    """Execute due periodic tasks and compute the next idle period.

    :param context: context object passed to every task
    :param raise_on_error: when True, task exceptions propagate;
        otherwise they are logged and the loop continues
    :returns: number of seconds to sleep before checking again
    """
    idle_for = DEFAULT_INTERVAL
    for task_name, task in self._periodic_tasks:
        dotted_name = '.'.join([self.__class__.__name__, task_name])

        now = timeutils.utcnow()
        spacing = self._periodic_spacing[task_name]
        last_run = self._periodic_last_run[task_name]

        # Run tasks that are within 0.2s of being due; for anything
        # further out, shrink idle_for toward its due time and move on.
        if spacing is not None and last_run is not None:
            due = last_run + datetime.timedelta(seconds=spacing)
            if not timeutils.is_soon(due, 0.2):
                idle_for = min(idle_for,
                               timeutils.delta_seconds(now, due))
                continue

        if spacing is not None:
            idle_for = min(idle_for, spacing)

        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  {"full_task_name": dotted_name})
        self._periodic_last_run[task_name] = timeutils.utcnow()

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          {"full_task_name": dotted_name, "e": e})
        # Cooperative yield between tasks.
        time.sleep(0)

    return idle_for
def resize_resource(self, data):
    """Update the order after its resource has been resized."""
    stamp = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(stamp)
    remarks = '%s Has Been Resized' % data.resource_type.capitalize()

    self._validate_resize(data.as_dict())

    if data.resource_type == 'instance':
        # Instances carry flavor/service/region details on resize.
        self.master_api.instance_resized(
            request.context, self._id, action_time,
            data.new_flavor, data.old_flavor,
            data.service, data.region_id, remarks)
    else:
        self.master_api.resource_resized(
            request.context, self._id, action_time,
            data.quantity, remarks)
def resize_resource(self, data):
    """Propagate a resource resize to the master billing service."""
    now_str = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(now_str)
    remarks = '%s Has Been Resized' % data.resource_type.capitalize()

    self._validate_resize(data.as_dict())

    if data.resource_type != 'instance':
        self.master_api.resource_resized(request.context, self._id,
                                         action_time, data.quantity,
                                         remarks)
    else:
        # Instance resizes include old/new flavor and location details.
        self.master_api.instance_resized(request.context, self._id,
                                         action_time,
                                         data.new_flavor, data.old_flavor,
                                         data.service, data.region_id,
                                         remarks)
def post(self, data):
    """Create an order and schedule its billing notification.

    Monthly/yearly orders get a scheduled monthly job; everything else
    triggers an immediate resource-created notification.  Failures are
    logged, not raised.
    """
    conn = pecan.request.db_conn
    try:
        order = conn.create_order(request.context, **data.as_dict())
        if order.unit in ['month', 'year']:
            self.master_api.create_monthly_job(
                request.context, order.order_id,
                timeutils.isotime(order.cron_time))
        else:
            stamp = timeutils.strtime(timeutils.utcnow())
            action_time = gringutils.format_datetime(stamp)
            remarks = '%s Has Been Created.' % order.type.capitalize()
            self.master_api.resource_created(request.context,
                                             order.order_id,
                                             action_time, remarks)
    except Exception as e:
        LOG.exception('Fail to create order: %s, for reason %s' %
                      (data.as_dict(), e))
def post(self, data):
    """Persist a new order, then kick off the matching billing job."""
    conn = pecan.request.db_conn
    try:
        order = conn.create_order(request.context, **data.as_dict())
        if order.unit not in ['month', 'year']:
            # Hourly-style orders notify the master immediately.
            now_str = timeutils.strtime(timeutils.utcnow())
            action_time = gringutils.format_datetime(now_str)
            remarks = '%s Has Been Created.' % order.type.capitalize()
            self.master_api.resource_created(request.context,
                                             order.order_id,
                                             action_time, remarks)
        else:
            # Month/year orders are billed by a scheduled job instead.
            self.master_api.create_monthly_job(
                request.context, order.order_id,
                timeutils.isotime(order.cron_time))
    except Exception as e:
        LOG.exception('Fail to create order: %s, for reason %s' %
                      (data.as_dict(), e))
def decorator(f):
    """Mark *f* as a periodic task and attach its scheduling metadata."""
    # Reject the removed old-style keyword early.
    if 'ticks_between_runs' in kwargs:
        raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

    # Whether the task runs at all.
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop('external_process_ok', False)
    disabled_externally = (f._periodic_external_ok
                           and not CONF.run_external_periodic_tasks)
    # NOTE: the conditional expression short-circuits, so 'enabled' is
    # only popped from kwargs when the task is not externally disabled.
    f._periodic_enabled = (False if disabled_externally
                           else kwargs.pop('enabled', True))

    # How often the task runs.
    f._periodic_spacing = kwargs.pop('spacing', 0)
    f._periodic_immediate = kwargs.pop('run_immediately', False)
    # Immediate tasks have no last-run time, so they fire right away.
    f._periodic_last_run = (None if f._periodic_immediate
                            else timeutils.utcnow())
    return f
def decorator(f):
    """Tag the wrapped function as a periodic task.

    Stores enablement and frequency settings as attributes on *f*,
    consuming the corresponding keyword arguments.
    """
    # Old-style invocation is no longer supported.
    if 'ticks_between_runs' in kwargs:
        raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

    # Control if run at all.
    f._periodic_task = True
    f._periodic_external_ok = kwargs.pop('external_process_ok', False)
    if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
        # External-process tasks are disabled by configuration; note
        # that 'enabled' is deliberately left in kwargs in this branch.
        f._periodic_enabled = False
    else:
        f._periodic_enabled = kwargs.pop('enabled', True)

    # Control frequency.
    f._periodic_spacing = kwargs.pop('spacing', 0)
    f._periodic_immediate = kwargs.pop('run_immediately', False)
    if f._periodic_immediate:
        f._periodic_last_run = None
    else:
        f._periodic_last_run = timeutils.utcnow()
    return f
def soft_delete(self, session=None):
    """Mark this object as deleted without removing the row.

    Sets ``deleted`` to the row's id, stamps ``deleted_at`` with the
    current time, and saves through the given session.
    """
    self.deleted_at = timeutils.utcnow()
    self.deleted = self.id
    self.save(session=session)
def start_resource(self, data):
    """Notify the master service that this order's resource was started."""
    stamp = timeutils.strtime(timeutils.utcnow())
    action_time = gringutils.format_datetime(stamp)
    remarks = '%s Has Been Started' % data.resource_type.capitalize()
    self.master_api.resource_started(
        request.context, self._id, action_time, remarks)
def soft_delete(self, synchronize_session='evaluate'):
    """Soft-delete every row matched by this query.

    Sets ``deleted`` to each row's id, leaves ``updated_at`` unchanged,
    and stamps ``deleted_at`` with the current time.
    """
    values = {
        'deleted': literal_column('id'),
        'updated_at': literal_column('updated_at'),
        'deleted_at': timeutils.utcnow(),
    }
    return self.update(values, synchronize_session=synchronize_session)