def __init__(self, parent):
    """Initialize the periodic alarm-update worker thread.

    :param parent: the manager that owns this thread; kept so the
        worker can call back into it.
    """
    super(PeriodicAlarmUpdate, self).__init__()
    self.parent = parent
    # Admin context used for all DB/RPC operations from this thread.
    self.context = context.get_admin_context()
    # Event used to signal the run loop to stop.
    self._stop = threading.Event()
    # Seconds between alarm audit passes (from snmp config group).
    self.interval = CONF.snmp.alarm_audit_interval_time
    # Timestamp of the last system-wide update; naive local time.
    # NOTE(review): presumably compared against datetime.datetime.now()
    # elsewhere — confirm naive vs. aware consistency.
    self.system_last_update = datetime.datetime.now()
def __init__(self, *args, **kwargs):
    """Initialize the alarm aggregate manager service.

    Registers under the service name "alarm_aggregate_manager" and
    immediately starts the background alarm-update thread.
    """
    LOG.debug(_('AlarmAggregateManager initialization...'))
    super(AlarmAggregateManager, self).\
        __init__(service_name="alarm_aggregate_manager", *args, **kwargs)
    # Admin context for privileged operations performed by this manager.
    self.context = context.get_admin_context()
    # Background worker that periodically pushes alarm updates; started
    # here, so this manager is live as soon as construction completes.
    self.alarm_update_thread = PeriodicAlarmUpdate(self)
    self.alarm_update_thread.start()
def send_notification(self, system):
    """Request an alarm-summary update for *system* over RPC.

    On success, records the time of this update in
    ``self.system_last_updates``. On any RPC failure the error is
    logged (with traceback) and the last-update timestamp is left
    unchanged so the next audit pass retries.

    :param system: identifier of the system whose alarm summary
        should be refreshed.
    """
    # Lazy %-style args: the string is only formatted if the debug
    # level is enabled (was eagerly formatted with "%" before).
    LOG.debug("Sending update request for %s", system)
    try:
        ctx = context.get_admin_context()
        self.rpc_client.update_alarm_summary(ctx, system)
    except Exception:
        # LOG.exception preserves the traceback, which LOG.error with a
        # pre-formatted string silently discarded.
        LOG.exception('Failed to send update for system %s', system)
        return
    self.system_last_updates[system] = datetime.datetime.now()
def service_registry_cleanup(self):
    """Remove registry entries for engines that stopped heartbeating.

    Any service record (other than our own) whose ``updated_at`` is
    older than twice the configured report interval is assumed dead
    and deleted from the registry.
    """
    ctx = context.get_admin_context()
    # A peer gets two full report intervals before being declared dead.
    time_window = 2 * cfg.CONF.report_interval
    for svc in service_obj.Service.get_all(ctx):
        # Never reap our own registry entry.
        if svc['id'] == self.engine_id:
            continue
        if not timeutils.is_older_than(svc['updated_at'], time_window):
            continue
        # Heartbeat is stale: assume the peer engine has died.
        LOG.info('Service %s was aborted', svc['id'])
        service_obj.Service.delete(ctx, svc['id'])
def service_registry_report(self):
    """Heartbeat this engine's entry in the service registry.

    Attempts to refresh the existing record; if none exists yet (the
    update returns None), creates one. Any failure is logged but never
    propagated, so a transient DB error cannot kill the caller's loop.
    """
    ctx = context.get_admin_context()
    try:
        # update() returns None when no record exists yet — first
        # report after startup, so register ourselves.
        if service_obj.Service.update(ctx, self.engine_id) is None:
            service_obj.Service.create(ctx, self.engine_id, self.host,
                                       'dcorch-engine', self.topic)
    except Exception as ex:
        LOG.error('Service %(service_id)s update failed: %(error)s',
                  {'service_id': self.engine_id, 'error': ex})
def __init__(self, *args, **kwargs):
    """Initialize the quota manager service.

    Registers under the service name "quota_manager" and prepares the
    endpoint cache and the audit serialization lock.
    """
    LOG.debug(_('QuotaManager initialization...'))
    super(QuotaManager, self).__init__(service_name="quota_manager",
                                       *args, **kwargs)
    # Admin context for privileged quota operations.
    self.context = context.get_admin_context()
    # Cache of service endpoints to avoid repeated catalog lookups.
    self.endpoints = endpoint_cache.EndpointCache()
    # This lock is used to ensure we only have one quota sync audit at
    # a time. For better efficiency we could use per-project locks
    # and/or the ReaderWriterLock from the "fastener" package.
    self.quota_audit_lock = threading.Lock()
def __init__(self, app, conf):
    """Initialize the patch API proxy controller.

    :param app: the WSGI application being wrapped.
    :param conf: controller configuration (passed by the pipeline;
        not read directly here).
    """
    super(PatchAPIController, self).__init__(app)
    # Admin context for dcmanager RPC calls triggered by patch actions.
    self.ctxt = context.get_admin_context()
    # Fallback dispatcher for requests with no special handling.
    self._default_dispatcher = APIDispatcher(app)
    self.rpc_client = dcmanager_rpc_client.ManagerClient()
    # Maps patch action -> handler method.
    # NOTE(review): "response_hander_map" looks like a typo for
    # "handler", but the attribute name is part of this class's
    # interface (other controllers use the same spelling), so it is
    # left as-is.
    self.response_hander_map = {
        proxy_consts.PATCH_ACTION_UPLOAD: self.patch_upload_req,
        proxy_consts.PATCH_ACTION_UPLOAD_DIR: self.patch_upload_dir_req,
        proxy_consts.PATCH_ACTION_DELETE: self.patch_delete_req,
        # apply/commit/remove share the generic notify handler.
        proxy_consts.PATCH_ACTION_APPLY: self.notify,
        proxy_consts.PATCH_ACTION_COMMIT: self.notify,
        proxy_consts.PATCH_ACTION_REMOVE: self.notify,
    }
def __init__(self, subcloud_engine):
    """Initialize common state for a per-endpoint sync thread.

    Subclasses are expected to fill in ``endpoint_type``,
    ``sync_handler_map`` and ``audit_resources``.

    :param subcloud_engine: engine that owns this sync thread.
    """
    super(SyncThread, self).__init__()
    self.endpoint_type = None               # endpoint type in keystone
    self.subcloud_engine = subcloud_engine  # engine that owns this obj
    self.thread = None                      # thread running sync()
    # Separate thread used for audit passes, if any.
    self.audit_thread = None
    self.status = STATUS_NEW                # protected by condition lock
    self.audit_status = None                # todo: needed?
    self.condition = threading.Condition()  # used to wake up the thread
    # Admin context for sync/audit operations.
    self.ctxt = context.get_admin_context()
    # Maps resource type -> sync handler; populated by subclasses.
    self.sync_handler_map = {}
    # Region treated as the master copy for this sync.
    self.master_region_name = consts.CLOUD_0
    # Resource types audited by this thread; populated by subclasses.
    self.audit_resources = []
    # Per-subcloud prefix included in every log line from this thread.
    self.log_extra = {
        "instance": self.subcloud_engine.subcloud.region_name + ": "}
    self.dcmanager_rpc_client = dcmanager_rpc_client.ManagerClient()
    # Sync state reported to dcmanager; unknown until first audit.
    self.sync_status = dcmanager_consts.SYNC_STATUS_UNKNOWN
    self.subcloud_managed = False
    # Keystone sessions/clients; created lazily, not in __init__.
    self.sc_admin_session = None
    self.admin_session = None
    self.ks_client = None
def init_gsm(self):
    """Create the generic sync manager and load its state from the DB."""
    admin_context = context.get_admin_context()
    # Assign first so the attribute exists even if DB loading fails.
    self.gsm = GenericSyncManager()
    self.gsm.init_from_db(admin_context)
def __init__(self, app, conf):
    """Initialize the base API proxy controller.

    :param app: the WSGI application being wrapped.
    :param conf: controller configuration (passed by the pipeline;
        not read directly here).
    """
    super(APIController, self).__init__(app)
    # Admin context for engine RPC calls made on behalf of requests.
    self.ctxt = k_context.get_admin_context()
    # Fallback dispatcher for requests with no special handling.
    self._default_dispatcher = APIDispatcher(app)
    self.rpc_client = rpc_client.EngineClient()
    # Maps action -> handler; subclasses populate this.
    # NOTE(review): "response_hander_map" looks like a typo for
    # "handler", but the attribute name is shared with subclasses and
    # sibling controllers, so it is left as-is.
    self.response_hander_map = {}