def delete_escalation_policy(self, escalation_policy_id, domain_id):
    """Delete an escalation policy, refusing to remove the domain default.

    Raises:
        ERROR_DEFAULT_ESCALATION_POLICY_NOT_ALLOW_DELETION: if the target
            policy is flagged as the default one.
    """
    policy_vo: EscalationPolicy = self.get_escalation_policy(escalation_policy_id, domain_id)

    # The default policy must never be deleted.
    if policy_vo.is_default:
        raise ERROR_DEFAULT_ESCALATION_POLICY_NOT_ALLOW_DELETION(escalation_policy_id=escalation_policy_id)

    # Invalidate the cached condition entry before removing the record.
    cache.delete(f'escalation-policy-condition:{domain_id}:{escalation_policy_id}')
    policy_vo.delete()
def update_webhook_by_vo(self, params, webhook_vo):
    """Apply *params* to *webhook_vo*, registering a transactional rollback
    and invalidating the cached webhook data.
    """
    # Snapshot current state before mutating, for rollback purposes.
    previous_data = webhook_vo.to_dict()

    def _rollback(old_data):
        _LOGGER.info(f'[update_webhook_by_vo._rollback] Revert Data : '
                     f'{old_data["webhook_id"]}')
        webhook_vo.update(old_data)

    self.transaction.add_rollback(_rollback, previous_data)

    updated_vo: Webhook = webhook_vo.update(params)
    cache.delete(f'webhook-data:{updated_vo.webhook_id}')
    return updated_vo
def update_escalation_policy_by_vo(self, params, escalation_policy_vo):
    """Apply *params* to *escalation_policy_vo*, registering a transactional
    rollback and invalidating the cached condition entry.
    """
    # Snapshot current state before mutating, for rollback purposes.
    previous_data = escalation_policy_vo.to_dict()

    def _rollback(old_data):
        _LOGGER.info(f'[update_escalation_policy_by_vo._rollback] Revert Data : '
                     f'{old_data["escalation_policy_id"]}')
        escalation_policy_vo.update(old_data)

    self.transaction.add_rollback(_rollback, previous_data)

    updated_vo: EscalationPolicy = escalation_policy_vo.update(params)
    cache.delete(f'escalation-policy-condition:{updated_vo.domain_id}:{updated_vo.escalation_policy_id}')
    return updated_vo
def create(self, params):
    """Create event

    Args:
        params (dict): {
            'webhook_id': 'str',
            'access_key': 'str',
            'data': 'str'
        }

    Returns:
        event_vo (object)
    """
    # Resolve webhook configuration and verify the caller's access key
    # before doing any plugin work.
    webhook_data = self._get_webhook_data(params['webhook_id'])
    self._check_access_key(params['access_key'], webhook_data['access_key'])
    self._check_webhook_state(webhook_data)

    try:
        webhook_plugin_mgr: WebhookPluginManager = self.locator.get_manager('WebhookPluginManager')
        endpoint, updated_version = webhook_plugin_mgr.get_webhook_plugin_endpoint({
            'plugin_id': webhook_data['plugin_id'],
            'version': webhook_data['plugin_version'],
            'upgrade_mode': webhook_data['plugin_upgrade_mode']
        }, webhook_data['domain_id'])

        # If the plugin manager resolved a newer version, upgrade the webhook
        # record and drop the now-stale cached webhook data.
        if updated_version:
            _LOGGER.debug(f'[create] upgrade plugin version: {webhook_data["plugin_version"]} -> {updated_version}')
            webhook_vo: Webhook = self.webhook_mgr.get_webhook(webhook_data['webhook_id'], webhook_data['domain_id'])
            webhook_plugin_mgr.upgrade_webhook_plugin_version(webhook_vo, endpoint, updated_version)
            cache.delete(f'webhook-data:{webhook_data["webhook_id"]}')

        webhook_plugin_mgr.initialize(endpoint)
        response = webhook_plugin_mgr.parse_event(webhook_data['plugin_options'], params['data'])

    except Exception as e:
        # Wrap non-project exceptions so e.message is always available, then
        # fall back to generating a synthetic error event instead of failing.
        if not isinstance(e, ERROR_BASE):
            e = ERROR_UNKNOWN(message=str(e))
        _LOGGER.error(f'[create] Event parsing failed: {e.message}', exc_info=True)
        response = self._create_error_event(webhook_data['name'], e.message)

    # Persist one event per parsed result (or the single error event).
    for event_data in response.get('results', []):
        # TODO: Check event data using schematics
        _LOGGER.debug(f'[Event.create] event_data: {event_data}')
        self._create_event(event_data, params['data'], webhook_data)
def _release_lock(self, domain_id, name):
    """Best-effort release of the supervisor lock key.

    Returns:
        The result of cache.delete on success, False on any failure.
    """
    # Build the key outside the try block: in the original code the key was
    # assigned inside `try`, so a failure on that first statement would make
    # the except handler raise NameError on the unbound `key`.
    key = f"supervisor:{domain_id}:{name}"
    try:
        return cache.delete(key)
    except Exception as e:
        _LOGGER.debug(f'[_release_lock] {key}, {e}')
        return False
def delete_project_alert_config(self, project_id, domain_id):
    """Delete a project alert config and invalidate its cached entries."""
    project_alert_config_vo: ProjectAlertConfig = self.get_project_alert_config(
        project_id, domain_id)

    cache.delete(f'project-alert-options:{domain_id}:{project_id}')
    cache.delete(f'escalation-policy-info:{domain_id}:{project_id}')
    # Fixed key: was 'auto-recovery::{domain_id}:...' (doubled colon), which
    # did not match the 'auto-recovery:{domain_id}:{project_id}' key used by
    # update_project_alert_config_by_vo, so the cached entry was never purged.
    cache.delete(f'auto-recovery:{domain_id}:{project_id}')
    project_alert_config_vo.delete()
def _delete_job_task_stat_cache(self, job_id, job_task_id, domain_id):
    """Delete the per-task statistic cache entries.

    Cache keys (one per kind: CREATED | UPDATED | FAILURE):
        job_task_stat:<domain_id>:<job_id>:<job_task_id>:CREATED = N
        job_task_stat:<domain_id>:<job_id>:<job_task_id>:UPDATED = M
        job_task_stat:<domain_id>:<job_id>:<job_task_id>:FAILURE = X

    (Docstring fixed: the keys include <domain_id>, which the previous
    docstring omitted, and the '<job_task_id<' typo is corrected.)
    """
    try:
        # Same behavior as the three copy-pasted deletes: stop on first error.
        for kind in ('CREATED', 'UPDATED', 'FAILURE'):
            cache.delete(f'job_task_stat:{domain_id}:{job_id}:{job_task_id}:{kind}')
    except Exception as e:
        _LOGGER.error(f'[_delete_job_task_stat_cache] {e}')
def update_project_alert_config_by_vo(self, params, project_alert_config_vo):
    """Apply *params* to *project_alert_config_vo*, registering a rollback
    and invalidating every cache entry derived from this config.
    """
    # Snapshot current state before mutating, for rollback purposes.
    previous_data = project_alert_config_vo.to_dict()

    def _rollback(old_data):
        _LOGGER.info(
            f'[update_project_alert_config_by_vo._rollback] Revert Data : '
            f'{old_data["project_id"]}')
        project_alert_config_vo.update(old_data)

    self.transaction.add_rollback(_rollback, previous_data)

    updated_vo: ProjectAlertConfig = project_alert_config_vo.update(params)

    # Invalidate all derived cache entries for this project.
    scope = f'{updated_vo.domain_id}:{updated_vo.project_id}'
    cache.delete(f'project-alert-options:{scope}')
    cache.delete(f'escalation-policy-info:{scope}')
    cache.delete(f'auto-recovery:{scope}')
    return updated_vo
def delete_webhook(self, webhook_id, domain_id):
    """Remove a webhook and drop its cached plugin data."""
    vo: Webhook = self.get_webhook(webhook_id, domain_id)
    cache.delete(f'webhook-data:{vo.webhook_id}')
    vo.delete()
def _set_refresh_token_cache(self, new_refresh_key):
    """Rotate the cached refresh token: drop the old key, register the new."""
    # Nothing to do when no cache backend is configured.
    if not cache.is_set():
        return
    if self.old_refresh_key:
        cache.delete(f'refresh-token:{self.old_refresh_key}')
    cache.set(f'refresh-token:{new_refresh_key}', '', expire=self.CONST_REFRESH_TIMEOUT)
def _rollback(alert_id, access_key):
    # Transaction rollback: discard the callback cache entry for this alert.
    cache_key = f'alert-notification-callback:{alert_id}:{access_key}'
    _LOGGER.info(f'[_make_callback_url._rollback] '
                 f'Delete cache : {alert_id} '
                 f'({access_key})')
    cache.delete(cache_key)
def _watchdog_job_task_stat(self, param):
    """WatchDog for cache stat

    1) Update to DB
    2) Update JobTask status

    param = {
        'job_id': job_id,
        'job_task_id': job_task_id,
        'domain_id': domain_id,
        'total_count': total_count
    }
    """
    # Wait a little: a working task may still be updating the counters.
    _LOGGER.debug(
        f'[_watchdog_job_task_stat] WatchDog Start: {param["job_task_id"]}'
    )
    time.sleep(WATCHDOG_WAITING_TIME)

    domain_id = param['domain_id']
    job_id = param['job_id']
    job_task_id = param['job_task_id']

    # Pop each counter from the cache, defaulting to 0.
    # Fixes: (1) bare `except:` replaced with `except Exception`;
    # (2) cache.get may return None for a missing key instead of raising,
    # which previously leaked None into failure_count and made the
    # `> 0` comparison raise TypeError, silently skipping the state update.
    counts = {}
    for kind in ('CREATED', 'UPDATED', 'FAILURE'):
        key = f'job_task_stat:{domain_id}:{job_id}:{job_task_id}:{kind}'
        try:
            counts[kind] = cache.get(key) or 0
            cache.delete(key)
        except Exception:
            counts[kind] = 0

    # Update to DB
    stat_result = {
        'total_count': param['total_count'],
        'created_count': counts['CREATED'],
        'updated_count': counts['UPDATED'],
        'failure_count': counts['FAILURE']
    }
    _LOGGER.debug(f'[_watchdog_job_task_stat] stat: {stat_result}')

    try:
        if stat_result['failure_count'] > 0:
            job_task_state = 'FAILURE'
        else:
            job_task_state = 'SUCCESS'
        self._update_job_task(job_task_id, job_task_state, domain_id, stat=stat_result)
    except Exception as e:
        # Previously swallowed silently; log so a failed state update is visible.
        _LOGGER.error(f'[_watchdog_job_task_stat] {e}', exc_info=True)
    finally:
        # Close remained task
        self.job_mgr.decrease_remained_tasks(job_id, domain_id)