Code Example #1
    def _check_access_key(alert_id, access_key):
        # Resolve the callback access key to a domain_id; a missing cache entry
        # means the key is invalid or has expired.
        domain_id = cache.get(f'alert-notification-callback:{alert_id}:{access_key}')

        if domain_id is None:
            raise ERROR_PERMISSION_DENIED()

        return domain_id
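
A hedged sketch of the producing side (not shown in this example): the notification sender would have to register the key under the same cache layout before handing out the callback URL. The helper name, the TTL, and the cache.set(key, value, expire=...) signature are assumptions, not code from this project.

import secrets

def _create_callback_access_key(alert_id, domain_id):
    # Hypothetical counterpart: generate a random key and register it so that
    # _check_access_key() can later resolve it back to the domain_id.
    # cache.set(key, value, expire=...) is an assumed signature.
    access_key = secrets.token_hex(16)
    cache.set(f'alert-notification-callback:{alert_id}:{access_key}',
              domain_id, expire=600)  # illustrative 10-minute TTL
    return access_key
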
Code Example #2
 def _get_lock(self, domain_id, name):
     key = f"supervisor:{domain_id}:{name}"
     try:
         return cache.get(key)
     except Exception as e:
         # Cache backend may be unavailable; report "no lock held"
         _LOGGER.debug(f'[_get_lock] {key}, {e}')
         return False
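
For context, a minimal sketch of how the lock might be taken before _get_lock is consulted. The method name _set_lock, the TTL, and the expire argument to cache.set are assumptions:

 def _set_lock(self, domain_id, name, ttl=60):
     # Hypothetical counterpart sketch: register the supervisor lock with a TTL
     # so a crashed holder cannot block forever. cache.set signature is assumed.
     key = f"supervisor:{domain_id}:{name}"
     try:
         return cache.set(key, 1, expire=ttl)
     except Exception as e:
         _LOGGER.debug(f'[_set_lock] {key}, {e}')
         return False
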
Code Example #3
File: filter_manager.py Project: whdalsrnt/inventory
 def _get_filer_cache(self, collector_id, secret_id):
     key = f'collector-filter:{collector_id}:{secret_id}'
     try:
         data = cache.get(key)
         _LOGGER.debug(f'[cache_filter] {key} : {data}')
         return data
     except Exception:
         # Cache backend may be disabled (no_cache); fall back to no filter
         return None
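
The matching writer is not shown on this page; under the same key layout it might look like the following sketch (method name, TTL, and cache.set signature are assumptions):

 def _set_filter_cache(self, collector_id, secret_id, data, ttl=600):
     # Hypothetical counterpart sketch: store the collector filter that
     # _get_filer_cache() reads back.
     key = f'collector-filter:{collector_id}:{secret_id}'
     try:
         cache.set(key, data, expire=ttl)
     except Exception:
         # Cache backend may be disabled (no_cache); skip caching
         pass
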
Code Example #4
    def check_refreshable(self, refresh_key, ttl):
        # With REFRESH_ONCE enabled, the refresh key must still be registered
        # in the cache; a missing entry means it was already used or revoked.
        if self.CONST_REFRESH_ONCE:
            if cache.is_set() and cache.get(f'refresh-token:{refresh_key}') is None:
                raise ERROR_INVALID_REFRESH_TOKEN()

        if ttl == 0:
            raise ERROR_REFRESH_COUNT()

        self.is_authenticated = True
        self.old_refresh_key = refresh_key
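
A sketch of the issuing side that would make the check above pass (hypothetical; only check_refreshable appears in this example). It registers the new refresh key and drops the old one, assuming cache.set/cache.delete are available and an illustrative 30-minute expiry:

    def _register_refresh_key(self, new_refresh_key, expire=1800):
        # Hypothetical sketch: register the new key so check_refreshable() finds
        # it, and drop the previous one so each refresh key works only once.
        if self.CONST_REFRESH_ONCE and cache.is_set():
            cache.set(f'refresh-token:{new_refresh_key}', 1, expire=expire)
            if getattr(self, 'old_refresh_key', None):
                cache.delete(f'refresh-token:{self.old_refresh_key}')
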
Code Example #5
    def collecting_resources(self, plugin_info, secret_id, filters, domain_id,
                             **kwargs):
        """ This is single call of real plugin with endpoint

        All parameter should be primitive type(Json), not object.
        Because this method will be executed by worker.
        Args:
            plugin_info(dict)
            kwargs: {
                'job_id': 'str',
                'use_cache': bool
            }
        """

        # Check the job state first; if the job has been canceled, stop processing
        job_task_id = kwargs['job_task_id']

        job_id = kwargs['job_id']
        job_vo = self.job_mgr.get(job_id, domain_id)  # validates that the job exists
        if self.job_mgr.should_cancel(job_id, domain_id):
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
            self._update_job_task(job_task_id, 'FAILURE', domain_id)
            raise ERROR_COLLECT_CANCELED(job_id=job_id)

        # Create proper connector
        connector = self._get_connector(plugin_info, domain_id)

        collect_filter = filters
        try:
            # use_cache
            use_cache = kwargs['use_cache']
            if use_cache:
                key = f'collector-filter:{kwargs["collector_id"]}:{secret_id}'
                value = cache.get(key)
                _LOGGER.debug(
                    f'[collecting_resources] cache -> {key}: {value}')
                if value:
                    collect_filter.update(value)
            else:
                _LOGGER.debug('[collecting_resources] no cache mode')

        except Exception as e:
            _LOGGER.debug(f'[collecting_resources] cache error, {e}')

        try:
            secret_mgr = self.locator.get_manager('SecretManager')
            secret_data = secret_mgr.get_secret_data(secret_id, domain_id)
            self.secret = secret_mgr.get_secret(secret_id, domain_id)

        except ERROR_BASE as e:
            _LOGGER.error(
                f'[collecting_resources] fail to get secret_data: {secret_id}')
            self.job_task_mgr.add_error(job_task_id, domain_id, e.error_code,
                                        e.message, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
            raise ERROR_COLLECTOR_SECRET(plugin_info=plugin_info,
                                         param=secret_id)

        except Exception as e:
            _LOGGER.error(
                f'[collecting_resources] fail to get secret_data: {secret_id}')
            self.job_task_mgr.add_error(job_task_id, domain_id,
                                        'ERROR_COLLECTOR_SECRET', e, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
            raise ERROR_COLLECTOR_SECRET(plugin_info=plugin_info,
                                         param=secret_id)

        try:
            # Update JobTask (In-progress)
            self._update_job_task(job_task_id,
                                  'IN_PROGRESS',
                                  domain_id,
                                  secret=self.secret)
        except Exception as e:
            _LOGGER.error(
                f'[collecting_resources] fail to update job_task: {e}')

        ##########################################################
        # Call method
        ##########################################################
        try:
            _LOGGER.debug('[collect] Before call collect')
            results = connector.collect(plugin_info['options'],
                                        secret_data.data, collect_filter)
            _LOGGER.debug(f'[collect] generator: {results}')

        except ERROR_BASE as e:
            _LOGGER.error(
                f'[collecting_resources] fail to collect resources: {e}')
            self.job_task_mgr.add_error(job_task_id, domain_id, e.error_code,
                                        e.message, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
            raise ERROR_COLLECTOR_COLLECTING(plugin_info=plugin_info,
                                             filters=collect_filter)

        except Exception as e:
            self.job_task_mgr.add_error(job_task_id, domain_id,
                                        'ERROR_COLLECTOR_COLLECTING', e, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
            raise ERROR_COLLECTOR_COLLECTING(plugin_info=plugin_info,
                                             filters=collect_filter)

        ##############################################################
        # Processing Result
        # Type 1: use_db_queue == False, processing synchronously
        # Type 2: use_db_queue == True, processing asynchronously
        ##############################################################
        JOB_TASK_STATE = 'SUCCESS'
        stat = {}
        ERROR = False
        plugin_id = plugin_info.get('plugin_id', None)
        try:
            stat = self._process_results(results, job_id, job_task_id,
                                         kwargs['collector_id'], secret_id,
                                         plugin_id, domain_id)
            if stat['failure_count'] > 0:
                JOB_TASK_STATE = 'FAILURE'

        except ERROR_BASE as e:
            _LOGGER.error(f'[collecting_resources] {e}', exc_info=True)
            self.job_task_mgr.add_error(job_task_id, domain_id, e.error_code,
                                        e.message, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            JOB_TASK_STATE = 'FAILURE'
            ERROR = True

        except Exception as e:
            _LOGGER.error(f'[collecting_resources] {e}', exc_info=True)
            self.job_task_mgr.add_error(job_task_id, domain_id,
                                        'ERROR_COLLECTOR_COLLECTING', e, {
                                            'resource_type': 'secret.Secret',
                                            'resource_id': secret_id
                                        })
            JOB_TASK_STATE = 'FAILURE'
            ERROR = True

        finally:
            # Update the collection state of resources not found in this run
            cleanup_mode = self._need_update_collection_state(
                plugin_info, filters)
            _LOGGER.debug(
                f'[collecting_resources] #### cleanup support {cleanup_mode}')
            if cleanup_mode and JOB_TASK_STATE == 'SUCCESS':
                resource_types = self._get_supported_resource_types(
                    plugin_info)
                disconnected_count = 0
                deleted_count = 0
                for resource_type in resource_types:
                    disconnected, deleted = self._update_colleciton_state(
                        resource_type, secret_id, kwargs['collector_id'],
                        job_id, domain_id)
                    disconnected_count += disconnected
                    deleted_count += deleted
                _LOGGER.debug(
                    f'[collecting_resources] disconnected, delete => {disconnected_count}, {deleted_count}'
                )
                stat['disconnected_count'] = disconnected_count
                stat['deleted_count'] = deleted_count
            else:
                _LOGGER.debug(
                    f'[collecting_resources] skip garbage_collection, {cleanup_mode}, {JOB_TASK_STATE}'
                )

            if self.use_db_queue and not ERROR:
                # WatchDog will finalize the task;
                # if an error occurred, there is no data to process
                pass
            else:
                if self.use_db_queue:
                    # delete cache
                    self._delete_job_task_stat_cache(job_id, job_task_id,
                                                     domain_id)
                # Update Statistics of JobTask
                self._update_job_task(job_task_id,
                                      JOB_TASK_STATE,
                                      domain_id,
                                      stat=stat)
                # Update Job
                self.job_mgr.decrease_remained_tasks(kwargs['job_id'],
                                                     domain_id)

        return True
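
The _delete_job_task_stat_cache() call above is not shown on this page; given the job_task_stat key layout used by the watchdog in Example #6 below, an inferred sketch could be:

    def _delete_job_task_stat_cache(self, job_id, job_task_id, domain_id):
        # Inferred sketch, not the project's actual implementation: clear the
        # per-state counters so the WatchDog does not double-count them.
        for state in ('CREATED', 'UPDATED', 'FAILURE'):
            try:
                cache.delete(f'job_task_stat:{domain_id}:{job_id}:{job_task_id}:{state}')
            except Exception as e:
                _LOGGER.debug(f'[_delete_job_task_stat_cache] {e}')
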
Code Example #6
    def _watchdog_job_task_stat(self, param):
        """ WatchDog for cache stat
        1) Update to DB
        2) Update JobTask status
        param = {
            'job_id': job_id,
            'job_task_id': job_task_id,
            'domain_id': domain_id,
            'total_count': total_count
            }
        """
        # Wait a little, may be working task exist
        _LOGGER.debug(
            f'[_watchdog_job_task_stat] WatchDog Start: {param["job_task_id"]}'
        )
        time.sleep(WATCHDOG_WAITING_TIME)
        domain_id = param['domain_id']
        job_id = param['job_id']
        job_task_id = param['job_task_id']

        def _pop_stat(state):
            # Read and clear one per-state counter; default to 0 when the key
            # is missing or the cache backend raises.
            key = f'job_task_stat:{domain_id}:{job_id}:{job_task_id}:{state}'
            try:
                value = cache.get(key)
                cache.delete(key)
                return value or 0
            except Exception:
                return 0

        value_created = _pop_stat('CREATED')
        value_updated = _pop_stat('UPDATED')
        value_failure = _pop_stat('FAILURE')
        # Update to DB
        stat_result = {
            'total_count': param['total_count'],
            'created_count': value_created,
            'updated_count': value_updated,
            'failure_count': value_failure
        }
        _LOGGER.debug(f'[_watchdog_job_task_stat] stat: {stat_result}')
        try:
            if stat_result['failure_count'] > 0:
                JOB_TASK_STATE = 'FAILURE'
            else:
                JOB_TASK_STATE = 'SUCCESS'
            self._update_job_task(job_task_id,
                                  JOB_TASK_STATE,
                                  domain_id,
                                  stat=stat_result)
        except Exception as e:
            # Log and fall through; the remained-task counter must still be decreased
            _LOGGER.error(f'[_watchdog_job_task_stat] fail to update job_task: {e}')
        finally:
            # Close remained task
            self.job_mgr.decrease_remained_tasks(job_id, domain_id)
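
The counters this watchdog reads have to be incremented while resources are processed. A hedged producer sketch, assuming the cache module exposes an atomic increment (if it does not, a get/set pair on the same keys would be needed):

    def _increase_job_task_stat(self, job_id, job_task_id, domain_id, state):
        # Hypothetical sketch: bump one per-state counter ('CREATED', 'UPDATED'
        # or 'FAILURE') under the same key layout the watchdog reads.
        key = f'job_task_stat:{domain_id}:{job_id}:{job_task_id}:{state}'
        try:
            cache.increment(key)  # assumed API; availability depends on the backend
        except Exception as e:
            _LOGGER.debug(f'[_increase_job_task_stat] {key}, {e}')
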
Code Example #7
    def collecting_resources(self, plugin_info, secret_id, filters, domain_id,
                             **kwargs):
        """ This is single call of real plugin with endpoint

        All parameter should be primitive type(Json), not object.
        Because this method will be executed by worker.
        Args:
            plugin_info(dict)
            kwargs: {
                'job_id': 'str',
                'use_cache': bool
            }
        """

        # Check the job state first; if the job has been canceled, stop processing
        job_mgr = self.locator.get_manager('JobManager')
        if job_mgr.is_canceled(kwargs['job_id'], domain_id):
            raise ERROR_COLLECT_CANCELED(job_id=kwargs['job_id'])

        # Create proper connector
        connector = self._get_connector(plugin_info, domain_id)

        collect_filter = filters
        try:
            # use_cache
            use_cache = kwargs['use_cache']
            if use_cache:
                key = f'collector-filter:{kwargs["collector_id"]}:{secret_id}'
                value = cache.get(key)
                _LOGGER.debug(
                    f'[collecting_resources] cache -> {key}: {value}')
                if value:
                    collect_filter.update(value)
            else:
                _LOGGER.debug('[collecting_resources] no cache mode')

        except Exception as e:
            _LOGGER.debug(f'[collecting_resources] cache error, {e}')

        try:
            secret_mgr = self.locator.get_manager('SecretManager')
            secret_data = secret_mgr.get_secret_data(secret_id, domain_id)
            self.secret = secret_mgr.get_secret(secret_id, domain_id)
        except Exception as e:
            _LOGGER.error(
                f'[collecting_resources] fail to get secret_data: {secret_id}, {e}')
            raise ERROR_COLLECTOR_SECRET(plugin_info=plugin_info,
                                         param=secret_id)

        # Call method
        try:
            results = connector.collect(plugin_info['options'],
                                        secret_data.data, collect_filter)
            _LOGGER.debug(f'[collect] generator: {results}')
        except Exception as e:
            _LOGGER.error(f'[collecting_resources] fail to collect: {e}')
            raise ERROR_COLLECTOR_COLLECTING(plugin_info=plugin_info,
                                             filters=collect_filter)

        try:
            self._process_results(results, kwargs['job_id'],
                                  kwargs['collector_id'], secret_id, domain_id)
        except Exception as e:
            _LOGGER.error(f'[collecting_resources] {e}')

        finally:
            job_mgr.decrease_remained_tasks(kwargs['job_id'], domain_id)

        return True
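
A minimal invocation sketch for this variant (all identifier values are placeholders, and mgr stands in for however the manager instance is obtained):

# Illustrative call from a worker; only the kwargs this method actually
# reads are passed (job_id, collector_id, use_cache).
mgr.collecting_resources(
    plugin_info={'plugin_id': 'plugin-example-collector', 'options': {}},
    secret_id='secret-12345',
    filters={},
    domain_id='domain-12345',
    job_id='job-12345',
    collector_id='collector-12345',
    use_cache=False,
)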