Example #1
 def push_task(self):
     # Create Task
     tasks = self.create_task()
     _LOGGER.debug(f'[push_task] number of tasks: {len(tasks)}')
     for task in tasks:
         try:
             validate(task, schema=SPACEONE_TASK_SCHEMA)
             json_task = json.dumps(task)
             _LOGGER.debug(f'[push_task] Task schema: {task}')
             queue.put(self.queue, json_task)
         except Exception as e:
             _LOGGER.error(f'[push_task] invalid task schema: {task}, {e}')
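
For reference, each element returned by create_task() above has to pass SPACEONE_TASK_SCHEMA validation. A minimal sketch of such a task, assuming the same field layout as the task dictionaries built in Examples #3 and #5 (the names and values below are placeholders, not taken from the real scheduler):

    # Hypothetical task as produced by self.create_task(); layout follows the
    # task dictionaries in Examples #3 and #5, values are placeholders only.
    sample_task = {
        'name': 'inventory_collect_schedule',    # placeholder task name
        'version': 'v1',
        'executionEngine': 'BaseWorker',
        'stages': [{
            'locator': 'SERVICE',
            'name': 'CollectorService',          # placeholder service class
            'metadata': {},                      # transaction metadata in practice
            'method': 'collect',
            'params': {
                'params': {
                    'collector_id': 'collector-3333',
                    'domain_id': 'dom-2222'
                }
            }
        }]
    }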
Example #2
 def push_task(self, schedule_id, interval_info):
     # schedule_id: schedule_2222
     # interval_info: {interval: 30, domain_id: dom-2222, collector_id: collector-3333}
     task = self._create_job_request(schedule_id, interval_info)
     _LOGGER.debug(f'[push_task] {task["name"]}')
     try:
         validate(task, schema=SPACEONE_TASK_SCHEMA)
         json_task = json.dumps(task)
         _LOGGER.debug(f'[push_task] Task schema: {task}')
         queue.put(self.queue, json_task)
     except Exception as e:
         _LOGGER.error(f'[push_task] invalid task schema: {task}, {e}')
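
The _create_job_request helper is not shown in this snippet. A minimal sketch of what it might return, assuming the stage layout used in Examples #3 and #5 (the task name, target service and method below are illustrative, not the real ones):

     def _create_job_request(self, schedule_id, interval_info):
         # Hypothetical sketch: build a SpaceONE task from an interval schedule.
         # Field layout mirrors Examples #3 and #5; names are placeholders.
         return {
             'name': 'inventory_collect_by_interval',
             'version': 'v1',
             'executionEngine': 'BaseWorker',
             'stages': [{
                 'locator': 'SERVICE',
                 'name': 'CollectorService',
                 'metadata': self.transaction.meta,
                 'method': 'collect',
                 'params': {
                     'params': {
                         'schedule_id': schedule_id,
                         'collector_id': interval_info['collector_id'],
                         'domain_id': interval_info['domain_id']
                     }
                 }
             }]
         }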
Example #3
    def list(self, params):
        """ List alert notes

        Args:
            params (dict): {}

        Returns:
            None
        """

        print(f"Call DomainService.list {str(params)}", self.transaction.id)
        """
        task_sample = {
            'name': 'monitoring_alert_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [{
                'locator': 'SERVICE',
                'name': 'DomainService',
                'metadata': self._metadata,
                'method': 'list',
                'params': {
                    'params': {
                        'from': 'monitoring_alert_scheduler'
                    }
                }
            }]
        }
        """

        task = {
            'name': 'monitoring_alert_schedule_recursive',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [{
                'locator': 'SERVICE',
                'name': 'DomainService',
                'metadata': self.transaction.meta,
                'method': 'list2',
                'params': {
                    'params': {
                        'from': 'DomainService.list'
                    }
                }
            }]
        }

        queue.put('monitoring_q', utils.dump_json(task))
Example #4
 def _create_db_update_task(self, res, param):
     """ Create Asynchronous Task
     """
     try:
         # Push Queue
         task = {
             'method': '_process_single_result',
             'res': res,
             'param': param,
             'meta': self.transaction.meta
         }
         json_task = json.dumps(task)
         queue.put(self.db_queue, json_task)
         return True
     except Exception as e:
         _LOGGER.error(f'[_create_db_update_task] {e}')
         return False
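
On the consuming side, the worker that reads from self.db_queue would decode the JSON payload and dispatch on its 'method' field. How the payload is popped from the queue is not shown in these examples; the sketch below only illustrates the dispatch step, and the method name _dispatch_db_update_task is hypothetical:

 def _dispatch_db_update_task(self, json_task):
     # Hypothetical consumer-side sketch: decode the payload pushed by
     # _create_db_update_task and call the named method with res/param.
     # In practice task['meta'] would also be used to restore the transaction.
     task = json.loads(json_task)
     handler = getattr(self, task['method'])   # e.g. _process_single_result
     return handler(task['res'], task['param'])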
Example #5
    def push_task(self, task_name, class_name, method, params):
        task = {
            'name': task_name,
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [{
                'locator': 'SERVICE',
                'name': class_name,
                'metadata': self.transaction.meta,
                'method': method,
                'params': {
                    'params': params
                }
            }]
        }

        queue.put('monitoring_q', utils.dump_json(task))
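
For instance, the hard-coded task of Example #3 could be produced through this generic push_task with a call like the following (argument values taken from that example):

    # Usage sketch: equivalent to the inline task built in Example #3.
    self.push_task(
        task_name='monitoring_alert_schedule_recursive',
        class_name='DomainService',
        method='list2',
        params={'from': 'DomainService.list'}
    )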
Example #6
 def _create_db_update_task_watchdog(self, total_count, job_id, job_task_id,
                                     domain_id):
     """ Create Asynchronous Task
     """
     try:
         # Push Queue
         param = {
             'job_id': job_id,
             'job_task_id': job_task_id,
             'domain_id': domain_id,
             'total_count': total_count
         }
         task = {
             'method': '_watchdog_job_task_stat',
             'res': {},
             'param': param,
             'meta': self.transaction.meta
         }
         json_task = json.dumps(task)
         queue.put(self.db_queue, json_task)
         return True
     except Exception as e:
         _LOGGER.error(f'[_create_db_update_task_watchdog] {e}')
         return False
Example #7
    def collect(self, params):
        """
        Args:
            params: {
                'collector_id': str
                'filter': dict
                'secret_id': str
                'collect_mode': str
                'use_cache': bool
                'domain_id': str
            }
        """
        collector_id = params['collector_id']
        domain_id = params['domain_id']
        # collect_mode = params.get('collect_mode', 'ALL')

        collector_vo = self.get_collector(collector_id, domain_id)
        collector_dict = collector_vo.to_dict()
        # Check collector state (if disabled, raise error)
        if collector_dict['state'] == 'DISABLED':
            raise ERROR_COLLECTOR_STATE(state='DISABLED')

        # TODO: get Queue from config

        # Create Job
        job_mgr = self.locator.get_manager('JobManager')
        created_job = job_mgr.create_job(collector_vo, params)

        # Create JobTask
        job_task_mgr = self.locator.get_manager('JobTaskManager')
        # Create Pipeline & Push
        try:
            secret_id = params.get('secret_id', None)
            plugin_mgr = self.locator.get_manager('PluginManager')
            secret_list = plugin_mgr.get_secrets_from_plugin_info(
                                                        collector_dict['plugin_info'],
                                                        domain_id,
                                                        secret_id
                                                    )
            _LOGGER.debug(f'[collector] number of secrets: {len(secret_list)}')
            if len(secret_list) == 0:
                # nothing to do
                job_mgr.make_success_by_vo(created_job)
                return created_job

        except Exception as e:
            _LOGGER.debug(f'[collect] failed in Secret Patch stage: {e}')
            job_mgr.add_error(created_job.job_id, domain_id,
                              'ERROR_COLLECT_INITIALIZE',
                              e,
                              params)
            job_mgr.make_error_by_vo(created_job)
            raise ERROR_COLLECT_INITIALIZE(stage='Secret Patch', params=params)

        # Apply Filter Format
        try:
            filter_mgr = self.locator.get_manager('FilterManager')
            filters = params.get('filter', {})
            plugin_info = collector_dict['plugin_info']
            collect_filter, secret_list = filter_mgr.get_collect_filter(filters,
                                                                        plugin_info,
                                                                        secret_list)
            _LOGGER.debug(f'[collector] filter from API: {filters}')
            _LOGGER.debug(f'[collector] filter for collector: {collect_filter}')
            _LOGGER.debug(f'[collector] number of secrets after filter transform: {len(secret_list)}')
        except Exception as e:
            _LOGGER.debug(f'[collect] failed on Filter Transform stage: {e}')
            job_mgr.add_error(created_job.job_id, domain_id,
                              'ERROR_COLLECT_INITIALIZE',
                              e,
                              params)
            job_mgr.make_error_by_vo(created_job)
            raise ERROR_COLLECT_INITIALIZE(stage='Filter Format', params=params)

        # Make in-progress
        try:
            job_mgr.make_inprogress_by_vo(created_job)
        except Exception as e:
            _LOGGER.debug(f'[collect] {e}')
            _LOGGER.debug(f'[collect] fail to change {collector_id} job state to in-progress')

        # Loop all secret_list
        self.secret_mgr = self.locator.get_manager('SecretManager')
        secret_len = len(secret_list)
        count = 0
        for secret_id in secret_list:
            count += 1
            # Do collect per secret
            try:
                # Make Pipeline, then push
                # parameter of pipeline
                job_mgr.increase_total_tasks_by_vo(created_job)
                job_mgr.increase_remained_tasks_by_vo(created_job)

                # Create JobTask
                secret_info = self._get_secret_info(secret_id, domain_id)
                job_task_vo = job_task_mgr.create_job_task(created_job, secret_info, domain_id)

                req_params = self._make_collecting_parameters(collector_dict=collector_dict,
                                                              secret_id=secret_id,
                                                              domain_id=domain_id,
                                                              job_vo=created_job,
                                                              job_task_vo=job_task_vo,
                                                              collect_filter=collect_filter,
                                                              params=params
                                                              )
                # Update Job
                _LOGGER.debug(f'[collect] params for collecting: {req_params}')
                # Make SpaceONE Template Pipeline
                task = self._create_task(req_params, domain_id)
                queue_name = self._get_queue_name(name='collect_queue')

                if task and queue_name:
                    # Push to queue
                    _LOGGER.warning(f'####### Asynchronous collect {count}/{secret_len} ########')
                    validate(task, schema=SPACEONE_TASK_SCHEMA)
                    json_task = json.dumps(task)
                    queue.put(queue_name, json_task)
                else:
                    # Do synchronous collect
                    _LOGGER.warning(f'####### Synchronous collect {count}/{secret_len} ########')
                    collecting_mgr = self.locator.get_manager('CollectingManager')
                    collecting_mgr.collecting_resources(**req_params)

            except ERROR_BASE as e:
                # Do not exit, just book-keeping
                job_mgr.add_error(created_job.job_id, domain_id,
                                  e.error_code,
                                  e.message,
                                  {'secret_id': secret_id}
                                  )
                _LOGGER.error(f'####### collect failed {count}/{secret_len} ##########')
                _LOGGER.error(f'[collect] collecting failed with {secret_id}: {e}')

            except Exception as e:
                # Do not exit, just book-keeping
                job_mgr.add_error(created_job.job_id, domain_id,
                                  'ERROR_COLLECTOR_COLLECTING',
                                  e,
                                  {'secret_id': secret_id}
                                  )
                _LOGGER.error(f'[collect] collecting failed with {secret_id}: {e}')

        # Update Timestamp
        self._update_last_collected_time(collector_vo.collector_id, domain_id)
        return created_job
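
SPACEONE_TASK_SCHEMA, validated in several of these snippets, is not defined here. A rough reconstruction as a JSON Schema, inferred only from the task dictionaries shown in these examples (treat it as an assumption, not the actual definition):

    # Assumed shape of SPACEONE_TASK_SCHEMA, reconstructed from the task
    # dictionaries used throughout these examples.
    SPACEONE_TASK_SCHEMA = {
        'type': 'object',
        'required': ['name', 'version', 'executionEngine', 'stages'],
        'properties': {
            'name': {'type': 'string'},
            'version': {'type': 'string'},
            'executionEngine': {'type': 'string'},
            'stages': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'required': ['locator', 'name', 'method'],
                    'properties': {
                        'locator': {'type': 'string'},
                        'name': {'type': 'string'},
                        'metadata': {'type': 'object'},
                        'method': {'type': 'string'},
                        'params': {'type': 'object'}
                    }
                }
            }
        }
    }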
Example #8
    def collect(self, params):
        """
        Args:
            params: {
                'collector_id': str
                'filter': dict
                'secret_id': str
                'collect_mode': str
                'use_cache': bool
                'domain_id': str
            }
        """
        collector_id = params['collector_id']
        domain_id = params['domain_id']
        collect_mode = params.get('collect_mode', 'ALL')

        collector_vo = self.get_collector(collector_id, domain_id)
        collector_dict = collector_vo.to_dict()
        # TODO: get Queue from config

        # Create Job
        job_mgr = self.locator.get_manager('JobManager')
        created_job = job_mgr.create_job(collector_vo, params)

        # Make in-progress
        try:
            job_mgr.make_inprgress(created_job.job_id, domain_id)
        except Exception as e:
            _LOGGER.debug(f'[collect] {e}')
            _LOGGER.debug(
                f'[collect] fail to change {collector_id} job state to in-progress'
            )

        # Create Pipeline & Push
        try:
            secret_id = params.get('secret_id', None)
            plugin_mgr = self.locator.get_manager('PluginManager')
            secret_list = plugin_mgr.get_secrets_from_plugin_info(
                collector_dict['plugin_info'], domain_id, secret_id)
            _LOGGER.debug(f'[collector] number of secrets: {len(secret_list)}')
        except Exception as e:
            _LOGGER.debug(f'[collect] failed in Secret Patch stage: {e}')
            job_mgr.make_failure(created_job.job_id, domain_id)
            raise ERROR_COLLECT_INITIALIZE(stage='Secret Patch',
                                           params=params)

        # Apply Filter Format
        try:
            filter_mgr = self.locator.get_manager('FilterManager')
            filters = params.get('filter', {})
            plugin_info = collector_dict['plugin_info']
            collect_filter, secret_list = filter_mgr.get_collect_filter(
                filters, plugin_info, secret_list)
            _LOGGER.debug(
                f'[collector] number of secrets after filter transform: {len(secret_list)}'
            )
        except Exception as e:
            _LOGGER.debug(f'[collect] failed on Filter Transform stage: {e}')
            job_mgr.make_failure(created_job.job_id, domain_id)
            raise ERROR_COLLECT_INITIALIZE(stage='Filter Format',
                                           params=params)

        # Loop all secret_list
        for secret_id in secret_list:
            # Do collect per secret
            try:
                # TODO:
                # Make Pipeline, then push
                # parameter of pipeline
                req_params = self._make_collecting_parameters(
                    collector_dict=collector_dict,
                    secret_id=secret_id,
                    domain_id=domain_id,
                    job_vo=created_job,
                    collect_filter=collect_filter,
                    params=params)
                _LOGGER.debug(f'[collect] params for collecting: {req_params}')
                job_mgr.increase_remained_tasks(created_job.job_id, domain_id)

                # TODO: Push to Queue
                # Make SpaceONE Template Pipeline
                task = self._create_task(req_params, domain_id)
                queue_name = self._get_queue_name(name='collect_queue')

                if task and queue_name:
                    # Push to queue
                    _LOGGER.debug('####### Asynchronous collect ########')
                    validate(task, schema=SPACEONE_TASK_SCHEMA)
                    json_task = json.dumps(task)
                    queue.put(queue_name, json_task)
                else:
                    # Do synchronous collect
                    _LOGGER.debug('####### Synchronous collect ########')
                    collecting_mgr = self.locator.get_manager(
                        'CollectingManager')
                    collecting_mgr.collecting_resources(**req_params)

            except Exception as e:
                # Do not exit, just book-keeping
                _LOGGER.error(
                    f'[collect] collecting failed with {secret_id}: {e}')

        # Update Timestamp
        self._update_last_collected_time(collector_vo.collector_id, domain_id)
        return created_job
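
Both collect() variants fall back to a synchronous run when _get_queue_name returns nothing, which lines up with the "# TODO: get Queue from config" notes above. A minimal sketch of that helper, assuming the queue name is looked up in global configuration via spaceone.core's config.get_global (that lookup, and the helper body as a whole, are assumptions):

    def _get_queue_name(self, name='collect_queue'):
        # Hypothetical sketch: resolve the queue name from global config.
        # Returning None makes collect() above run synchronously.
        try:
            return config.get_global(name)
        except Exception as e:
            _LOGGER.warning(f'[_get_queue_name] name: {name}, {e}')
            return None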