Example #1
class SingleTask:

    def __init__(self, single_task: dict):
        self.locator = single_task['locator']
        self.name = single_task['name']
        self.metadata = single_task['metadata']
        self.method = single_task['method']
        self.params = single_task['params']
        transaction = Transaction(meta=self.metadata)
        self._locator = Locator(transaction)

    def execute(self):
        """ Run method
        """
        print('run single task')
        try:
            if self.locator == 'SERVICE':
                caller = self._locator.get_service(self.name, self.metadata)
            elif self.locator == 'MANAGER':
                caller = self._locator.get_manager(self.name)
            else:
                # unknown locator type: fail fast instead of leaving caller unbound
                raise ValueError(f'unsupported locator: {self.locator}')

        except Exception as e:
            _LOGGER.debug(f'[SingleTask] fail at locator {e}')
            raise ERROR_TASK_LOCATOR(locator=self.locator, name=self.name)

        try:
            print(f'[SingleTask] request: {self.name}.{self.method} {self.params}')
            _LOGGER.debug(f'[SingleTask] request: {self.name}.{self.method} {self.params}')
            method = getattr(caller, self.method)
            resp = method(**self.params)
            _LOGGER.debug(f'[SingleTask] response: {resp}')
            return resp
        except Exception as e:
            _LOGGER.error(f'[SingleTask] fail to execute method: {self.method}, params: {self.params}, {e}')
            raise ERROR_TASK_METHOD(name=self.name, method=self.method, params=self.params)
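A minimal usage sketch for SingleTask, assuming Transaction, Locator, and the ERROR_* classes above are importable; the task dict mirrors the 'stages' entries built by the schedulers in the later examples.

# Hedged usage sketch: the dict shape matches the scheduler-built 'stages' below.
task = {
    'locator': 'SERVICE',                      # or 'MANAGER'
    'name': 'CleanupService',                  # name resolved by the Locator
    'metadata': {'token': 'TOKEN', 'domain_id': 'dom-1234'},
    'method': 'list_domains',
    'params': {'params': {}}                   # keyword arguments for the method
}
resp = SingleTask(task).execute()              # raises ERROR_TASK_LOCATOR / ERROR_TASK_METHOD on failure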
Example #2
async def webhook(webhook_id: str, access_key: str, request: Request):
    locator = Locator()
    data = await request.json()

    event_service = locator.get_service('EventService')
    event_service.create({
        'webhook_id': webhook_id,
        'access_key': access_key,
        'data': data or {}
    })

    return {}
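The handler above follows FastAPI conventions (an async endpoint taking a Request). A hedged registration sketch; the app object and route path are assumptions.

# Hedged sketch: wiring the handler into a FastAPI app (path is an assumption).
from fastapi import FastAPI

app = FastAPI()
app.add_api_route('/webhook/{webhook_id}/{access_key}', webhook, methods=['POST'])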
Example #3
async def create_event(webhook_id: str, access_key: str, request: Request):
    locator = Locator()
    try:
        try:
            data = await request.json()
        except Exception as e:
            _LOGGER.debug(f'JSON Parsing Error: {e}')
            raise ERROR_UNKNOWN(message='JSON Parsing Error: Request body requires JSON format.')

        event_service: EventService = locator.get_service('EventService')
        event_service.create({
            'webhook_id': webhook_id,
            'access_key': access_key,
            'data': data or {}
        })
        return {}
    except ERROR_BASE as e:
        raise HTTPException(status_code=500, detail=e.message)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f'Unknown Error: {str(e)}')
Example #4
def _update_alert_state(alert_id, access_key, state):
    locator = Locator()
    try:
        alert_service: AlertService = locator.get_service('AlertService')
        alert_vo: Alert = alert_service.update_state({
            'alert_id': alert_id,
            'access_key': access_key,
            'state': state
        })

        return {
            'alert_number': alert_vo.alert_number,
            'alert_id': alert_vo.alert_id,
            'title': alert_vo.title,
            'state': alert_vo.state,
            'assignee': alert_vo.assignee,
            'urgency': alert_vo.urgency
        }

    except ERROR_BASE as e:
        raise HTTPException(status_code=500, detail=e.message)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f'Unknown Error: {str(e)}')
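Endpoint wrappers would delegate to this helper with a fixed state. A hedged sketch; the wrapper names and state strings are assumptions.

# Hedged sketch: thin per-state wrappers (names and state values are assumptions).
async def acknowledge_alert(alert_id: str, access_key: str):
    return _update_alert_state(alert_id, access_key, 'ACKNOWLEDGED')

async def resolve_alert(alert_id: str, access_key: str):
    return _update_alert_state(alert_id, access_key, 'RESOLVED')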
Example #5
class CleanupScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,        # last check_count time
            'index': 0,             # tick index
            'hour': cur.hour,       # previous hour
            'started_at': 0,        # start time of this tick
            'ended_at': 0           # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            if not self.check_count():
                _LOGGER.error('[list_domains] inconsistent schedule count')
            # Loop over all domains, then find the scheduled collector
            metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
            cleanup_svc = self.locator.get_service('CleanupService', metadata)
            params = {}
            resp = cleanup_svc.list_domains(params)
            _LOGGER.debug(f'[list_domains] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        """ Check whether this tick follows the previous one by exactly one interval """
        cur = datetime.datetime.now()
        hour = cur.hour
        ok = True
        if (self.count['hour'] + self.config) % 24 != hour:
            ok = False
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same hour')
            else:
                _LOGGER.error('[check_count] missing time slot')

        # This is a continuous task; roll the counters forward
        self.count.update({
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        })
        return ok

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request

        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CleanupService',
            'metadata': metadata,
            'method': 'update_collection_state',
            'params': {
                'params': {
                    'options': {},
                    'domain_id': domain['domain_id']
                }
            }
        }

        update_job_state = {
            'locator': 'SERVICE',
            'name': 'CleanupService',
            'metadata': metadata,
            'method': 'update_job_state',
            'params': {
                'params': {
                    'options': {},
                    'domain_id': domain['domain_id']
                }
            }
        }

        stp = {'name': 'inventory_cleanup_schedule',
               'version': 'v1',
               'executionEngine': 'BaseWorker',
               'stages': [sched_job, update_job_state]}
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
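The 'stages' entries of the template are exactly the dicts SingleTask (Example #1) consumes. A hedged sketch of how a worker might run one template; run_pipeline is an illustrative name, not part of the source.

# Hedged sketch: executing the stages of one pipeline template in order.
def run_pipeline(stp):
    # each stage carries locator/name/metadata/method/params, as SingleTask expects
    return [SingleTask(stage).execute() for stage in stp['stages']]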
Example #6
class InventoryHourlyScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        schedules = self.list_schedules()
        result = []
        for schedule in schedules:
            try:
                stp = self._create_job_request(schedule)
                result.append(stp)
            except Exception as e:
                _LOGGER.error(f'[create_task] check schedule {schedule}: {e}')

        return result

    def list_schedules(self):
        try:
            if not self.check_count():
                _LOGGER.error('[list_schedules] inconsistent schedule count')
            # Loop over all domains, then find the scheduled collectors
            collector_svc = self.locator.get_service('CollectorService')
            schedule = {'hour': self.count['hour']}
            _LOGGER.debug(f'[list_schedules] schedule: {schedule}')
            schedule_vos, total = collector_svc.scheduled_collectors(
                {'schedule': schedule})
            _LOGGER.debug(f'[list_schedules] scheduled count: {total}')
            return schedule_vos
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        """ Check whether this tick follows the previous one by exactly one interval """
        cur = datetime.datetime.now()
        hour = cur.hour
        ok = True
        if (self.count['hour'] + self.config) % 24 != hour:
            ok = False
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same hour')
            else:
                _LOGGER.error('[check_count] missing time slot')

        # This is a continuous task; roll the counters forward
        self.count.update({
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        })
        return ok

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _create_job_request(self, scheduler_vo):
        """ Based on scheduler_vo, create a Job Request

        Args:
            scheduler_vo: Scheduler VO with fields
                - scheduler_id
                - name
                - collector: reference to a Collector
                - schedule
                - filter
                - collector_mode
                - created_at
                - last_scheduled_at
                - domain_id

        Returns:
            jobs: SpaceONE Pipeline Template

        Note:
            If collector_info has credential_group_id,
            we have to iterate over all credentials in the credential_group.
        """
        _LOGGER.debug(f'[_create_job_request] scheduler_vo: {scheduler_vo}')
        plugin_info = scheduler_vo.collector.plugin_info
        _LOGGER.debug(f'plugin_info: {plugin_info}')
        domain_id = scheduler_vo.domain_id
        metadata = {
            'token': self.TOKEN,
            'service': 'inventory',
            'resource': 'Collector',
            'verb': 'collect',
            'authorization': False,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CollectorService',
            'metadata': metadata,
            'method': 'collect',
            'params': {
                'params': {
                    'collector_id': scheduler_vo.collector.collector_id,
                    # if filter
                    # contact credential
                    'collect_mode': 'ALL',
                    'filter': {},
                    'domain_id': domain_id
                }
            }
        }
        stp = {
            'name': 'inventory_collect_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
Example #7
class CleanupScheduler(HourlyScheduler):
    """ Clean-up unused plugins
    Some plugins are not used after upgrade
    Delete unused plugins, if last get_endpoint time is larger than limit
    """
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        cur = datetime.datetime.utcnow()
        count = {
            'previous': cur,
            'index': 0,
            'hour': cur.hour,
            'started_at': 0,
            'ended_at': 0
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == '':
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            if not self.check_count():
                _LOGGER.error('[list_domains] inconsistent schedule count')
            # Loop over all domains, then find the schedule
            metadata = {
                'token': self.TOKEN,
                'service': 'plugin',
                'resource': 'Supervisor',
                'verb': 'list_domains',
                'authorization': True,
                'mutation': True,
                'domain_id': self.domain_id
            }
            svc = self.locator.get_service('SupervisorService', metadata)
            params = {}
            resp = svc.list_domains(params)
            _LOGGER.debug(
                f'[list_domains] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        """ Check whether this tick follows the previous one by exactly one interval """
        cur = datetime.datetime.utcnow()
        hour = cur.hour
        ok = True
        if (self.count['hour'] + self.config) % 24 != hour:
            ok = False
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same hour')
            else:
                _LOGGER.error('[check_count] missing time slot')

        # This is a continuous task; roll the counters forward
        self.count.update({
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        })
        return ok

    def _update_count_ended_at(self):
        cur = datetime.datetime.utcnow()
        self.count['ended_at'] = cur

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request
        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {
            'token': self.TOKEN,
            'service': 'plugin',
            'resource': 'Supervisor',
            'verb': 'cleanup_plugins',
            'authorization': True,
            'mutation': True,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'SupervisorService',
            'metadata': metadata,
            'method': 'cleanup_plugins',
            'params': {
                'params': {
                    'domain_id': domain['domain_id']
                }
            }
        }

        stp = {
            'name': 'cleanup_unused_plugins',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
Example #8
class InventoryIntervalScheduler(IntervalScheduler):
    def __init__(self, queue, interval):
        super().__init__(queue, interval)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)
        self.schedule_info = {}
        self.idx = 0

    def run(self):
        # Every specific interval, check schedule
        schedule.every(self.config).seconds.do(self._check_interval)
        while True:
            schedule.run_pending()
            time.sleep(1)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def push_task(self, schedule_id, interval_info):
        # schedule_id: e.g. 'schedule-2222'
        # interval_info: e.g. {'interval': 30, 'domain_id': 'dom-2222', 'collector_id': 'collector-3333'}
        task = self._create_job_request(schedule_id, interval_info)
        _LOGGER.debug(f'[push_task] {task["name"]}')
        try:
            validate(task, schema=SPACEONE_TASK_SCHEMA)
            json_task = json.dumps(task)
            _LOGGER.debug(f'[push_task] Task schema: {task}')
            queue.put(self.queue, json_task)
        except Exception as e:
            _LOGGER.error(f'[push_task] invalid task schema: {task}, {e}')

    def _check_interval(self):
        """ Check interval schedules, then sync the running jobs

        Example shape of interval_schedules:
            {'schedule-1234': {'interval': 20, 'domain_id': 'dom-1234', 'collector_id': 'collector-5678'},
             'schedule-2222': {'interval': 30, 'domain_id': 'dom-2222', 'collector_id': 'collector-3333'}}
        """
        interval_schedules = self._get_interval_schedules()
        # Update the schedule jobs based on the DB
        schedule_ids = []
        for schedule_id, interval_info in interval_schedules.items():
            schedule_ids.append(schedule_id)
            try:
                interval_value = interval_info['interval']
                # Create New scheduler
                if schedule_id not in self.schedule_info:
                    _LOGGER.debug(
                        f'[_check_interval] create {schedule_id}, at every {interval_value}'
                    )
                    self.schedule_info[schedule_id] = interval_info
                    job = schedule.every(interval_value).seconds.do(
                        self.push_task, schedule_id, interval_info)
                    # Add tag for each job
                    job.tag(schedule_id)
                # Sync previous scheduler
                else:
                    previous_interval_info = self.schedule_info[schedule_id]
                    previous_interval = previous_interval_info['interval']
                    if interval_value != previous_interval:
                        _LOGGER.debug(
                            f'[_check_interval] modify {schedule_id} interval {previous_interval} to {interval_value}'
                        )
                        # delete job and re-create
                        schedule.default_scheduler.clear(tag=schedule_id)
                        job = schedule.every(interval_value).seconds.do(
                            self.push_task, schedule_id, interval_info)
                        job.tag(schedule_id)
                        # Update self.schedule_info
                        self.schedule_info[schedule_id] = interval_info
                    else:
                        _LOGGER.debug(
                            f'[_check_interval] continue {schedule_id}, at every {previous_interval}'
                        )
            except Exception as e:
                _LOGGER.error(f'[_check_interval] contact to developer {e}')
        # Delete garbage jobs whose schedule disappeared from the DB
        _LOGGER.debug(
            f'[_check_interval] garbage collection: {len(schedule.default_scheduler.jobs) - 1} jobs'
        )
        # iterate over a copy, since cancel_job mutates the job list
        for job in list(schedule.default_scheduler.jobs):
            if job.tags == set():
                continue
            exist = False
            for schedule_id in schedule_ids:
                if schedule_id in job.tags:
                    exist = True
                    break

            if not exist:
                # this job is garbage
                _LOGGER.debug(f'[_check_interval] remove job: {job}')
                schedule.default_scheduler.cancel_job(job)

    def _get_interval_schedules(self):
        """ Find all interval schedules from inventory.Collector.schedule
        """
        schedule_vos = self._list_schedules()
        found_schedules = {}
        for schedule_vo in schedule_vos:
            try:
                interval_value = _get_interval_value(schedule_vo)
                found_schedules.update({
                    schedule_vo.schedule_id: {
                        'interval': interval_value,
                        'domain_id': schedule_vo.domain_id,
                        'collector_id': schedule_vo.collector.collector_id
                    }
                })
            except Exception as e:
                _LOGGER.error(f'[_get_interval_schedules] {e}')
        _LOGGER.debug(f'[_get_interval_schedules] found: {found_schedules}')
        return found_schedules

    def _list_schedules(self):
        try:
            # Loop over all domains, then find the scheduled collectors
            collector_svc = self.locator.get_service('CollectorService')
            # local filter dict; named to avoid shadowing the imported `schedule` module
            schedule_filter = {'interval': None}
            schedule_vos, total = collector_svc.scheduled_collectors(
                {'schedule': schedule_filter})
            return schedule_vos
        except Exception as e:
            _LOGGER.error(e)
            return []

    def _create_job_request(self, schedule_id, interval_info):
        """ Based on interval_info, create a Job Request

        Args:
            schedule_id (str): e.g. 'schedule-2222'
            interval_info (dict): {'interval': int, 'domain_id': str, 'collector_id': str}

        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] {interval_info}')
        domain_id = interval_info['domain_id']
        metadata = {
            'token': self.TOKEN,
            'service': 'inventory',
            'resource': 'Collector',
            'verb': 'collect',
            'authorization': True,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CollectorService',
            'metadata': metadata,
            'method': 'collect',
            'params': {
                'params': {
                    'collector_id': interval_info['collector_id'],
                    # if filter
                    # contact credential
                    'collect_mode': 'ALL',
                    'filter': {},
                    'domain_id': domain_id
                }
            }
        }
        stp = {
            'name': 'inventory_collect_by_interval_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        #_LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
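The sync logic in _check_interval leans on the `schedule` library's job tagging. A self-contained sketch of the create/modify/remove cycle it implements; the schedule_id and interval values are illustrative.

# Hedged sketch of the tag-based job lifecycle used by _check_interval.
import schedule

def ping(schedule_id):
    print(f'run {schedule_id}')

# create: one tagged job per schedule_id
job = schedule.every(20).seconds.do(ping, 'schedule-1234')
job.tag('schedule-1234')

# modify: clear by tag, then re-create with the new interval
schedule.clear('schedule-1234')
job = schedule.every(30).seconds.do(ping, 'schedule-1234')
job.tag('schedule-1234')

# remove: cancel tagged jobs whose schedule_id is no longer current
current_ids = {'schedule-1234'}
for job in list(schedule.default_scheduler.jobs):
    if job.tags and not (job.tags & current_ids):
        schedule.cancel_job(job)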
Example #9
class StatHourlyScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            if not self.check_count():
                _LOGGER.error('[list_domains] inconsistent schedule count')
            # Loop over all domains, then find the schedule
            metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
            schedule_svc = self.locator.get_service('ScheduleService',
                                                    metadata)
            params = {}
            resp = schedule_svc.list_domains(params)
            _LOGGER.debug(
                f'[list_domains] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        """ Check whether this tick follows the previous one by exactly one interval """
        cur = datetime.datetime.now()
        hour = cur.hour
        ok = True
        if (self.count['hour'] + self.config) % 24 != hour:
            ok = False
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same hour')
            else:
                _LOGGER.error('[check_count] missing time slot')

        # This is a continuous task; roll the counters forward
        self.count.update({
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        })
        return ok

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _list_schedule(self, hour, domain_id):
        """ List statistics.Schedule
        """
        params = {
            'query': {
                'filter': [{
                    'k': 'schedule.hours',
                    'v': hour,
                    'o': 'eq'
                }],
            },
            'domain_id': domain_id
        }
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedule_svc = self.locator.get_service('ScheduleService', metadata)
        schedules, total_count = schedule_svc.list(params)
        _LOGGER.debug(
            f'[_list_schedule] schedules: {schedules}, total_count: {total_count}'
        )
        return schedules

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request

        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedules = self._list_schedule(self.count['hour'],
                                        domain['domain_id'])
        sched_jobs = []
        for schedule in schedules:
            sched_job = {
                'locator': 'SERVICE',
                'name': 'HistoryService',
                'metadata': metadata,
                'method': 'create',
                'params': {
                    'params': {
                        'schedule_id': schedule.schedule_id,
                        'domain_id': domain['domain_id']
                    }
                }
            }
            sched_jobs.append(sched_job)

        stp = {
            'name': 'statistics_hourly_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': sched_jobs
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp

    @staticmethod
    def _create_schedule_params(schedule, domain_id):
        dict_schedule = dict(schedule.to_dict())
        _LOGGER.debug(f'[_create_schedule_params] schedule: {schedule}')

        required_params = [
            'schedule_id', 'data_source_id', 'resource_type', 'query', 'join',
            'formulas', 'domain_id'
        ]
        result = {
            'schedule_id': dict_schedule['schedule_id'],
            'domain_id': domain_id
        }
        for param in required_params:
            if param in dict_schedule['options']:
                result[param] = dict_schedule['options'][param]
        _LOGGER.debug(f'[_create_schedule_params] params: {result}')
        return result
Example #10
class SpaceOneScheduler(Scheduler):
    #: how often should we sync in schedule information
    #: from the backend mongo database
    UPDATE_INTERVAL = datetime.timedelta(seconds=5)

    Entry = SpaceOneScheduleEntry
    Service = None
    _metadata: dict = None

    @property
    def metadata(self):
        if self._metadata is None:
            token = config.get_global('CELERY', {}).get('auth', {}).get('token')
            if token:
                self._metadata = {'token': token}
            else:
                self._metadata = {}
        return self._metadata

    def __init__(self, *args, **kwargs):
        self.transaction = Transaction()
        self.locator = Locator(self.transaction)
        if hasattr(current_app.conf, "spaceone_scheduler_service"):
            self.service_name = current_app.conf.get("spaceone_scheduler_service")
        else:
            raise SpaceOneSchedulerError("cannot find CELERY.spaceone_scheduler_service config")

        self.Service = self.locator.get_service(self.service_name, metadata=self.metadata)
        self._schedule = {}
        self._last_updated = None

        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 5)

    def setup_schedule(self):
        pass

    def requires_update(self):
        """check whether we should pull an updated schedule
        from the backend database"""
        if not self._last_updated:
            return True
        return self._last_updated + self.UPDATE_INTERVAL < datetime.datetime.now()

    def get_from_service(self):
        self.sync()
        d = {}
        schedules = self.Service.list()
        print(f"Find {len(schedules)} schedules")
        for task in schedules:
            d[task.schedule_id] = self.Entry(task)
        return d

    @property
    def schedule(self):
        if self.requires_update():
            self._schedule = self.get_from_service()
            self._last_updated = datetime.datetime.now()
        return self._schedule

    def sync(self):
        print('Writing entries...')
        values = self._schedule.values()
        for entry in values:
            entry.save(self.Service)

    @property
    def info(self):
        return f'    . service -> {self.service_name}'
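A hedged note on running this scheduler: Celery beat accepts a custom scheduler class via -S/--scheduler. The module path below is an assumption; CELERY.spaceone_scheduler_service (and optionally CELERY.auth.token) must be set in the SpaceONE global config, as read above.

#   celery -A my_app beat --scheduler my_pkg.scheduler.SpaceOneScheduler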
Example #11
        _set_file_config(f)


def print_schedules(schedules: List[SpaceoneTaskData]):
    print(f"id | schedule | total_run_count | last_run")
    for sch in schedules:
        print(
            f"{sch.schedule_id} | {sch.schedule_info} | {sch.total_run_count} | {sch.last_run_at}"
        )
    print('\n\n')


if __name__ == '__main__':
    config_server()
    locator = Locator()
    svc = locator.get_service('ScheduleService')
    print('list all schedules')
    print_schedules(svc.list())

    print('add schedule')
    sch_name = f"test_sche_{randint(1, 1000)}"
    svc.add({
        'domain_id': "sample",
        'enabled': True,
        'task': 'spaceone.core.celery.tasks.test_task',
        'name': sch_name,
        'interval': {
            'period': 'seconds',
            'every': randint(6, 12)
        },
        'args': ['sample'],
    })