def __init__(self, queue, interval):
     super().__init__(queue, interval)
     self.count = self._init_count()
     self.locator = Locator()
     self.TOKEN = self._update_token()
     self.domain_id = _get_domain_id_from_token(self.TOKEN)
     self.schedule_info = {}
     self.idx = 0
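
All of the snippets on this page follow the same pattern: build a Locator, optionally bound to a Transaction, and resolve services, managers, or models by name. A minimal standalone sketch of that flow (the import paths and the registered 'ScheduleService' are assumptions, not part of the example above):

# Minimal sketch of the Locator pattern; assumes spaceone-core is installed
# and a 'ScheduleService' is registered in the running application.
from spaceone.core.transaction import Transaction
from spaceone.core.locator import Locator

transaction = Transaction({'token': 'example-token'})  # hypothetical metadata
locator = Locator(transaction)

# Resolve a service by class name, passing request metadata along.
schedule_svc = locator.get_service('ScheduleService', {'token': 'example-token'})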
Example #2
 def __init__(self,
              experimental_non_blocking=True,
              experimental_thread_pool=None):
     super().__init__(experimental_non_blocking, experimental_thread_pool)
     locator = Locator()
     self.actuator = locator.get_actuator('Health')
     self.actuator.add_health_update(self)
     self.actuator.check()
Example #3
 def __init__(self, single_task: dict):
     self.locator = single_task['locator']
     self.name = single_task['name']
     self.metadata = single_task['metadata']
     self.method = single_task['method']
     self.params = single_task['params']
     transaction = Transaction(meta=self.metadata)
     self._locator = Locator(transaction)
Example #4
    def __init__(
        self,
        interval=1,
    ):
        self.config = self.parse_config(interval)

        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)
Example #5
async def webhook(webhook_id: str, access_key: str, request: Request):
    locator = Locator()
    data = await request.json()

    event_service = locator.get_service('EventService')
    event_service.create({
        'webhook_id': webhook_id,
        'access_key': access_key,
        'data': data or {}
    })

    return {}
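
Since the handler above is an async FastAPI endpoint, registering and exercising it looks roughly like this sketch (the route path and app object are assumptions):

# Hypothetical wiring for the webhook handler above; the route path is an assumption.
from fastapi import FastAPI

app = FastAPI()
app.post('/webhook/{webhook_id}/{access_key}')(webhook)

# Exercise it with the test client:
#   from fastapi.testclient import TestClient
#   TestClient(app).post('/webhook/webhook-123/key-abc', json={'severity': 'ERROR'})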
Example #6
    def __init__(self, *args, **kwargs):
        self.transaction = Transaction()
        self.locator = Locator(self.transaction)
        if hasattr(current_app.conf, "spaceone_scheduler_service"):
            self.service_name = current_app.conf.get("spaceone_scheduler_service")
        else:
            raise SpaceOneSchedulerError("cannot find CELERY.spaceone_scheduler_service config")

        self.Service = self.locator.get_service(self.service_name, metadata=self.metadata)
        self._schedule = {}
        self._last_updated = None

        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 5)
Example #7
    def __init__(self,
                 metadata: dict = None,
                 transaction: Transaction = None,
                 **kwargs):
        self.func_name = None
        self.is_with_statement = False

        if transaction:
            self.transaction = transaction
        else:
            # Avoid a shared mutable default argument for metadata
            self.transaction = Transaction(metadata or {})

        if config.get_global('SET_LOGGING', True):
            set_logger(transaction=self.transaction)

        self.locator = Locator(self.transaction)
        self.handler = {
            'authentication': {
                'handlers': [],
                'methods': []
            },
            'authorization': {
                'handlers': [],
                'methods': []
            },
            'mutation': {
                'handlers': [],
                'methods': []
            },
            'event': {
                'handlers': [],
                'methods': []
            },
        }
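
The handler registry above groups handler instances with the method names they apply to, keyed by phase. A hypothetical registration (the service instance, handler object, and method name are illustrations, not a spaceone-core API):

# Hypothetical registration into the handler registry shown above.
service.handler['authentication']['handlers'].append(auth_handler)
service.handler['authentication']['methods'].append('create')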
Example #8
class InventoryDBUpdater(BaseWorker):
    def __init__(self, queue, **kwargs):
        BaseWorker.__init__(self, queue, **kwargs)
        self.locator = Locator()

    def run(self):
        """ Infinite Loop
        """
        # Create Manager
        collecting_mgr = self.locator.get_manager('CollectingManager')

        while True:
            # Read from Queue
            try:
                binary_resource_info = queue.get(self.queue)
                resource_info = json.loads(binary_resource_info.decode())
                # Create Transaction
                collecting_mgr.transaction = Transaction(resource_info['meta'])
                # processing
                method = resource_info['method']
                if method == '_process_single_result':
                    collecting_mgr._process_single_result(
                        resource_info['res'], resource_info['param'])
                elif method == '_watchdog_job_task_stat':
                    collecting_mgr._watchdog_job_task_stat(
                        resource_info['param'])
                else:
                    _LOGGER.error(f'Unknown request: {resource_info}')

            except Exception as e:
                _LOGGER.error(f'[{self._name_}] failed to process: {e}')
                continue
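
The worker loop above expects JSON messages carrying a method name, its arguments, and transaction metadata. A sketch of a matching producer (the queue name and payload values are hypothetical, and the spaceone.core.queue import path is an assumption):

# Sketch of a producer for the worker loop above; the payload mirrors the
# keys the loop reads: 'method', 'res'/'param', and 'meta'.
import json
from spaceone.core import queue

message = {
    'method': '_process_single_result',
    'res': {'resource_type': 'inventory.Server'},   # hypothetical collector result
    'param': {'domain_id': 'domain-12345'},         # hypothetical parameters
    'meta': {'token': 'example-token'},
}
queue.put('collector_q', json.dumps(message))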
Example #9
    def __init__(self, transaction: Transaction = None):

        if transaction:
            self.transaction = transaction
        else:
            self.transaction = Transaction()

        self.locator = Locator(self.transaction)
Example #10
async def create_event(webhook_id: str, access_key: str, request: Request):
    locator = Locator()
    try:
        try:
            data = await request.json()
        except Exception as e:
            _LOGGER.debug(f'JSON Parsing Error: {e}')
            raise ERROR_UNKNOWN(message='JSON Parsing Error: Request body requires JSON format.')

        event_service: EventService = locator.get_service('EventService')
        event_service.create({
            'webhook_id': webhook_id,
            'access_key': access_key,
            'data': data or {}
        })
        return {}
    except ERROR_BASE as e:
        raise HTTPException(status_code=500, detail=e.message)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f'Unknown Error: {str(e)}')
Example #11
    def _change_secret_id_filter(condition):
        value = condition.get('v') or condition.get('value')
        operator = condition.get('o') or condition.get('operator')

        map_query = {'filter': [{'k': 'secret_id', 'v': value, 'o': operator}]}

        locator = Locator()
        secret_group_map_model = locator.get_model('SecretGroupMap')
        map_vos, total_count = secret_group_map_model.query(**map_query)

        return {
            'k': 'secret_group_id',
            'v': [map_vo.secret_group.secret_group_id for map_vo in map_vos],
            'o': 'in'
        }
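
In effect, the helper rewrites a secret_id condition into a secret_group_id membership test; for example (values hypothetical):

# Hypothetical input and output of _change_secret_id_filter:
condition = {'k': 'secret_id', 'v': 'secret-1234', 'o': 'eq'}
# => {'k': 'secret_group_id', 'v': ['secret-group-5678', ...], 'o': 'in'}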
Example #12
    def __init__(self, transaction=None, **kwargs):

        if transaction:
            self.transaction = transaction
        else:
            self.transaction = Transaction()

        self.locator = Locator(self.transaction)

        for k, v in kwargs.items():
            setattr(self, k, v)
Example #13
class SingleTask:
    def __init__(self, single_task: dict):
        self.locator = single_task['locator']
        self.name = single_task['name']
        self.metadata = single_task['metadata']
        self.method = single_task['method']
        self.params = single_task['params']
        transaction = Transaction(meta=self.metadata)
        self._locator = Locator(transaction)

    def execute(self):
        """ Run method
        """
        try:
            if self.locator == 'SERVICE':
                caller = self._locator.get_service(self.name, self.metadata)
            elif self.locator == 'MANAGER':
                caller = self._locator.get_manager(self.name)
            else:
                # Unknown locator type; fail fast instead of leaving caller unbound
                raise ERROR_TASK_LOCATOR(locator=self.locator, name=self.name)

        except Exception as e:
            _LOGGER.debug(f'[SingleTask] failed at locator: {e}')
            raise ERROR_TASK_LOCATOR(locator=self.locator, name=self.name)

        try:
            _LOGGER.debug(
                f'[SingleTask] request: {self.name}.{self.method} {self.params}'
            )
            method = getattr(caller, self.method)
            resp = method(**self.params)
            _LOGGER.debug(f'[SingleTask] response: {resp}')
            return resp
        except Exception as e:
            _LOGGER.error(
                f'[SingleTask] failed to execute method: {self.method}, params: {self.params}, {e}'
            )
            raise ERROR_TASK_METHOD(name=self.name,
                                    method=self.method,
                                    params=self.params)
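
A task descriptor for SingleTask mirrors the keys its __init__ reads; the stage dicts produced by the schedulers below have exactly this shape. A hypothetical example:

# Hypothetical task descriptor consumed by SingleTask.
single_task = {
    'locator': 'SERVICE',
    'name': 'CleanupService',
    'metadata': {'token': 'example-token', 'domain_id': 'domain-12345'},
    'method': 'update_job_state',
    'params': {'params': {'options': {}, 'domain_id': 'domain-12345'}},
}
SingleTask(single_task).execute()  # resolves CleanupService, calls update_job_state(params={...})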
Example #14
def _update_alert_state(alert_id, access_key, state):
    locator = Locator()
    try:

        alert_service: AlertService = locator.get_service('AlertService')
        alert_vo: Alert = alert_service.update_state({
            'alert_id': alert_id,
            'access_key': access_key,
            'state': state
        })

        return {
            'alert_number': alert_vo.alert_number,
            'alert_id': alert_vo.alert_id,
            'title': alert_vo.title,
            'state': alert_vo.state,
            'assignee': alert_vo.assignee,
            'urgency': alert_vo.urgency
        }

    except ERROR_BASE as e:
        raise HTTPException(status_code=500, detail=e.message)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f'Unknown Error: {str(e)}')
Example #15
    def __init__(self, metadata=None, transaction=None):
        self.func_name = None
        self.is_with_statement = False

        if transaction:
            self.transaction = transaction
        else:
            # Avoid a shared mutable default argument for metadata
            self.transaction = Transaction(metadata or {})

        set_logger(transaction=self.transaction)
        self.locator = Locator(self.transaction)
        self.handler = {
            'authentication': {'handlers': [], 'methods': []},
            'authorization': {'handlers': [], 'methods': []},
            'event': {'handlers': [], 'methods': []},
        }
Example #16
class StatHourlyScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            ok = self.check_count()
            if not ok:
                # TODO: error logging
                pass
            # Loop all domain, then find schedule
            metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
            schedule_svc = self.locator.get_service('ScheduleService',
                                                    metadata)
            params = {}
            resp = schedule_svc.list_domains(params)
            _LOGGER.debug(
                f'[list_domain] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        # check current count is correct or not
        cur = datetime.datetime.now()
        hour = cur.hour
        # check
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicate call within the same hour')
            else:
                _LOGGER.error('[check_count] missed a scheduled hour')

        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        }
        self.count.update(count)

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _list_schedule(self, hour, domain_id):
        """ List statistics.Schedule
        """
        params = {
            'query': {
                'filter': [{
                    'k': 'schedule.hours',
                    'v': hour,
                    'o': 'eq'
                }],
            },
            'domain_id': domain_id
        }
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedule_svc = self.locator.get_service('ScheduleService', metadata)
        schedules, total_count = schedule_svc.list(params)
        _LOGGER.debug(
            f'[_list_schedule] schedules: {schedules}, total_count: {total_count}'
        )
        return schedules

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request

        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        schedules = self._list_schedule(self.count['hour'],
                                        domain['domain_id'])
        sched_jobs = []
        for schedule in schedules:
            sched_job = {
                'locator': 'SERVICE',
                'name': 'HistoryService',
                'metadata': metadata,
                'method': 'create',
                'params': {
                    'params': {
                        'schedule_id': schedule.schedule_id,
                        'domain_id': domain['domain_id']
                    }
                }
            }
            sched_jobs.append(sched_job)

        stp = {
            'name': 'statistics_hourly_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': sched_jobs
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp

    @staticmethod
    def _create_schedule_params(schedule, domain_id):
        dict_schedule = dict(schedule.to_dict())
        _LOGGER.debug(f'[_create_schedule_params] schedule: {schedule}')

        required_params = [
            'schedule_id', 'data_source_id', 'resource_type', 'query', 'join',
            'formulas', 'domain_id'
        ]
        result = {
            'schedule_id': dict_schedule['schedule_id'],
            'domain_id': domain_id
        }
        for param in required_params:
            _LOGGER.debug(f'[_create_schedule_params] check: {param}')
            if param in dict_schedule['options']:
                result[param] = dict_schedule['options'][param]
        _LOGGER.debug(f'[_create_schedule_params] params: {result}')
        return result
Example #17
 def __init__(self, *args, **kwargs):
     transaction = Transaction()
     self.locator = Locator(transaction)
Example #18
class CleanupScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,            # Last check_count time
            'index': 0,                # index
            'hour': cur.hour,           # previous hour
            'started_at': 0,            # start time of push_token
            'ended_at': 0               # end time of execution in this tick
            }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            ok = self.check_count()
            if not ok:
                # TODO: error logging
                pass
            # Loop all domain, then find scheduled collector
            metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
            cleanup_svc = self.locator.get_service('CleanupService', metadata)
            params = {}
            resp = cleanup_svc.list_domains(params)
            _LOGGER.debug(f'[list_domain] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        # check current count is correct or not
        cur = datetime.datetime.now()
        hour = cur.hour
        # check
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicate call within the same hour')
            else:
                _LOGGER.error('[check_count] missed a scheduled hour')

        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
            }
        self.count.update(count)

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request

        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {'token': self.TOKEN, 'domain_id': self.domain_id}
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CleanupService',
            'metadata': metadata,
            'method': 'update_collection_state',
            'params': {'params': {
                            'options': {},
                            'domain_id': domain['domain_id']
                            }
                       }
            }

        update_job_state = {
            'locator': 'SERVICE',
            'name': 'CleanupService',
            'metadata': metadata,
            'method': 'update_job_state',
            'params': {'params': {
                            'options': {},
                            'domain_id': domain['domain_id']
                            }
                       }
            }

        stp = {'name': 'inventory_cleanup_schedule',
               'version': 'v1',
               'executionEngine': 'BaseWorker',
               'stages': [sched_job, update_job_state]}
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
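
Each _create_job_request returns a SpaceONE pipeline template; downstream, such templates are validated against a schema and pushed onto the queue, as the interval scheduler in Example #24 does. A hedged sketch of that hand-off (SPACEONE_TASK_SCHEMA is the name used in Example #24; the queue import path is an assumption):

# Sketch of the template hand-off, mirroring push_task from Example #24.
import json
from jsonschema import validate
from spaceone.core import queue  # import path is an assumption

def push_template(scheduler, domain, schema):
    # Validate the pipeline template, then enqueue it as JSON.
    task = scheduler._create_job_request(domain)
    validate(task, schema=schema)
    queue.put(scheduler.queue, json.dumps(task))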
Example #19
 def __init__(self, queue, interval, minute=':00'):
     super().__init__(queue, interval, minute)
     self.count = self._init_count()
     self.locator = Locator()
     self.TOKEN = self._update_token()
     self.domain_id = _get_domain_id_from_token(self.TOKEN)
Example #20
 def __init__(self, queue, interval):
     super().__init__(queue, interval)
     self.locator = Locator()
     self._init_config()
     self._create_metadata()
Example #21
class InventoryHourlyScheduler(HourlyScheduler):
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        # self.check_global_configuration()
        schedules = self.list_schedules()
        result = []
        for schedule in schedules:
            try:
                stp = self._create_job_request(schedule)
                result.append(stp)
            except Exception as e:
                _LOGGER.error(f'[create_task] check schedule {schedule}: {e}')

        return result

    def list_schedules(self):
        try:
            ok = self.check_count()
            if not ok:
                # TODO: error logging
                pass
            # Loop all domain, then find scheduled collector
            collector_svc = self.locator.get_service('CollectorService')
            schedule = {'hour': self.count['hour']}
            _LOGGER.debug(f'[push_token] schedule: {schedule}')
            schedule_vos, total = collector_svc.scheduled_collectors(
                {'schedule': schedule})
            _LOGGER.debug(f'[push_token] scheduled count: {total}')
            return schedule_vos
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        # check current count is correct or not
        cur = datetime.datetime.now()
        hour = cur.hour
        # check
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicate call within the same hour')
            else:
                _LOGGER.error('[check_count] missed a scheduled hour')

        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        }
        self.count.update(count)

    def _update_count_ended_at(self):
        cur = datetime.datetime.now()
        self.count['ended_at'] = cur

    def _create_job_request(self, scheduler_vo):
        """ Based on scheduler_vo, create Job Request

        Args:
            scheduler_vo: Scheduler VO
                - scheduler_id
                - name
                - collector: Reference of Collector
                - schedule
                - filter
                - collector_mode
                - created_at
                - last_scheduled_at
                - domain_id
                }

        Returns:
            jobs: SpaceONE Pipeline Template

        Because if collector_info has credential_group_id,
        we have to iterate all credentials in the credential_group
        """
        _LOGGER.debug(f'[_create_job_request] scheduler_vo: {scheduler_vo}')
        plugin_info = scheduler_vo.collector.plugin_info
        _LOGGER.debug(f'plugin_info: {plugin_info}')
        domain_id = scheduler_vo.domain_id
        metadata = {
            'token': self.TOKEN,
            'service': 'inventory',
            'resource': 'Collector',
            'verb': 'collect',
            'authorization': False,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CollectorService',
            'metadata': metadata,
            'method': 'collect',
            'params': {
                'params': {
                    'collector_id': scheduler_vo.collector.collector_id,
                    # if filter
                    # contact credential
                    'collect_mode': 'ALL',
                    'filter': {},
                    'domain_id': domain_id
                }
            }
        }
        stp = {
            'name': 'inventory_collect_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
Example #22
class SpaceOneScheduler(Scheduler):
    #: how often we should sync schedule information
    #: from the backend mongo database
    UPDATE_INTERVAL = datetime.timedelta(seconds=5)

    Entry = SpaceOneScheduleEntry
    Service = None
    _metadata: dict = None

    @property
    def metadata(self):
        if self._metadata is None:
            token = config.get_global('CELERY', {}).get('auth', {}).get('token')
            if token:
                self._metadata = {'token': token}
            else:
                self._metadata = {}
        return self._metadata

    def __init__(self, *args, **kwargs):
        self.transaction = Transaction()
        self.locator = Locator(self.transaction)
        if hasattr(current_app.conf, "spaceone_scheduler_service"):
            self.service_name = current_app.conf.get("spaceone_scheduler_service")
        else:
            raise SpaceOneSchedulerError("cannot find CELERY.spaceone_scheduler_service config")

        self.Service = self.locator.get_service(self.service_name, metadata=self.metadata)
        self._schedule = {}
        self._last_updated = None

        Scheduler.__init__(self, *args, **kwargs)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or 5)

    def setup_schedule(self):
        pass

    def requires_update(self):
        """check whether we should pull an updated schedule
        from the backend database"""
        if not self._last_updated:
            return True
        return self._last_updated + self.UPDATE_INTERVAL < datetime.datetime.now()

    def get_from_service(self):
        self.sync()
        d = {}
        schedules = self.Service.list()
        print(f"Find {len(schedules)} schedules")
        for task in schedules:
            d[task.schedule_id] = self.Entry(task)
        return d

    @property
    def schedule(self):
        if self.requires_update():
            self._schedule = self.get_from_service()
            self._last_updated = datetime.datetime.now()
        return self._schedule

    def sync(self):
        print('Writing entries...')
        values = self._schedule.values()
        for entry in values:
            entry.save(self.Service)

    @property
    def info(self):
        return f'    . service -> {self.service_name}'
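
SpaceOneScheduler is a Celery beat scheduler that reads its backing service name from the app config; only the spaceone_scheduler_service key is taken from the example above, everything else in this sketch is an assumption:

# Hypothetical Celery wiring for SpaceOneScheduler.
from celery import Celery

app = Celery('spaceone')
app.conf.spaceone_scheduler_service = 'ScheduleService'
# Point beat at the custom scheduler class; the module path is an assumption.
app.conf.beat_scheduler = 'spaceone.core.celery.schedulers.SpaceOneScheduler'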
Example #23
class CleanupScheduler(HourlyScheduler):
    """ Clean-up unused plugins
    Some plugins are not used after upgrade
    Delete unused plugins, if last get_endpoint time is larger than limit
    """
    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        cur = datetime.datetime.utcnow()
        count = {
            'previous': cur,
            'index': 0,
            'hour': cur.hour,
            'started_at': 0,
            'ended_at': 0
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == '':
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        domains = self.list_domains()
        result = []
        for domain in domains:
            stp = self._create_job_request(domain)
            result.append(stp)
        return result

    def list_domains(self):
        try:
            ok = self.check_count()
            if not ok:
                # TODO: error logging
                pass
            # Loop all domain, then find schedule
            metadata = {
                'token': self.TOKEN,
                'service': 'plugin',
                'resource': 'Supervisor',
                'verb': 'list_domains',
                'authorization': True,
                'mutation': True,
                'domain_id': self.domain_id
            }
            svc = self.locator.get_service('SupervisorService', metadata)
            params = {}
            resp = svc.list_domains(params)
            _LOGGER.debug(
                f'[list_domain] num of domains: {resp["total_count"]}')
            return resp['results']
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        # check current count is correct or not
        cur = datetime.datetime.utcnow()
        hour = cur.hour
        # check
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicate call within the same hour')
            else:
                _LOGGER.error('[check_count] missed a scheduled hour')

        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        }
        self.count.update(count)

    def _update_count_ended_at(self):
        cur = datetime.datetime.utcnow()
        self.count['ended_at'] = cur

    def _create_job_request(self, domain):
        """ Based on domain, create Job Request
        Returns:
            jobs: SpaceONE Pipeline Template
        """
        _LOGGER.debug(f'[_create_job_request] domain: {domain}')
        metadata = {
            'token': self.TOKEN,
            'service': 'plugin',
            'resource': 'Supervisor',
            'verb': 'cleanup_plugins',
            'authorization': True,
            'mutation': True,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'SupervisorService',
            'metadata': metadata,
            'method': 'cleanup_plugins',
            'params': {
                'params': {
                    'domain_id': domain['domain_id']
                }
            }
        }

        stp = {
            'name': 'cleanup_unused_plugins',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
Example #24
class InventoryIntervalScheduler(IntervalScheduler):
    def __init__(self, queue, interval):
        super().__init__(queue, interval)
        self.count = self._init_count()
        self.locator = Locator()
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)
        self.schedule_info = {}
        self.idx = 0

    def run(self):
        # Every specific interval, check schedule
        schedule.every(self.config).seconds.do(self._check_interval)
        while True:
            schedule.run_pending()
            time.sleep(1)

    def _init_count(self):
        # get current time
        cur = datetime.datetime.now()
        count = {
            'previous': cur,  # Last check_count time
            'index': 0,  # index
            'hour': cur.hour,  # previous hour
            'started_at': 0,  # start time of push_token
            'ended_at': 0  # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def push_task(self, schedule_id, interval_info):
        # schedule_id: schedule_2222
        # interval_info: {'interval': 30, 'domain_id': 'dom-2222', 'collector_id': 'collector-3333'}
        task = self._create_job_request(schedule_id, interval_info)
        _LOGGER.debug(f'[push_task] {task["name"]}')
        try:
            validate(task, schema=SPACEONE_TASK_SCHEMA)
            json_task = json.dumps(task)
            _LOGGER.debug(f'[push_task] Task schema: {task}')
            queue.put(self.queue, json_task)
        except Exception as e:
            _LOGGER.error(f'[push_task] failed to push task: {task}, {e}')

    def _check_interval(self):
        """ Check interval schedule
        Then run schedule
        """
        # Patch the schedule and sync: create a new interval schedule per schedule_id.
        # interval_schedules = {'schedule-1234': {'interval': 20, 'domain_id': 'dom-1234', 'collector_id': 'collector-5678'},
        #                       'schedule_2222': {'interval': 30, 'domain_id': 'dom-2222', 'collector_id': 'collector-3333'}}
        interval_schedules = self._get_interval_schedules()
        # Update schedule based on DB
        schedule_ids = []
        for schedule_id, interval_info in interval_schedules.items():
            schedule_ids.append(schedule_id)
            try:
                interval_value = interval_info['interval']
                # Create New scheduler
                if schedule_id not in self.schedule_info:
                    _LOGGER.debug(
                        f'[_check_interval] create {schedule_id}, at every {interval_value}'
                    )
                    self.schedule_info[schedule_id] = interval_info
                    job = schedule.every(interval_value).seconds.do(
                        self.push_task, schedule_id, interval_info)
                    # Add tag for each job
                    job.tag(schedule_id)
                # Sync previous scheduler
                else:
                    previous_interval_info = self.schedule_info[schedule_id]
                    previous_interval = previous_interval_info['interval']
                    if interval_value != previous_interval:
                        _LOGGER.debug(
                            f'[_check_interval] modify {schedule_id} interval {previous_interval} to {interval_value}'
                        )
                        # delete job and re-create
                        schedule.default_scheduler.clear(tag=schedule_id)
                        job = schedule.every(interval_value).seconds.do(
                            self.push_task, schedule_id, interval_info)
                        job.tag(schedule_id)
                        # Update self.schedule_info
                        self.schedule_info[schedule_id] = interval_info
                    else:
                        _LOGGER.debug(
                            f'[_check_interval] continue {schedule_id}, at every {previous_interval}'
                        )
            except Exception as e:
                _LOGGER.error(f'[_check_interval] unexpected error, contact the developers: {e}')
        # Delete garbage
        _LOGGER.debug(
            f'[_check_interval] garbage collector: {len(schedule.default_scheduler.jobs) - 1}'
        )
        for job in schedule.default_scheduler.jobs:
            if job.tags == set():
                continue
            exist = False
            for schedule_id in schedule_ids:
                if schedule_id in job.tags:
                    _LOGGER.debug(f'[_check_interval] schedule exists: {schedule_id}')
                    exist = True
                    break

            if not exist:
                # This job is garbage
                _LOGGER.debug(f'[_check_interval] remove job: {job}')
                schedule.default_scheduler.cancel_job(job)

    def _get_interval_schedules(self):
        """ Find all interval schedules from inventory.Collector.schedule
        """
        schedule_vos = self._list_schedules()
        found_schedules = {}
        for schedule_vo in schedule_vos:
            try:
                interval_value = _get_interval_value(schedule_vo)
                found_schedules.update({
                    schedule_vo.schedule_id: {
                        'interval': interval_value,
                        'domain_id': schedule_vo.domain_id,
                        'collector_id': schedule_vo.collector.collector_id
                    }
                })
            except Exception as e:
                _LOGGER.error(f'[_get_interval_schedules] {e}')
        _LOGGER.debug(f'[_get_interval_schedules] found: {found_schedules}')
        return found_schedules

    def _list_schedules(self):
        try:
            # Loop all domain, then find scheduled collector
            collector_svc = self.locator.get_service('CollectorService')
            # Use a distinct name so the 'schedule' module is not shadowed
            schedule_filter = {'interval': None}
            schedule_vos, total = collector_svc.scheduled_collectors(
                {'schedule': schedule_filter})
            return schedule_vos
        except Exception as e:
            _LOGGER.error(e)
            return []

    def _create_job_request(self, schedule_id, interval_info):
        """ Based on scheduler_vo, create Job Request

        Args:
            scheduler_vo: Scheduler VO
                - scheduler_id
                - name
                - collector: Reference of Collector
                - schedule
                - filter
                - collector_mode
                - created_at
                - last_scheduled_at
                - domain_id
                }

        Returns:
            jobs: SpaceONE Pipeline Template

        """
        _LOGGER.debug(f'[_create_job_request] {interval_info}')
        domain_id = interval_info['domain_id']
        metadata = {
            'token': self.TOKEN,
            'service': 'inventory',
            'resource': 'Collector',
            'verb': 'collect',
            'authorization': True,
            'domain_id': self.domain_id
        }
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CollectorService',
            'metadata': metadata,
            'method': 'collect',
            'params': {
                'params': {
                    'collector_id': interval_info['collector_id'],
                    # if filter
                    # contact credential
                    'collect_mode': 'ALL',
                    'filter': {},
                    'domain_id': domain_id
                }
            }
        }
        stp = {
            'name': 'inventory_collect_by_interval_schedule',
            'version': 'v1',
            'executionEngine': 'BaseWorker',
            'stages': [sched_job]
        }
        #_LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
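
The interval scheduler builds on the schedule library: register a job every N seconds, tag it so it can be cancelled by id, and drive everything from a polling loop. A standalone sketch of that pattern:

# Standalone sketch of the schedule-library pattern used above.
import time
import schedule

def tick(schedule_id):
    print(f'run {schedule_id}')

job = schedule.every(30).seconds.do(tick, 'schedule-1234')
job.tag('schedule-1234')            # tag the job so it can be found later
# schedule.clear('schedule-1234')   # cancel every job carrying this tag

while True:
    schedule.run_pending()
    time.sleep(1)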
Example #25
 def __init__(self, queue, **kwargs):
     BaseWorker.__init__(self, queue, **kwargs)
     self.locator = Locator()
Example #26
class BaseAPI(object):
    locator = Locator()

    def __init__(self):
        self._check_variables()
        self._set_grpc_method()

    @property
    def name(self):
        return self.__class__.__name__

    @property
    def pb2_grpc_module(self):
        return self.pb2_grpc

    @property
    def service_name(self):
        return self.pb2.DESCRIPTOR.services_by_name[
            self.__class__.__name__].full_name

    def _check_variables(self):
        if not hasattr(self, 'pb2'):
            raise Exception(
                f'gRPC Servicer has not set <pb2> variable. (servicer={self.__class__.__name__})'
            )

        if not hasattr(self, 'pb2_grpc'):
            raise Exception(
                f'gRPC Servicer has not set <pb2_grpc> variable. (servicer={self.__class__.__name__})'
            )

    def _get_grpc_servicer(self):
        grpc_servicer = None
        for base_class in self.__class__.__bases__:
            if base_class.__module__ == self.pb2_grpc.__name__:
                grpc_servicer = base_class

        if grpc_servicer is None:
            raise Exception(
                f'gRPC servicer is not set. (servicer={self.__class__.__name__})'
            )

        return grpc_servicer

    def _set_grpc_method(self):
        grpc_servicer = self._get_grpc_servicer()

        for f_name, f_object in inspect.getmembers(
                self.__class__, predicate=inspect.isfunction):
            if hasattr(grpc_servicer, f_name):
                setattr(self, f_name,
                        self._grpc_method(f_object, config.get_service()))

    @staticmethod
    def get_minimal(params):
        return params.get('query', {}).get('minimal', False)

    @staticmethod
    def _error_method(error, context):
        is_logging = False
        if not isinstance(error, ERROR_BASE):
            error = ERROR_UNKNOWN(message=error)
            is_logging = True
        elif error.meta.get('type') == 'service':
            is_logging = True

        if is_logging:
            _LOGGER.error(f'(Error) => {error.message} {error}',
                          extra={
                              'error_code': error.error_code,
                              'error_message': error.message,
                              'traceback': traceback.format_exc()
                          })

        details = f'{error.error_code}: {error.message}'
        context.abort(grpc.StatusCode[error.status_code], details)

    def _generate_response(self, response_iterator, context):
        try:
            for response in response_iterator:
                yield response

        except Exception as e:
            self._error_method(e, context)

    def _grpc_method(self, func, service_name):
        def wrapper(request_or_iterator, context):
            try:
                context.api_info = {
                    'service': service_name,
                    'resource': self.__class__.__name__,
                    'verb': func.__name__
                }

                response_or_iterator = func(self, request_or_iterator, context)

                if isinstance(response_or_iterator, types.GeneratorType):
                    return self._generate_response(response_or_iterator,
                                                   context)
                else:
                    return response_or_iterator

            except Exception as e:
                self._error_method(e, context)

        return wrapper

    @staticmethod
    def _convert_message(request):
        return MessageToDict(request, preserving_proto_field_name=True)

    @staticmethod
    def _get_metadata(context):
        metadata = {}
        for key, value in context.invocation_metadata():
            metadata[key.strip()] = value.strip()

        metadata.update(context.api_info)

        # TODO: This is experimental log. Please confirm peer information is useful on k8s.
        metadata.update({'peer': context.peer()})

        return metadata

    def _generate_message(self, request_iterator):
        for request in request_iterator:
            yield self._convert_message(request)

    def parse_request(self, request_or_iterator, context):
        # collections.Iterable was removed in Python 3.10; use collections.abc
        if isinstance(request_or_iterator, collections.abc.Iterable):
            return self._generate_message(
                request_or_iterator), self._get_metadata(context)
        else:
            return self._convert_message(
                request_or_iterator), self._get_metadata(context)
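
A concrete servicer builds on BaseAPI by setting the generated pb2/pb2_grpc modules and inheriting the generated servicer base; the module names below are placeholders for real generated gRPC code, and the return handling is simplified:

# Hypothetical concrete servicer on top of BaseAPI.
from spaceone.api.sample.v1 import schedule_pb2, schedule_pb2_grpc  # placeholder modules

class Schedule(BaseAPI, schedule_pb2_grpc.ScheduleServicer):
    pb2 = schedule_pb2
    pb2_grpc = schedule_pb2_grpc

    def add(self, request, context):
        params, metadata = self.parse_request(request, context)
        svc = self.locator.get_service('ScheduleService', metadata)
        # Real servicers convert the result into a pb2 message before returning.
        return svc.add(params)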
Example #27
 def __init__(self):
     locator = Locator()
     self.actuator = locator.get_actuator('ServerInfo')
Example #28
    with open(config_path, 'r') as f:
        _set_file_config(f)


def print_schedules(schedules: List[SpaceoneTaskData]):
    print(f"id | schedule | total_run_count | last_run")
    for sch in schedules:
        print(
            f"{sch.schedule_id} | {sch.schedule_info} | {sch.total_run_count} | {sch.last_run_at}"
        )
    print('\n\n')


if __name__ == '__main__':
    config_server()
    locator = Locator()
    svc = locator.get_service('ScheduleService')
    print('list all schedules')
    print_schedules(svc.list())

    print('add schedule')
    sch_name = f"test_sche_{randint(1, 1000)}"
    svc.add({
        'domain_id': "sample",
        'enabled': True,
        'task': 'spaceone.core.celery.tasks.test_task',
        'name': sch_name,
        'interval': {
            'period': 'seconds',
            'every': randint(6, 12)
        },