def _get_secret_data(self, secret_id, domain_id):
    """Return the decrypted data payload of a secret via the secret service.

    Args:
        secret_id (str): Secret to fetch.
        domain_id (str): Caller's domain; replaced by the root token's domain
            when a root token is configured.

    Returns:
        dict: The ``data`` field of the secret service response.
    """
    root_token = config.get_global('ROOT_TOKEN')
    root_token_info = config.get_global('ROOT_TOKEN_INFO')

    root_domain_id = domain_id
    if root_token != "":
        root_domain_id = self._get_domain_id_from_token(root_token)
        _LOGGER.debug(
            f'[_get_secret_data] root_domain_id: {root_domain_id} vs domain_id: {domain_id}'
        )
    elif root_token_info:
        # Patch from Consul
        _LOGGER.debug(f'[_get_secret_data] Patch root_token from Consul')
        root_token = _validate_token(root_token_info)
        root_domain_id = self._get_domain_id_from_token(root_token)
    else:
        # Logger.warn() is deprecated; use warning(). Message wording fixed.
        _LOGGER.warning(
            f'[_get_secret_data] root_token is not configured, maybe you are root'
        )
        root_token = self.transaction.get_meta('token')

    secret_connector: SpaceConnector = self.locator.get_connector(
        'SpaceConnector', service='secret', token=root_token)
    secret_data = secret_connector.dispatch('Secret.get_data', {
        'secret_id': secret_id,
        'domain_id': root_domain_id
    })
    return secret_data['data']
def __init__(self, transaction):
    """Initialize the manager and read the port range for the configured
    container API backend from CONNECTORS config."""
    super().__init__(transaction)

    # container API backend
    self.backend = config.get_global('BACKEND')
    backend_conf = config.get_global('CONNECTORS')[self.backend]
    self.port_range = (backend_conf['start_port'], backend_conf['end_port'])
def __init__(self, *args, **kwargs):
    """Create the secret connector for the configured BACKEND.

    Raises:
        ERROR_DEFINE_SECRET_BACKEND: If the backend connector cannot be created.
    """
    super().__init__(*args, **kwargs)
    # NOTE: the unused 'connector = config.get_global("CONNECTORS")' local
    # was removed; it was never read.
    backend = config.get_global('BACKEND', 'AWSSecretManagerConnector')
    try:
        _LOGGER.debug(f'[SecretConnectorManager] Create {backend}')
        self.secret_conn = self.locator.get_connector(backend)
    except Exception as e:
        _LOGGER.error(
            f'[SecretConnectorManager] not defined backend {backend}')
        # Chain the original exception for easier debugging
        raise ERROR_DEFINE_SECRET_BACKEND(backend=backend) from e
def _get_registry_url():
    """Return the registry host (scheme stripped) from RegistryConnector config.

    Returns:
        str: Host part of CONNECTORS.RegistryConnector.host,
            e.g. 'registry.hub.docker.com'.

    Raises:
        ERROR_CONFIGURATION: If the RegistryConnector config is missing or malformed.
    """
    try:
        connector_conf = config.get_global("CONNECTORS")
        # ex) 'https://registry.hub.docker.com'
        reg_con = connector_conf['RegistryConnector']['host']
        item = reg_con.split('://')
        return item[1]
    except Exception as e:
        # BUG FIX: the error log was placed AFTER 'raise' and was unreachable;
        # log first, then raise.
        _LOGGER.error('No RegistryConnector.host:%s' % config.get_global())
        raise ERROR_CONFIGURATION(key='CONNECTORS.RegistryConnector')
def get_token(name='TOKEN'):
    """Return the token stored under *name*, falling back to validating
    '{name}_INFO' when the token is empty.

    Args:
        name (str): Config key holding the token. Defaults to 'TOKEN'.

    Raises:
        ERROR_CONFIGURATION: If the config key (or its '_INFO' fallback) is unusable.
    """
    try:
        token = config.get_global(name)
    except Exception as e:
        _LOGGER.error(f'[get_token] config error: {name}')
        raise ERROR_CONFIGURATION(key=name)

    if token == "":
        try:
            token_info = config.get_global(f'{name}_INFO')
        except Exception as e:
            _LOGGER.error(f'[get_token] config error: {name}_INFO')
            # BUG FIX: report the key that actually failed ('{name}_INFO'),
            # matching the log line above; the original reported 'name'.
            raise ERROR_CONFIGURATION(key=f'{name}_INFO')
        token = _validate_token(token_info)

    return token
def __init__(self, metadata: dict = None, transaction: Transaction = None, **kwargs):
    """Initialize the service with a transaction, logging, locator, and
    empty handler registries.

    Args:
        metadata (dict, optional): Metadata for a new Transaction when no
            transaction is supplied. Defaults to None (treated as {}).
        transaction (Transaction, optional): Existing transaction to reuse.
    """
    self.func_name = None
    self.is_with_statement = False

    if transaction:
        self.transaction = transaction
    else:
        # BUG FIX: the original used a mutable default argument
        # ('metadata: dict = {}'), which is shared across calls.
        self.transaction = Transaction(metadata if metadata is not None else {})

    if config.get_global('SET_LOGGING', True):
        set_logger(transaction=self.transaction)

    self.locator = Locator(self.transaction)
    self.handler = {
        'authentication': {'handlers': [], 'methods': []},
        'authorization': {'handlers': [], 'methods': []},
        'mutation': {'handlers': [], 'methods': []},
        'event': {'handlers': [], 'methods': []},
    }
def serve():
    """Configure logging, build the gRPC server with reflection, and block
    until termination."""
    conf = config.get_global()

    # Enable logging configuration
    if conf.get('SET_LOGGING', True):
        set_logger()

    interceptor = _ServerInterceptor()
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=conf['MAX_WORKERS']),
        interceptors=(interceptor,),
        # options=_get_grpc_options(conf)
    )

    server, service_names = _init_services(server)

    loaded = '\n\t - '.join(service_names)
    _LOGGER.debug(f'Loaded Services: \n\t - {loaded}')
    reflection.enable_server_reflection(service_names, server)

    server.add_insecure_port(f'[::]:{conf["PORT"]}')
    _LOGGER.info(f'Start gRPC Server ({config.get_service()}): '
                 f'port={conf["PORT"]}, max_workers={conf["MAX_WORKERS"]}')
    server.start()
    server.wait_for_termination()
def delete_resources(self, params):
    """Delete resources according to the domain's delete policy.

    Args:
        params (dict): {
            'options': 'dict',
            'domain_id': 'str'
        }

    Returns:
        None
    """
    domain_id = params['domain_id']

    # Get Delete Policy of domain
    # TODO: from domain config
    # policies = self._get_domain_config(state, domain_id)
    policies = config.get_global('DEFAULT_DELETE_POLICIES', {})
    _LOGGER.debug(f'[delete_resources] {policies}')

    mgr: CleanupManager = self.locator.get_manager('CleanupManager')

    for resource_type, hour in policies.items():
        try:
            _LOGGER.debug(f'[delete_resources] {resource_type}, {hour}, {domain_id}')
            deleted_count = mgr.delete_resources_by_policy(resource_type, hour, domain_id)
            _LOGGER.debug(f'[delete_resources] number of deleted count: {deleted_count}')
            # TODO: event notification
        except Exception as e:
            _LOGGER.error(f'[delete_resources] {e}')
def _load_conf(self):
    """Load token timeout/refresh settings from IDENTITY.token config,
    falling back to defaults."""
    token_conf = (config.get_global('IDENTITY') or {}).get('token', {})
    self.CONST_TOKEN_TIMEOUT = token_conf.get('token_timeout', 1800)
    self.CONST_REFRESH_TIMEOUT = token_conf.get('refresh_timeout', 3600)
    self.CONST_REFRESH_TTL = token_conf.get('refresh_ttl', -1)
    self.CONST_REFRESH_ONCE = token_conf.get('refresh_once', True)
def api_app():
    """Import '<PACKAGE>.interface.rest.router' and return its 'app' attribute
    ({} when absent)."""
    package = config.get_global()['PACKAGE']
    router_module = __import__(f'{package}.interface.rest.router',
                               fromlist=['router'])
    return getattr(router_module, 'app', {})
def initialize(self):
    """Enable the DB queue when DB_QUEUE_NAME appears in the QUEUES config."""
    _LOGGER.debug(f'[initialize] initialize Worker configuration')
    if DB_QUEUE_NAME in config.get_global('QUEUES', {}):
        self.use_db_queue = True
    # NOTE(review): assumes self.use_db_queue has a default set elsewhere
    # (e.g. class attribute) when DB_QUEUE_NAME is absent — confirm.
    _LOGGER.debug(f'[initialize] use db_queue: {self.use_db_queue}')
def _initialize_data_sources(self, domain_id):
    """Register any INSTALLED_DATA_SOURCE_PLUGINS from global config that are
    not yet installed in the domain.

    Returns:
        bool: Always True; per-plugin failures are logged and skipped.
    """
    _LOGGER.debug(f'[_initialize_data_source] domain_id: {domain_id}')

    query = {'filter': [{'k': 'domain_id', 'v': domain_id, 'o': 'eq'}]}
    data_source_vos, total_count = self.data_source_mgr.list_data_sources(query)

    installed_ids = [vo.plugin_info.plugin_id for vo in data_source_vos]
    _LOGGER.debug(
        f'[_initialize_data_source] Installed Plugins : {installed_ids}'
    )

    candidates = config.get_global().get('INSTALLED_DATA_SOURCE_PLUGINS', [])
    for candidate in candidates:
        if candidate['plugin_info']['plugin_id'] in installed_ids:
            continue
        try:
            _LOGGER.debug(
                f'[_initialize_data_source] Create init data source: {candidate["plugin_info"]["plugin_id"]}'
            )
            candidate['domain_id'] = domain_id
            self.register(candidate)
        except Exception as e:
            _LOGGER.error(f'[_initialize_data_source] {e}')

    return True
def get_event_by_key(self, event_key, domain_id):
    """Return the newest non-RECOVERY event with *event_key* created within
    the SAME_EVENT_TIME window (default 600s), or None."""
    window_seconds = config.get_global('SAME_EVENT_TIME', 600)
    cutoff = datetime.utcnow() - timedelta(seconds=window_seconds)

    query = {
        'filter': [
            {'k': 'event_key', 'v': event_key, 'o': 'eq'},
            {'k': 'domain_id', 'v': domain_id, 'o': 'eq'},
            {'k': 'event_type', 'v': 'RECOVERY', 'o': 'not'},
            {'k': 'created_at', 'v': cutoff, 'o': 'gte'},
        ],
        'sort': {'key': 'created_at', 'desc': True}
    }

    event_vos, total_count = self.list_events(query)
    return event_vos[0] if event_vos.count() > 0 else None
def _initialize_protocols(self, domain_id):
    """Create any INSTALLED_PROTOCOL_PLUGINS from global config that are not
    yet installed in the domain.

    Returns:
        bool: Always True; per-plugin failures are logged and skipped.
    """
    _LOGGER.debug(f'[_initialize_protocol] domain_id: {domain_id}')

    query = {'filter': [{'k': 'domain_id', 'v': domain_id, 'o': 'eq'}]}
    protocol_vos, total_count = self.protocol_mgr.list_protocols(query)

    installed_ids = [vo.plugin_info.plugin_id for vo in protocol_vos]
    _LOGGER.debug(
        f'[_initialize_protocol] Installed Plugins : {installed_ids}'
    )

    candidates = config.get_global().get('INSTALLED_PROTOCOL_PLUGINS', [])
    for candidate in candidates:
        if candidate['plugin_info']['plugin_id'] in installed_ids:
            continue
        try:
            _LOGGER.debug(
                f'[_initialize_protocol] Create init protocol: {candidate["plugin_info"]["plugin_id"]}'
            )
            candidate['domain_id'] = domain_id
            self.create(candidate)
        except Exception as e:
            _LOGGER.error(f'[_initialize_protocol] {e}')

    return True
def update_job_state(self, params):
    """Update job.state based on the domain's cleanup policy.

    Args:
        params (dict): {
            'options': 'dict',
            'domain_id': 'str'
        }

    Returns:
        None
    """
    domain_id = params['domain_id']

    # Get Cleanup Policy of domain
    # TODO: from domain config
    job_timeout = config.get_global('JOB_TIMEOUT', 2)  # hours
    policies = {'inventory.Job': {'TIMEOUT': job_timeout}}

    mgr: JobManager = self.locator.get_manager('JobManager')
    for resource_type, policy in policies.items():
        for status, hour in policy.items():
            _LOGGER.debug(f'[update_job_state] {resource_type}, {hour}, {status}, {domain_id}')
            mgr.update_job_status_by_hour(hour, status, domain_id)
def _make_redirect_response(alert_id):
    """Return a RedirectResponse to the console alert page, or None when no
    console domain is configured.

    Args:
        alert_id (str): Alert to link to.
    """
    # BUG FIX: default to '' so a missing CONSOLE_DOMAIN key does not crash
    # with AttributeError on None.strip().
    console_domain = config.get_global('CONSOLE_DOMAIN', '')
    if console_domain.strip() != '':
        return RedirectResponse(
            f'{console_domain}/monitoring/alert-manager/alert/{alert_id}')
    else:
        return None
def __init__(self, queue, **kwargs):
    """Create a worker bound to *queue* with a randomized name and a snapshot
    of the global config."""
    self._name_ = 'worker-%s' % randomString()
    self.queue = queue
    self.global_config = config.get_global()

    _LOGGER.debug(f'[BaseWorker] BaseWorker name : {self._name_}')
    _LOGGER.debug(f'[BaseWorker] BaseWorker queue : {self.queue}')

    super().__init__()
def on_after_setup_logger(**kwargs):
    """In CELERY debug mode, force the celery loggers to propagate at DEBUG."""
    if not config.get_global('CELERY', {}).get('debug_mode'):
        return
    for logger_name in ('celery', 'celery.app.trace'):
        logger = logging.getLogger(logger_name)
        logger.propagate = True
        # Use setLevel() instead of assigning .level directly; it is the
        # supported API and keeps the logging module's internals consistent.
        logger.setLevel(logging.DEBUG)
def _get_queue_name(self, name='collect_queue'):
    """Return the queue name stored under *name* in global config, or None
    when it is not configured."""
    try:
        return config.get_global(name)
    except Exception:
        _LOGGER.warning(f'[_get_queue_name] name: {name} is not configured')
        return None
def metadata(self):
    """Lazily build and cache gRPC call metadata carrying the Celery auth
    token, when one is configured."""
    if self._metadata is None:
        token = config.get_global('CELERY', {}).get('auth', {}).get('token')
        self._metadata = {'token': token} if token else {}
    return self._metadata
def check_global_configuration(self):
    """Load and validate the required global configuration.

    Populates name, hostname, tags, labels, plugin_config, token and
    domain_id on the instance.

    Returns:
        bool: True when the configuration is valid, False otherwise.
    """
    try:
        self.name = config.get_global('NAME')
        self.hostname = config.get_global('HOSTNAME')
        self.tags = config.get_global('TAGS', {})
        self.labels = config.get_global('LABELS', {})
        self.plugin_config = config.get_global('PLUGIN')
        self.token = config.get_global('TOKEN')

        if self.token == "":
            # Fall back to validating TOKEN_INFO when TOKEN is empty
            self.token = _validate_token(config.get_global('TOKEN_INFO'))

        # Required values, checked in the original order with the original messages
        required = (
            ('TOKEN', self.token, "TOKEN is not configured"),
            ('NAME', self.name, "name is not configured!"),
            ('HOSTNAME', self.hostname, "hostname is not configured!"),
        )
        for key, value, message in required:
            if value == "":
                _LOGGER.error(message)
                raise ERROR_CONFIGURATION(key=key)

        self.domain_id = _get_domain_id_from_token(self.token)
        return True
    except Exception as e:
        _LOGGER.error(f'[check_global_configuration] error: {e}')
        return False
def serve():
    """Load the scheduler configuration and start the scheduler server."""
    # Load scheduler config
    # Create Scheduler threads
    # start Scheduler
    conf = config.get_global()

    # CONSISTENCY FIX: honor SET_LOGGING like the other serve() entry points
    # in this codebase instead of calling set_logger() unconditionally
    # (and load conf first so the flag can be read).
    if conf.get('SET_LOGGING', True):
        set_logger()

    server = Server(config.get_service(), conf)
    server.start()
def _create_connection(topic):
    """Instantiate the queue backend class configured under QUEUES[topic].

    Args:
        topic (str): Queue name in the QUEUES global config.

    Raises:
        ERROR_CONFIGURATION: If QUEUES or the topic's backend entry is missing.
    """
    global_conf = config.get_global()
    if 'QUEUES' not in global_conf:
        raise ERROR_CONFIGURATION(key='QUEUES')

    queue_conf = global_conf['QUEUES'][topic].copy()
    backend = queue_conf.pop('backend', None)
    # BUG FIX: a missing 'backend' key previously crashed with
    # AttributeError on None.rsplit(); raise a config error instead.
    if backend is None:
        raise ERROR_CONFIGURATION(key=f'QUEUES.{topic}.backend')

    module_name, class_name = backend.rsplit('.', 1)
    queue_module = __import__(module_name, fromlist=[class_name])
    return getattr(queue_module, class_name)(queue_conf)
def connect(cls):
    """Register a MongoDB connection for this model's db_alias exactly once.

    Raises:
        ERROR_DB_CONFIGURATION: If the alias is not in the DATABASES config.
    """
    db_alias = cls._meta.get('db_alias', 'default')
    if db_alias in _MONGO_CONNECTIONS:
        return

    databases = config.get_global().get('DATABASES', {})
    if db_alias not in databases:
        raise ERROR_DB_CONFIGURATION(backend=db_alias)

    register_connection(db_alias, **databases[db_alias].copy())
    _MONGO_CONNECTIONS.append(db_alias)
def serve():
    """Load config, optionally enable logging, and start the scheduler server."""
    # Load scheduler config
    # Create Scheduler threads
    # start Scheduler
    conf = config.get_global()

    # Enable logging configuration
    if conf.get('SET_LOGGING', True):
        set_logger()

    Server(config.get_service(), conf).start()
def register_beat_schedules(app):
    """Register the CELERY.schedules config entries as celery beat schedules.

    Args:
        app: Celery application whose beat schedule is populated.
    """
    schedules_config = config.get_global().get('CELERY', {}).get('schedules', {})

    for name, sch_info in schedules_config.items():
        schedule = {
            "task": sch_info['task'],
            "schedule": parse_schedule(sch_info['rule_type'], sch_info['rule'])
        }
        if args := sch_info.get('args'):
            schedule['args'] = args
        if kwargs := sch_info.get('kwargs'):
            schedule['kwargs'] = kwargs
        # BUG FIX: the built entry was constructed but never attached to the
        # app (the 'app' parameter was unused); register it with celery beat.
        app.conf.beat_schedule[name] = schedule
def _make_callback_url(self, alert_id, domain_id, access_key):
    """Cache the alert callback access key (10-minute TTL), register a
    rollback that clears it, and return the ACKNOWLEDGED callback URL."""
    def _rollback(alert_id, access_key):
        _LOGGER.info(f'[_make_callback_url._rollback] '
                     f'Delete cache : {alert_id} '
                     f'({access_key})')
        cache.delete(f'alert-notification-callback:{alert_id}:{access_key}')

    cache_key = f'alert-notification-callback:{alert_id}:{access_key}'
    cache.set(cache_key, domain_id, expire=600)
    self.transaction.add_rollback(_rollback, alert_id, access_key)

    webhook_domain = config.get_global('WEBHOOK_DOMAIN')
    return f'{webhook_domain}/monitoring/v1/alert/{alert_id}/{access_key}/ACKNOWLEDGED'
def init(cls):
    """Connect the model and create its indexes exactly once per class."""
    cls.connect()
    if cls in _MONGO_INIT_MODELS:
        return

    conf = config.get_global()
    cls.auto_create_index = conf.get('DATABASE_AUTO_CREATE_INDEX', True)
    cls.case_insensitive_index = conf.get('DATABASE_CASE_INSENSITIVE_INDEX', False)
    cls._create_index()
    _MONGO_INIT_MODELS.append(cls)
def _create_connection(backend):
    """Instantiate the cache backend class configured under CACHES[backend].

    Args:
        backend (str): Cache alias in the CACHES global config.

    Raises:
        ERROR_CACHE_CONFIGURATION: If the cache entry or its backend class
            path is missing.
    """
    global_conf = config.get_global()
    if backend not in global_conf.get('CACHES', {}):
        raise ERROR_CACHE_CONFIGURATION(backend=backend)

    cache_conf = global_conf['CACHES'][backend].copy()
    # BUG FIX: the original shadowed the 'backend' parameter with the dotted
    # class path, so the missing-path error reported 'backend=None' instead
    # of the configured cache alias. Use a distinct name for the class path.
    backend_path = cache_conf.pop('backend', None)
    if backend_path is None:
        raise ERROR_CACHE_CONFIGURATION(backend=backend)

    module_name, class_name = backend_path.rsplit('.', 1)
    cache_module = __import__(module_name, fromlist=[class_name])
    # Same constructor call as before: the dotted path, then the config
    return getattr(cache_module, class_name)(backend_path, cache_conf)
def serve():
    """Load config, optionally enable logging, and run the REST server
    under uvicorn."""
    conf = config.get_global()

    # Enable logging configuration
    if conf.get('SET_LOGGING', True):
        set_logger()

    host = conf['HOST']
    port = conf['PORT']
    _LOGGER.info(f'Start REST Server ({config.get_service()}): '
                 f'host={host} port={port}')
    uvicorn.run('spaceone.core.fastapi.server:api_app',
                host=host,
                port=port,
                factory=True)