def gnocchi_is_enabled(self):
    """Return (and lazily cache) whether the Gnocchi service is enabled.

    Resolution order:
      1. the explicit [api] gnocchi_is_enabled option,
      2. the meter dispatcher configuration,
      3. presence of a 'metric' endpoint in the keystone catalog.
    """
    # Leftover debug statements removed: a py2-only `print` statement
    # (SyntaxError on py3) and a `pdb.set_trace()` that would hang the
    # API service in production.
    if self._gnocchi_is_enabled is None:
        if cfg.CONF.api.gnocchi_is_enabled is not None:
            self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled
        elif ("gnocchi" not in cfg.CONF.meter_dispatchers
                or "database" in cfg.CONF.meter_dispatchers):
            self._gnocchi_is_enabled = False
        else:
            try:
                catalog = keystone_client.get_service_catalog(
                    keystone_client.get_client())
                catalog.url_for(service_type='metric')
            except exceptions.EndpointNotFound:
                self._gnocchi_is_enabled = False
            except exceptions.ClientException:
                # Keystone unreachable: leave the cache unset so the
                # next request retries the detection.
                LOG.warning(
                    _LW("Can't connect to keystone, assuming "
                        "gnocchi is disabled and retry later"))
            else:
                self._gnocchi_is_enabled = True
                LOG.warning(
                    _LW("ceilometer-api started with gnocchi "
                        "enabled. The resources/meters/samples "
                        "URLs are disabled."))
    return self._gnocchi_is_enabled
def gnocchi_is_enabled(self):
    """Determine once whether gnocchi is enabled and memoize the answer.

    The explicit api.gnocchi_is_enabled option wins; otherwise the
    meter dispatcher list decides; as a last resort the keystone
    catalog is probed for a 'metric' endpoint.
    """
    if self._gnocchi_is_enabled is not None:
        return self._gnocchi_is_enabled

    conf = pecan.request.cfg
    if conf.api.gnocchi_is_enabled is not None:
        self._gnocchi_is_enabled = conf.api.gnocchi_is_enabled
    elif ("gnocchi" not in conf.meter_dispatchers
          or "database" in conf.meter_dispatchers):
        self._gnocchi_is_enabled = False
    else:
        try:
            catalog = keystone_client.get_service_catalog(
                keystone_client.get_client(conf))
            catalog.url_for(service_type='metric')
        except exceptions.EndpointNotFound:
            self._gnocchi_is_enabled = False
        except exceptions.ClientException:
            # Keystone down: keep the cache unset so we retry later.
            LOG.warning("Can't connect to keystone, assuming "
                        "gnocchi is disabled and retry later")
        else:
            self._gnocchi_is_enabled = True
            LOG.warning("ceilometer-api started with gnocchi "
                        "enabled. The resources/meters/samples "
                        "URLs are disabled.")
    return self._gnocchi_is_enabled
def keystone(self):
    """Return a lazily created, memoized keystone client.

    One client is shared by all pollsters within a polling cycle; the
    first creation failure is remembered and re-raised on subsequent
    accesses instead of retrying immediately.

    FIXME(sileht): this lazy loading is not obviously concurrency
    safe — each polling task may overwrite a client created by another
    one. Refreshing the client periodically rather than resetting it
    before each polling task would be more robust.
    """
    never_tried = (self._keystone is None
                   and self._keystone_last_exception is None)
    if never_tried:
        try:
            self._keystone = keystone_client.get_client()
            self._keystone_last_exception = None
        except ka_exceptions.ClientException as exc:
            self._keystone = None
            self._keystone_last_exception = exc
    if self._keystone is None:
        raise self._keystone_last_exception
    return self._keystone
def gnocchi_is_enabled(self):
    """Figure out whether gnocchi is enabled; compute once, then cache."""
    if self._gnocchi_is_enabled is not None:
        return self._gnocchi_is_enabled

    conf = pecan.request.cfg
    if conf.api.gnocchi_is_enabled is not None:
        self._gnocchi_is_enabled = conf.api.gnocchi_is_enabled
    elif ("gnocchi" not in conf.meter_dispatchers
          or "database" in conf.meter_dispatchers):
        self._gnocchi_is_enabled = False
    else:
        try:
            ks = keystone_client.get_client(conf)
            keystone_client.get_service_catalog(ks).url_for(
                service_type='metric')
        except exceptions.EndpointNotFound:
            self._gnocchi_is_enabled = False
        except exceptions.ClientException:
            # Keystone unreachable: retry on a later request.
            LOG.warning(_LW("Can't connect to keystone, assuming "
                            "gnocchi is disabled and retry later"))
        else:
            self._gnocchi_is_enabled = True
            LOG.warning(_LW("ceilometer-api started with gnocchi "
                            "enabled. The resources/meters/samples "
                            "URLs are disabled."))
    return self._gnocchi_is_enabled
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Sets up the keystone client, resource definitions, an optional
    oslo_cache region, the gnocchi client, and the locks used to
    serialize project-id lookup and per-resource creation.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client(conf)
    self.resources_definition = self._load_resources_definitions(conf)
    self.cache = None
    try:
        import oslo_cache
        oslo_cache.configure(self.conf)
        # NOTE(cdent): The default cache backend is a real but
        # noop backend. We don't want to use that here because
        # we want to avoid the cache pathways entirely if the
        # cache has not been configured explicitly.
        if self.conf.cache.enabled:
            cache_region = oslo_cache.create_region()
            self.cache = oslo_cache.configure_cache_region(
                self.conf, cache_region)
            self.cache.key_mangler = cache_key_mangler
    except ImportError:
        # oslo_cache is optional: run without caching when absent.
        pass
    except oslo_cache.exception.ConfigurationError as exc:
        LOG.warning('unable to configure oslo_cache: %s', exc)
    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)
    self._gnocchi = gnocchi_client.get_gnocchiclient(conf)
    self._already_logged_event_types = set()
    self._already_logged_metric_names = set()
def gnocchi_is_enabled(self):
    """Return (and lazily cache) whether gnocchi is enabled.

    Checks the explicit api option first, then the dispatcher list,
    then probes the keystone catalog for a 'metric' endpoint.
    """
    if self._gnocchi_is_enabled is None:
        if cfg.CONF.api.gnocchi_is_enabled is not None:
            self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled
        elif ("gnocchi" not in cfg.CONF.dispatcher
                or "database" in cfg.CONF.dispatcher):
            self._gnocchi_is_enabled = False
        else:
            try:
                ks = keystone_client.get_client()
                ks.service_catalog.url_for(service_type='metric')
            except exceptions.EndpointNotFound:
                self._gnocchi_is_enabled = False
            except exceptions.ClientException:
                # LOG.warn is deprecated in favor of LOG.warning.
                LOG.warning(
                    _LW("Can't connect to keystone, assuming gnocchi "
                        "is disabled and retry later"))
            else:
                self._gnocchi_is_enabled = True
                LOG.warning(
                    _LW("ceilometer-api started with gnocchi "
                        "enabled. The resources/meters/samples "
                        "URLs are disabled."))
    return self._gnocchi_is_enabled
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Sets up the keystone client, resource definitions, an optional
    oslo_cache region and the gnocchi client plus the locks guarding
    lazy project-id resolution and resource creation.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client()
    self.resources_definition = self._load_resources_definitions(conf)
    self.cache = None
    try:
        import oslo_cache
        oslo_cache.configure(self.conf)
        # NOTE(cdent): The default cache backend is a real but
        # noop backend. We don't want to use that here because
        # we want to avoid the cache pathways entirely if the
        # cache has not been configured explicitly.
        if 'null' not in self.conf.cache.backend:
            cache_region = oslo_cache.create_region()
            self.cache = oslo_cache.configure_cache_region(
                self.conf, cache_region)
            self.cache.key_mangler = cache_key_mangler
    except ImportError:
        # oslo_cache is optional: run without caching when absent.
        pass
    except oslo_cache.exception.ConfigurationError as exc:
        # LOG.warn is deprecated -> LOG.warning; pass exc as a lazy
        # argument instead of eager %-interpolation so formatting and
        # translation stay deferred until the record is emitted.
        LOG.warning(_LW('unable to configure oslo_cache: %s'), exc)
    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_resource_lock = threading.Lock()
    self._gnocchi = gnocchi_client.Client(conf.dispatcher_gnocchi.url)
def panko_url(self):
    """Return the Panko endpoint URL, resolved once and cached.

    An empty string means Panko is disabled; None (transient keystone
    failure) means the lookup will be retried on the next call.
    """
    if self._panko_url is not None:
        return self._panko_url

    conf = pecan.request.cfg
    if conf.api.panko_is_enabled is False:
        self._panko_url = ""
    elif conf.api.panko_url is not None:
        self._panko_url = self._normalize_url(conf.api.panko_url)
    else:
        try:
            catalog = keystone_client.get_service_catalog(
                keystone_client.get_client(conf))
            self._panko_url = self._normalize_url(
                catalog.url_for(service_type='event'))
        except exceptions.EndpointNotFound:
            self._panko_url = ""
        except exceptions.ClientException:
            LOG.warning(
                _LW("Can't connect to keystone, assuming Panko "
                    "is disabled and retry later."))
        else:
            LOG.warning(
                _LW("ceilometer-api started with Panko "
                    "enabled. Events URLs will be redirected "
                    "to Panko endpoint."))
    return self._panko_url
def aodh_url(self):
    """Return the aodh endpoint URL, resolved once and cached.

    An empty string means aodh is disabled; None (transient keystone
    failure) means the lookup is retried on the next call.
    """
    if self._aodh_url is not None:
        return self._aodh_url

    api_conf = cfg.CONF.api
    if api_conf.aodh_is_enabled is False:
        self._aodh_url = ""
    elif api_conf.aodh_url is not None:
        self._aodh_url = self._normalize_aodh_url(api_conf.aodh_url)
    else:
        try:
            catalog = keystone_client.get_service_catalog(
                keystone_client.get_client())
            self._aodh_url = self._normalize_aodh_url(
                catalog.url_for(service_type='alarming'))
        except exceptions.EndpointNotFound:
            self._aodh_url = ""
        except exceptions.ClientException:
            LOG.warning(
                _LW("Can't connect to keystone, assuming aodh "
                    "is disabled and retry later."))
        else:
            LOG.warning(
                _LW("ceilometer-api started with aodh "
                    "enabled. Alarms URLs will be redirected "
                    "to aodh endpoint."))
    return self._aodh_url
def keystone(self):
    """Return a lazily created, memoized keystone client.

    One client is shared by all plugins within a polling cycle; the
    first creation failure is remembered and re-raised on subsequent
    accesses rather than retried immediately.

    FIXME(sileht): this lazy loading is not obviously concurrency
    safe — each polling task may override a client created by another
    task. Refreshing the client periodically instead of resetting it
    before each polling task would be more robust.
    """
    never_tried = (self._keystone is None
                   and self._keystone_last_exception is None)
    if never_tried:
        try:
            self._keystone = keystone_client.get_client(self.conf)
            self._keystone_last_exception = None
        except ka_exceptions.ClientException as exc:
            self._keystone = None
            self._keystone_last_exception = exc
    if self._keystone is None:
        raise self._keystone_last_exception
    return self._keystone
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Creates the keystone client, loads resource definitions and opens
    the gnocchi HTTP client against the configured URL.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client()
    self.resources_definition = self._load_resources_definitions(conf)
    self._gnocchi_project_id = None
    # Serializes lazy resolution of the gnocchi service project id.
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi = gnocchi_client.Client(conf.dispatcher_gnocchi.url)
def __init__(self, conf, parsed_url): super(GnocchiPublisher, self).__init__(conf, parsed_url) # TODO(jd) allow to override Gnocchi endpoint via the host in the URL options = urlparse.parse_qs(parsed_url.query) self.filter_project = options.get( 'filter_project', [conf.dispatcher_gnocchi.filter_project])[-1] resources_definition_file = options.get( 'resources_definition_file', [conf.dispatcher_gnocchi.resources_definition_file])[-1] archive_policy_override = options.get( 'archive_policy', [conf.dispatcher_gnocchi.archive_policy])[-1] self.resources_definition, self.archive_policies_definition = ( self._load_definitions(conf, archive_policy_override, resources_definition_file)) self.metric_map = dict((metric, rd) for rd in self.resources_definition for metric in rd.metrics) timeout = options.get('timeout', [conf.dispatcher_gnocchi.request_timeout])[-1] self._ks_client = keystone_client.get_client(conf) self.cache = None try: import oslo_cache oslo_cache.configure(conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. if conf.cache.enabled: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass except oslo_cache.exception.ConfigurationError as exc: LOG.warning('unable to configure oslo_cache: %s', exc) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) self._gnocchi = gnocchi_client.get_gnocchiclient( conf, request_timeout=timeout) self._already_logged_event_types = set() self._already_logged_metric_names = set() self._already_configured_archive_policies = False
def _get_aggregation_methods():
    """Return the aggregation methods supported by the Gnocchi server.

    Queries Gnocchi's /v1/capabilities endpoint with the service token.

    :raises GnocchiUnavailable: if gnocchi cannot be reached or answers
        with an error status.
    """
    ks_client = keystone_client.get_client()
    gnocchi_url = cfg.CONF.alarms.gnocchi_url
    headers = {'Content-Type': "application/json",
               'X-Auth-Token': ks_client.auth_token}
    try:
        r = requests.get("%s/v1/capabilities" % gnocchi_url,
                         headers=headers)
    except requests.ConnectionError as e:
        raise GnocchiUnavailable(e)
    # status_code // 200 != 1 rejects anything outside the 200-399 range.
    if r.status_code // 200 != 1:
        raise GnocchiUnavailable(r.text)

    return jsonutils.loads(r.text).get('aggregation_methods', [])
def _prepare_cache(endpoint, params, cache): if 'network.statistics.contrail' in cache: return cache['network.statistics.contrail'] data = { 'o_client': client.Client(endpoint), 'n_client': neutron_client.Client(), 'ks_client': keystone_client.get_client() } cache['network.statistics.contrail'] = data return data
def __init__(self, conf, parsed_url): super(GnocchiPublisher, self).__init__(conf, parsed_url) # TODO(jd) allow to override Gnocchi endpoint via the host in the URL options = urlparse.parse_qs(parsed_url.query) self.filter_project = options.get('filter_project', ['service'])[-1] self.filter_domain = options.get('filter_domain', ['Default'])[-1] resources_definition_file = options.get( 'resources_definition_file', ['gnocchi_resources.yaml'])[-1] archive_policy_override = options.get('archive_policy', [None])[-1] self.resources_definition, self.archive_policies_definition = ( self._load_definitions(conf, archive_policy_override, resources_definition_file)) self.metric_map = dict((metric, rd) for rd in self.resources_definition for metric in rd.metrics) timeout = options.get('timeout', [6.05])[-1] self._ks_client = keystone_client.get_client(conf) self.cache = None try: import oslo_cache oslo_cache.configure(conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. if conf.cache.enabled: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass except oslo_cache.exception.ConfigurationError as exc: LOG.warning('unable to configure oslo_cache: %s', exc) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) self._gnocchi = gnocchi_client.get_gnocchiclient( conf, request_timeout=timeout) self._already_logged_event_types = set() self._already_logged_metric_names = set() self._already_configured_archive_policies = False
def keystone(self):
    """Return a lazily created, memoized keystone client.

    One client serves all plugins within a polling cycle; a creation
    failure (from either exception hierarchy) is remembered and
    re-raised on every later access instead of retrying.
    """
    never_tried = (self._keystone is None
                   and self._keystone_last_exception is None)
    if never_tried:
        try:
            self._keystone = keystone_client.get_client()
            self._keystone_last_exception = None
        except (ka_exceptions.ClientException,
                ks_exceptions.ClientException) as exc:
            self._keystone = None
            self._keystone_last_exception = exc
    if self._keystone is None:
        raise self._keystone_last_exception
    return self._keystone
def keystone(self):
    """Return a lazily created, memoized keystone client.

    One client serves all plugins within a polling cycle; the first
    creation failure is remembered and re-raised on later accesses
    instead of retrying.
    """
    never_tried = (self._keystone is None
                   and self._keystone_last_exception is None)
    if never_tried:
        try:
            self._keystone = keystone_client.get_client()
            self._keystone_last_exception = None
        except ks_exceptions.ClientException as exc:
            self._keystone = None
            self._keystone_last_exception = exc
    if self._keystone is None:
        raise self._keystone_last_exception
    return self._keystone
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher (direct HTTP API variant).

    Stores the gnocchi endpoint and archive-policy settings, loads the
    per-resource dispatch extensions, and prepares the lazily created
    gnocchi API session plus its guarding locks.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client()
    self.gnocchi_url = conf.dispatcher_gnocchi.url
    self.gnocchi_archive_policy_default = (
        conf.dispatcher_gnocchi.archive_policy)
    self.gnocchi_archive_policy_data = self._load_archive_policy(conf)
    # Routes samples to per-resource handler extensions.
    self.mgmr = stevedore.dispatch.DispatchExtensionManager(
        "ceilometer.dispatcher.resource", lambda x: True,
        invoke_on_load=True)
    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_api = None
    self._gnocchi_api_lock = threading.Lock()
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Sets up the keystone client, resource definitions, an optional
    oslo_cache region and the gnocchi client, then verifies
    connectivity to Gnocchi with a configurable retry policy.

    :raises Exception: if Gnocchi stays unreachable after retries.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client(conf)
    self.resources_definition = self._load_resources_definitions(conf)

    self.cache = None
    try:
        import oslo_cache
        oslo_cache.configure(self.conf)
        # NOTE(cdent): The default cache backend is a real but
        # noop backend. We don't want to use that here because
        # we want to avoid the cache pathways entirely if the
        # cache has not been configured explicitly.
        if self.conf.cache.enabled:
            cache_region = oslo_cache.create_region()
            self.cache = oslo_cache.configure_cache_region(
                self.conf, cache_region)
            self.cache.key_mangler = cache_key_mangler
    except ImportError:
        # oslo_cache is optional: run without caching when absent.
        pass
    except oslo_cache.exception.ConfigurationError as exc:
        LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

    self._gnocchi = gnocchi_client.get_gnocchiclient(conf)

    # negative max_retries means retry forever
    retries = conf.storage.max_retries

    @tenacity.retry(
        wait=tenacity.wait_fixed(conf.storage.retry_interval),
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        reraise=True)
    def _get_connection():
        # A cheap capabilities call proves the endpoint is reachable.
        self._gnocchi.capabilities.list()

    try:
        _get_connection()
    except Exception:
        LOG.error(_LE('Failed to connect to Gnocchi.'))
        raise
def validate_alarm(cls, alarm):
    """Validate a gnocchi aggregation-by-resources threshold alarm.

    Ensures the rule query is valid JSON, scopes it to the caller's
    project when required, then delegates the final validation to the
    Gnocchi aggregation API.

    :raises wsme.exc.InvalidInput: if the rule query is not JSON.
    :raises GnocchiUnavailable: if gnocchi cannot be reached.
    :raises base.ClientSideError: if gnocchi rejects the query.
    """
    super(AggregationMetricByResourcesLookupRule,
          cls).validate_alarm(alarm)

    rule = alarm.gnocchi_aggregation_by_resources_threshold_rule

    # check the query string is a valid json
    try:
        query = jsonutils.loads(rule.query)
    except ValueError:
        raise wsme.exc.InvalidInput('rule/query', rule.query)

    # Scope the alarm to the project id if needed
    auth_project = v2_utils.get_auth_project(alarm.project_id)
    if auth_project:
        rule.query = jsonutils.dumps({
            "and": [{
                "=": {
                    "created_by_project_id": auth_project
                }
            }, query]
        })

    # Delegate the query validation to gnocchi
    ks_client = keystone_client.get_client()
    request = {
        'url': "%s/v1/aggregation/resource/%s/metric/%s" % (
            cfg.CONF.alarms.gnocchi_url,
            rule.resource_type,
            rule.metric),
        'headers': {
            'Content-Type': "application/json",
            'X-Auth-Token': ks_client.auth_token
        },
        'params': {
            'aggregation': rule.aggregation_method
        },
        'data': rule.query,
    }

    try:
        r = requests.post(**request)
    except requests.ConnectionError as e:
        raise GnocchiUnavailable(e)
    # status_code // 200 != 1 rejects anything outside the 200-399 range.
    if r.status_code // 200 != 1:
        raise base.ClientSideError(r.content, status_code=r.status_code)
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Sets up the keystone client, resource definitions, an optional
    oslo_cache region and the gnocchi client, then verifies
    connectivity to Gnocchi with a configurable retry policy.

    :raises Exception: if Gnocchi stays unreachable after retries.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client(conf)
    self.resources_definition = self._load_resources_definitions(conf)

    self.cache = None
    try:
        import oslo_cache
        oslo_cache.configure(self.conf)
        # NOTE(cdent): The default cache backend is a real but
        # noop backend. We don't want to use that here because
        # we want to avoid the cache pathways entirely if the
        # cache has not been configured explicitly.
        if self.conf.cache.enabled:
            cache_region = oslo_cache.create_region()
            self.cache = oslo_cache.configure_cache_region(
                self.conf, cache_region)
            self.cache.key_mangler = cache_key_mangler
    except ImportError:
        # oslo_cache is optional: run without caching when absent.
        pass
    except oslo_cache.exception.ConfigurationError as exc:
        LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

    self._gnocchi = gnocchi_client.get_gnocchiclient(conf)

    # negative max_retries means retry forever
    retries = conf.storage.max_retries

    @tenacity.retry(wait=tenacity.wait_fixed(conf.storage.retry_interval),
                    stop=(tenacity.stop_after_attempt(retries) if
                          retries >= 0 else tenacity.stop_never),
                    reraise=True)
    def _get_connection():
        # A cheap capabilities call proves the endpoint is reachable.
        self._gnocchi.capabilities.list()

    try:
        _get_connection()
    except Exception:
        LOG.error(_LE('Failed to connect to Gnocchi.'))
        raise
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher (direct HTTP API variant).

    Stores the gnocchi endpoint and archive-policy settings, loads the
    per-resource dispatch extensions, and prepares the lazily created
    gnocchi API session plus its guarding locks.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client()
    self.gnocchi_url = conf.dispatcher_gnocchi.url
    self.gnocchi_archive_policy_default = (
        conf.dispatcher_gnocchi.archive_policy)
    self.gnocchi_archive_policy_data = self._load_archive_policy(conf)
    # Routes samples to per-resource handler extensions.
    self.mgmr = stevedore.dispatch.DispatchExtensionManager(
        'ceilometer.dispatcher.resource', lambda x: True,
        invoke_on_load=True)
    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_api = None
    self._gnocchi_api_lock = threading.Lock()
def __init__(self, conf):
    """Initialize the Gnocchi dispatcher.

    Sets up the keystone client, resource definitions, an optional
    oslo_cache region and the gnocchi client, then verifies
    connectivity to Gnocchi with a configurable retry policy.

    :raises Exception: if Gnocchi stays unreachable after retries.
    """
    super(GnocchiDispatcher, self).__init__(conf)
    self.conf = conf
    self.filter_service_activity = (
        conf.dispatcher_gnocchi.filter_service_activity)
    self._ks_client = keystone_client.get_client()
    self.resources_definition = self._load_resources_definitions(conf)

    self.cache = None
    try:
        import oslo_cache
        oslo_cache.configure(self.conf)
        # NOTE(cdent): The default cache backend is a real but
        # noop backend. We don't want to use that here because
        # we want to avoid the cache pathways entirely if the
        # cache has not been configured explicitly.
        if "null" not in self.conf.cache.backend:
            cache_region = oslo_cache.create_region()
            self.cache = oslo_cache.configure_cache_region(self.conf,
                                                           cache_region)
            self.cache.key_mangler = cache_key_mangler
    except ImportError:
        # oslo_cache is optional: run without caching when absent.
        pass
    except oslo_cache.exception.ConfigurationError as exc:
        LOG.warning(_LW("unable to configure oslo_cache: %s") % exc)

    self._gnocchi_project_id = None
    self._gnocchi_project_id_lock = threading.Lock()
    self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)

    self._gnocchi = get_gnocchiclient(conf)

    # Convert retry_interval secs to msecs for retry decorator
    retries = conf.storage.max_retries

    @retrying.retry(
        wait_fixed=conf.storage.retry_interval * 1000,
        stop_max_attempt_number=(retries if retries >= 0 else None))
    def _get_connection():
        # A cheap capabilities call proves the endpoint is reachable.
        self._gnocchi.capabilities.list()

    try:
        _get_connection()
    except Exception:
        LOG.error(_LE("Failed to connect to Gnocchi."))
        raise
def validate_alarm(cls, alarm):
    """Validate a gnocchi_resources_threshold alarm rule.

    Confirms, via a direct call to the Gnocchi REST API, that the
    resource referenced by the rule actually exists.

    :raises GnocchiUnavailable: if gnocchi cannot be reached.
    :raises base.EntityNotFound: if the resource does not exist.
    :raises base.ClientSideError: for any other gnocchi error.
    """
    super(MetricOfResourceRule, cls).validate_alarm(alarm)

    rule = alarm.gnocchi_resources_threshold_rule
    token = keystone_client.get_client().auth_token
    url = "%s/v1/resource/%s/%s" % (cfg.CONF.alarms.gnocchi_url,
                                    rule.resource_type,
                                    rule.resource_id)
    try:
        r = requests.get(url,
                         headers={'Content-Type': "application/json",
                                  'X-Auth-Token': token})
    except requests.ConnectionError as e:
        raise GnocchiUnavailable(e)

    if r.status_code == 404:
        raise base.EntityNotFound('gnocchi resource', rule.resource_id)
    elif r.status_code // 200 != 1:
        # Anything outside the 200-399 range is surfaced to the client.
        raise base.ClientSideError(r.content, status_code=r.status_code)
def validate_alarm(cls, alarm):
    """Validate a gnocchi_resources_threshold alarm rule.

    Confirms, via a direct call to the Gnocchi REST API, that the
    resource referenced by the rule actually exists.

    :raises GnocchiUnavailable: if gnocchi cannot be reached.
    :raises base.EntityNotFound: if the resource does not exist.
    :raises base.ClientSideError: for any other gnocchi error.
    """
    super(MetricOfResourceRule, cls).validate_alarm(alarm)

    rule = alarm.gnocchi_resources_threshold_rule
    ks_client = keystone_client.get_client()
    gnocchi_url = cfg.CONF.alarms.gnocchi_url
    headers = {'Content-Type': "application/json",
               'X-Auth-Token': ks_client.auth_token}
    try:
        r = requests.get("%s/v1/resource/%s/%s" % (
            gnocchi_url, rule.resource_type,
            rule.resource_id), headers=headers)
    except requests.ConnectionError as e:
        raise GnocchiUnavailable(e)
    if r.status_code == 404:
        raise base.EntityNotFound('gnocchi resource',
                                  rule.resource_id)
    # status_code // 200 != 1 rejects anything outside the 200-399 range.
    elif r.status_code // 200 != 1:
        raise base.ClientSideError(r.content,
                                   status_code=r.status_code)
def _prepare_cache(self, endpoint, params, cache):
    """Build (and memoize in *cache*) the OpenDaylight statistics data.

    Returns a dict containing the switch statistics and the admin
    tenant id; on failure an empty (or partial) dict is cached and
    returned so the poll cycle can continue.
    """
    if 'network.statistics.opendaylight_v2' in cache:
        return cache['network.statistics.opendaylight_v2']

    data = {}

    # Forward only the auth-related query parameters that were given.
    odl_params = {}
    if 'auth' in params:
        odl_params['auth'] = params['auth'][0]
    if 'user' in params:
        odl_params['user'] = params['user'][0]
    if 'password' in params:
        odl_params['password'] = params['password'][0]
    cs = client.Client(self.conf, endpoint, odl_params)

    if not self.admin_project_id:
        try:
            ks_client = keystone_client.get_client(self.conf)
            project = ks_client.projects.find(name='admin')
            if project:
                self.admin_project_id = project.id
        except Exception:
            LOG.exception('Unable to fetch admin tenant id')
            # Without the admin project id the statistics cannot be
            # attributed; cache the empty result and bail out early.
            cache['network.statistics.opendaylight_v2'] = data
            return data

    try:
        # get switch statistics
        data['switch'] = cs.switch_statistics.get_statistics()
        data['admin_tenant_id'] = self.admin_project_id
    except client.OpenDaylightRESTAPIFailed:
        LOG.exception('OpenDaylight REST API Failed. ')
    except Exception:
        LOG.exception('Failed to connect to OpenDaylight'
                      ' REST API')

    cache['network.statistics.opendaylight_v2'] = data
    return data
def aodh_url(self):
    """Return the aodh endpoint URL, resolved once and cached.

    An empty string means aodh is disabled; a transient keystone
    failure leaves the cache unset so the lookup is retried later.
    """
    if self._aodh_url is None:
        if cfg.CONF.api.aodh_is_enabled is False:
            self._aodh_url = ""
        elif cfg.CONF.api.aodh_url is not None:
            self._aodh_url = self._normalize_aodh_url(
                cfg.CONF.api.aodh_url)
        else:
            try:
                ks = keystone_client.get_client()
                self._aodh_url = self._normalize_aodh_url(
                    ks.service_catalog.url_for(service_type='alarming'))
            except exceptions.EndpointNotFound:
                self._aodh_url = ""
            except exceptions.ClientException:
                # LOG.warn is deprecated in favor of LOG.warning.
                LOG.warning(_LW("Can't connect to keystone, "
                                "assuming aodh is disabled and retry later."))
            else:
                LOG.warning(_LW("ceilometer-api started with aodh enabled. "
                                "Alarms URLs will be redirected to aodh "
                                "endpoint."))
    return self._aodh_url
def validate_alarm(cls, alarm):
    """Validate a gnocchi aggregation-by-resources threshold alarm.

    Ensures the rule query is valid JSON, scopes it to the caller's
    project when required, then delegates the final validation to the
    Gnocchi aggregation API.

    :raises wsme.exc.InvalidInput: if the rule query is not JSON.
    :raises GnocchiUnavailable: if gnocchi cannot be reached.
    :raises base.ClientSideError: if gnocchi rejects the query.
    """
    super(AggregationMetricByResourcesLookupRule,
          cls).validate_alarm(alarm)

    rule = alarm.gnocchi_aggregation_by_resources_threshold_rule

    # check the query string is a valid json
    try:
        query = jsonutils.loads(rule.query)
    except ValueError:
        raise wsme.exc.InvalidInput('rule/query', rule.query)

    # Scope the alarm to the project id if needed
    auth_project = v2_utils.get_auth_project(alarm.project_id)
    if auth_project:
        rule.query = jsonutils.dumps(
            {"and": [{"=": {"created_by_project_id": auth_project}},
                     query]})

    # Delegate the query validation to gnocchi
    ks_client = keystone_client.get_client()
    request = {
        'url': "%s/v1/aggregation/resource/%s/metric/%s" % (
            cfg.CONF.alarms.gnocchi_url,
            rule.resource_type,
            rule.metric),
        'headers': {'Content-Type': "application/json",
                    'X-Auth-Token': ks_client.auth_token},
        'params': {'aggregation': rule.aggregation_method},
        'data': rule.query,
    }

    try:
        r = requests.post(**request)
    except requests.ConnectionError as e:
        raise GnocchiUnavailable(e)
    # status_code // 200 != 1 rejects anything outside the 200-399 range.
    if r.status_code // 200 != 1:
        raise base.ClientSideError(r.content, status_code=r.status_code)
def gnocchi_is_enabled(self):
    """Return (and lazily cache) whether gnocchi is enabled.

    Checks the explicit api option first, then the dispatcher list,
    then probes the keystone catalog for a 'metric' endpoint.
    """
    if self._gnocchi_is_enabled is None:
        if cfg.CONF.api.gnocchi_is_enabled is not None:
            self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled
        elif ("gnocchi" not in cfg.CONF.dispatcher
                or "database" in cfg.CONF.dispatcher):
            self._gnocchi_is_enabled = False
        else:
            try:
                ks = keystone_client.get_client()
                ks.service_catalog.url_for(service_type='metric')
            except exceptions.EndpointNotFound:
                self._gnocchi_is_enabled = False
            except exceptions.ClientException:
                # LOG.warn is deprecated in favor of LOG.warning.
                LOG.warning(_LW("Can't connect to keystone, assuming gnocchi "
                                "is disabled and retry later"))
            else:
                self._gnocchi_is_enabled = True
                LOG.warning(_LW("ceilometer-api started with gnocchi "
                                "enabled. The resources/meters/samples "
                                "URLs are disabled."))
    return self._gnocchi_is_enabled
def aodh_url(self):
    """Return the aodh endpoint URL, resolved once and cached.

    An empty string means aodh is disabled; None (transient keystone
    failure) means the lookup is retried on the next call.
    """
    if self._aodh_url is not None:
        return self._aodh_url

    conf = pecan.request.cfg
    if conf.api.aodh_is_enabled is False:
        self._aodh_url = ""
    elif conf.api.aodh_url is not None:
        self._aodh_url = self._normalize_url(conf.api.aodh_url)
    else:
        try:
            catalog = keystone_client.get_service_catalog(
                keystone_client.get_client(conf))
            self._aodh_url = self._normalize_url(
                catalog.url_for(service_type='alarming'))
        except exceptions.EndpointNotFound:
            self._aodh_url = ""
        except exceptions.ClientException:
            LOG.warning(_LW("Can't connect to keystone, assuming aodh "
                            "is disabled and retry later."))
        else:
            LOG.warning(_LW("ceilometer-api started with aodh "
                            "enabled. Alarms URLs will be redirected "
                            "to aodh endpoint."))
    return self._aodh_url
def ks_client(self):
    """Return the keystone client, creating it on first access."""
    if self._ks_client is None:
        # Lazily constructed so no keystone round-trip happens until
        # the client is actually needed.
        self._ks_client = keystone_client.get_client()
    return self._ks_client
#!/usr/bin/env python from collections import defaultdict from hashlib import md5 import itertools import operator import threading import uuid from gnocchiclient import client from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import session as ka_session from oslo_config import cfg from oslo_log import log import requests import retrying import six from stevedore import extension from ceilometer import declarative from ceilometer import dispatcher from ceilometer.i18n import _, _LE, _LW from ceilometer import keystone_client from ceilometer import utils _ks_client = keystone_client.get_client() project = _ks_client.projects.find(name='gnocchi')
def interval_task(self, task):
    """Refresh the cached keystone client, then run the polling task.

    Any failure to (re)create the client is stored on self.keystone in
    place of the client so pollsters can surface the error later.
    """
    try:
        self.keystone = keystone_client.get_client()
    except Exception as exc:
        self.keystone = exc
    task.poll_and_notify()
def __init__(self, parsed_url):
    """Initialize the InfluxDB publisher.

    Loads meter/tag mappings from the configured JSON file, caches the
    keystone tenant list, validates the database address and opens the
    InfluxDB connection.
    """
    self.driver_endpoint_from_pipeline = parsed_url

    # database connection parameters
    self.dbaddress = cfg.CONF.influxdb.influxdb_addr
    self.dbport = cfg.CONF.influxdb.influxdb_port
    self.dbname = cfg.CONF.influxdb.influxdb_instance
    self.dbuser = cfg.CONF.influxdb.influxdb_user
    self.dbpass = cfg.CONF.influxdb.influxdb_pass
    self.retention_policy = cfg.CONF.influxdb.retention_policy
    self.verboselog = cfg.CONF.influxdb.verboselog
    self.mappings = cfg.CONF.influxdb.mappings
    self.mapping_data = {}

    # open mapping file
    with open(self.mappings, "r") as mapping_descriptor:
        self.mappingfile = json.loads(mapping_descriptor.read())
    LOG.info(
        "[*] InfluxDB Publisher: Loaded Meters and Tag Mappings from config file [%s]." % self.mappings)

    # parse json...
    for entry in self.mappingfile:
        self.mapping_data[entry["name"]] = entry["values"]

    # this host
    self.hostname = gethostname()

    # compile additional tags
    self.additional_tags = {
        'hypervisor_hostname': self.hostname,
        'retention_policy': self.retention_policy
    }

    # set meter prefix
    if cfg.CONF.influxdb.append_hypervisor:
        self.meter_prefix = cfg.CONF.influxdb.metering_prefix + self.hostname
    else:
        self.meter_prefix = cfg.CONF.influxdb.metering_prefix

    # get keystone client instance
    self.identity = get_client()

    # get initial tenant list
    self.tenants = self.identity.projects.list()

    # at startup, register available tenants in in-memory database
    # subsequent queries either hit the in memory cache or need a new query to keystone
    for t in self.tenants:
        InfluxDBPublisherUtils.pushTenant(t.id, t.name)

    # create DB connection
    # sanity check on database parameters
    if not (network_utils.is_valid_ipv4(self.dbaddress) and
            network_utils.is_valid_port(self.dbport)):
        raise Exception("dbaddr:dbport validation error %s:%s" %
                        (self.dbaddress, self.dbport))
    try:
        self.dbconn = dbclient(self.dbaddress, self.dbport, self.dbuser,
                               self.dbpass, self.dbname)
    except Exception as e:
        # NOTE(review): connection failures are only logged, so
        # self.dbconn may be unset afterwards — presumably handled by
        # callers; verify.
        LOG.info(e)

    # OK init done
    # NOTE(review): the original literal appears to span a line break
    # here; reconstructed as an embedded newline — confirm against the
    # original file.
    LOG.info("[+] InfluxDB Publisher \n[%s] registered to [%s]" %
             (self.driver_endpoint_from_pipeline, self.dbaddress))
def __init__(self, url):
    """Initialize the client with the Gnocchi endpoint URL."""
    # Endpoint of the Gnocchi HTTP API this client talks to.
    self._gnocchi_url = url
    # Keystone client used to obtain auth tokens.
    self._ks_client = keystone_client.get_client()
    self._session = GnocchiSession()
def interval_task(self, task):
    """Refresh the cached keystone client, then delegate the task.

    Any failure to (re)create the client is stored on self.keystone in
    place of the client so pollsters can surface the error later.
    """
    try:
        self.keystone = keystone_client.get_client()
    except Exception as exc:
        self.keystone = exc
    super(AgentManager, self).interval_task(task)