def list_metrics(self, tenant_id, region, name, dimensions, offset,
                 limit, start_timestamp=None, end_timestamp=None):
    """Return the metrics matching name/dimensions as a JSON-ready list.

    Issues a SHOW SERIES query against InfluxDB; one extra row beyond
    the page size is requested so the caller can detect pagination.
    A missing measurement is treated as "no metrics", not an error.
    """
    try:
        series_query = self._build_show_series_query(
            dimensions, name, tenant_id, region)
        # Ask for one row beyond the page so callers can tell whether
        # another page exists.
        series_query += " limit {}".format(limit + 1)
        if offset:
            series_query += ' offset {}'.format(int(offset) + 1)
        raw_result = self.influxdb_client.query(series_query)
        return self._build_serie_metric_list(
            raw_result, tenant_id, region, start_timestamp,
            end_timestamp, offset)
    except InfluxDBClientError as ex:
        # A missing measurement simply means there is nothing to list.
        if not str(ex).startswith(MEASUREMENT_NOT_FOUND_MSG):
            LOG.exception(ex)
            raise exceptions.RepositoryException(ex)
        return []
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_values(self, tenant_id, region, metric_name,
                          dimension_name):
    """Return the sorted values stored for one dimension name.

    When metric_name is given the lookup is scoped to that metric;
    otherwise all metrics for the tenant/region are considered.
    """
    try:
        if metric_name:
            rows = self.session.execute(
                self.dim_val_by_metric_stmt,
                [region, tenant_id, metric_name, dimension_name])
        else:
            rows = self.session.execute(
                self.dim_val_stmt,
                [region, tenant_id, dimension_name])
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
    if not rows:
        return []
    values = [{u'dimension_value': row.value} for row in rows]
    values.sort(key=lambda entry: entry[u'dimension_value'])
    return values
def list_dimension_names(self, tenant_id, region, metric_name):
    """Return dimension names, optionally scoped to a single metric.

    The per-metric statement returns rows already ordered, so the
    result is only sorted client-side for the unscoped query.
    """
    try:
        if metric_name:
            result = self.session.execute(
                self.dim_name_by_metric_stmt,
                [region, tenant_id, metric_name])
            presorted = True
        else:
            result = self.session.execute(
                self.dim_name_stmt, [region, tenant_id])
            presorted = False
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
    if not result:
        return []
    names = [{u'dimension_name': row.name} for row in result]
    if not presorted:
        names.sort(key=lambda entry: entry[u'dimension_name'])
    return names
def __init__(self):
    """Build a SQLAlchemy engine from [mysql] or [database] config.

    The legacy [mysql] section wins when it names a database;
    otherwise the [database] url, or the remaining [database] options,
    are used to assemble the connection URL.
    """
    try:
        super(SQLRepository, self).__init__()
        self.conf = cfg.CONF
        mysql_conf = self.conf.mysql
        if mysql_conf.database_name is not None:
            # Legacy [mysql] settings take precedence.
            url = make_url("mysql+pymysql://%s:%s@%s/%s" % (
                mysql_conf.username, mysql_conf.password,
                mysql_conf.hostname, mysql_conf.database_name))
        elif self.conf.database.url is not None:
            url = make_url(self.conf.database.url)
        else:
            database_conf = dict(self.conf.database)
            # 'url' is not a valid URL() keyword; drop it if present.
            database_conf.pop('url', None)
            url = URL(**database_conf)
        from sqlalchemy import create_engine
        # Recycle pooled connections hourly to dodge server-side
        # idle-connection timeouts.
        self._db_engine = create_engine(url, pool_recycle=3600)
        self.metadata = MetaData()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_names(self, tenant_id, region, metric_name):
    """Return the distinct dimension names for a tenant and region.

    Dimension keys are stored percent-encoded in the metric map and
    are decoded before being returned; the reserved '__name__' key is
    excluded.
    """
    try:
        parms = []
        query = self._build_select_metric_map_query(
            tenant_id, region, parms)
        query += self._build_name_clause(metric_name, parms)
        statement = SimpleStatement(query, fetch_size=2147483647)
        dim_names = []
        for row in self.cassandra_session.execute(statement, parms):
            for encoded_name, _ in row.metric_map.iteritems():
                decoded = urllib.unquote_plus(encoded_name)
                entry = {u'dimension_name': decoded}
                if decoded != '__name__' and entry not in dim_names:
                    dim_names.append(entry)
        return sorted(dim_names)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_names(self, tenant_id, region, metric_name,
                         start_timestamp=None, end_timestamp=None):
    """Return dimension names; timestamp scoping is not supported.

    The timestamp parameters are accepted for interface parity but
    are ignored (a log message records the fact).
    """
    if start_timestamp or end_timestamp:
        # NOTE(brtknr): For more details, see story
        # https://storyboard.openstack.org/#!/story/2006204
        LOG.info("Scoping by timestamp not implemented for cassandra.")
    try:
        if metric_name:
            result = self.session.execute(
                self.dim_name_by_metric_stmt,
                [region, tenant_id, metric_name])
            presorted = True
        else:
            result = self.session.execute(
                self.dim_name_stmt, [region, tenant_id])
            presorted = False
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
    if not result:
        return []
    names = [{u'dimension_name': row.name} for row in result]
    if not presorted:
        names.sort(key=lambda entry: entry[u'dimension_name'])
    return names
def list_metric_names(self, tenant_id, region, dimensions, offset, limit):
    """List metric names (with hex ids) matching the given dimensions.

    The offset is the hex-encoded metric_hash of the last row from
    the previous page; limit is stretched by one row so the caller
    can detect whether another page exists.
    """
    try:
        select_stmt = """
          select metric_hash, metric_map
          from metric_map
          where tenant_id = %s and region = %s
          """
        parms = [tenant_id.encode('utf8'), region.encode('utf8')]
        select_stmt += self._build_dimensions_clause(dimensions, parms)
        if offset:
            select_stmt += ' and metric_hash > %s '
            parms.append(bytearray(offset.decode('hex')))
        if limit:
            select_stmt += ' limit %s '
            parms.append(limit + 1)
        select_stmt += ' allow filtering'
        statement = SimpleStatement(select_stmt, fetch_size=2147483647)
        rows = self.cassandra_session.execute(statement, parms)
        names = []
        if not rows:
            return names
        for (metric_hash, metric_map) in rows:
            entry = {}
            # The metric name is stored percent-encoded under the
            # reserved '__name__' key of the metric map.
            for key, value in metric_map.iteritems():
                if key == '__name__':
                    entry[u'name'] = urllib.unquote_plus(value)
                    break
            entry[u'id'] = binascii.hexlify(bytearray(metric_hash))
            names.append(entry)
        return names
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Capture the global oslo.config object for later use."""
    try:
        self.conf = cfg.CONF
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_metric_names(self, tenant_id, region, dimensions, offset, limit):
    """Stub implementation: always returns an empty metric-name list."""
    try:
        return []
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Connect a Cassandra session to the configured keyspace."""
    try:
        # Cluster construction is cheap; the connect() call is what
        # actually reaches the servers.
        self._cassandra_cluster = Cluster(
            CONF.cassandra.cluster_ip_addresses)
        self.cassandra_session = self._cassandra_cluster.connect(
            CONF.cassandra.keyspace)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_names(self, tenant_id, region, metric_name):
    """Return dimension names from InfluxDB via SHOW TAG KEYS."""
    try:
        tag_keys_query = self._build_show_tag_keys_query(
            metric_name, tenant_id, region)
        response = self.influxdb_client.query(tag_keys_query)
        return self._build_serie_dimension_names(response)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Load the region and the configured alarms repository driver."""
    try:
        super(AlarmsCount, self).__init__()
        self._region = cfg.CONF.region
        # The driver class is resolved dynamically from config and
        # instantiated immediately.
        driver_path = cfg.CONF.repositories.alarms_driver
        self._alarms_repo = simport.load(driver_path)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Initialize the SQL repository with the shared engine."""
    try:
        super(SQLRepository, self).__init__()
        self.conf = CONF
        # The engine is created (or reused) by the shared helper.
        self._db_engine = get_engine()
        self.metadata = sqlalchemy.MetaData()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def _build_serie_metric_list(self, *args, **kwargs):
    """Dispatch to the version-specific serie-metric-list builder.

    The InfluxDB version is probed lazily on first use; if it still
    cannot be determined, the repository is considered unavailable.
    """
    if not self._version:
        # Version unknown -- probe the server once before giving up.
        self._init_version()
    if self._version:
        builder = self._serie_builders_version_map[self._version][1]
        return builder(*args, **kwargs)
    LOG.error('influxdb is not available, giving up')
    raise exceptions.RepositoryException('Repository not available')
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag):
    """Stub implementation: always returns an empty measurement list."""
    try:
        return []
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Load region, default roles and the alarms repository driver."""
    try:
        super(AlarmsCount, self).__init__()
        conf = cfg.CONF
        self._region = conf.region
        self._default_authorized_roles = (
            conf.security.default_authorized_roles)
        self._alarms_repo = simport.load(
            conf.repositories.alarms_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def alarm_history(self, tenant_id, alarm_id_list, offset, limit,
                  start_timestamp=None, end_timestamp=None):
    """Stub implementation: always returns an empty alarm-history list."""
    try:
        return []
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Create the InfluxDB client bound to the configured database."""
    try:
        self.conf = cfg.CONF
        influx_conf = self.conf.influxdb
        self.influxdb_client = client.InfluxDBClient(
            influx_conf.ip_address, influx_conf.port,
            influx_conf.user, influx_conf.password,
            influx_conf.database_name)
        # Set up the version-specific result builders.
        self._init_serie_builders()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Connect to Cassandra and prepare every CQL statement used here.

    Authentication is optional; when no user is configured the
    cluster is contacted anonymously.
    """
    try:
        self.conf = cfg.CONF
        cassandra_conf = self.conf.cassandra
        if cassandra_conf.user:
            auth_provider = PlainTextAuthProvider(
                username=cassandra_conf.user,
                password=cassandra_conf.password)
        else:
            auth_provider = None
        # Token-aware routing layered over DC-aware round robin keeps
        # requests within the configured local data center.
        self.cluster = Cluster(
            cassandra_conf.contact_points,
            port=cassandra_conf.port,
            auth_provider=auth_provider,
            connect_timeout=cassandra_conf.connection_timeout,
            load_balancing_policy=TokenAwarePolicy(
                DCAwareRoundRobinPolicy(
                    local_dc=cassandra_conf.local_data_center)))
        self.session = self.cluster.connect(cassandra_conf.keyspace)
        prepare = self.session.prepare
        self.dim_val_by_metric_stmt = prepare(
            DIMENSION_VALUE_BY_METRIC_CQL)
        self.dim_val_stmt = prepare(DIMENSION_VALUE_CQL)
        self.dim_name_by_metric_stmt = prepare(
            DIMENSION_NAME_BY_METRIC_CQL)
        self.dim_name_stmt = prepare(DIMENSION_NAME_CQL)
        self.metric_name_by_dimension_stmt = prepare(
            METRIC_NAME_BY_DIMENSION_CQL)
        self.metric_name_by_dimension_offset_stmt = prepare(
            METRIC_NAME_BY_DIMENSION_OFFSET_CQL)
        self.metric_name_stmt = prepare(METRIC_NAME_CQL)
        self.metric_name_offset_stmt = prepare(METRIC_NAME_OFFSET_CQL)
        self.metric_by_id_stmt = prepare(METRIC_BY_ID_CQL)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
    # Reference epoch used when converting timestamps to offsets.
    self.epoch = datetime.utcfromtimestamp(0)
def __init__(self):
    """Record the MySQL connection settings from configuration."""
    try:
        super(MySQLRepository, self).__init__()
        self.conf = cfg.CONF
        mysql_conf = self.conf.mysql
        self.database_name = mysql_conf.database_name
        self.database_server = mysql_conf.hostname
        self.database_uid = mysql_conf.username
        self.database_pwd = mysql_conf.password
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def try_it(*args, **kwargs):
    """Invoke the wrapped function, normalizing unexpected errors.

    Domain exceptions that API callers handle explicitly are re-raised
    untouched; anything else is logged and wrapped in a
    RepositoryException.
    """
    try:
        return fun(*args, **kwargs)
    except (exceptions.DoesNotExistException,
            exceptions.InvalidUpdateException,
            exceptions.AlreadyExistsException):
        # Pass through the exceptions the API layer maps to HTTP errors.
        raise
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_metric_names(self, tenant_id, region, dimensions):
    """Return metric names from InfluxDB via SHOW MEASUREMENTS."""
    try:
        measurements_query = self._build_show_measurements_query(
            dimensions, None, tenant_id, region)
        response = self.influxdb_client.query(measurements_query)
        return self._build_measurement_name_list(response)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Load roles plus the alarms and metrics repository drivers."""
    try:
        super(AlarmsStateHistory, self).__init__()
        conf = cfg.CONF
        self._region = conf.region
        # Read access is granted to default and read-only roles alike.
        self._get_alarms_authorized_roles = (
            conf.security.default_authorized_roles +
            conf.security.read_only_authorized_roles)
        self._alarms_repo = simport.load(
            conf.repositories.alarms_driver)()
        self._metrics_repo = simport.load(
            conf.repositories.metrics_driver)()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_names(self, tenant_id, region, metric_name,
                         start_timestamp=None, end_timestamp=None):
    """Return dimension names, optionally bounded by a time window."""
    try:
        tag_keys_query = self._build_show_tag_keys_query(
            metric_name, tenant_id, region, start_timestamp,
            end_timestamp)
        response = self.query_tenant_db(tag_keys_query, tenant_id)
        return self._build_serie_dimension_names(response)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def list_dimension_values(self, tenant_id, region, metric_name,
                          dimension_name, offset, limit):
    """Return the values seen for one dimension of a metric (InfluxDB).

    NOTE(review): limit is accepted but not forwarded to the series
    query here -- presumably applied downstream; confirm with
    _build_serie_dimension_values.
    """
    try:
        series_query = self._build_show_series_query(
            None, metric_name, tenant_id, region)
        response = self.influxdb_client.query(series_query)
        return self._build_serie_dimension_values(
            response, metric_name, dimension_name, tenant_id, region,
            offset)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def _list_metric_names_single_dimension_value(self, tenant_id, region,
                                              dimensions, offset=None):
    """Return names of metrics matching every (name, value) dimension.

    One async query is issued per dimension pair (or a single
    unfiltered query when no dimensions are given) and the resulting
    per-query name sets are intersected.
    """
    try:
        futures = []
        if dimensions:
            for dim_name, dim_value in dimensions.items():
                if offset:
                    stmt = self.metric_name_by_dimension_offset_stmt
                    params = [region, tenant_id, dim_name, dim_value,
                              offset]
                else:
                    stmt = self.metric_name_by_dimension_stmt
                    params = [region, tenant_id, dim_name, dim_value]
                futures.append(self.session.execute_async(stmt, params))
        elif offset:
            futures.append(self.session.execute_async(
                self.metric_name_offset_stmt,
                [region, tenant_id, offset]))
        else:
            futures.append(self.session.execute_async(
                self.metric_name_stmt, [region, tenant_id]))
        per_query_names = []
        for future in futures:
            per_query_names.append(
                {row.metric_name for row in future.result()})
        # A metric qualifies only if it appeared in every result set.
        return [{u'name': v}
                for v in set.intersection(*per_query_names)]
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def __init__(self):
    """Create the InfluxDB client and detect the server version."""
    try:
        self.conf = cfg.CONF
        influx_conf = self.conf.influxdb
        self.influxdb_client = client.InfluxDBClient(
            influx_conf.ip_address, influx_conf.port,
            influx_conf.user, influx_conf.password)
        self._version = None
        self._init_version()
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
    # Maps a detected version to its (dimension-values, metric-list)
    # builder pair.
    self._serie_builders_version_map = {
        'from_0.11.0': (self._build_serie_dimension_values_from_v0_11_0,
                        self._build_serie_metric_list_from_v0_11_0)
    }
def list_dimension_values(self, tenant_id, region, metric_name,
                          dimension_name):
    """Return sorted, distinct values for one dimension name.

    Dimension names and values are stored percent-encoded in the
    metric map and are decoded before comparison and return.
    """
    try:
        parms = []
        query = self._build_select_metric_map_query(
            tenant_id, region, parms)
        query += self._build_name_clause(metric_name, parms)
        query += self._build_dimensions_clause(
            {dimension_name: None}, parms)
        query += ' allow filtering '
        statement = SimpleStatement(query, fetch_size=2147483647)
        rows = self.cassandra_session.execute(statement, parms)
        dim_values = []
        if not rows:
            return dim_values
        for row in rows:
            for encoded_name, encoded_value in row.metric_map.items():
                decoded_name = urllib.unquote_plus(encoded_name)
                decoded_value = urllib.unquote_plus(encoded_value)
                entry = {u'dimension_value': decoded_value}
                if (decoded_name == dimension_name and
                        entry not in dim_values):
                    dim_values.append(entry)
        return sorted(dim_values)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def _insert_into_alarm_action(self, conn, alarm_definition_id, actions,
                              alarm_state):
    """Link notification actions to an alarm definition for one state.

    Every action id must already exist as a notification method;
    otherwise a RepositoryException is raised before any row for that
    action is inserted.
    """
    if actions is None:
        return
    for action in actions:
        action_id = action.encode('utf8')
        existing = conn.execute(self.select_nm_query,
                                b_id=action_id).fetchone()
        if existing is None:
            raise exceptions.RepositoryException(
                "Non-existent notification id {} submitted for {} "
                "notification action".format(
                    action_id, alarm_state.encode('utf8')))
        conn.execute(self.insert_aa_query,
                     b_alarm_definition_id=alarm_definition_id,
                     b_alarm_state=alarm_state.encode('utf8'),
                     b_action_id=action_id)
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag):
    """Return the measurements for one metric as a JSON-ready list.

    Each measurement row becomes [iso_timestamp, value, value_meta];
    when merge_metrics_flag is false the metric's dimensions are
    looked up and included in the single returned measurement object.

    :raises exceptions.RepositoryException: on any retrieval failure
    """
    try:
        json_measurement_list = []
        rows = self._get_measurements(tenant_id, region, name,
                                      dimensions, start_timestamp,
                                      end_timestamp, offset, limit,
                                      merge_metrics_flag)
        if not rows:
            return json_measurement_list
        if not merge_metrics_flag:
            dimensions = self._get_dimensions(tenant_id, region, name,
                                              dimensions)
        measurements_list = ([[
            self._isotime_msec(time_stamp), value,
            rest_utils.from_json(value_meta) if value_meta else {}
        ] for (time_stamp, value, value_meta) in rows])
        measurement = {
            u'name': name,
            # The last date in the measurements list.
            u'id': measurements_list[-1][0],
            u'dimensions': dimensions,
            u'columns': [u'timestamp', u'value', u'value_meta'],
            u'measurements': measurements_list
        }
        json_measurement_list.append(measurement)
        return json_measurement_list
    except exceptions.RepositoryException as ex:
        LOG.exception(ex)
        # Bare raise preserves the original traceback; `raise ex`
        # would restart it from here.
        raise
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)