def __init__(self, influxdb_client):
    """
    Initialize query client.

    :param influxdb_client: influxdb client
    """
    self._influxdb_client = influxdb_client
    self._query_api = QueryService(influxdb_client.api_client)
class QueryApi(object):

    default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                              annotations=["datatype", "group", "default"],
                              date_time_format="RFC3339")

    def __init__(self, influxdb_client):
        """
        Initializes query client.

        :param influxdb_client: influxdb client
        """
        self._influxdb_client = influxdb_client
        self._query_api = QueryService(influxdb_client.api_client)

    def query_csv(self, query: str, org=None, dialect: Dialect = default_dialect):
        """
        Executes the Flux query and returns results as a CSV iterator.
        Each iteration returns a row of the CSV file.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :return: The returned object is an iterator. Each iteration returns a row of the CSV file
                 (which can span multiple input lines).
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org, query=self._create_query(query, dialect),
                                              async_req=False, _preload_content=False)
        return csv.reader(codecs.iterdecode(response, 'utf-8'))

    def query_raw(self, query: str, org=None, dialect=default_dialect):
        """
        Synchronously executes the Flux query and returns the result as a raw, unprocessed str.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :return: str
        """
        if org is None:
            org = self._influxdb_client.org

        result = self._query_api.post_query(org=org, query=self._create_query(query, dialect),
                                            async_req=False, _preload_content=False)
        return result

    def query(self, query: str, org=None) -> List['FluxTable']:
        """
        Synchronously executes the Flux query and returns the result as a List['FluxTable'].

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :return:
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.tables)

        list(_parser.generator())

        return _parser.tables

    def query_stream(self, query: str, org=None) -> Generator['FluxRecord', Any, None]:
        """
        Synchronously executes the Flux query and returns a stream of FluxRecord
        as a Generator['FluxRecord'].

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :return:
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream)

        return _parser.generator()

    def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None):
        """
        Synchronously executes the Flux query and returns a Pandas DataFrame.
        Note that if a query returns more than one table, the client generates a DataFrame
        for each of them.

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param data_frame_index: the list of columns that are used as DataFrame index
        :return:
        """
        from ..extras import pd

        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
                                data_frame_index=data_frame_index)
        _dataFrames = list(_parser.generator())

        if len(_dataFrames) == 0:
            return pd.DataFrame(columns=[], index=None)
        elif len(_dataFrames) == 1:
            return _dataFrames[0]
        else:
            return _dataFrames

    # private helper
    @staticmethod
    def _create_query(query, dialect=default_dialect):
        created = Query(query=query, dialect=dialect)
        return created

    def __del__(self):
        pass
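# A minimal usage sketch of the QueryApi above, assuming a reachable InfluxDB at
# http://localhost:8086; the token, org and bucket values are hypothetical placeholders.
from influxdb_client import InfluxDBClient

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
query_api = client.query_api()

# query() returns a List['FluxTable']; iterate the tables and their records
tables = query_api.query('from(bucket: "my-bucket") |> range(start: -10m)')
for table in tables:
    for record in table.records:
        print(record.get_time(), record.get_measurement(), record.get_field(), record.get_value())

# query_stream() returns a generator of FluxRecord, useful for large result sets
for record in query_api.query_stream('from(bucket: "my-bucket") |> range(start: -10m)'):
    print(record.values)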
class QueryApi(object):
    """Implementation for '/api/v2/query' endpoint."""

    default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                              annotations=["datatype", "group", "default"],
                              date_time_format="RFC3339")

    def __init__(self, influxdb_client):
        """
        Initialize query client.

        :param influxdb_client: influxdb client
        """
        self._influxdb_client = influxdb_client
        self._query_api = QueryService(influxdb_client.api_client)

    def query_csv(self, query: str, org=None, dialect: Dialect = default_dialect, params: dict = None):
        """
        Execute the Flux query and return results as a CSV iterator.
        Each iteration returns a row of the CSV file.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :param params: bind parameters
        :return: The returned object is an iterator. Each iteration returns a row of the CSV file
                 (which can span multiple input lines).
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
                                              async_req=False, _preload_content=False)
        return csv.reader(codecs.iterdecode(response, 'utf-8'))

    def query_raw(self, query: str, org=None, dialect=default_dialect, params: dict = None):
        """
        Execute a synchronous Flux query and return the result as a raw, unprocessed str.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :param params: bind parameters
        :return: str
        """
        if org is None:
            org = self._influxdb_client.org

        result = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
                                            async_req=False, _preload_content=False)
        return result

    def query(self, query: str, org=None, params: dict = None) -> List['FluxTable']:
        """
        Execute a synchronous Flux query and return the result as a List['FluxTable'].

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param params: bind parameters
        :return:
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.tables)

        list(_parser.generator())

        return _parser.tables

    def query_stream(self, query: str, org=None,
                     params: dict = None) -> Generator['FluxRecord', Any, None]:
        """
        Execute a synchronous Flux query and return a stream of FluxRecord
        as a Generator['FluxRecord'].

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param params: bind parameters
        :return:
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream)

        return _parser.generator()

    def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None,
                         params: dict = None):
        """
        Execute a synchronous Flux query and return a Pandas DataFrame.
        Note that if a query returns more than one table, the client generates a DataFrame
        for each of them.

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param data_frame_index: the list of columns that are used as DataFrame index
        :param params: bind parameters
        :return:
        """
        from ..extras import pd

        _generator = self.query_data_frame_stream(query, org=org,
                                                  data_frame_index=data_frame_index, params=params)
        _dataFrames = list(_generator)

        if len(_dataFrames) == 0:
            return pd.DataFrame(columns=[], index=None)
        elif len(_dataFrames) == 1:
            return _dataFrames[0]
        else:
            return _dataFrames

    def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None,
                                params: dict = None):
        """
        Execute a synchronous Flux query and return a stream of Pandas DataFrame
        as a Generator['pd.DataFrame'].
        Note that if a query returns more than one table, the client generates a DataFrame
        for each of them.

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param data_frame_index: the list of columns that are used as DataFrame index
        :param params: bind parameters
        :return:
        """
        if org is None:
            org = self._influxdb_client.org

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
                                data_frame_index=data_frame_index)

        return _parser.generator()

    # private helper
    @staticmethod
    def _create_query(query, dialect=default_dialect, params: dict = None):
        created = Query(query=query, dialect=dialect, extern=QueryApi._build_flux_ast(params))
        return created

    @staticmethod
    def _params_to_extern_ast(params: dict) -> List['OptionStatement']:
        statements = []
        for key, value in params.items():
            if value is None:
                continue

            if isinstance(value, bool):
                literal = BooleanLiteral("BooleanLiteral", value)
            elif isinstance(value, int):
                literal = IntegerLiteral("IntegerLiteral", str(value))
            elif isinstance(value, float):
                literal = FloatLiteral("FloatLiteral", value)
            elif isinstance(value, datetime):
                value = get_date_helper().to_utc(value)
                literal = DateTimeLiteral("DateTimeLiteral", value.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
            elif isinstance(value, timedelta):
                # convert to microseconds
                _micro_delta = int(value / timedelta(microseconds=1))
                if _micro_delta < 0:
                    literal = UnaryExpression("UnaryExpression",
                                              argument=DurationLiteral("DurationLiteral",
                                                                       [Duration(magnitude=-_micro_delta,
                                                                                 unit="us")]),
                                              operator="-")
                else:
                    literal = DurationLiteral("DurationLiteral",
                                              [Duration(magnitude=_micro_delta, unit="us")])
            elif isinstance(value, str):
                literal = StringLiteral("StringLiteral", str(value))
            else:
                literal = value

            statements.append(OptionStatement("OptionStatement",
                                              VariableAssignment("VariableAssignment",
                                                                 Identifier("Identifier", key),
                                                                 literal)))
        return statements

    @staticmethod
    def _build_flux_ast(params: dict = None):
        if params is None:
            return None

        return File(package=None, name=None, type=None, imports=[],
                    body=QueryApi._params_to_extern_ast(params))

    def __del__(self):
        """Close QueryAPI."""
        pass
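# A usage sketch of the bind parameters (`params`) introduced in this version: each entry
# becomes a Flux option variable that the query references by name. The url, token, org,
# bucket and measurement names below are hypothetical placeholders.
from datetime import timedelta

from influxdb_client import InfluxDBClient

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
query_api = client.query_api()

p = {
    "_start": timedelta(hours=-1),   # timedelta -> Flux duration
    "_location": "Prague",           # str -> Flux string
    "_desc": True,                   # bool -> Flux boolean
    "_floatParam": 25.1,             # float -> Flux float
    "_every": timedelta(minutes=5),
}

tables = query_api.query('''
    from(bucket: "my-bucket")
        |> range(start: _start)
        |> filter(fn: (r) => r["_measurement"] == "my_measurement")
        |> filter(fn: (r) => r["_value"] > _floatParam)
        |> filter(fn: (r) => r["location"] == _location)
        |> aggregateWindow(every: _every, fn: mean, createEmpty: true)
        |> sort(columns: ["_time"], desc: _desc)
''', params=p)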
class QueryApi(object):
    """Implementation for '/api/v2/query' endpoint."""

    default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                              annotations=["datatype", "group", "default"],
                              date_time_format="RFC3339")

    def __init__(self, influxdb_client, query_options=QueryOptions()):
        """
        Initialize query client.

        :param influxdb_client: influxdb client
        """
        self._influxdb_client = influxdb_client
        self._query_options = query_options
        self._query_api = QueryService(influxdb_client.api_client)

    def query_csv(self, query: str, org=None, dialect: Dialect = default_dialect, params: dict = None):
        """
        Execute the Flux query and return results as a CSV iterator.
        Each iteration returns a row of the CSV file.

        :param query: a Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param dialect: csv dialect format
        :param params: bind parameters
        :return: The returned object is an iterator. Each iteration returns a row of the CSV file
                 (which can span multiple input lines).
        """
        org = self._org_param(org)
        response = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
                                              async_req=False, _preload_content=False)
        return csv.reader(codecs.iterdecode(response, 'utf-8'))

    def query_raw(self, query: str, org=None, dialect=default_dialect, params: dict = None):
        """
        Execute a synchronous Flux query and return the result as a raw, unprocessed str.

        :param query: a Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param dialect: csv dialect format
        :param params: bind parameters
        :return: str
        """
        org = self._org_param(org)
        result = self._query_api.post_query(org=org, query=self._create_query(query, dialect, params),
                                            async_req=False, _preload_content=False)
        return result

    def query(self, query: str, org=None, params: dict = None) -> List['FluxTable']:
        """
        Execute a synchronous Flux query and return the result as a List['FluxTable'].

        :param query: the Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param params: bind parameters
        :return:
        """
        org = self._org_param(org)

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.tables,
                                query_options=self._get_query_options())

        list(_parser.generator())

        return _parser.table_list()

    def query_stream(self, query: str, org=None,
                     params: dict = None) -> Generator['FluxRecord', Any, None]:
        """
        Execute a synchronous Flux query and return a stream of FluxRecord
        as a Generator['FluxRecord'].

        :param query: the Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param params: bind parameters
        :return:
        """
        org = self._org_param(org)

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream,
                                query_options=self._get_query_options())

        return _parser.generator()

    def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None,
                         params: dict = None):
        """
        Execute a synchronous Flux query and return a Pandas DataFrame.
        Note that if a query returns tables with differing schemas, the client generates
        a DataFrame for each of them.

        :param query: the Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param data_frame_index: the list of columns that are used as DataFrame index
        :param params: bind parameters
        :return:
        """
        from ..extras import pd

        _generator = self.query_data_frame_stream(query, org=org,
                                                  data_frame_index=data_frame_index, params=params)
        _dataFrames = list(_generator)

        if len(_dataFrames) == 0:
            return pd.DataFrame(columns=[], index=None)
        elif len(_dataFrames) == 1:
            return _dataFrames[0]
        else:
            return _dataFrames

    def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None,
                                params: dict = None):
        """
        Execute a synchronous Flux query and return a stream of Pandas DataFrame
        as a Generator['pd.DataFrame'].
        Note that if a query returns tables with differing schemas, the client generates
        a DataFrame for each of them.

        :param query: the Flux query
        :param str, Organization org: specifies the organization for executing the query;
                                      takes the ID, Name or Organization;
                                      if not specified, the default from ``client.org`` is used.
        :param data_frame_index: the list of columns that are used as DataFrame index
        :param params: bind parameters
        :return:
        """
        org = self._org_param(org)

        response = self._query_api.post_query(org=org,
                                              query=self._create_query(query, self.default_dialect, params),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        _parser = FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
                                data_frame_index=data_frame_index,
                                query_options=self._get_query_options())

        return _parser.generator()

    def _get_query_options(self):
        if self._query_options and self._query_options.profilers:
            return self._query_options
        elif self._influxdb_client.profilers:
            return QueryOptions(profilers=self._influxdb_client.profilers)

    def _create_query(self, query, dialect=default_dialect, params: dict = None):
        query_options = self._get_query_options()
        profilers = query_options.profilers if query_options is not None else None
        q = Query(query=query, dialect=dialect, extern=QueryApi._build_flux_ast(params, profilers))

        if profilers:
            print("\n===============")
            print("Profiler: query")
            print("===============")
            print(query)

        return q

    def _org_param(self, org):
        return get_org_query_param(org=org, client=self._influxdb_client)

    @staticmethod
    def _params_to_extern_ast(params: dict) -> List['OptionStatement']:
        statements = []
        for key, value in params.items():
            expression = QueryApi._parm_to_extern_ast(value)
            if expression is None:
                continue

            statements.append(OptionStatement("OptionStatement",
                                              VariableAssignment("VariableAssignment",
                                                                 Identifier("Identifier", key),
                                                                 expression)))
        return statements

    @staticmethod
    def _parm_to_extern_ast(value) -> Union[Expression, None]:
        if value is None:
            return None
        if isinstance(value, bool):
            return BooleanLiteral("BooleanLiteral", value)
        elif isinstance(value, int):
            return IntegerLiteral("IntegerLiteral", str(value))
        elif isinstance(value, float):
            return FloatLiteral("FloatLiteral", value)
        elif isinstance(value, datetime):
            value = get_date_helper().to_utc(value)
            return DateTimeLiteral("DateTimeLiteral", value.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
        elif isinstance(value, timedelta):
            # convert to microseconds
            _micro_delta = int(value / timedelta(microseconds=1))
            if _micro_delta < 0:
                return UnaryExpression("UnaryExpression",
                                       argument=DurationLiteral("DurationLiteral",
                                                                [Duration(magnitude=-_micro_delta,
                                                                          unit="us")]),
                                       operator="-")
            else:
                return DurationLiteral("DurationLiteral",
                                       [Duration(magnitude=_micro_delta, unit="us")])
        elif isinstance(value, str):
            return StringLiteral("StringLiteral", str(value))
        elif isinstance(value, Iterable):
            return ArrayExpression("ArrayExpression",
                                   elements=list(map(lambda it: QueryApi._parm_to_extern_ast(it), value)))
        else:
            return value

    @staticmethod
    def _build_flux_ast(params: dict = None, profilers: List[str] = None):
        imports = []
        body = []

        if profilers is not None and len(profilers) > 0:
            imports.append(ImportDeclaration("ImportDeclaration",
                                             path=StringLiteral("StringLiteral", "profiler")))

            elements = []
            for profiler in profilers:
                elements.append(StringLiteral("StringLiteral", value=profiler))

            member = MemberExpression("MemberExpression",
                                      object=Identifier("Identifier", "profiler"),
                                      _property=Identifier("Identifier", "enabledProfilers"))

            prof = OptionStatement("OptionStatement",
                                   assignment=MemberAssignment("MemberAssignment",
                                                               member=member,
                                                               init=ArrayExpression("ArrayExpression",
                                                                                    elements=elements)))
            body.append(prof)

        if params is not None:
            body.extend(QueryApi._params_to_extern_ast(params))

        return File(package=None, name=None, type=None, imports=imports, body=body)

    def __del__(self):
        """Close QueryAPI."""
        pass
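# A usage sketch of the Flux profiler support in this version: QueryOptions carries the list
# of profilers to enable. It assumes the client's query_api() factory forwards query_options
# to the constructor shown above; url, token, org and bucket values are hypothetical.
from influxdb_client import InfluxDBClient
from influxdb_client.client.query_api import QueryOptions

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
query_api = client.query_api(query_options=QueryOptions(profilers=["query", "operator"]))

# profiler results are returned alongside the regular query results
tables = query_api.query('from(bucket: "my-bucket") |> range(start: -10m)')
for table in tables:
    for record in table.records:
        print(record.values)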
class QueryApi(object):

    default_dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                              annotations=["datatype", "group", "default"],
                              date_time_format="RFC3339")

    def __init__(self, influxdb_client):
        """
        Initializes query client.

        :param influxdb_client: influxdb client
        """
        self._influxdb_client = influxdb_client
        self._query_api = QueryService(influxdb_client.api_client)

    def query_csv(self, query: str, org=None, dialect: Dialect = default_dialect):
        """
        Executes the Flux query and returns results as a CSV iterator.
        Each iteration returns a row of the CSV file.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :return: The returned object is an iterator. Each iteration returns a row of the CSV file
                 (which can span multiple input lines).
        """
        if org is None:
            org = self._influxdb_client.org
        response = self._query_api.post_query(org=org, query=self._create_query(query, dialect),
                                              async_req=False, _preload_content=False)
        return csv.reader(codecs.iterdecode(response, 'utf-8'))

    def query_raw(self, query: str, org=None, dialect=default_dialect):
        """
        Synchronously executes the Flux query and returns the result as a raw, unprocessed str.

        :param query: a Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :return: str
        """
        if org is None:
            org = self._influxdb_client.org
        result = self._query_api.post_query(org=org, query=self._create_query(query, dialect),
                                            async_req=False, _preload_content=False)
        return result
        # return codecs.iterdecode(result, 'utf-8')

    def query(self, query: str, org=None, dialect=default_dialect) -> List['FluxTable']:
        """
        Synchronously executes the Flux query and returns the result as a List['FluxTable'].

        :param query: the Flux query
        :param org: organization name (optional if already specified in InfluxDBClient)
        :param dialect: csv dialect format
        :return:
        """
        if org is None:
            org = self._influxdb_client.org
        response = self._query_api.post_query(org=org, query=self._create_query(query, dialect),
                                              async_req=False, _preload_content=False,
                                              _return_http_data_only=False)

        consumer = FluxResponseConsumerTable()
        parser = FluxCsvParser()

        parser.parse_flux_response(response=response, cancellable=None, consumer=consumer)
        return consumer.tables

    # private helper
    @staticmethod
    def _create_query(query, dialect=default_dialect):
        created = Query(query=query, dialect=dialect)
        return created

    def __del__(self):
        pass
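# A minimal sketch of the CSV-oriented entry point above: query_csv() yields each CSV row as a
# list of strings, including the annotation rows enabled by the default dialect. The connection
# values and bucket name are hypothetical placeholders.
from influxdb_client import InfluxDBClient

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
query_api = client.query_api()

csv_result = query_api.query_csv('from(bucket: "my-bucket") |> range(start: -10m)')
for row in csv_result:
    # annotation rows ("#datatype", "#group", "#default") start with the comment prefix "#"
    print(row)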
def init_queries(self):
    self.queries = map(lambda q: q['text'], self.info.properties.get('queries', []))
    self.query_api = self.client.query_api()
    self.query_service = QueryService(self.client.api_client)
class InfluxDBDashboardCell:
    def __init__(self, client, org_id, dashboard_id, cell_info):
        self._results = None
        self.client = client
        self.cell_info = cell_info
        self.org_id = org_id
        self.dashboard_id = dashboard_id
        self.cs = CellsService(self.client.api_client)
        self.init_cell()
        self.init_queries()
        self.init_axes()
        self.init_colors()
        self.init_params()

    def init_cell(self):
        self.id = self.cell_info.id
        self.x = self.cell_info.x
        self.y = self.cell_info.y
        self.w = self.cell_info.w
        self.h = self.cell_info.h
        self.yh = self.y + self.h
        self.info = self.cs.get_dashboards_id_cells_id_view(self.dashboard_id, self.id)
        self.type = self.info.properties.get('type', 'xy')
        self.value_prefix = self.info.properties.get('prefix', '')
        self.value_suffix = self.info.properties.get('suffix', '')
        self.name = self.info.name

    def init_queries(self):
        # note: `map` returns a one-shot iterator; it is consumed once in results()
        self.queries = map(lambda q: q['text'], self.info.properties.get('queries', []))
        self.query_api = self.client.query_api()
        self.query_service = QueryService(self.client.api_client)

    def init_axes(self):
        self.x_column = self.info.properties.get('xColumn', '_time')
        if self.x_column == '':
            self.x_column = '_time'
        self.y_column = self.info.properties.get('yColumn', '_value')
        if self.y_column == '':
            self.y_column = '_value'
        self.x_axis = self.info.properties.get('axes', {}).get('x', {})
        self.y_axis = self.info.properties.get('axes', {}).get('y', {})
        self.shade_below = self.info.properties.get('shadeBelow', False)
        decimal_places = self.info.properties.get('decimalPlaces', {'isEnforced': False})
        self.decimal_places = int(decimal_places['digits']) if decimal_places['isEnforced'] else None

    def init_colors(self):
        # convert '#rrggbb' hex strings into (r, g, b) tuples
        convert_color = lambda c: {
            **c,
            'color': (int(c['hex'][1:3], 16), int(c['hex'][3:5], 16), int(c['hex'][5:7], 16))
        }
        self.colors = list(map(convert_color, self.info.properties.get('colors', [])))
        self.color_map = {}
        for c in self.colors:
            t = c['type']
            if t not in self.color_map:
                self.color_map[t] = []
            self.color_map[t] += [c]

    def init_params(self):
        self._extern_obj = InfluxDBDashboardExtern('v')
        self.set_time_range()

    def set_time_range(self, start_offset=-(7 * 24 * 60), end_offset=0, window_period=15,
                       offset_unit='m'):
        self._extern_obj.add_time_offset_variable('timeRangeStart', start_offset, offset_unit=offset_unit)
        self._extern_obj.add_time_offset_variable('timeRangeStop', end_offset, offset_unit=offset_unit)
        self._extern_obj.add_duration_variable('windowPeriod', window_period, offset_unit=offset_unit)
        self._extern = self._extern_obj.serialize()
        # invalidate any previously stored results
        self._results = None

    def set_string_literval_variable(self, name, value):
        # TODO: handle duplicated values
        self._extern_obj.add_string_literal(name, value)
        self._extern = self._extern_obj.serialize()
        # invalidate any previously stored results
        self._results = None

    def queries(self):
        # note: shadowed by the `self.queries` attribute assigned in init_queries()
        return self.queries

    def retrieve_result(self, query_string):
        # TODO: this method is hopefully temporary until InfluxDB /api/v2/query API simplifies passing
        # mimic default dialect
        dialect = Dialect(header=True, delimiter=",", comment_prefix="#",
                          annotations=["datatype", "group", "default"],
                          date_time_format="RFC3339")
        query = Query(query=query_string, dialect=dialect, extern=self._extern)
        try:
            response = self.query_service.post_query(org=self.client.org, query=query,
                                                     async_req=False, _preload_content=False,
                                                     _return_http_data_only=False)
            _parser = FluxCsvParser(response=response,
                                    serialization_mode=FluxSerializationMode.tables)
            list(_parser.generator())
            tables = _parser.tables
        except rest.ApiException as error:
            # TODO: log error
            print('Error while retrieving data:', error)
            tables = []
        return tables

    def results(self):
        if self._results is None:
            self._results = list(map(self.retrieve_result, self.queries))
        return self._results

    def flat_results(self):
        tables = []
        for result in self.results():
            tables += result
        return tables

    def cell_output(self):
        return InfluxDBDashboardCellOutput(self, self.flat_results())
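# A hedged sketch of how the dashboard cell above might be driven. `client` is assumed to be a
# configured InfluxDBClient, `cell_info` one entry from a dashboard's cell list obtained via the
# dashboards API, and the org_id/dashboard_id values are hypothetical placeholders.
cell = InfluxDBDashboardCell(client, org_id="my-org-id", dashboard_id="my-dashboard-id",
                             cell_info=cell_info)

# narrow the dashboard time range to the last 60 minutes with a 5 minute window
cell.set_time_range(start_offset=-60, end_offset=0, window_period=5, offset_unit='m')

# run every query of the cell and iterate the flattened FluxTable results
for table in cell.flat_results():
    for record in table.records:
        print(record.values)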
def __init__(self, influxdb_client, query_options=None):
    from influxdb_client.client.query_api import QueryOptions
    self._query_options = QueryOptions() if query_options is None else query_options
    self._influxdb_client = influxdb_client
    self._query_api = QueryService(influxdb_client.api_client)