def _parse_flux_response(self):
        table_index = 0
        start_new_table = False
        table = None
        parsing_state_error = False

        for csv in self._reader:
            # debug
            # print("parsing: ", csv)

            # Skip empty line in results (new line is used as a delimiter between tables or table and error)
            if len(csv) < 1:
                continue

            # Response has HTTP status ok, but the response body is an error.
            if "error" == csv[1] and "reference" == csv[2]:
                parsing_state_error = True
                continue

            # Raise FluxQueryException with the error response
            if parsing_state_error:
                error = csv[1]
                reference_value = csv[2]
                raise FluxQueryException(error, reference_value)

            token = csv[0]
            # start new table
            if "#datatype" == token:
                start_new_table = True
                table = FluxTable()
                self._insert_table(table, table_index)
                table_index = table_index + 1
            elif table is None:
                raise FluxCsvParserException(
                    "Unable to parse CSV response. FluxTable definition was not found."
                )

            # e.g. #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
            if "#datatype" == token:
                self.add_data_types(table, csv)

            elif "#group" == token:
                self.add_groups(table, csv)

            elif "#default" == token:
                self.add_default_empty_values(table, csv)

            else:
                # parse column names
                if start_new_table:
                    self.add_column_names_and_tags(table, csv)
                    start_new_table = False
                    continue

                # TODO: make the conversion to int more robust
                current_index = int(csv[2])

                if current_index > (table_index - 1):
                    # create a new table with the previous column header settings
                    flux_columns = table.columns
                    table = FluxTable()
                    table.columns.extend(flux_columns)
                    self._insert_table(table, table_index)
                    table_index = table_index + 1

                flux_record = self.parse_record(table_index - 1, table, csv)

                if not self._stream:
                    self.tables[table_index - 1].records.append(flux_record)

                yield flux_record
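
For context, a minimal standalone sketch (not from the library) of the annotated-CSV stream these parsers walk: annotation rows tagged #datatype, #group, and #default, then a header row, then data rows whose third column carries the table id. The sample payload below is illustrative only.

import csv
import io

ANNOTATED_CSV = (
    "#datatype,string,long,dateTime:RFC3339,double,string\n"
    "#group,false,false,false,false,true\n"
    "#default,_result,,,,\n"
    ",result,table,_time,_value,_field\n"
    ",,0,1970-01-01T00:00:00Z,1.0,water level\n"
    ",,0,1970-01-02T00:00:00Z,2.0,water level\n"
)

for row in csv.reader(io.StringIO(ANNOTATED_CSV)):
    # Annotation rows start with a '#' token; header and data rows start
    # with an empty "result" column.
    kind = row[0] if row and row[0].startswith("#") else "header/data"
    print(kind, row)
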
    def _parse_flux_response_row(self, metadata, csv):
        if len(csv) < 1:
            # Skip empty line in results (new line is used as a delimiter between tables or table and error)
            pass

        elif "error" == csv[1] and "reference" == csv[2]:
            metadata.parsing_state_error = True

        else:
            # Raise FluxQueryException with the error response
            if metadata.parsing_state_error:
                error = csv[1]
                reference_value = csv[2]
                raise FluxQueryException(error, reference_value)

            token = csv[0]
            # start new table
            if (token in ANNOTATIONS and not metadata.start_new_table) or \
                    (self._response_metadata_mode is FluxResponseMetadataMode.only_names and not metadata.table):

                # Return already parsed DataFrame
                if self._serialization_mode is FluxSerializationMode.dataFrame \
                        and hasattr(self, '_data_frame'):
                    df = self._prepare_data_frame()
                    if not self._is_profiler_table(metadata.table):
                        yield df

                metadata.start_new_table = True
                metadata.table = FluxTable()
                self._insert_table(metadata.table, metadata.table_index)
                metadata.table_index = metadata.table_index + 1
                metadata.table_id = -1
            elif metadata.table is None:
                raise FluxCsvParserException("Unable to parse CSV response. FluxTable definition was not found.")

            # e.g. #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
            if ANNOTATION_DATATYPE == token:
                self.add_data_types(metadata.table, csv)

            elif ANNOTATION_GROUP == token:
                metadata.groups = csv

            elif ANNOTATION_DEFAULT == token:
                self.add_default_empty_values(metadata.table, csv)

            else:
                # parse column names
                if metadata.start_new_table:
                    # Invokable scripts don't support dialects => all columns are strings
                    if not metadata.table.columns and \
                            self._response_metadata_mode is FluxResponseMetadataMode.only_names:
                        self.add_data_types(metadata.table, list(map(lambda column: 'string', csv)))
                        metadata.groups = list(map(lambda column: 'false', csv))
                    self.add_groups(metadata.table, metadata.groups)
                    self.add_column_names_and_tags(metadata.table, csv)
                    metadata.start_new_table = False
                    # Create DataFrame with default values
                    if self._serialization_mode is FluxSerializationMode.dataFrame:
                        from ..extras import pd
                        labels = list(map(lambda it: it.label, metadata.table.columns))
                        self._data_frame = pd.DataFrame(data=[], columns=labels, index=None)
                else:

                    # TODO: make the conversion to int more robust
                    current_id = int(csv[2])
                    if metadata.table_id == -1:
                        metadata.table_id = current_id

                    if metadata.table_id != current_id:
                        # create a new table with the previous column header settings
                        flux_columns = metadata.table.columns
                        metadata.table = FluxTable()
                        metadata.table.columns.extend(flux_columns)
                        self._insert_table(metadata.table, metadata.table_index)
                        metadata.table_index = metadata.table_index + 1
                        metadata.table_id = current_id

                    flux_record = self.parse_record(metadata.table_index - 1, metadata.table, csv)

                    if self._is_profiler_record(flux_record):
                        self._print_profiler_info(flux_record)
                    else:
                        if self._serialization_mode is FluxSerializationMode.tables:
                            self.tables[metadata.table_index - 1].records.append(flux_record)

                        if self._serialization_mode is FluxSerializationMode.stream:
                            yield flux_record

                        if self._serialization_mode is FluxSerializationMode.dataFrame:
                            self._data_frame_values.append(flux_record.values)
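
The refactored row parser above threads its state through a metadata object instead of local variables. A minimal sketch of that holder, with fields inferred from the attribute accesses in the code (the library's own class is not shown here):

import dataclasses
from typing import List, Optional


@dataclasses.dataclass
class ParserMetadataSketch:
    table_index: int = 0            # running index into self.tables
    table_id: int = -1              # last seen value of the "table" column
    start_new_table: bool = False   # set when annotations open a new table
    table: Optional[object] = None  # the FluxTable under construction
    groups: List[str] = dataclasses.field(default_factory=list)  # last #group row
    parsing_state_error: bool = False  # set once an error/reference row is seen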
Example #3
    def _parse_flux_response(self):
        table_index = 0
        table_id = -1
        start_new_table = False
        table = None
        groups = []
        parsing_state_error = False

        for csv in self._reader:
            # debug
            # print("parsing: ", csv)

            # Skip empty line in results (new line is used as a delimiter between tables or table and error)
            if len(csv) < 1:
                continue

            # Response has HTTP status ok, but the response body is an error.
            if "error" == csv[1] and "reference" == csv[2]:
                parsing_state_error = True
                continue

            # Raise FluxQueryException with the error response
            if parsing_state_error:
                error = csv[1]
                reference_value = csv[2]
                raise FluxQueryException(error, reference_value)

            token = csv[0]
            # start new table
            if token in ANNOTATIONS and not start_new_table:

                # Return already parsed DataFrame
                if self._serialization_mode is FluxSerializationMode.dataFrame \
                        and hasattr(self, '_data_frame'):
                    df = self._prepare_data_frame()
                    if not self._is_profiler_table(table):
                        yield df

                start_new_table = True
                table = FluxTable()
                self._insert_table(table, table_index)
                table_index = table_index + 1
                table_id = -1
            elif table is None:
                raise FluxCsvParserException(
                    "Unable to parse CSV response. FluxTable definition was not found."
                )

            # e.g. #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
            if ANNOTATION_DATATYPE == token:
                self.add_data_types(table, csv)

            elif ANNOTATION_GROUP == token:
                groups = csv

            elif ANNOTATION_DEFAULT == token:
                self.add_default_empty_values(table, csv)

            else:
                # parse column names
                if start_new_table:
                    self.add_groups(table, groups)
                    self.add_column_names_and_tags(table, csv)
                    start_new_table = False
                    # Create DataFrame with default values
                    if self._serialization_mode is FluxSerializationMode.dataFrame:
                        from ..extras import pd
                        labels = list(map(lambda it: it.label, table.columns))
                        self._data_frame = pd.DataFrame(data=[],
                                                        columns=labels,
                                                        index=None)
                    continue

                # TODO: make the conversion to int more robust
                current_id = int(csv[2])
                if table_id == -1:
                    table_id = current_id

                if table_id != current_id:
                    # create a new table with the previous column header settings
                    flux_columns = table.columns
                    table = FluxTable()
                    table.columns.extend(flux_columns)
                    self._insert_table(table, table_index)
                    table_index = table_index + 1
                    table_id = current_id

                flux_record = self.parse_record(table_index - 1, table, csv)

                if self._is_profiler_record(flux_record):
                    self._print_profiler_info(flux_record)
                    continue

                if self._serialization_mode is FluxSerializationMode.tables:
                    self.tables[table_index - 1].records.append(flux_record)

                if self._serialization_mode is FluxSerializationMode.stream:
                    yield flux_record

                if self._serialization_mode is FluxSerializationMode.dataFrame:
                    self._data_frame_values.append(flux_record.values)

        # Return latest DataFrame
        if self._serialization_mode is FluxSerializationMode.dataFrame \
                and hasattr(self, '_data_frame'):
            df = self._prepare_data_frame()
            if not self._is_profiler_table(table):
                yield df
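
Unlike the first example, which compares csv[2] against the running table_index, this variant remembers the last seen value of the table column and starts a new table whenever that value changes. A standalone sketch of the detection logic, using illustrative rows:

# A new output table starts whenever the "table" column (index 2) differs
# from the previously seen value.
rows = [
    ["", "_result", "0", "1.0"],
    ["", "_result", "0", "2.0"],
    ["", "_result", "1", "3.0"],  # id changes -> new table
]

tables, table_id = [], -1
for row in rows:
    current_id = int(row[2])
    if table_id != current_id:
        tables.append([])  # new table, keeping the prior column settings
        table_id = current_id
    tables[-1].append(row)

assert len(tables) == 2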
Example #4
    def _parse_flux_response(self):
        table_index = 0
        start_new_table = False
        table = None
        parsing_state_error = False

        for csv in self._reader:
            # debug
            # print("parsing: ", csv)

            # Skip empty line in results (new line is used as a delimiter between tables or table and error)
            if len(csv) < 1:
                continue

            # Response has HTTP status ok, but the response body is an error.
            if "error" == csv[1] and "reference" == csv[2]:
                parsing_state_error = True
                continue

            # Raise FluxQueryException with the error response
            if parsing_state_error:
                error = csv[1]
                reference_value = csv[2]
                raise FluxQueryException(error, reference_value)

            token = csv[0]
            # start new table
            if "#datatype" == token:

                # Return already parsed DataFrame
                if self._serialization_mode is FluxSerializationMode.dataFrame \
                        and hasattr(self, '_data_frame'):
                    yield self._prepare_data_frame()

                start_new_table = True
                table = FluxTable()
                self._insert_table(table, table_index)
                table_index = table_index + 1
            elif table is None:
                raise FluxCsvParserException(
                    "Unable to parse CSV response. FluxTable definition was not found."
                )

            # e.g. #datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string
            if "#datatype" == token:
                self.add_data_types(table, csv)

            elif "#group" == token:
                self.add_groups(table, csv)

            elif "#default" == token:
                self.add_default_empty_values(table, csv)

            else:
                # parse column names
                if start_new_table:
                    self.add_column_names_and_tags(table, csv)
                    start_new_table = False
                    # Create DataFrame with default values
                    if self._serialization_mode is FluxSerializationMode.dataFrame:
                        self._data_frame = DataFrame(data=[],
                                                     columns=[],
                                                     index=None)
                        for column in table.columns:
                            self._data_frame[
                                column.label] = column.default_value
                    continue

                # TODO: make the conversion to int more robust
                current_index = int(csv[2])

                if current_index > (table_index - 1):
                    # create a new table with the previous column header settings
                    flux_columns = table.columns
                    table = FluxTable()
                    table.columns.extend(flux_columns)
                    self._insert_table(table, table_index)
                    table_index = table_index + 1

                flux_record = self.parse_record(table_index - 1, table, csv)

                if self._serialization_mode is FluxSerializationMode.tables:
                    self.tables[table_index - 1].records.append(flux_record)

                if self._serialization_mode is FluxSerializationMode.stream:
                    yield flux_record

                if self._serialization_mode is FluxSerializationMode.dataFrame:
                    self._data_frame.loc[len(
                        self._data_frame.index)] = flux_record.values

                # debug
                # print(flux_record)

        # Return latest DataFrame
        if self._serialization_mode is FluxSerializationMode.dataFrame \
                and hasattr(self, '_data_frame'):
            yield self._prepare_data_frame()
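
This variant grows the DataFrame one row at a time via self._data_frame.loc[len(self._data_frame.index)] = ..., which reallocates on every insert; the second and third examples instead buffer flux_record.values in _data_frame_values. A hedged sketch of how a _prepare_data_frame built on such a buffer might assemble the frame in one shot (an assumption, since the method body does not appear in these examples):

import pandas as pd

def prepare_data_frame_sketch(buffered_values, labels):
    # Build the frame once from the buffered record values (a list of dicts
    # keyed by column label) rather than appending row by row.
    return pd.DataFrame(data=buffered_values, columns=labels)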
Example #5
    def test_create_structure(self):
        _time = datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)

        table = FluxTable()
        table.columns = [
            FluxColumn(index=0,
                       label='result',
                       data_type='string',
                       group=False,
                       default_value='_result'),
            FluxColumn(index=1,
                       label='table',
                       data_type='long',
                       group=False,
                       default_value=''),
            FluxColumn(index=2,
                       label='_start',
                       data_type='dateTime:RFC3339',
                       group=True,
                       default_value=''),
            FluxColumn(index=3,
                       label='_stop',
                       data_type='dateTime:RFC3339',
                       group=True,
                       default_value=''),
            FluxColumn(index=4,
                       label='_time',
                       data_type='dateTime:RFC3339',
                       group=False,
                       default_value=''),
            FluxColumn(index=5,
                       label='_value',
                       data_type='double',
                       group=False,
                       default_value=''),
            FluxColumn(index=6,
                       label='_field',
                       data_type='string',
                       group=True,
                       default_value=''),
            FluxColumn(index=7,
                       label='_measurement',
                       data_type='string',
                       group=True,
                       default_value=''),
            FluxColumn(index=8,
                       label='location',
                       data_type='string',
                       group=True,
                       default_value='')
        ]

        record1 = FluxRecord(table=0)
        record1["table"] = 0
        record1["_start"] = _time
        record1["_stop"] = _time
        record1["_time"] = _time
        record1["_value"] = 1.0
        record1["_field"] = 'water level'
        record1["_measurement"] = 'h2o'
        record1["location"] = 'coyote_creek'

        record2 = FluxRecord(table=0)
        record2["table"] = 0
        record2["_start"] = _time
        record2["_stop"] = _time
        record2["_time"] = _time + datetime.timedelta(days=1)
        record2["_value"] = 2.0
        record2["_field"] = 'water level'
        record2["_measurement"] = 'h2o'
        record2["location"] = 'coyote_creek'

        table.records = [record1, record2]

        self.assertEqual(9, len(table.columns))
        self.assertEqual(2, len(table.records))

        # record 1
        self.assertEqual(_time, table.records[0].get_start())
        self.assertEqual(_time, table.records[0].get_stop())
        self.assertEqual(_time, table.records[0].get_time())
        self.assertEqual(1.0, table.records[0].get_value())
        self.assertEqual(1.0, table.records[0]["_value"])
        self.assertEqual('water level', table.records[0].get_field())
        self.assertEqual('water level', table.records[0]["_field"])
        self.assertEqual('h2o', table.records[0].get_measurement())
        self.assertEqual('h2o', table.records[0]["_measurement"])
        self.assertEqual('coyote_creek', table.records[0].values['location'])
        self.assertEqual('coyote_creek', table.records[0]['location'])

        # record 2
        self.assertEqual(_time, table.records[1].get_start())
        self.assertEqual(_time, table.records[1].get_stop())
        self.assertEqual(_time + datetime.timedelta(days=1),
                         table.records[1].get_time())
        self.assertEqual(2.0, table.records[1].get_value())
        self.assertEqual(2.0, table.records[1]["_value"])
        self.assertEqual('water level', table.records[1].get_field())
        self.assertEqual('water level', table.records[1]["_field"])
        self.assertEqual('h2o', table.records[1].get_measurement())
        self.assertEqual('h2o', table.records[1]["_measurement"])
        self.assertEqual('coyote_creek', table.records[1].values['location'])
        self.assertEqual('coyote_creek', table.records[1]['location'])
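
The assertions above pair each get_* accessor with direct item access (get_value() equals record["_value"], and so on), which implies the accessors are thin readers over the record's values dict. A minimal stand-in consistent with that behavior (hypothetical, not the library's implementation):

class FluxRecordSketch:
    """Dict-backed record whose get_* helpers read well-known column labels."""

    def __init__(self, table):
        self.table = table
        self.values = {}

    def __setitem__(self, key, value):
        self.values[key] = value

    def __getitem__(self, key):
        return self.values[key]

    def get_start(self):
        return self.values["_start"]

    def get_stop(self):
        return self.values["_stop"]

    def get_time(self):
        return self.values["_time"]

    def get_value(self):
        return self.values["_value"]

    def get_field(self):
        return self.values["_field"]

    def get_measurement(self):
        return self.values["_measurement"]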