Example #1
    def _write_point(self):
        self.write_client = self.client.write_api(write_options=SYNCHRONOUS)

        bucket = self.create_test_bucket()

        measurement = "h2o_feet"
        field_name = "water_level"
        val = "1.0"
        tag = "location"
        tag_value = "creek level"

        p = Point(measurement)
        p.field(field_name, val)
        p.tag(tag, tag_value)

        record_list = [p]

        self.write_client.write(bucket.name, self.org, record_list)

        query = 'from(bucket:"' + bucket.name + '") |> range(start: 1970-01-01T00:00:00.000000001Z)'
        flux_result = self.client.query_api().query(query)
        self.assertEqual(1, len(flux_result))
        rec = flux_result[0].records[0]

        self.assertEqual(self.id_tag, rec["id"])
        self.assertEqual(self.customer_tag, rec["customer"])
        self.assertEqual("LA", rec[self.data_center_key])

        self.delete_test_bucket(bucket)
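# For reference, a minimal standalone sketch of the same synchronous write-and-query
# round trip outside the test harness. The URL, token, org and bucket names below are
# placeholders, not values from the test fixture above.
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
write_api = client.write_api(write_options=SYNCHRONOUS)

p = Point("h2o_feet").tag("location", "creek level").field("water_level", 1.0)
write_api.write("my-bucket", "my-org", [p])

tables = client.query_api().query(
    'from(bucket:"my-bucket") |> range(start: 1970-01-01T00:00:00.000000001Z)')
for record in tables[0].records:
    print(record.get_measurement(), record.get_field(), record.get_value())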
Example #2
    def getGRCPoints(self, bucket):
        points = []
        dt = datetime.now(tz=pytz.timezone('US/Pacific')).isoformat()
        mag = self.explainMagnitude()
        mining = self.getMiningInfo()
        for prj in mag.magnitude:
            point = Point(measurement_name='magnitude')
            point.time(time=dt)
            point.tag('project_name', prj.project)
            point.field('rac', prj.rac)
            point.field('magnitude', prj.magnitude)
            points.append({"bucket": bucket, "point": point})

        mining_point = Point(measurement_name='mining')
        mining_point.time(time=dt)
        mining_point.tag('CPID', mining.CPID)
        mining_point.field('blocks', mining.blocks)
        mining_point.field('magnitude', mining.current_magnitude)
        mining_point.field('current_difficulty', mining.difficulty.current)
        mining_point.field('pending_reward', mining.BoincRewardPending)
        mining_point.field('stake_weight', mining.stakeweight.valuesum)
        mining_point.field('time_to_stake', mining.time_to_stake_days)
        mining_point.field('staking_efficiency', mining.staking_efficiency)
        points.append({"bucket": bucket, "point": mining_point})
        return points
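# A caller might unpack the {"bucket": ..., "point": ...} dicts returned above and write
# each point with a synchronous write API. 'wallet' and 'write_api' are assumptions here:
# an instance of the surrounding class and an influxdb_client WriteApi, respectively.
for entry in wallet.getGRCPoints("gridcoin"):
    write_api.write(bucket=entry["bucket"], record=entry["point"])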
Example #3
 def parse(measurement: Measurement) -> Point:
     point = Point(measurement.name)
     if measurement.groups is not None:
         for (key, value) in measurement.groups.items():
             point.tag(key, value)
     point.field("value", measurement.value).time(measurement.time)
     return point
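# Measurement is an application-specific type; a hypothetical definition that satisfies
# parse() might look like the dataclass below (an assumption, not the project's actual class).
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Dict, Optional

@dataclass
class Measurement:
    name: str
    value: float
    time: datetime
    groups: Optional[Dict[str, str]] = None

m = Measurement(name="temperature", value=21.5,
                time=datetime.now(timezone.utc), groups={"room": "kitchen"})
print(parse(m).to_line_protocol())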
Example #4
    def test_write_points_unicode(self):
        bucket = self.create_test_bucket()

        measurement = "h2o_feet_ěščřĚŠČŘ"
        field_name = "field_ěščř"
        utf8_val = "Přerov 🍺"
        tag = "tag_ěščř"
        tag_value = "tag_value_ěščř"

        p = Point(measurement)
        p.field(field_name, utf8_val)
        p.tag(tag, tag_value)
        record_list = [p]

        self.write_client.write(bucket.name, self.org, record_list)

        query = 'from(bucket:"' + bucket.name + '") |> range(start: 1970-01-01T00:00:00.000000001Z)'
        flux_result = self.client.query_api().query(query)
        self.assertEqual(1, len(flux_result))
        rec = flux_result[0].records[0]

        self.assertEqual(self.id_tag, rec["id"])
        self.assertEqual(self.customer_tag, rec["customer"])
        self.assertEqual("LA", rec[self.data_center_key])

        self.assertEqual(measurement, rec.get_measurement())
        self.assertEqual(utf8_val, rec.get_value())
        self.assertEqual(field_name, rec.get_field())
Example #5
    def write(self, bucket, measurement: str,
              time,
              field_list: list,
              tag_list: list = [],
              **kwargs):
        '''
        :param bucket: the bucket to write the data to
        :param measurement: name of the measurement
        :param time: timestamp
        :param field_list: list of (key, value) field tuples
        :param tag_list: optional list of (key, value) tag tuples
        :return:
        '''
        point = Point(measurement)
        point.time(time, WritePrecision.MS)
        if not field_list:
            # TODO: create a dedicated NoDataException
            raise Exception("No fields to write to the database.")
        for field_key, field_value in field_list:
            point.field(field_key, field_value)
        for tag_key, tag_value in tag_list:
            point.tag(tag_key, tag_value)

        self.write_api.write(bucket=bucket, record=point, org=self.org, **kwargs)
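# A hypothetical call illustrating the tuple-based field and tag lists this wrapper
# expects; 'writer' stands in for an instance of the surrounding class and the bucket
# name is a placeholder.
writer.write("my-bucket", "room_climate", 1633024800000,
             field_list=[("temperature", 21.5), ("humidity", 48.0)],
             tag_list=[("room", "kitchen")])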
Example #6
    def map_data(self, fields: dict, tags: dict):
        point = Point('ups_status')

        for key, value in fields.items():
            point.field(key, value)

        for key, value in tags.items():
            point.tag(key, value)

        return point
Example #7
 def _post_telemetry(self) -> None:
     """Post telemetry points"""
     if self.telem_logger is not None:
         p = Point('sensor_fusion')
         p.field('blind_target_bias_mag', np.abs(self.blind_target_bias))
         p.field('blind_target_bias_angle',
                 np.degrees(np.angle(self.blind_target_bias)))
         p.tag('units', 'degrees')
         p.tag('class', type(self).__name__)
         p.time(datetime.utcnow())
         self.telem_logger.post_points(p)
Example #8
    def process_camera_frame(self) -> Tuple[Time, Angle, Angle]:
        """Get frame from camera and find target using computer vision

        Returns:
            Tuple containing:
            - The approximate time that the camera frame was captured.
            - The X position of the target within the camera frame, where the origin is the
              center of the frame.
            - The Y position of the target within the camera frame, where the origin is the
              center of the frame.
        """
        # This time isn't going to be exceptionally accurate, but unfortunately most cameras do not
        # provide a means of determining the exact time when the frame was captured by the sensor.
        # There are probably ways to estimate the frame time more accurately but this is likely
        # good enough.
        target_time = Time.now()
        frame = self.camera.get_frame(timeout=self.camera_timeout)

        if frame is None:
            raise self.IndeterminatePosition(
                'Timeout waiting for frame from camera')

        keypoints = find_features(frame)

        if not keypoints:
            self.preview_window.show_annotated_frame(frame)
            raise self.IndeterminatePosition(
                'No target detected in most recent frame')

        # select the keypoint that is most likely to be the target of interest
        target_keypoint = self._select_one_keypoint(keypoints)

        self.preview_window.show_annotated_frame(frame, keypoints,
                                                 target_keypoint)

        # convert target position units from pixels to degrees
        target_x_px, target_y_px = self._get_keypoint_xy(target_keypoint)
        target_x = Angle(target_x_px * self.camera.pixel_scale *
                         self.camera.binning * u.deg)
        target_y = Angle(target_y_px * self.camera.pixel_scale *
                         self.camera.binning * u.deg)

        if self.telem_logger is not None:
            p = Point('camera_target')
            p.field('x', target_x.deg)
            p.field('y', target_y.deg)
            p.tag('units', 'degrees')
            p.tag('class', type(self).__name__)
            p.time(target_time.to_datetime())
            self.telem_logger.post_points(p)

        return target_time, target_x, target_y
Example #9
    def report_data_list(self, category, host, data):
        try:
            point = Point(category)
            point.tag("host", host)
            for field_key, field_value in data:
                point.field(field_key, field_value)
            point.time(datetime.datetime.utcnow(), WritePrecision.NS)

            self._write_client.write(self._settings.bucket,
                                     self._settings.tenant, point)
        except Exception as e:
            logger.error(f"Failed to report data to InfluxDB: {e}")
Example #10
def publishData(measurement, tags, time, fields):
    p = Point(measurement).time(time)
    #log.info("Will plublish %s" % (str(p)))

    for key, value in tags.items():
        p.tag(key, value)

    for key, value in fields.items():
        p.field(key, value)

    write_api.write(bucket=INFLUX_DB_NAME, record=p)
    log.info("Published data point in influxdb.")
Example #11
def _process_data_row(rowIn, tblFlds, tblName):
    
    point = Point(tblName)
    point.time(rowIn['timestamp'], WritePrecision.NS)
    
    for key in tblFlds:
        if tblFlds[key] == 'field':
            point.field(key, rowIn[key])
        elif tblFlds[key] == 'tag':
            point.tag(key, rowIn[key])
                
    return point
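# A hypothetical row and field map showing the shapes _process_data_row expects;
# the column names and values are illustrative only.
sample_flds = {"temperature": "field", "humidity": "field", "sensor_id": "tag"}
sample_row = {"timestamp": 1633024800000000000,   # nanoseconds since the epoch
              "temperature": 21.4, "humidity": 55.0, "sensor_id": "s-01"}
print(_process_data_row(sample_row, sample_flds, "greenhouse").to_line_protocol())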
Example #12
    def save(self, measurement: str, fields: dict, tags: dict):
        point = Point(measurement)

        for key in fields:
            point.field(key, fields[key])

        for key in tags:
            point.tag(key, tags[key])

        point.time(datetime.utcnow(), WritePrecision.NS)

        self.__write_api__.write(INFLUX_BUCKET, INFLUX_ORG, point)
Example #13
    def update_influx(self):
        pp = []
        for date, fields in DATA.italy.to_dict('index').items():
            p = Point("italia")
            p.time(date.value)
            for k, v in fields.items():
                if type(v) is str:
                    p.tag(k, v)
                elif not np.isnan(v):
                    p.field(k, v)
            pp.append(p)

        self.write.write("coviddi", "coviddi", pp)
Example #14
def point(reg):
    fields = struct.unpack('I32B', reg)
    words = fields[1:]
    words_idx = [w for w in range(31) if w % 5 != 0]
    p = Point('frame')
    p.time(fields[0])
    for idx in words_idx:
        p.field(f'word_{idx:02d}', words[idx])
    frame_number = (words[2] >> 2) & 0x3
    p.tag('frame_number', frame_number)
    if frame_number == 0 and words[7] == 0b11110100:
        p.tag('start_of_group', True)
    return p
Example #15
def write_point(measurement):
    point = Point(measurement["name"])
    for tag in measurement["tags"]:
        point = point.tag(tag, measurement["tags"][tag])
    for field in measurement["fields"]:
        point = point.field(field, measurement["fields"][field])
    point = point.time(datetime.utcnow(), WritePrecision.NS)

    write_api.write(bucket, org, point)
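# A hypothetical input illustrating the dict shape write_point expects; 'write_api',
# 'bucket' and 'org' are assumed to be configured at module level as in the snippet above.
write_point({
    "name": "office_climate",
    "tags": {"room": "office", "sensor": "bme280"},
    "fields": {"temperature": 21.7, "humidity": 48.2},
})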
Example #16
    def writeData(self, database, name, data, tag=None):
        self.connect()
        write_api = self.client.write_api(write_options=SYNCHRONOUS)

        try:
            p = Point(name)
            if tag is not None:
                p.tag(*tag)

            for key, value in data.items():
                p.field(key, value)

            logger.debug("Writing Data to DB: %s", p)
            write_api.write(bucket=database, record=p)

        except Exception:
            logger.exception("Exception writing data: %s", p)
            self.client = None
Example #17
    def add_data_point(self, field_name, field_value, tags=None):
        point = Point(self.name)
        point.field(field_name, field_value)

        if tags is not None:
            for tag_key, tag_value in tags.items():
                point = point.tag(tag_key, tag_value)

        self.records.append(point)
Example #18
def main():
    global device_details
    #get_devices()
    #print(device_details)
    get_devices_refresh()
    cache_timestamp = datetime.datetime.now()
    for line in sys.stdin:
        #May be able to adjust this logic to have separate timers for each app_id
        #Update cache if older than 1 hour
        if (datetime.datetime.now() -
                cache_timestamp).seconds >= 3600 and len(device_details) > 0:
            #refresh every hour
            get_devices_refresh()
            #re-Set timestamp
            cache_timestamp = datetime.datetime.now()
        #Parse the line protocol record into a Point
        lproto = line_protocol_parser.parse_line(line)
        #parse topic tag to return the device_id and app_id eg: v3/fort-digital-agri@ttn/devices/2027a0023/up v3/{{app_id}}@{{tenant}}/devices/{{device_id}}/up
        topic_match = topic_re.match(lproto['tags']['topic'])
        app_id = topic_match[1]
        #tenant = topic_match[2]
        device_id = topic_match[3]
        point = Point(lproto['measurement']).time(lproto['time'])
        if app_id not in device_details or device_id not in device_details[
                app_id]:
            get_devices(app_id)
        if app_id in device_details:
            for key, value in lproto['fields'].items():
                point = point.field(key, will_it_float(value))
            for key, value in lproto['tags'].items():
                if key != 'name':
                    point = point.tag(key, value)
            #get details from global variable
            device = get_device(app_id, device_id)
            #Add additional tags
            point = point.tag("device_id", device_id)
            for key, value in device.items():
                point = point.tag(key, value)
            #Print lineprotocol to stdout
            print(point.to_line_protocol())
            #flush stdout
            sys.stdout.flush()
        else:
            print("Device Details not found")
Example #19
def send_dict_influxdb(user, pointName, mainObject, field=False):
    """
            format dict to send data at influxDb

            :param user: name on the user
            :param pointName: name on the point
            :param mainObject: object to iterate
            :param field: optional tag

    """
    if field:
        toIterate = mainObject.get(field)
    else:
        toIterate = mainObject
    for key in toIterate:
        point = Point(pointName).tag("host", user)
        if field:
            point.tag("data", field)
        point.field(key, toIterate.get(key)).time(datetime.utcnow(), WritePrecision.NS)
        write_api.write(bucket, org, point)
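# Hypothetical calls showing both forms: iterating the object directly, and iterating a
# nested dict selected via the optional 'field' argument. The data is illustrative only.
send_dict_influxdb("alice", "system_metrics", {"cpu": 0.42, "mem": 0.65})
send_dict_influxdb("alice", "system_metrics",
                   {"disk": {"read_mb": 12.5, "write_mb": 3.1}}, field="disk")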
Example #20
    def test_write_using_default_tags(self):
        bucket = self.create_test_bucket()

        measurement = "h2o_feet"
        field_name = "water_level"
        val = "1.0"
        val2 = "2.0"
        tag = "location"
        tag_value = "creek level"

        p = Point(measurement)
        p.field(field_name, val)
        p.tag(tag, tag_value)
        p.time(1)

        p2 = Point(measurement)
        p2.field(field_name, val2)
        p2.tag(tag, tag_value)
        p2.time(2)

        record_list = [p, p2]

        self.write_client.write(bucket.name, self.org, record_list)

        query = 'from(bucket:"' + bucket.name + '") |> range(start: 1970-01-01T00:00:00.000000001Z)'
        flux_result = self.client.query_api().query(query)
        self.assertEqual(1, len(flux_result))
        rec = flux_result[0].records[0]
        rec2 = flux_result[0].records[1]

        self.assertEqual(self.id_tag, rec["id"])
        self.assertEqual(self.customer_tag, rec["customer"])
        self.assertEqual("LA", rec[self.data_center_key])

        self.assertEqual(self.id_tag, rec2["id"])
        self.assertEqual(self.customer_tag, rec2["customer"])
        self.assertEqual("LA", rec2[self.data_center_key])

        self.delete_test_bucket(bucket)
Example #21
 def write(self, results):
     points = []
     for result in results:
         point = Point(result[Check.Result.NAME]) \
             .tag("host", result[Check.Result.HOST]) \
             .time(result[Check.Result.TIME].isoformat())
         if Check.Result.DEVICE in result:
             point = point.tag("device", result[Check.Result.DEVICE])
         for field in result[Check.Result.FIELDS]:
             point = point.field(field[Check.Field.NAME], field[Check.Field.VALUE])
             if Check.Field.UNIT in field:
                 point = point.field(f"{field[Check.Field.NAME]}_unit", field[Check.Field.UNIT])
         points.append(point)
     self.write_api.write(self.bucket, self.org, points)
Example #22
class InfluxDBWrapper:
    def __init__(self, name, field_name, field_value, tags=None):
        self.name = name
        self.point = Point(name)

        tags = tags or {}
        self.record = self._record(field_name, field_value, tags)

        self.write_api = influxdb_client.write_api(write_options=SYNCHRONOUS)

    def _record(self, field_name, field_value, tags):
        for tag_key, tag_value in tags.items():
            self.point = self.point.tag(tag_key, tag_value)
        return self.point.field(field_name, field_value)

    def write(self):
        self.write_api.write(bucket=settings.INFLUXDB_BUCKET, record=self.record)
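# Hypothetical usage; 'influxdb_client' and 'settings' are module-level objects assumed
# to be configured elsewhere in the project.
InfluxDBWrapper("cpu_load", "value", 0.42, tags={"host": "web-1"}).write()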
Example #23
def panel_info_to_influx_points(panel_info):
    points = []

    for control in panel_info['controls']:
        point = Point("cellar_panel_read")
        point.tag("slot", control["slot"])
        point.tag("vessel", control["label"])
        point.tag("batch_number", control['batch_info']['Batch #'])
        point.field("temp", control['temp'])
        point.field("set_point", control['set_point'])
        point.field("valve_open", control['valve_open'])
        point.field("days_in_vessel", control['batch_info']['Days in Vessel'])
        point.time(panel_info['read_at'], WritePrecision.NS)
        points.append(point)

    return points
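# A hypothetical panel_info payload showing the structure the function reads; the keys
# mirror the lookups above, while the values are illustrative only.
from datetime import datetime, timezone

sample_panel_info = {
    "read_at": datetime.now(timezone.utc),
    "controls": [{
        "slot": 1,
        "label": "FV-01",
        "temp": 18.2,
        "set_point": 18.0,
        "valve_open": False,
        "batch_info": {"Batch #": "042", "Days in Vessel": 12},
    }],
}
points = panel_info_to_influx_points(sample_panel_info)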
Example #24
def send_results(results):
    """
    Formats the payload to send to InfluxDB
    :rtype: None
    """
    result_dict = results.dict()
    pt = Point("speed_test_results")
    pt.field('download', result_dict['download'])
    pt.field('upload', result_dict['upload'])
    pt.field('ping', result_dict['server']['latency'])
    pt.tag('server', result_dict['server']['id'])
    pt.tag('server_name', result_dict['server']['name'])
    pt.tag('server_country', result_dict['server']['country'])

    if getenv("INFLUXDB_V2_URL"):
        client = InfluxDBClient.from_env_properties()
        write_api = client.write_api(write_options=SYNCHRONOUS)
        try:
            # A synchronous write returns None on success and raises on failure, so the
            # outcome cannot be checked via the return value.
            write_api.write("speedtests/autogen", 'patrickjmcd', pt)
            logging.debug('Data written to InfluxDB')
        except Exception:
            logging.exception("Data not written to influxdb")
Example #25
    def _finish_control_cycle(
        self,
        cycle_period: Optional[float],
        mount_state: MountState,
        rate_command: Optional[SlewRateCommand] = None,
        rate_command_time_error: Optional[float] = None,
        callback_override: bool = False,
    ) -> "Tracker.StopReason":
        """Final tasks to perform at the end of each control cycle."""

        # list of telemetry points to be populated
        points = []

        # timestamp to use for all telemetry points that don't correspond to sensor readings
        # or other events that occur at well-defined times
        cycle_timestamp = datetime.utcnow()

        # coordinate system transformations
        position_mount_topo = self.mount_model.encoders_to_topocentric(
            mount_state.position)

        try:
            # get target position for the same time as mount state was queried
            position_target = self.target.get_position(
                mount_state.time_queried)

        except Target.IndeterminatePosition:
            stop_reason = self._check_stopping_conditions()

        else:
            # on-sky separation between target and mount positions
            error_magnitude = separation(position_target.topo,
                                         position_mount_topo)

            stop_reason = self._check_stopping_conditions(error_magnitude)

            if self.telem_logger is not None:
                error_enc = {
                    axis: float(
                        smallest_allowed_error(
                            mount_state.position[axis].deg,
                            position_target.enc[axis].deg,
                            self.mount.no_cross_encoder_positions()[axis].deg,
                        ))
                    for axis in self.axes
                }

                # target position
                pt = Point('target_position')
                pt.field('azimuth', position_target.topo.az.deg)
                pt.field('altitude', position_target.topo.alt.deg)
                for axis in self.axes:
                    pt.field(f'encoder_{axis}', position_target.enc[axis].deg)
                pt.tag('units', 'degrees')
                pt.tag('class', type(self).__name__)
                pt.time(position_target.time.to_datetime())
                points.append(pt)

                # mount position error
                pt = Point('mount_position_error')
                pt.field('magnitude', error_magnitude.deg)
                for axis in self.axes:
                    pt.field(f'encoder_{axis}', error_enc[axis])
                pt.tag('units', 'degrees')
                pt.tag('class', type(self).__name__)
                pt.time(cycle_timestamp)
                points.append(pt)

        if self.telem_logger is not None:

            pt = Point('control_cycle_stats')
            pt.field('period', cycle_period)
            pt.field('cycle_count', self.num_iterations)
            pt.field('callback_override', callback_override)
            pt.tag('class', type(self).__name__)
            pt.time(cycle_timestamp)
            points.append(pt)

            # mount positions
            pt = Point('mount_position')
            for axis in self.axes:
                pt.field(f'encoder_{axis}', mount_state.position[axis].deg)
            pt.field('azimuth', position_mount_topo.az.deg)
            pt.field('altitude', position_mount_topo.alt.deg)
            pt.tag('units', 'degrees')
            pt.tag('class', type(self).__name__)
            pt.time(mount_state.time_queried.to_datetime())
            points.append(pt)

            # mount slew rate
            pt = Point('mount_rate')
            for axis in self.axes:
                pt.field(f'axis_{axis}', mount_state.rates[axis])
            pt.tag('units', 'degrees/s')
            pt.tag('class', type(self).__name__)
            pt.time(mount_state.time_queried.to_datetime())
            points.append(pt)

            # controller commands
            if rate_command is not None:
                pt = Point('controller_commands')
                for axis in self.axes:
                    pt.field(f'rate_axis_{axis}', rate_command.rates[axis])
                if rate_command_time_error is not None:
                    pt.field('time_error', rate_command_time_error.sec)
                pt.tag('units', 'degrees/s')
                pt.tag('class', type(self).__name__)
                pt.time(cycle_timestamp)
                points.append(pt)

            self.telem_logger.post_points(points)

        self.num_iterations += 1
        return stop_reason
Example #26
    def to_target(self, target: str):
        """
        Emit Pandas DataFrame to target. A target
        is identified by a connection string.

        Examples:

        - duckdb://dwd.duckdb?table=weather
        - influxdb://localhost/?database=dwd&table=weather
        - crate://localhost/?database=dwd&table=weather

        Dispatch data to different data sinks. Currently, SQLite, DuckDB,
        InfluxDB and CrateDB are implemented. However, through the SQLAlchemy
        layer, it should actually work with any supported SQL database.

        - https://docs.sqlalchemy.org/en/13/dialects/

        :param target: Target connection string.
        :return: self
        """

        log.info(f"Exporting records to {target}\n{self.df.count()}")

        connspec = ConnectionString(target)
        protocol = connspec.url.scheme
        database = connspec.get_database()
        tablename = connspec.get_table()

        if target.startswith("file://"):
            filepath = connspec.get_path()

            if target.endswith(".xlsx"):
                log.info(f"Writing to spreadsheet file '{filepath}'")

                # Convert all datetime columns to ISO format.
                df = convert_datetimes(self.df)
                df.to_excel(filepath, index=False)

            elif target.endswith(".feather"):
                # https://arrow.apache.org/docs/python/feather.html
                log.info(f"Writing to Feather file '{filepath}'")
                import pyarrow.feather as feather

                feather.write_feather(self.df, filepath, compression="lz4")

            elif target.endswith(".parquet"):
                """
                # Acquire data and store to Parquet file.
                alias fetch="wetterdienst dwd observations values --station=1048,4411 --parameter=kl --resolution=daily --period=recent"
                fetch --target="file://observations.parquet"

                # Check Parquet file.
                parquet-tools schema observations.parquet
                parquet-tools head observations.parquet

                # References
                - https://arrow.apache.org/docs/python/parquet.html
                """

                log.info(f"Writing to Parquet file '{filepath}'")
                import pyarrow as pa
                import pyarrow.parquet as pq

                table = pa.Table.from_pandas(self.df)
                pq.write_table(table, filepath)

            elif target.endswith(".zarr"):
                """
                # Acquire data and store to Zarr group.
                alias fetch="wetterdienst dwd observations values --station=1048,4411 --parameter=kl --resolution=daily --period=recent"
                fetch --target="file://observations.zarr"

                # References
                - https://xarray.pydata.org/en/stable/generated/xarray.Dataset.from_dataframe.html
                - https://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_zarr.html
                """

                log.info(f"Writing to Zarr group '{filepath}'")
                import xarray

                df = self.df

                # Problem: `ValueError: Cannot setitem on a Categorical with a new category, set the categories first`.
                # Solution: Let's convert all categorical columns back to their designated type representations.
                #           https://stackoverflow.com/questions/32011359/convert-categorical-data-in-pandas-dataframe/32011969#32011969
                if "quality" in df:
                    df.quality = df.quality.astype("Int64")
                categorical_columns = df.select_dtypes(["category"]).columns
                df[categorical_columns] = df[categorical_columns].astype("str")

                # Problem: `TypeError: float() argument must be a string or a number, not 'NAType'`.
                # Solution: Fill gaps in the data.
                df = df.fillna(-999)

                # Convert pandas DataFrame to xarray Dataset.
                dataset = xarray.Dataset.from_dataframe(df)
                log.info(f"Converted to xarray Dataset. Size={dataset.sizes}")

                # Export to Zarr format.
                # TODO: Add "group" parameter.
                #       Group path. (a.k.a. `path` in zarr terminology.)
                # TODO: Also use attributes: `store.set_attribute()`
                store = dataset.to_zarr(
                    filepath,
                    mode="w",
                    group=None,
                    encoding={"date": {
                        "dtype": "datetime64"
                    }},
                )

                # Reporting.
                dimensions = store.get_dimensions()
                variables = list(store.get_variables().keys())

                log.info(
                    f"Wrote Zarr file with dimensions={dimensions} and variables={variables}"
                )
                log.info(f"Zarr Dataset Group info:\n{store.ds.info}")

            else:
                raise KeyError("Unknown export file type")

            return

        if target.startswith("duckdb://"):
            """
            ====================
            DuckDB database sink
            ====================

            Install Python driver::

                pip install duckdb

            Acquire data::

                wetterdienst dwd observations values --station=1048,4411 --parameter=kl --resolution=daily --period=recent --target="duckdb:///dwd.duckdb?table=weather"

            Example queries::

                python -c 'import duckdb; c = duckdb.connect(database="dwd.duckdb"); print(c.table("weather"))'  # noqa
                python -c 'import duckdb; c = duckdb.connect(database="dwd.duckdb"); print(c.execute("SELECT * FROM weather").df())'  # noqa

            """
            log.info(
                f"Writing to DuckDB. database={database}, table={tablename}")
            import duckdb

            connection = duckdb.connect(database=database, read_only=False)
            connection.register("origin", self.df)
            connection.execute(f"DROP TABLE IF EXISTS {tablename};")
            connection.execute(
                f"CREATE TABLE {tablename} AS SELECT * FROM origin;"
            )  # noqa:S608

            weather_table = connection.table(tablename)
            print(weather_table)  # noqa: T001
            print("Cardinalities:")  # noqa: T001
            print(weather_table.to_df().count())  # noqa: T001
            connection.close()
            log.info("Writing to DuckDB finished")

        elif protocol.startswith("influxdb"):
            """
            ==========================
            InfluxDB 1.x database sink
            ==========================

            Install Python driver::

                pip install influxdb

            Run database::

                docker run -it --rm --publish=8086:8086 influxdb:1.8

            Acquire data::

                alias fetch="wetterdienst values --provider=dwd --network=observation --parameter=kl --resolution=daily --period=recent --station=1048,4411"
                fetch --target="influxdb://localhost/?database=dwd&table=weather"

            Example queries::

                http 'localhost:8086/query?db=dwd&q=SELECT * FROM weather;'
                http 'localhost:8086/query?db=dwd&q=SELECT COUNT(*) FROM weather;'


            ==========================
            InfluxDB 2.x database sink
            ==========================

            Install Python driver::

                pip install influxdb_client

            Run database::

                docker run -it --rm --publish=8086:8086 influxdb:2.0
                influx setup --name=default --username=root --password=12345678 --org=acme --bucket=dwd --retention=0 --force

            Acquire data::

                INFLUXDB_ORGANIZATION=acme
                INFLUXDB_TOKEN=t5PJry6TyepGsG7IY_n0K4VHp5uPvt9iap60qNHIXL4E6mW9dLmowGdNz0BDi6aK_bAbtD76Z7ddfho6luL2LA==

                alias fetch="wetterdienst values --provider=dwd --network=observation --parameter=kl --resolution=daily --period=recent --station=1048,4411"
                fetch --target="influxdb2://${INFLUXDB_ORGANIZATION}:${INFLUXDB_TOKEN}@localhost/?database=dwd&table=weather"

            Example queries::

                influx query 'from(bucket:"dwd") |> range(start:-2d) |> limit(n: 10)'
            """

            if protocol in [
                    "influxdb", "influxdbs", "influxdb1", "influxdb1s"
            ]:
                version = 1
            elif protocol in ["influxdb2", "influxdb2s"]:
                version = 2
            else:
                raise KeyError(
                    f"Unknown protocol variant '{protocol}' for InfluxDB")

            log.info(
                f"Writing to InfluxDB version {version}. database={database}, table={tablename}"
            )

            # 1. Munge the data frame.
            # Use the "date" column as appropriate timestamp index.
            df = self.df.set_index(pd.DatetimeIndex(self.df["date"]))
            df = df.drop(["date"], axis=1)

            # Compute designated tag fields from some candidates.
            tag_columns = []
            tag_candidates = [
                Columns.STATION_ID.value,
                Columns.QUALITY.value,
                Columns.QUALITY_PREFIX.value,
                Columns.DATASET.value,
                Columns.PARAMETER.value,
            ]
            for tag_candidate in tag_candidates:
                tag_candidate = tag_candidate.lower()
                for column in df.columns:
                    if column.startswith(tag_candidate):
                        tag_columns.append(column)

            # Setup the connection.
            if version == 1:
                from influxdb import InfluxDBClient

                client = InfluxDBClient(
                    host=connspec.url.hostname,
                    port=connspec.url.port or 8086,
                    username=connspec.url.username,
                    password=connspec.url.password,
                    database=database,
                    ssl=protocol.endswith("s"),
                )
                client.create_database(database)
            elif version == 2:
                from influxdb_client import InfluxDBClient, Point
                from influxdb_client.client.write_api import SYNCHRONOUS

                ssl = protocol.endswith("s")
                url = f"http{ssl and 's' or ''}://{connspec.url.hostname}:{connspec.url.port or 8086}"
                client = InfluxDBClient(url=url,
                                        org=connspec.url.username,
                                        token=connspec.url.password)
                write_api = client.write_api(write_options=SYNCHRONOUS)

            points = []
            for items in chunker(df, chunksize=50000):

                for date, record in items.iterrows():
                    time = date.isoformat()
                    tags = {
                        tag: record.pop(tag)
                        for tag in tag_columns if tag in record
                    }

                    fields = record.dropna().to_dict()
                    if not fields:
                        continue

                    if version == 1:
                        point = {
                            "measurement": tablename,
                            "time": time,
                            "tags": tags,
                            "fields": fields,
                        }
                    elif version == 2:
                        point = Point(tablename).time(date.isoformat())
                        for tag, value in tags.items():
                            point = point.tag(tag, value)
                        for field, value in fields.items():
                            point = point.field(field, value)

                    points.append(point)

            # Write to InfluxDB.
            if version == 1:
                client.write_points(
                    points=points,
                    batch_size=50000,
                )
            elif version == 2:
                write_api.write(bucket=database, record=points)
                write_api.close()

            log.info("Writing to InfluxDB finished")

        elif target.startswith("crate://"):
            """
            =====================
            CrateDB database sink
            =====================

            Install Python driver::

                pip install crate[sqlalchemy] crash

            Run database::

                docker run -it --rm --publish=4200:4200 --env CRATE_HEAP_SIZE=2048M crate/crate:nightly

            Acquire data::

                wetterdienst dwd observations values --station=1048,4411 --parameter=kl --resolution=daily --period=recent --target="crate://crate@localhost/dwd?table=weather"

            Example queries::

                psql postgres://crate@localhost --command 'SELECT * FROM dwd.weather;'

                crash -c 'select * from dwd.weather;'
                crash -c 'select count(*) from dwd.weather;'
                crash -c "select *, date_format('%Y-%m-%dT%H:%i:%s.%fZ', date) as datetime from dwd.weather order by datetime limit 10;"  # noqa

            """
            log.info(f"Writing to CrateDB. target={target}, table={tablename}")

            # CrateDB's SQLAlchemy driver doesn't accept `database` or `table` query parameters.
            cratedb_url = connspec.url._replace(path="", query=None)
            cratedb_target = urlunparse(cratedb_url)

            # Convert timezone-aware datetime fields to naive ones.
            # FIXME: Omit this as soon as the CrateDB driver is capable of supporting timezone-qualified timestamps.
            self.df.date = self.df.date.dt.tz_localize(None)

            self.df.to_sql(
                name=tablename,
                con=cratedb_target,
                schema=database,
                if_exists="replace",
                index=False,
                chunksize=5000,
            )
            log.info("Writing to CrateDB finished")

        else:
            """
            ================================
            Generic SQLAlchemy database sink
            ================================

            Install Python driver::

                pip install sqlalchemy

            Examples::

                # Prepare
                alias fetch='wetterdienst dwd observations values --station=1048,4411 --parameter=kl --resolution=daily --period=recent'

                # Acquire data.
                fetch --target="sqlite:///dwd.sqlite?table=weather"

                # Query data.
                sqlite3 dwd.sqlite "SELECT * FROM weather;"

            """

            # Honour SQLite's SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999
            # for SQLite versions prior to 3.32.0 (2020-05-22),
            # see https://www.sqlite.org/limits.html#max_variable_number.
            chunksize = 5000
            if target.startswith("sqlite://"):
                import sqlite3

                if sqlite3.sqlite_version_info < (3, 32, 0):
                    chunksize = int(999 / len(self.df.columns))

            log.info("Writing to SQL database")
            self.df.to_sql(
                name=tablename,
                con=target,
                if_exists="replace",
                index=False,
                method="multi",
                chunksize=chunksize,
            )
            log.info("Writing to SQL database finished")
Example #27
def get_gateway_details(gateway):
    #print(gateway)
    gateway_id = gateway['ids']['gateway_id']
    point = Point("TTN_Gateways").tag("gateway_id",
                                      gateway_id).tag("name", gateway['name'])
    if 'antennas' in gateway:
        for dimension in ['latitude', 'longitude', 'altitude']:
            if dimension in gateway['antennas'][0]['location']:
                value = gateway['antennas'][0]['location'][dimension]
            else:
                value = 0
            point = point.tag(
                dimension, value
            )  #body['gateway']['antennas'][0]['location'][dimension] = antenna_locations[dimension]
        #point = point.tag('latitude',gateway['antennas'][0]['location']['latitude']).tag('longitude',gateway['antennas'][0]['location']['longitude']).tag('altitude',gateway['antennas'][0]['location']['altitude'])
        #for key,value in gateway['antennas'][0]['location']:
        #    point = point.tag(key,value)
    gateway_stats = (requests.get(base_uri + "/api/v3/gs/gateways/" +
                                  gateway_id + "/connection/stats",
                                  params=gateway_stats_params,
                                  headers=http_headers)).json()
    #https://eu1.cloud.thethings.network/api/v3/gs/gateways/fort-digital-80029c641ef8/connection/stats
    if 'attributes' in gateway:
        for key, value in gateway['attributes'].items():
            point = point.tag(key, value)
    #Need to consider how to handle last_status_received_at not updating but not getting a 'gateway not connected' message yet to mark a site as 'down'
    #Can probably handle this in the query?
    if "connected_at" in gateway_stats:
        #print(gateway_stats)
        point = point.field("status", 1)
        if 'last_status_received_at' in gateway_stats:
            point = point.time(gateway_stats['last_status_received_at'])
        if 'uplink_count' in gateway_stats:
            point = point.field("uplink_count",
                                will_it_float(gateway_stats['uplink_count']))
        if 'downlink_count' in gateway_stats:
            point = point.field("downlink_count",
                                will_it_float(gateway_stats['downlink_count']))
        if 'last_status' in gateway_stats:
            if 'metrics' in gateway_stats['last_status']:
                for key, value in gateway_stats['last_status'][
                        'metrics'].items():
                    point = point.field(key, will_it_float(value))
            #Could use the latest antenna location to automatically update gateway location as its ignored from UDP gateway_stats['last_status']['antenna_locations']['latitude/longitude/altitude']
            #print(gateway_stats)
            if 'antenna_locations' in gateway_stats['last_status']:
                if 'antennas' not in gateway or gateway['antennas'][0][
                        'location']['latitude'] != gateway_stats[
                            'last_status']['antenna_locations'][0][
                                'latitude'] or gateway['antennas'][0][
                                    'location']['longitude'] != gateway_stats[
                                        'last_status']['antenna_locations'][0][
                                            'longitude']:
                    update_gateway(
                        gateway_id,
                        gateway_stats['last_status']['antenna_locations'][0])
    else:
        #Gateway Not Connected
        point = point.field("status", 0)
    print(point.to_line_protocol())
    #flush stdout
    sys.stdout.flush()
Example #28
                        port=server_config.get(server, 'port'),
                        username=server_config.get(server, 'username'),
                        password=server_config.get(server, 'password')))

    power_ok = True
    write_api = client.write_api(write_options=SYNCHRONOUS)

    for server_data in ups_data:
        for ups in server_data:
            if ups is False:
                continue
            if ups['ups.status'] != 'OL' or ups['battery.charge'] < 100:
                power_ok = False

            point = Point("ups")
            point.tag("ups_name", ups['name'])
            for entry in ups:
                if isinstance(ups[entry], (float, int)):
                    point.field(entry, ups[entry])

            point.time(datetime.utcnow(), WritePrecision.NS)
            try:
                write_api.write(bucket=bucket, org=org, record=point)
                print(datetime.now().strftime("%d/%m/%Y@%H:%M:%S"), end=':')
                print('data sent for: ' + ups['name'])
            except Exception as e:
                print(datetime.now().strftime("%d/%m/%Y@%H:%M:%S"), end=':')
                print('Could not send data to influx for:' + ups['name'])
                print(e)
                print(influx_url, token, org, bucket)
Example #29
def extract_transform_load_time_series_json(swift_result, swift_container,
                                            swift_id, process_type):
    """
    Process a JSON time-series file.
    """
    client = InfluxDBClient(url=config.url_influxdb,
                            token=config.token_influxdb)
    write_api = client.write_api(write_options=SYNCHRONOUS)

    time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    points = []
    for m in json.loads(swift_result):
        topic = m.get("topic")
        subID = m.get("subID")
        unitID = m.get("unitID")
        input_v = m.get("input")
        value_units = m.get("value_units")
        value = m.get("value")
        time = datetime.strptime(m.get("time"), time_format)
        m_points = []

        if "energy" in topic:
            if config.influxdb_measurement == "topic":
                point = Point(topic) \
                    .tag("subID", subID) \
                    .tag("unitID", unitID) \
                    .time(time, WritePrecision.MS)
                if "input" in m:
                    point.tag("input", input_v)
                for i in range(len(value_units)):
                    point.field(value_units[i], value[i])
                m_points.append(point)
            else:
                for i in range(len(value_units)):
                    point = Point(value_units[i]) \
                        .field("value", value[i]) \
                        .tag("topic", topic) \
                        .tag("subID", subID) \
                        .tag("unitID", unitID) \
                        .time(time, WritePrecision.MS)
                    if "input" in m:
                        point.tag("input", input_v)
                    m_points.append(point)
        else:
            if config.influxdb_measurement == "topic":
                point = Point(topic) \
                    .field(value_units, value)
            else:
                point = Point(value_units) \
                    .field("value", value) \
                    .tag("topic", topic)

            point.tag("subID", subID) \
                .tag("unitID", unitID) \
                .time(time, WritePrecision.MS)

            if "input" in m:
                point.tag("input", input_v)

            m_points.append(point)

        history_data(process_type, swift_container, swift_id,
                     "mqtt json to influxdb data points", m,
                     [p.to_line_protocol() for p in m_points])

        points += m_points

    write_api.write(config.bucket_influxdb, config.org_influxdb, points)
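# A hypothetical payload illustrating the message shape the loop above expects; the field
# values are illustrative, and 'config' and 'history_data' come from the surrounding project.
sample_messages = [{
    "topic": "building/energy/meter1",
    "subID": "sub-01",
    "unitID": "unit-42",
    "input": "phase_a",
    "value_units": ["kWh", "W"],
    "value": [12.5, 230.0],
    "time": "2023-05-01T12:00:00.000Z",
}]
# extract_transform_load_time_series_json(json.dumps(sample_messages), "container", "obj-1", "raw")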