Example #1
class InfluxQuery:

    client = InfluxDBClient.from_env_properties()
    query_api = client.query_api()

    def get(
        self,
        days_back: int,
        experiment: str = 'seaside-kitchen-fridge-2',
    ) -> pd.DataFrame:
        dfs: List[pd.DataFrame] = list()
        for day_back in range(days_back):
            query_str = (
                f'from(bucket: "{BUCKET}")'
                f'  |> range(start: -{day_back+1}d, stop: -{day_back}d)'
                 '  |> filter(fn: (r) => r["_measurement"] == "sensorReadings")'
                 '  |> filter(fn: (r) => r["_field"] == "reed_switch_is_open_int"'
                 ' or r["_field"] == "tmp117_temperature"'
                 ' or r["_field"] == "bme280_temperature")'
                f'  |> filter(fn: (r) => r["experiment"] == "{experiment}")'
                 '  |> aggregateWindow(every: 1s, fn: last, createEmpty: false)'
                 '  |> yield(name: "last")'
            )
            df = self.query_api.query_data_frame(query_str)
            # query_data_frame() may return a single DataFrame or a list of them
            dfs.extend(df if isinstance(df, list) else [df])
        return pd.concat(dfs)
Example #2
File: submitter.py Project: jonkerj/smapy
    def __attrs_post_init__(self):
        self.log = logging.getLogger('smapy.submitter')

        self.influx_client = InfluxDBClient.from_env_properties()
        self.influx_write_api = self.influx_client.write_api(
            write_options=SYNCHRONOUS)
        self.sma.login()
Example #3
def send_data(data_table, lake_prefix, bucket="lakeinfo/autogen", lake_temp=None):
    """Writes data to influxdb client in env properties."""
    client = InfluxDBClient.from_env_properties()
    # client = InfluxDBClient(url=getenv("INFLUXDB_V2_URL"), org=getenv(
    #     "INFLUXDB_V2_ORG"), token=getenv("INFLUXDB_V2_TOKEN"))
    write_api = client.write_api(write_options=SYNCHRONOUS)

    last_point = data_table[-1]
    print(last_point)
    points = [
        Point("{}_level".format(lake_prefix)).tag("units", "ft")
        .field("value", last_point['lake_level'])
        .field("valueNum", float(last_point['lake_level'])),  # .time(last_point['timestamp'])
        Point("{}_turbine_release".format(lake_prefix)).tag("units", "cfps")
        .field("valueNum", last_point['turbine_release_cfs'])
        .field("value", float(last_point['turbine_release_cfs'])),  # .time(last_point['timestamp'])
        Point("{}_spillway_release".format(lake_prefix)).tag("units", "cfps")
        .field("valueNum", last_point['spillway_release_cfs'])
        .field("value", float(last_point['spillway_release_cfs'])),  # .time(last_point['timestamp'])
        Point("{}_total_release".format(lake_prefix)).tag("units", "cfps")
        .field("valueNum", last_point['total_release_cfs'])
        .field("value", float(last_point['total_release_cfs'])),  # .time(last_point['timestamp'])
    ]

    if lake_temp:
        points.append(Point("{}_temperature".format(lake_prefix)).tag("units", "ºF").field(
            "valueNum", lake_temp).field("value", lake_temp))

    for i in points:
        write_api.write(bucket, 'patrickjmcd', i)
        print("Wrote {}".format(i._name))
Example #4
def main(gpio, room, org, bucket):
    while True:
        client = InfluxDBClient.from_env_properties()
        write_api = client.write_api(write_options=SYNCHRONOUS)
        hum, temp = Adafruit_DHT.read_retry(SENSOR, gpio)
        if temp is not None:
            p = Point("temp").tag("room", room).field("degrees_c", temp).time(datetime.utcnow())
            logging.info("Writing %s", p.to_line_protocol())
            write_api.write(bucket, org, p)
        if hum is not None:
            p = Point("humid").tag("room", room).field("perc_rh", hum).time(datetime.utcnow())
            logging.info("Writing %s", p.to_line_protocol())
            write_api.write(bucket, org, p)
        write_api.close()
        client.close()  # also release the client created for this iteration

        time.sleep(INTERVAL)
Example #5
    def insert(self):
        client = InfluxDBClient.from_env_properties()
        write_api = client.write_api(write_options=SYNCHRONOUS)
        try:
            print(self.bucket)
            # the default write_precision is nanoseconds; set to seconds here
            write_api.write(self.bucket,
                            client.org,
                            self.data,
                            write_precision=WritePrecision.S)
        except ApiException as e:
            print(e)
            return {"message": "An error occurred on the server side"}, 500
        write_api.close()
        client.close()

        return {"message": "Created"}, 201
Example #6
def do_it():
    endpoint = f"http://{GATEWAY_IP}/cgi-bin/post_manager"
    influx_client = InfluxDBClient.from_env_properties()
    influx_write_api = influx_client.write_api(write_options=SYNCHRONOUS)
    s = requests.Session()
    s.auth = (USERNAME, PASSWORD)
    meter_id = get_smartmeter_id(s, endpoint)

    while True:
        instantaneous_demand = get_instantaneous_demand(s, endpoint, meter_id)
        utc_dt = datetime.datetime.utcnow()
        influx_data = build_influx_measurements(
            instantaneous_demand=instantaneous_demand, utc_dt=utc_dt)
        logging.info("submitting stats to Influx")
        logging.info(influx_data)
        influx_write_api.write(INFLUX_BUCKET, record=influx_data)
        logging.info(f"sleeping for {CHECK_INTERVAL}")
        sleep(CHECK_INTERVAL)
Example #7
def main(args: Dict[str, str]):
    print("args: " + str(args.keys()))
    bucket = args.pop("bucket")
    url = args.pop("vm_addr")

    for k, v in args.items():
        if v is not None:
            os.environ[k] = v
        print(f"Using {k}={os.getenv(k)}")

    client = InfluxDBClient.from_env_properties()

    query_api = client.query_api()  # use synchronous to see errors

    # Get all unique series by reading first entry of every table.
    # With latest InfluxDB we could possibly use "schema.measurements()" but this doesn't exist in 2.0
    first_in_series = f"""
    from(bucket: "{bucket}")
    |> range(start: 0, stop: now())
    |> first()"""
    timeseries: List[pd.DataFrame] = query_api.query_data_frame(
        first_in_series)

    # get all unique measurement-field pairs and then fetch and export them one-by-one.
    # With really large databases the results should be possibly split further
    # Something like query_data_frame_stream() might be then useful.
    measurements_and_fields = [
        gr[0] for df in timeseries
        for gr in df.groupby(["_measurement", "_field"])
    ]
    print(f"Found {len(measurements_and_fields)} unique time series")
    for meas, field in measurements_and_fields:
        print(f"Exporting {meas}_{field}")
        whole_series = f"""
        from(bucket: "{bucket}")
        |> range(start: 0, stop: now())
        |> filter(fn: (r) => r["_measurement"] == "{meas}")
        |> filter(fn: (r) => r["_field"] == "{field}")
        """
        df = query_api.query_data_frame(whole_series)

        line = get_influxdb_lines(df)
        # "db" is added as an extra tag for the value.
        requests.post(f"{url}/write?db={bucket}", data=line)
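The comments in this example mention query_data_frame_stream() as a way to split the work for very large databases. A minimal sketch of that variant, reusing the bucket, url, query_api and get_influxdb_lines names from the code above (an illustration only, not the project's actual export path):

# Stream the bucket table-by-table instead of loading every series up front.
whole_bucket = f'from(bucket: "{bucket}") |> range(start: 0, stop: now())'
for df in query_api.query_data_frame_stream(whole_bucket):
    # each df is one Flux table; convert and forward it before fetching the next
    requests.post(f"{url}/write?db={bucket}", data=get_influxdb_lines(df))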
Example #8
def do_it():
    influx_client = InfluxDBClient.from_env_properties()
    influx_write_api = influx_client.write_api(write_options=SYNCHRONOUS)
    s = create_session(user_agent=USERAGENT)
    tz = pytz.timezone("Australia/Melbourne")
    now_dt = pytz.utc.localize(
        datetime.datetime.utcnow(), is_dst=None
    ).astimezone(tz)

    for i in range(1, BACKFILL_DAYS + 1):
        backfill_dt = now_dt - datetime.timedelta(days=i)
        print(f"Backfilling today - {i} ({backfill_dt})")
        do_login(JEMENA_USERNAME, JEMENA_PASSWORD, s, BASE_URL)
        periodic_data = get_periodic_data(s, BASE_URL, i)
        influx_data = build_influx_measurements(
            tz, periodic_data, backfill_dt, now_dt.replace(tzinfo=None)
        )

        logging.info("submitting stats to Influx")
        influx_write_api.write(INFLUX_BUCKET, record=influx_data)
Example #9
def send_results(results):
    """
    Formats the payload to send to InfluxDB
    :rtype: None
    """
    result_dict = results.dict()
    pt = Point("speed_test_results")
    pt.field('download', result_dict['download'])
    pt.field('upload', result_dict['upload'])
    pt.field('ping', result_dict['server']['latency'])
    pt.tag('server', result_dict['server']['id'])
    pt.tag('server_name', result_dict['server']['name'])
    pt.tag('server_country', result_dict['server']['country'])

    if getenv("INFLUXDB_V2_URL"):
        client = InfluxDBClient.from_env_properties()
        write_api = client.write_api(write_options=SYNCHRONOUS)
        try:
            # a synchronous write returns None and raises on failure, so success
            # has to be detected via exceptions rather than the return value
            write_api.write("speedtests/autogen", 'patrickjmcd', pt)
            logging.debug('Data written to InfluxDB')
        except Exception:
            logging.error("Data not written to influxdb")
Example #10
    def test_write_context_manager(self):

        with InfluxDBClient.from_env_properties(self.debug) as self.client:
            api_client = self.client.api_client
            with self.client.write_api(write_options=WriteOptions(
                    write_type=WriteType.batching)) as write_api:
                write_api_test = write_api
                write_api.write(bucket="my-bucket",
                                record=Point("h2o_feet").tag(
                                    "location", "coyote_creek").field(
                                        "level water_level", 5.0))
                self.assertIsNotNone(write_api._subject)
                self.assertIsNotNone(write_api._disposable)

            self.assertIsNone(write_api_test._subject)
            self.assertIsNone(write_api_test._disposable)
            self.assertIsNotNone(self.client.api_client)
            self.assertIsNotNone(
                self.client.api_client.rest_client.pool_manager)

        self.assertIsNone(api_client._pool)
        self.assertIsNone(self.client.api_client)
Example #11
DATA_COLLECTION_INTERVAL = int(environ.get("DATA_COLLECTION_INTERVAL", 30))

INVERTER_ENDPOINT = environ["INVERTER_ENDPOINT"]

INFLUXDB_BUCKET = environ.get("INFLUXDB_BUCKET", "fronius")

IGNORE_SUN_DOWN = environ.get("IGNORE_SUN_DOWN", "false") == "true"

LOCATION_CITY = environ["LOCATION_CITY"]
LOCATION_REGION = environ["LOCATION_REGION"]
LOCATION_TIMEZONE = environ["LOCATION_TIMEZONE"]
LOCATION_LAT = float(environ["LOCATION_LAT"])
LOCATION_LNG = float(environ["LOCATION_LNG"])

client = InfluxDBClient.from_env_properties()

location_info = LocationInfo(LOCATION_CITY, LOCATION_REGION, LOCATION_TIMEZONE,
                             LOCATION_LAT, LOCATION_LNG)
tz = pytz.timezone(LOCATION_TIMEZONE)
print(f"Location: {location_info}")
print(f"Timezone: {tz}")
endpoints = []

if COLLECT_MINMAX_INVERTER_DATA:
    print("Collecting MinMax Inverter Data")
    endpoints.append(
        f"http://{INVERTER_ENDPOINT}/solar_api/v1/GetInverterRealtimeData.cgi?Scope=Device&DataCollection=MinMaxInverterData&DeviceId=1",
    )

if COLLECT_3P_INVERTER_DATA:
Example #12
    def test_default_tags_from_env(self):
        self.client.close()
        self.client = InfluxDBClient.from_env_properties(self.debug)

        self._write_point()
Example #13
    def test_connection_option_from_env(self):
        self.client.close()
        self.client = InfluxDBClient.from_env_properties(self.debug)

        self.assertEqual("http://localhost:9999", self.client.url)
        self._check_connection_settings()
Example #14
    def __init__(self):
        self.client = InfluxDBClient.from_env_properties()
        self.query_api = self.client.query_api()
Example #15
    def test_init_from_env_ssl_default(self):
        if os.getenv("INFLUXDB_V2_VERIFY_SSL"):
            del os.environ["INFLUXDB_V2_VERIFY_SSL"]
        self.client = InfluxDBClient.from_env_properties()

        self.assertTrue(self.client.api_client.configuration.verify_ssl)
Example #16
    def test_init_from_env_ssl_ca_cert(self):
        os.environ["INFLUXDB_V2_SSL_CA_CERT"] = "/my/custom/path/to/cert"
        self.client = InfluxDBClient.from_env_properties()

        self.assertEqual("/my/custom/path/to/cert",
                         self.client.api_client.configuration.ssl_ca_cert)
Example #17
    def test_init_from_env_ssl_ca_cert_default(self):
        if os.getenv("INFLUXDB_V2_SSL_CA_CERT"):
            del os.environ["INFLUXDB_V2_SSL_CA_CERT"]
        self.client = InfluxDBClient.from_env_properties()

        self.assertIsNone(self.client.api_client.configuration.ssl_ca_cert)
Example #18
def do_it():
    influx_client = InfluxDBClient.from_env_properties()
    influx_write_api = influx_client.write_api(write_options=SYNCHRONOUS)
    s = create_session(user_agent=USERAGENT)
    tz = pytz.timezone("Australia/Melbourne")

    while True:
        do_login(JEMENA_USERNAME, JEMENA_PASSWORD, s, BASE_URL)

        periodic_data = get_periodic_data(s, BASE_URL)
        latest_interval = get_latest_interval(periodic_data)
        # 2021-04-20:17
        # the day is divided into half-hours, so 17 = 8:30 am

        # don't do this if within 3 hours of latest data?
        trigger_latest_data_fetch_response = trigger_latest_data_fetch(
            latest_interval, s, BASE_URL)
        i = 0

        if trigger_latest_data_fetch_response.polling:
            logging.info("Waiting for backend to update...")
            params = {"lastKnownInterval": latest_interval}
            while True:
                # https://electricityoutlook.jemena.com.au/electricityView/isElectricityDataUpdated?lastKnownInterval=2021-04-20:17
                # n.b. data for 2021-04-20:24 (midday) was only available after 12:30
                #
                logging.info("polling...")
                res = s.get(
                    f"{BASE_URL}/electricityView/isElectricityDataUpdated",
                    params=params,
                )
                res.raise_for_status()
                if "true" in res.text:
                    periodic_data = get_periodic_data(s, BASE_URL)
                    latest_interval = get_latest_interval(periodic_data)
                    break

                if i == 9:
                    logging.info("Unable to retrieve any new data!")
                    break

                i += 1
                sleep(3)

            # reget the data, e.g.
            # https://electricityoutlook.jemena.com.au/electricityView/period/day/0?_=1618885953690

        else:
            logging.info("Had latest data")

        # 2021-04-20:17
        half_hour_sections = int(latest_interval.split(":")[-1])

        measurement_base_dt = pytz.utc.localize(datetime.datetime.utcnow(),
                                                is_dst=None).astimezone(tz)

        threshold_dt = (datetime.datetime(
            day=measurement_base_dt.day,
            month=measurement_base_dt.month,
            year=measurement_base_dt.year,
            hour=0,
            minute=0,
            second=0,
        ) + datetime.timedelta(minutes=(half_hour_sections * 30)))

        influx_data = build_influx_measurements(tz, periodic_data,
                                                measurement_base_dt,
                                                threshold_dt)

        logging.info("submitting stats to Influx")
        logging.info(influx_data)
        influx_write_api.write(INFLUX_BUCKET, record=influx_data)
        logging.info(f"sleeping for {CHECK_INTERVAL}")
        sleep(CHECK_INTERVAL)
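As a side note, the half-hour arithmetic described in the comments above works out like this (a small standalone illustration, using the sample interval quoted in the comment):

import datetime

latest_interval = "2021-04-20:17"  # sample value from the comment above
half_hour_sections = int(latest_interval.split(":")[-1])
midnight = datetime.datetime(2021, 4, 20)
print(midnight + datetime.timedelta(minutes=half_hour_sections * 30))
# 2021-04-20 08:30:00 -- i.e. 17 half-hours is 8:30 am, as the comment says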
Example #19
    def test_init_from_env_ssl(self):
        os.environ["INFLUXDB_V2_VERIFY_SSL"] = "False"
        self.client = InfluxDBClient.from_env_properties()

        self.assertFalse(self.client.api_client.configuration.verify_ssl)
Example #20
def from_env_properties():
    client = InfluxDBClient.from_env_properties()
    return TimeseriesClient(client=client)
Example #21
    def test_init_from_env_connection_pool_maxsize(self):
        os.environ["INFLUXDB_V2_CONNECTION_POOL_MAXSIZE"] = "29"
        self.client = InfluxDBClient.from_env_properties()

        self.assertEqual(
            29, self.client.api_client.configuration.connection_pool_maxsize)
Example #22
File: db.py Project: Willmish/Envidrawer
    def __init__(self):
        self.client = InfluxDBClient.from_env_properties()
        self.write_api = self.client.write_api(write_options=SYNCHRONOUS)
        self.bucket_name = getenv("DB_BUCKET")