# Requires: requests, aioinflux, and NotebookAuth (NotebookAuth's import
# path, e.g. lsst_efd_client.auth_helper, is an assumption).
def __init__(self,
              efd_name,
              db_name='efd',
              creds_service='https://roundtable.lsst.codes/segwarides/',
              client=None):
     self.db_name = db_name
     self.auth = NotebookAuth(service_endpoint=creds_service)
     host, schema_registry, port, user, password = self.auth.get_auth(
         efd_name)
     # Ensure the schema registry URL always carries the port, keeping
     # any trailing slash in place.
     if schema_registry[-1] == '/':
         self.schema_registry = schema_registry[:-1] + ':' + port + '/'
     else:
         self.schema_registry = schema_registry + ':' + port
     if client is None:
         health_url = f'https://{host}:{port}/health'
         response = requests.get(health_url)
         if response.status_code != 200:
             raise RuntimeError(
                 f'InfluxDB server, {host}, does not appear ready to '
                 f'receive queries. Received code {response.status_code} '
                 'when attempting the health check.')
         self.influx_client = aioinflux.InfluxDBClient(
             host=host,
             port=port,
             ssl=True,
             username=user,
             password=password,
             db=db_name,
             mode='async')  # use mode='blocking' for a synchronous client
         self.influx_client.output = 'dataframe'
     else:
         self.influx_client = client
     self.query_history = []
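A hedged usage sketch for the client above: assuming the surrounding class is named EfdClient and that 'summit_efd' is a name known to the credential service (both assumptions), the dataframe output mode lets an awaited query come back as a pandas DataFrame.

import asyncio

async def demo():
    # 'summit_efd' and the topic name below are placeholders, not values
    # taken from the example above.
    client = EfdClient('summit_efd')
    df = await client.influx_client.query(
        'SELECT * FROM "efd"."autogen"."lsst.sal.ATDome.position" '
        'WHERE time > now() - 1h')
    print(df.head())

asyncio.run(demo())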
Example #2
def get_client(which_efd):
    efd_url = AVAILABLE_EFDS[which_efd]
    token = get_token(which_efd)
    return aioinflux.InfluxDBClient(host=efd_url,
                                    port=443,
                                    ssl=True,
                                    username=token.uname,
                                    password=token.pwd,
                                    db='efd',
                                    output="dataframe")
Example #3
    def __init__(self, bot):
        self.bot = bot
        self._logger = bot.log.get_logger('Influx')

        self.config = bot.config['Monitoring']
        self.client = (aioinflux.InfluxDBClient(**self.config['influx'])
                       if self.config['enabled'] else None)

        bot.loop.create_task(self._startup_check())
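The _startup_check coroutine scheduled above is not shown in this example; a plausible sketch, assuming it only needs to verify the InfluxDB connection and log the result, could use aioinflux's ping():

    async def _startup_check(self):
        # Hypothetical reconstruction, not the original method body.
        if self.client is None:
            self._logger.info('Monitoring disabled; skipping Influx check')
            return
        try:
            # ping() hits InfluxDB's /ping endpoint and returns the
            # response headers.
            headers = await self.client.ping()
            self._logger.info('InfluxDB reachable (version %s)',
                              headers.get('X-Influxdb-Version', 'unknown'))
        except Exception:
            self._logger.exception('InfluxDB startup check failed')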
Example #4
async def main(database, serial_port, syslog_port, database_password,
               database_username, database_host):
    async with aioinflux.InfluxDBClient(host=database_host,
                                        db=database,
                                        username=database_username,
                                        password=database_password) as influx:
        await influx.create_database(db=database)
        app = Application(influx,
                          serial_port=serial_port,
                          syslog_port=syslog_port)
        await app.run()
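A minimal entry point for the coroutine above; every argument value here is a placeholder:

import asyncio

if __name__ == '__main__':
    asyncio.run(main(database='telemetry',
                     serial_port='/dev/ttyUSB0',
                     syslog_port=5140,
                     database_password='secret',
                     database_username='writer',
                     database_host='localhost'))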
Example #5
    async def run(self):
        local_queue = []
        last_time = time.monotonic()

        # FIXME: explicitly handle reconnections with queue flushing so
        # the in_queue cannot grow without bound
        async with aioinflux.InfluxDBClient(**self.influx_options) as client:
            # create_database() only needs the database name; the
            # connection options were already given to the client.
            await client.create_database(db=self.influx_db)
            async for s in self.metric_generator():
                # Append first so the point that triggers a flush is
                # included in the batch instead of being dropped.
                local_queue.append(self._make_influx_point(s))
                if (len(local_queue) > self.batch_size or
                        time.monotonic() - last_time > self.max_flush_interval):
                    logger.info('%s: [%s] writing batch of %d points',
                                self._classname, self.measurement,
                                len(local_queue))

                    await client.write(local_queue)
                    last_time = time.monotonic()
                    local_queue.clear()
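_make_influx_point is not part of the excerpt; a minimal sketch, assuming each sample s exposes tags, value, and timestamp attributes (assumed names), shows the dict shape aioinflux's write() accepts:

    def _make_influx_point(self, s):
        # Hypothetical reconstruction; aioinflux accepts points as dicts
        # with measurement/tags/time/fields keys.
        return {
            'measurement': self.measurement,
            'tags': dict(s.tags),
            'time': s.timestamp,  # ns epoch int or datetime both work
            'fields': {'value': float(s.value)},
        }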
Example #6
    async def setup(self, _, loop):
        self.redis = await aioredis.create_redis_pool(
            getattr(self.config, "REDIS_URL", "redis://127.0.0.1"),
            minsize=3,
            loop=loop
        )
        self.db = AsyncIOMotorClient(
            getattr(self.config, "MONGO_URL", "mongodb://127.0.0.1")).url_wtf
        self.links = LinkDatabase(self)
        await self.links.create_indexes()
        self.session = ClientSession(loop=loop)
        self.influx = aioinflux.InfluxDBClient(
            host=getattr(self.config, "INFLUX_HOST", "127.0.0.1"),
            database="url_wtf",
            loop=loop
        )
        await self.influx.create_database(db="url_wtf")

        caddy_url = getattr(self.config, "CADDY_URL", None)
        if caddy_url is not None:
            with open("caddy.json", "r") as f:
                caddy_config = json.load(f)

            async with self.session.post(f"{caddy_url}/load", json=caddy_config) as resp:
                resp.raise_for_status()
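setup() opens four long-lived resources (a redis pool, a Motor client, an aiohttp session, and an Influx client); a matching teardown sketch, assuming the same attribute names, keeps shutdown symmetric:

    async def teardown(self, _, loop):
        # Close in roughly the reverse order of creation.
        await self.influx.close()
        await self.session.close()
        self.redis.close()              # aioredis 1.x: synchronous close()
        await self.redis.wait_closed()  # then await the actual teardown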
Example #7
    async def main(self):
        _LOGGER.info('Loaded module %s', self.__class__.__name__)

        influx = aioinflux.InfluxDBClient(host=self.server,
                                          port=self.port,
                                          database=self.database,
                                          username=self.username,
                                          password=self.password)

        while True:
            if not self.queue:
                await asyncio.sleep(0.1)
                continue
            # Get the oldest item from the list
            item = self.queue[0]
            timestamp, han = item

            # Build the instantaneous power point; note that "%s" (epoch
            # seconds) is a GNU strftime extension, so this assumes Linux.
            measure = [{"measurement": "amsReadout",
                        "tags": {"user": "******"},
                        "time": int(timestamp.strftime("%s")),
                        "fields": {"watt": han.data["act_pow_pos"]},
                        }]

            try:
                await influx.write(measure)
            except aiohttp.client_exceptions.ClientConnectorError as E:
                _LOGGER.error('Unable to communicate with %s: %s', self.server, E)
                await asyncio.sleep(10)
                continue
            except Exception as E:
                _LOGGER.error(E)
                _LOGGER.error(traceback.format_exc())
                await asyncio.sleep(10)
                continue

            fields = dict(han.data)
            ts = fields.pop("timestamp")
            fields.pop("datetime", None)
            # Scale the raw meter readings into their target units via
            # fixed per-field divisors.
            divisors = {"curr_l1": 1000, "curr_l2": 1000, "curr_l3": 1000,
                        "volt_l1": 10, "volt_l2": 10, "volt_l3": 10,
                        "act_pow_neg": 1000, "act_pow_pos": 1000,
                        "react_pow_neg": 1000, "react_pow_pos": 1000,
                        "act_energy_pa": 1000}
            for key, divisor in divisors.items():
                if key in fields:
                    fields[key] = float(fields[key]) / divisor
            # Note: ts is not attached as the point time, so InfluxDB
            # stamps these points with server time on arrival.
            m = [{"measurement": "AMS",
                  "tags": {"user": "******"},
                  "fields": fields,
                  }]

            try:
                await influx.write(m)
            except aiohttp.client_exceptions.ClientConnectorError as E:
                _LOGGER.error(
                    'Unable to communicate with %s: %s', self.server, E)
                await asyncio.sleep(10)
                continue
            except Exception as E:
                _LOGGER.error(E)
                _LOGGER.error(traceback.format_exc())
                await asyncio.sleep(10)
                continue

            if self.queue[0] == item:
                # Finished processing; remove the element only if the
                # buffer has not overflowed in the meantime.
                self.queue.popleft()
            else:
                _LOGGER.info('Current item was evicted by a buffer overflow')
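That final check only works if self.queue is a bounded buffer that silently evicts its oldest element on overflow; a small sketch, assuming collections.deque with maxlen (the queue's construction is not shown in this excerpt), demonstrates why queue[0] can change while an item is being written:

from collections import deque

queue = deque(maxlen=3)
queue.extend([1, 2, 3])
item = queue[0]           # 1: the element currently being processed
queue.append(4)           # overflow: the deque silently evicts 1
assert queue[0] != item   # popleft() now would drop a live element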