Example #1
    async def delete_credits_left_measurements(self) -> Dict:
        deleted_from_list = {"deleted_from": [], "not_deleted_from": []}
        show_measurements_template = "show measurements"
        credits_history = config["CREDITS_HISTORY_DB"]
        all_measurements = await self.query(show_measurements_template,
                                            db=credits_history)
        get_time_template = "SELECT first(credits_used) FROM {measurement} " \
                            "where credits_used >= 0"
        delete_template = "DELETE FROM {measurement} where time < {time}"
        for measurement_point in iterpoints(
                all_measurements,
                lambda *x, meta: dict(zip(meta['columns'], x))):
            try:
                measurement_name = measurement_point["name"]
                # Find the earliest point with a non-negative credit count...
                time = await self.query(
                    get_time_template.format(measurement=measurement_name),
                    db=credits_history)
                for time_point in iterpoints(
                        time, lambda *x, meta: dict(zip(meta['columns'], x))):
                    # ...and delete everything recorded before it.
                    await self.query(
                        delete_template.format(measurement=measurement_name,
                                               time=time_point["time"]))
                    deleted_from_list["deleted_from"].append(measurement_name)
            except Exception as e:
                # ``Exception`` already covers ``ValueError`` and ``KeyError``,
                # so a single catch-all clause suffices.
                influxdb_logger.exception(e)
                deleted_from_list["not_deleted_from"].append(measurement_point)
        return deleted_from_list
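The parser used twice above, ``lambda *x, meta: dict(zip(meta['columns'], x))``, receives one row's values positionally plus the series metadata, and zips the column names with the values to build one dict per point. A minimal, self-contained sketch (measurement and column names are made up):

from aioinflux import iterpoints

# Hand-built response mimicking the JSON shape InfluxDB returns.
resp = {
    "results": [{
        "statement_id": 0,
        "series": [{
            "name": "credits_demo",
            "columns": ["time", "credits_used"],
            "values": [[1577419571000000000, 12.5]],
        }],
    }]
}

for point in iterpoints(resp, lambda *x, meta: dict(zip(meta["columns"], x))):
    print(point)  # {'time': 1577419571000000000, 'credits_used': 12.5}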
Example #2
    async def delete_mb_and_vcpu_measurements(self, project_name_to_delete,
                                              since_date) -> Dict:
        # Look up the most recent point for the project in each measurement
        # to confirm the project exists before deleting anything.
        project_info_mb_template = f"SELECT LAST(value)," \
                                   f"project_name, location_id " \
                                   f"FROM project_mb_usage " \
                                   f"where project_name='{project_name_to_delete}';"
        project_info_vcpu_template = f"SELECT LAST(value)," \
                                     f"project_name, location_id " \
                                     f"FROM project_vcpu_usage " \
                                     f"where project_name='{project_name_to_delete}';"
        mb_info = await self.query(project_info_mb_template)
        vcpu_info = await self.query(project_info_vcpu_template)
        returned_timestamps = {}
        for i in iterpoints(mb_info,
                            lambda *x, meta: dict(zip(meta['columns'], x))):
            returned_timestamps["project_name"] = i["project_name"]
            returned_timestamps["location_id"] = i["location_id"]
        for i in iterpoints(vcpu_info,
                            lambda *x, meta: dict(zip(meta['columns'], x))):
            returned_timestamps["project_name"] = i["project_name"]
            returned_timestamps["location_id"] = i["location_id"]
        # An empty dict means the project was not found in either measurement.
        if not returned_timestamps:
            return returned_timestamps

        delete_mb_template = f"delete from project_mb_usage " \
                             f"where project_name='{project_name_to_delete}' " \
                             f"and time > {since_date}000000000;"
        delete_vcpu_template = f"delete from project_vcpu_usage " \
                               f"where project_name='{project_name_to_delete}' " \
                               f"and time > {since_date}000000000;"
        await self.query(delete_mb_template)
        await self.query(delete_vcpu_template)

        # Report the newest timestamp still present in each measurement.
        last_mb_timestamp_template = f"SELECT LAST(value), time, " \
                                     f"project_name, location_id " \
                                     f"FROM project_mb_usage " \
                                     f"where project_name='{project_name_to_delete}';"
        last_vcpu_timestamp_template = f"SELECT LAST(value), time, " \
                                       f"project_name, location_id " \
                                       f"FROM project_vcpu_usage " \
                                       f"where project_name='{project_name_to_delete}';"
        last_mb = await self.query(last_mb_timestamp_template)
        last_vcpu = await self.query(last_vcpu_timestamp_template)
        for i in iterpoints(last_mb,
                            lambda *x, meta: dict(zip(meta['columns'], x))):
            returned_timestamps["last_mb"] = {}
            returned_timestamps["last_mb"]["time"] = i["time"] / 1e9
        for i in iterpoints(last_vcpu,
                            lambda *x, meta: dict(zip(meta['columns'], x))):
            returned_timestamps["last_vcpu"] = {}
            returned_timestamps["last_vcpu"]["time"] = i["time"] / 1e9

        return returned_timestamps
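The timestamp arithmetic above is worth spelling out. Assuming ``since_date`` is a Unix timestamp in whole seconds, appending nine zeros yields the nanosecond epoch InfluxDB stores in ``time``, and dividing by ``1e9`` converts back:

since_date = 1577419571                         # seconds
as_nanoseconds = int(f"{since_date}000000000")  # 1577419571000000000
back_to_seconds = as_nanoseconds / 1e9          # 1577419571.0
assert back_to_seconds == since_date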
Example #3
async def get_device_data(query_data: QueryDataDeviceId,
                          response: Response,
                          user: str = Security(get_current_user, scopes=["all"])
                          ) -> Union[List, Dict]:
    owner_tokens = await get_owner_tokens(user=user)
    if owner_tokens is None:
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {"msg": "User doesn't have owner tokens"}

    if query_data.ownerToken not in owner_tokens:
        response.status_code = status.HTTP_403_FORBIDDEN
        return {"msg": "Owner Token provided is not granted to user"}

    # Query InfluxDB for the requested reading type from the given device
    # within the requested time window.
    search_query = ("select {},device_id from {} "
                    "where time < '{}' and time > '{}' "
                    "and device_id = '{}'").format(
        query_data.readingType, query_data.ownerToken,
        query_data.times.finishTime.isoformat("T") + "Z",
        query_data.times.startTime.isoformat("T") + "Z",
        query_data.deviceId)

    logger.info(search_query)

    query_list = []
    client = get_database()
    r = await client.query(search_query)
    for i in iterpoints(r):
        query_list.append(i)
    return query_list
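The time bounds are rendered with ``isoformat("T") + "Z"``; assuming the datetimes are naive UTC, this produces the RFC3339 strings InfluxQL accepts in time comparisons:

from datetime import datetime

start = datetime(2020, 1, 1, 12, 30, 0)
print(start.isoformat("T") + "Z")  # 2020-01-01T12:30:00Z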
Example #4
async def test_iterpoints_with_parser(iter_client):
    r = await iter_client.query("SELECT * FROM cpu_load LIMIT 3")
    parser = lambda *x, meta: dict(zip(meta['columns'], x))  # noqa
    for i in iterpoints(r, parser):
        logger.info(i)
        assert 'time' in i
        assert 'value' in i
        assert 'host' in i
Example #5
async def test_empty_chunked_query(async_client):
    resp = await async_client.select_all(measurement='fake',
                                         chunked=True,
                                         chunk_size=10)
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk):
            points.append(point)
    assert len(points) == 0
Example #6
async def test_empty_chunked_query(client):
    resp = await client.query('SELECT * FROM fake',
                              chunked=True,
                              chunk_size=10)
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk):
            points.append(point)
    assert len(points) == 0
Example #7
async def test_aiter_point(iter_client):
    resp = await iter_client.query('SELECT * from cpu_load',
                                   chunked=True,
                                   chunk_size=10)
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk):
            points.append(point)
    assert len(points) == 100
Example #8
async def test_aiter_point(iter_client):
    resp = await iter_client.select_all(measurement='cpu_load',
                                        chunked=True,
                                        chunk_size=10)
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk):
            points.append(point)
    assert len(points) == 100
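The four chunked tests above share one consumption pattern; a small helper capturing it (a sketch, not part of aioinflux):

async def collect_points(resp, parser=None):
    """Flatten a chunked aioinflux response into a list of points."""
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk, parser):
            points.append(point)
    return points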
Example #9
def test_iter_multi_series():
    # See https://github.com/gusutabopb/aioinflux/issues/29
    # Four series (one per disk instance) inside a single statement result;
    # ``iterpoints`` must yield the points of every series, not just the first.
    d = {
        'results': [{
            'statement_id': 0,
            'series': [{
                'name': 'win_disk',
                'columns': ['time', 'free', 'total', 'used', 'percent', 'path'],
                'tags': {'instance': 'C:'},
                'values': [[1577419571000000000, 94, 238, 144,
                            60.49140930175781, 'C:']],
            }, {
                'name': 'win_disk',
                'columns': ['time', 'free', 'total', 'used', 'percent', 'path'],
                'tags': {'instance': 'D:'},
                'values': [[1577419571000000000, 1727, 1863, 136,
                            7.3103790283203125, 'D:']],
            }, {
                'name': 'win_disk',
                'columns': ['time', 'free', 'total', 'used', 'percent', 'path'],
                'tags': {'instance': 'HarddiskVolume1'},
                'values': [[1577419330000000000, 0, 0, 0,
                            29.292930603027344, 'HarddiskVolume1']],
            }, {
                'name': 'win_disk',
                'columns': ['time', 'free', 'total', 'used', 'percent', 'path'],
                'tags': {'instance': '_Total'},
                'values': [[1577419571000000000, 1821, 2101, 280,
                            13.345237731933594, '_Total']],
            }],
        }]
    }
    assert len(list(iterpoints(d))) == 4
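If the metadata handed to a ``meta``-aware parser exposes the series' ``tags`` alongside its ``columns`` (as the shape of ``d`` above suggests), the per-series tags can be merged into each point. A sketch (the usage lines assume ``d`` from the test above):

def tagged(*values, meta):
    # Merge the series' tag set (e.g. {'instance': 'C:'}) into the point.
    point = dict(zip(meta['columns'], values))
    point.update(meta.get('tags', {}))
    return point

# e.g.: [p['instance'] for p in iterpoints(d, tagged)]
# -> ['C:', 'D:', 'HarddiskVolume1', '_Total']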
Example #10
async def test_chunked_query(async_client):
    resp = await async_client.select_all(measurement='test_measurement',
                                         chunked=True,
                                         chunk_size=10,
                                         wrap=False)
    points = []
    async for chunk in resp:
        for point in iterpoints(chunk):
            points.append(point)
    assert len(points) == 100
Example #11
async def test_iter_point_namedtuple(iter_client):
    from collections import namedtuple
    nt = namedtuple('cpu_load',
                    ['time', 'direction', 'host', 'region', 'value'])

    resp = await iter_client.query('SELECT * from cpu_load')
    points = []
    for point in iterpoints(resp, parser=nt):
        points.append(point)
        assert len(point) == 5
    assert len(points) == 100
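Note that a parser without a ``meta`` parameter, such as the namedtuple above, receives only the column values positionally; its fields must therefore match the order of the columns the query returns (here ``time``, ``direction``, ``host``, ``region``, ``value``).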
Example #12
async def test_influx_read_write(influx_client):
    point2 = _TestPoint(
        measurement="test_influx_read_write",
        tag1="test_influx_read_write",
        field1="test",
        timestamp=datetime.now(),
    )
    await influx_client.write(point2)
    result = await influx_client.query("SELECT * FROM test_influx_read_write")
    parsed_point = list(iterpoints(result, _TestPoint.from_iterpoint))[0]
    assert parsed_point == point2
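``_TestPoint`` itself is not shown on this page; a hypothetical shape its ``from_iterpoint`` classmethod might take, given that ``iterpoints`` calls the parser with one row's values plus the series metadata (``DemoPoint`` and its fields are illustrative, not the project's actual class):

from dataclasses import dataclass
from datetime import datetime

@dataclass(frozen=True)
class DemoPoint:
    measurement: str
    tag1: str
    field1: str
    timestamp: datetime

    @classmethod
    def from_iterpoint(cls, *values, meta):
        columns = dict(zip(meta["columns"], values))
        return cls(
            measurement=meta["name"],
            tag1=columns["tag1"],
            field1=columns["field1"],
            # Assuming epoch-nanosecond timestamps, as elsewhere on this page.
            timestamp=datetime.fromtimestamp(columns["time"] / 1e9),
        )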
Example #13
    async def query_points(
        self,
        measurement: str,
        point_class: Type[PT],
        db: str,
        query_constraints: Optional[List[str]] = None,
    ) -> AsyncGenerator[PT, None]:
        """Asynchronously yields all queried points which in turn are streamed in chunks
        from the InfluxDB, where the points are sorted by their timestamp descending.

        :param measurement: Which table to run the query against.
        :param point_class: Subclass of ``InfluxDBPoint`` whose ``from_iterpoint``
            method will be used to deserialize the returned points.
        :param db: Which database to run the query against.
        :param query_constraints: WHERE-constraints to add to the query, will be AND-ed
            if more than one is given.
        :return: Instances of ``point_class`` ordered by their timestamp descending.
        """
        query_template = """\
        SELECT *
        FROM {measurement}
        {constraints}
        ORDER BY time DESC
        """
        constraints = ""
        if query_constraints and len(query_constraints) > 0:
            constraints = f"WHERE {' AND '.join(query_constraints)}"
        query = query_template.format(constraints=constraints,
                                      measurement=measurement)
        influxdb_logger.debug(
            "Sending query `%s` to InfluxDB",
            shorten(query.replace("\n", ""), len(query)),
        )
        result = await self.query(query, chunked=True, db=db)
        try:
            # If an error occurs it is raised here due to ``chunked=True``
            async for chunk in result:
                for point in iterpoints(chunk, point_class.from_iterpoint):
                    yield point
        except _InfluxDBError as e:
            influxdb_logger.exception("Exception when querying InfluxDB")
            raise InfluxDBError(*e.args)
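A hypothetical consumer of this generator (``client``, ``BillingPoint``, the database name, and the constraint are all assumptions, not part of the source):

async def print_recent_points(client):
    # Points arrive lazily, chunk by chunk, as the loop advances.
    async for point in client.query_points(
        measurement="project_mb_usage",
        point_class=BillingPoint,       # hypothetical InfluxDBPoint subclass
        db="credits_db",                # hypothetical database name
        query_constraints=["time > now() - 7d"],
    ):
        print(point)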
Example #14
async def test_bool_serializer(influx_client):
    """Separate test since it is the only default serializer currently not in use
    """

    @dataclass(frozen=True)
    class BoolTest(InfluxDBPoint):
        t: bool
        f: bool = field(metadata={"tag": True})

    influx_line = b"bool_test,t=T f=FALSE 1553342599293000000"

    test1 = BoolTest.from_lineprotocol(influx_line)
    test2 = BoolTest(
        measurement="bool_test",
        timestamp=datetime(2019, 3, 23, 13, 3, 19, 293000),
        f=False,
        t=True,
    )
    assert test1 == test2
    await influx_client.write(test2)
    result = await influx_client.query("SELECT * FROM bool_test")
    parsed_point = list(iterpoints(result, BoolTest.from_iterpoint))[0]
    assert parsed_point == test2