Code Example #1
 def test_metric_fields_by_date_with_filter(
         self, preloaded_datastore: SQLMetricStore):
     system_info = Metadata.system_info()
     timestamp = preloaded_datastore.base_timestamp
     oldest = timestamp - datetime.timedelta(seconds=10)
     items = preloaded_datastore.metric_fields_by_date(
         metric_name="TestMetric",
         oldest=oldest,
         metadata_filter={'platform': system_info.values['platform']})
     all_items = preloaded_datastore.metric_fields_by_date(
         metric_name="TestMetric",
         oldest=oldest - datetime.timedelta(days=100))
     items_key0 = list(items.metric_data.keys())[0]
     assert len(items.metric_data[items_key0]) > 0
     assert len(items.metric_data[items_key0]) < 100
     assert len(all_items.timestamps) == 100
     sorted_items = all_items.metric_data[items_key0][
         -len(items.metric_data[items_key0]):]
     for item in sorted_items:
         assert item in items.metric_data[items_key0]
     for index in range(1, len(all_items.timestamps)):
         assert all_items.timestamps[index - 1] < all_items.timestamps[index]
     items = preloaded_datastore.metric_fields_by_date(
         metric_name="TestMetric",
         oldest=oldest,
         metadata_filter={'platform': 'no_such_platform'})
     assert items.timestamps == items.metadata == []
     assert items.metric_data == {}
Code Example #2
 def test_metric_fields_by_volume_with_filter(
         self, preloaded_datastore: SQLMetricStore):
     system_info = Metadata.system_info()
     items = preloaded_datastore.metric_fields_by_volume(
         metric_name="TestMetric", count=5)
     for metadata in items.metadata:
         assert metadata.values['platform'] == system_info.values['platform']
     items = preloaded_datastore.metric_fields_by_volume(
         metric_name="TestMetric",
         count=5,
         metadata_filter={'platform': system_info.values['platform']})
     all_items = preloaded_datastore.metric_fields_by_volume(
         metric_name="TestMetric", count=200)
     all_items_key0 = list(all_items.metric_data.keys())[0]
     assert len(all_items.metric_data[all_items_key0]) == 100
     assert len(items.metadata) == 5
     sorted_items = all_items.metric_data[all_items_key0][-5:]
     for item in items.metric_data[all_items_key0]:
         assert item in sorted_items
     for index in range(1, len(items.timestamps)):
         assert items.timestamps[index - 1] < items.timestamps[index]
     items = preloaded_datastore.metric_fields_by_volume(
         metric_name="TestMetric",
         count=5,
         metadata_filter={'platform': 'fail'})
     assert items.timestamps == items.metadata == []
     assert items.metric_data == {}
Code Example #3
 def test_metrics_data_by_date_with_filter(
         self, preloaded_datastore: SQLMetricStore):
     system_info = Metadata.system_info()
     timestamp = preloaded_datastore.base_timestamp
     oldest = timestamp - datetime.timedelta(seconds=10)
     items = preloaded_datastore.dataclass_metrics_by_date(
         name="TestMetric",
         typ=TestMetricData,
         oldest=oldest,
         metadata_filter={
             'platform': system_info.values['platform'],
             'system': system_info.values['system']
         })
     all_items = preloaded_datastore.dataclass_metrics_by_date(
         name="TestMetric",
         typ=TestMetricData,
         oldest=oldest - datetime.timedelta(days=100))
     assert len(items.metric_data) > 0
     assert len(all_items.timestamps) == 100
     sorted_items = all_items.metric_data[-len(items.metric_data):]
     for item in sorted_items:
         assert item in items.metric_data
     for index in range(1, len(all_items.timestamps)):
         assert all_items.timestamps[index - 1] < all_items.timestamps[index]
     items = preloaded_datastore.dataclass_metrics_by_date(
         name="TestMetric",
         typ=TestMetricData,
         oldest=oldest,
         metadata_filter={'platform': 'fail'})
     assert items.timestamps == items.metric_data == items.metadata == []
Code Example #4
        def execute(self) -> QueryResult[Dict[str, List[float]]]:
            self._statement = self._statement.order_by(
                desc(SQLCompositeMetric.timestamp))
            if self._max_count:
                query = self._session.query(SQLCompositeMetric.id).order_by(desc(SQLCompositeMetric.timestamp)).\
                    limit(self._max_count)
                self._statement = self._statement.filter(
                    SQLCompositeMetric.id.in_([r.id for r in query.all()
                                               ]))  # MySQL forces the .all()
            sql_result: List[SQLCompositeMetric] = self._statement.all()
            result: QueryResult[Dict[str, List[float]]] = QueryResult()
            result.metric_data = {}  # correction on default type/value
            by_id = OrderedDict()
            for name, value, timestamp, uuid, group_id in reversed(sql_result):
                alias = aliased(SQLMetadata, name='metadata_field_' + name)
                if group_id not in by_id:
                    query = self._session.query(SQLMetadataSet).filter(
                        SQLMetadataSet.uuid == uuid)
                    for metadata_name, (metadata_value,
                                        op) in self._metadata_filter.items():
                        if op == MetricStore.Comparison.EQUAL:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value == metadata_value)
                        elif op == MetricStore.Comparison.NOT_EQUAL:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value != metadata_value)
                        elif op == MetricStore.Comparison.LESS_THAN:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value < metadata_value)
                        elif op == MetricStore.Comparison.GREATER_THAN:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value > metadata_value)
                        elif op == MetricStore.Comparison.LESS_THAN_OR_EQUAL:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value <= metadata_value)
                        elif op == MetricStore.Comparison.GREATER_THAN_OR_EQUAL:
                            query = query.filter(alias.name == metadata_name,
                                                 alias.value >= metadata_value)
                        else:
                            raise ValueError(f"Invalid operation: {op}")
                    try:
                        sql_metadata = query.one()
                        metadata = Metadata({})
                        for r in sql_metadata.data:
                            metadata.values[r.name] = r.value
                        by_id[group_id] = timestamp, metadata, {name: value}
                    except NoResultFound:
                        pass
                else:
                    by_id[group_id][2][name] = value
            for (timestamp, metadata, metrics_table) in by_id.values():
                result.timestamps.append(timestamp)
                result.metadata.append(metadata)
                for name, value in metrics_table.items():
                    result.metric_data.setdefault(name, [])
                    result.metric_data[name].append(value)

            return result
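The chain of elif branches above dispatches each MetricStore.Comparison value to the matching SQLAlchemy column comparison. The same dispatch can be condensed into a lookup table; the sketch below is illustrative only, and the helper name apply_metadata_comparison and the import path for MetricStore are assumptions, not part of daktylos.

import operator

from daktylos.data import MetricStore  # import path assumed for illustration

# Comparison -> Python operator; operator.lt(column, value) builds the same
# SQLAlchemy expression as `column < value`.
_COMPARISON_OPS = {
    MetricStore.Comparison.EQUAL: operator.eq,
    MetricStore.Comparison.NOT_EQUAL: operator.ne,
    MetricStore.Comparison.LESS_THAN: operator.lt,
    MetricStore.Comparison.GREATER_THAN: operator.gt,
    MetricStore.Comparison.LESS_THAN_OR_EQUAL: operator.le,
    MetricStore.Comparison.GREATER_THAN_OR_EQUAL: operator.ge,
}


def apply_metadata_comparison(query, alias, metadata_name, metadata_value, op):
    """Hypothetical helper: apply one metadata filter term to a query."""
    try:
        compare = _COMPARISON_OPS[op]
    except KeyError:
        raise ValueError(f"Invalid operation: {op}") from None
    return query.filter(alias.name == metadata_name,
                        compare(alias.value, metadata_value))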
Code Example #5
 def test_metric_subfields_by_volume(self,
                                     preloaded_datastore: SQLMetricStore):
     system_info = Metadata.system_info()
     items = preloaded_datastore.metric_fields_by_volume(
         metric_name="TestMetric",
         count=5,
         fields=['%grandchild%'],
         metadata_filter={'platform': system_info.values['platform']})
     for key in items.metric_data.keys():
         assert 'grandchild' in key
     items_key0 = list(items.metric_data.keys())[0]
     assert len(items.metric_data[items_key0]) == 5
     assert len(items.metric_data[items_key0]) == len(
         items.metadata) == len(items.timestamps)
     for index in range(1, len(items.timestamps)):
         assert items.timestamps[index - 1] < items.timestamps[index]
Code Example #6
File: conftest.py Project: nak/daktylos
def preloaded_datastore(request):
    engine = sqlalchemy.create_engine(request.param)
    metadata = Metadata.system_info()
    timestamp = datetime.datetime.utcnow()
    # import daktylos.data_stores.sql as sql
    # sql.Base.metadata.drop_all(engine)
    if 'redshift' in request.param:
        from daktylos.data_stores.sql_crippled import (
            SQLMetricStore,
            SQLCompositeMetric,
            SQLMetadataSet,
            SQLMetadata,
            SQLMetric,
        )
    else:
        from daktylos.data_stores.sql import (
            SQLMetricStore,
            SQLCompositeMetric,
            SQLMetadataSet,
            SQLMetadata,
            SQLMetric,
        )
    with SQLMetricStore(engine=engine, create=True) as datastore:
        datastore.SQLCompositeMetric = SQLCompositeMetric
        datastore.SQLMetadataSet = SQLMetadataSet
        datastore.SQLMetadata = SQLMetadata
        datastore.SQLMetric = SQLMetric
        clear_store(datastore, request.param)
        try:
            index = 0
            for metric in data_generator():
                datastore.post(metric,
                               timestamp - datetime.timedelta(seconds=index),
                               metadata=metadata)
                index += 1
            datastore.commit()
            assert datastore._session.query(SQLCompositeMetric).count() == 100
            datastore.base_timestamp = timestamp
            datastore.commit()
            yield datastore
        finally:
            with suppress(Exception):
                clear_store(datastore, request.param)
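The fixture decorator itself is not shown above; since the body reads the database URL from request.param, one plausible conftest.py declaration (illustrative only, the example URL is not from the project) would be:

import pytest

@pytest.fixture(params=["sqlite:///:memory:"])  # example URL; real params are project-specific
def preloaded_datastore(request):
    ...  # body as in Code Example #6 above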
Code Example #7
 def execute(self) -> QueryResult[MetricDataClassT]:
     # order by timestamp descending so that a LIMIT keeps only the newest items
     self._statement = self._statement.order_by(
         desc(SQLCompositeMetric.timestamp))
     if self._max_count:
         self._statement = self._statement.limit(self._max_count)
     sql_result: List[SQLCompositeMetric] = self._statement.all()
     result: QueryResult[MetricDataClassT] = QueryResult()
     # return results to the client ordered from oldest to newest
     for item in reversed(sql_result):
         flattened: Dict[str, float] = {}
         for child in item.children:
             flattened[child.name] = child.value
         metadata = Metadata({
             data.name: data.value
             for data in item.metrics_metadata.data
         })
         result.metadata.append(metadata)
         result.timestamps.append(item.timestamp)
         result.metric_data.append(
             CompositeMetric.from_flattened(flattened).to_dataclass(
                 self._type))
     return result
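Read together with the dataclasses defined in Code Example #8, the QueryResult returned here is consumed by walking its parallel lists. The sketch below is illustrative only; the query arguments and the 'platform' metadata key are assumptions taken from the tests above.

import datetime

# timestamps, metadata and metric_data are parallel lists, oldest entry first
result = preloaded_datastore.dataclass_metrics_by_date(
    name="TestMetric",
    typ=TestMetricData,
    oldest=datetime.datetime.utcnow() - datetime.timedelta(days=1))
for ts, meta, data in zip(result.timestamps, result.metadata, result.metric_data):
    print(ts, meta.values.get('platform'), data.child1, data.child2.grandchild1)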
Code Example #8
import datetime
from dataclasses import dataclass
from typing import Optional

import pytest

from daktylos.data import CompositeMetric, Metric, Metadata, MetricDataClass
from daktylos.data_stores.sql import SQLMetricStore

metadata = Metadata.system_info()


@dataclass
class SubMetricData:
    grandchild1: float
    grandchild2: Optional[float] = -1.0


@dataclass
class TestMetricData:
    child1: int
    child2: SubMetricData
    child3: SubMetricData


class TestSQLMetricStore:
    def test_purge_by_date(self, preloaded_datastore: SQLMetricStore):
        SQLCompositeMetric = preloaded_datastore.SQLCompositeMetric
        SQLMetadataSet = preloaded_datastore.SQLMetadataSet
        SQLMetadata = preloaded_datastore.SQLMetadata
        preloaded_datastore.purge_by_date(before=datetime.datetime.utcnow() -