Code example #1
    def retrieve_orderedby_time(self, datasources, gp, entitytype, trend_names,
            entities, start, end, limit=None):

        table_names = get_table_names_v4(datasources, gp, entitytype, start, end)

        return retrieve_orderedby_time(self.conn, schema.name, table_names,
                trend_names, entities, start, end, limit)
Code example #2
File: plugin.py  Project: hendrikx-itc/python-minerva
    def retrieve_orderedby_time(self, datasources, gp, entitytype, trend_names,
            entities, start, end, limit=None):

        with closing(self.conn.cursor()) as cursor:
            if isinstance(entitytype, str):
                entitytype = get_entitytype(cursor, entitytype)

            table_names = get_table_names_v4(cursor, datasources, gp, entitytype,
                    start, end)

        return retrieve_orderedby_time(self.conn, schema.name, table_names,
            trend_names, entities, start, end, limit)
Code example #3
File: plugin.py  Project: hendrikx-itc/python-minerva
    def retrieve(self, datasources, gp, entitytype, trend_names, entities,
        start, end, subquery_filter=None, relation_table_name=None, limit=None):

        with closing(self.conn.cursor()) as cursor:
            if isinstance(entitytype, str):
                entitytype = get_entitytype(cursor, entitytype)

            table_names = get_table_names_v4(cursor, datasources, gp, entitytype,
                    start, end)

        return retrieve(self.conn, schema.name, table_names, trend_names, entities,
            start, end, subquery_filter, relation_table_name, limit,
            entitytype=entitytype)
Code example #4
    def retrieve_related(self, datasources, granularity, source_entitytype,
            target_entitytype, trend_names, start, end, subquery_filter=None,
            limit=None):

        table_names = get_table_names_v4(datasources, granularity, target_entitytype,
                start, end)

        if source_entitytype.name == target_entitytype.name:
            relation_table_name = "self"
        else:
            relation_table_name = "{}->{}".format(
                source_entitytype.name, target_entitytype.name)

        return retrieve_related(self.conn, schema.name, relation_table_name,
            table_names, trend_names, start, end, subquery_filter, limit)
Code example #5
    def retrieve(self, trendstores, trend_names, entities,
        start, end, subquery_filter=None, relation_table_name=None, limit=None):

        if isinstance(trendstores, TrendStore):
            trendstores = [trendstores]

        entitytype = trendstores[0].entitytype

        tables = map(partial(Table, "trend"),
                get_table_names_v4(trendstores, start, end))

        with closing(self.conn.cursor()) as cursor:
            return retrieve(cursor, tables, trend_names, entities,
                    start, end, subquery_filter, relation_table_name, limit,
                    entitytype=entitytype)
Code example #6
File: plugin.py  Project: hendrikx-itc/python-minerva
    def retrieve_related(self, datasources, gp, source_entitytype,
            target_entitytype, trend_names, start, end, subquery_filter=None,
            limit=None):

        with closing(self.conn.cursor()) as cursor:
            if isinstance(target_entitytype, str):
                target_entitytype = get_entitytype(cursor, target_entitytype)

            table_names = get_table_names_v4(cursor, datasources, gp,
                    target_entitytype, start, end)

        if source_entitytype.name == target_entitytype.name:
            relation_table_name = "self"
        else:
            relation_table_name = "{}->{}".format(
                source_entitytype.name, target_entitytype.name)

        return retrieve_related(self.conn, schema.name, relation_table_name,
            table_names, trend_names, start, end, subquery_filter, limit)
Code example #7
File: plugin.py  Project: hendrikx-itc/python-minerva
    def last_modified(self, interval, datasource, granularity, entitytype_name,
            subquery_filter=None):
        """
        Return last modified timestamp for specified datasource, granularity,
        entity type and interval
        :param interval: tuple (start, end) with non-naive timestamps,
            specifying interval to check
        :param datasource: datasource object
        :param granularity: granularity in seconds
        :param entitytype_name: name of entity type
        :param subquery_filter: subquery for additional filtering
            by JOINing on field 'id'
        """
        (start, end) = interval

        with closing(self.conn.cursor()) as cursor:
            entitytype = get_entitytype(cursor, entitytype_name)
            table_names = get_table_names_v4(cursor, [datasource], granularity, entitytype,
                    start, end)

        if subquery_filter:
            query = ("SELECT MAX(t.modified) FROM \"{0}\".\"{1}\" AS t "
                "JOIN ({0}) AS filter ON filter.id = t.entity_id "
                "WHERE t.timestamp > %s AND t.timestamp <= %s ")
        else:
            query = ("SELECT MAX(t.modified) FROM \"{0}\".\"{1}\" AS t "
                "WHERE t.timestamp > %s AND t.timestamp <= %s ")

        modifieds = []
        with closing(self.conn.cursor()) as cursor:
            for table_name in table_names:
                try:
                    # Unused format arguments are ignored when no filter is set.
                    cursor.execute(
                        query.format(schema.name, table_name, subquery_filter),
                        interval)
                    modified, = cursor.fetchone()
                    modifieds.append(modified)
                except (psycopg2.ProgrammingError, psycopg2.InternalError):
                    continue

        if modifieds:
            return max(modifieds)
        else:
            return None
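
A hedged usage sketch of last_modified, following the docstring above; it is not code from the project, and plugin, datasource, the 900-second granularity and the entity type name "Cell" are invented placeholders.

from datetime import datetime, timedelta, timezone

# Hypothetical non-naive interval covering one hour.
end = datetime(2013, 5, 1, 12, 0, tzinfo=timezone.utc)
start = end - timedelta(hours=1)

# plugin and datasource are assumed to exist already; granularity is in seconds.
last = plugin.last_modified((start, end), datasource, 900, "Cell")

if last is None:
    print("no rows modified in the interval")
else:
    print("last modification at {}".format(last))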
Code example #8
File: plugin.py  Project: hendrikx-itc/python-minerva
    def timestamp_exists(self, datasource, gp, entitytype_name, timestamp):
        """
        Returns True when timestamp occurs for specified data source.
        False otherwise.
        """
        with closing(self.conn.cursor()) as cursor:
            entitytype = get_entitytype(cursor, entitytype_name)

            table_name = get_table_names_v4(cursor, [datasource], gp, entitytype,
                timestamp, timestamp)[0]

        query = (
            "SELECT 1 FROM \"{0}\".\"{1}\" WHERE timestamp = %s "
            "LIMIT 1".format(schema.name, table_name))

        with closing(self.conn.cursor()) as cursor:
            try:
                cursor.execute(query, (timestamp,))
                return bool(cursor.rowcount)
            except (psycopg2.ProgrammingError, psycopg2.InternalError):
                return False
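
As another hedged illustration (plugin, datasource, granularity and entity type name are again invented placeholders), timestamp_exists can serve as a cheap guard before issuing a retrieve call:

from datetime import datetime, timezone

timestamp = datetime(2013, 5, 1, 12, 0, tzinfo=timezone.utc)

# plugin and datasource are placeholders, as in the sketch above.
if plugin.timestamp_exists(datasource, 900, "Cell", timestamp):
    print("data available for {}".format(timestamp))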
Code example #9
File: plugin.py  Project: hendrikx-itc/python-minerva
    def count(self, datasource, gp, entitytype_name, interval, filter=None):
        """
        Returns row count for specified datasource, gp, entity type and interval
        """
        (start, end) = interval

        with closing(self.conn.cursor()) as cursor:
            entitytype = get_entitytype(cursor, entitytype_name)

            table_names = get_table_names_v4(cursor, [datasource], gp, entitytype,
                    start, end)

        query = (
            "SELECT COUNT(*) FROM \"{0}\".\"{1}\" "
            "WHERE timestamp > %s AND timestamp <= %s ")

        if filter is not None:
            if len(filter) == 0:
                return 0
            else:
                query += "AND entity_id IN ({0}) ".format(
                    ",".join(str(id) for id in filter))

        args = (start, end)

        count = 0

        with closing(self.conn.cursor()) as cursor:
            for table_name in table_names:
                try:
                    cursor.execute(query.format(schema.name, table_name), args)
                    c, = cursor.fetchone()
                    count += c
                except (psycopg2.ProgrammingError, psycopg2.InternalError):
                    continue

        return count
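
A hedged sketch of how count might be called; the objects, granularity and entity ids are invented. Per the code above, the filter argument restricts the count to the given entity ids, and an empty filter short-circuits to 0.

from datetime import datetime, timedelta, timezone

end = datetime(2013, 5, 1, tzinfo=timezone.utc)
start = end - timedelta(days=1)

# Total row count in the interval, then the count for two specific entity ids.
total = plugin.count(datasource, 900, "Cell", (start, end))
subset = plugin.count(datasource, 900, "Cell", (start, end), filter=[1001, 1002])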
Code example #10
def retrieve_aggregated(conn, datasource, granularity, entitytype,
    column_identifiers, interval, group_by, subquery_filter=None,
    relation_table_name=None):
    """
    Return aggregated data

    :param conn: psycopg2 database connection
    :param datasource: datasource object
    :param granularity: granularity in seconds
    :param entitytype: entitytype object
    :param column_identifiers: e.g. SUM(trend1), MAX(trend2)
    :param interval: (start, end) tuple with non-naive timestamps
    :param group_by: list of columns to GROUP BY
    :param subquery_filter: optional subquery for additional filtering
        by JOINing on field 'id' = entity_id
    :param relation_table_name: optional relation table name for converting
        entity ids to related ones
    """
    start, end = interval

    with closing(conn.cursor()) as cursor:
        source_table_names = get_table_names_v4(cursor, [datasource], granularity,
                entitytype, start, end)

    def get_trend_names(column_identifier):
        if isinstance(column_identifier, Sql):
            return [a.name for a in column_identifier.args]
        else:
            trend_names_part = re.match(r".*\(([\w, ]+)\)", column_identifier).group(1)

            return map(str.strip, trend_names_part.split(","))

    trend_names = set(chain(*map(get_trend_names, column_identifiers)))

    # Deal with the 'samples' column
    if column_exists(conn, SCHEMA, source_table_names[-1], "samples"):
        select_samples_part = "SUM(samples)"
        select_samples_column = "samples,"
    else:
        select_samples_part = "COUNT(*)"
        select_samples_column = ""

    args = {"start": start, "end": end}

    select_parts = []

    for source_table_name in source_table_names:

        join_parts = []

        return_id_field = "entity_id"

        if subquery_filter:
            join_parts.append(
                "JOIN ({0}) AS filter ON filter.id = \"{1}\".{2}.entity_id".format(
                subquery_filter, SCHEMA, enquote_column_name(source_table_name)))

        if relation_table_name:
            return_id_field = "r.target_id AS entity_id"

            join_parts.append(
                "JOIN relation.\"{0}\" r ON r.source_id = \"{1}\".entity_id".format(
                relation_table_name, source_table_name))

        select_parts.append(
            "SELECT {0}, %(end)s, {1} {2} FROM \"{3}\".\"{4}\" {5}"
            " WHERE timestamp > %(start)s AND timestamp <= %(end)s".format(
                return_id_field,
                select_samples_column,
                ",".join(map(enquote_column_name, trend_names)),
                SCHEMA,
                source_table_name,
                " ".join(join_parts)))

    query = ("SELECT entity_id, %(end)s, {0}, {1} FROM( {2} ) "
        "AS sources GROUP BY {3}").format(
            select_samples_part,
            ",".join(map(quote_ident, column_identifiers)),
            " UNION ALL ".join(select_parts),
            ",".join(map(enquote_column_name, group_by)))

    all_rows = []

    with closing(conn.cursor()) as cursor:
        try:
            cursor.execute(query, args)
        except psycopg2.ProgrammingError:
            logging.debug(cursor.mogrify(query, args))
            conn.rollback()
            # TODO: Check error code
        else:
            all_rows = cursor.fetchall()

    return all_rows
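
Finally, a hedged sketch of a retrieve_aggregated call, following its docstring; conn, datasource and entitytype are assumed to be valid psycopg2/Minerva objects, and the trend names are made up.

from datetime import datetime, timedelta, timezone

end = datetime(2013, 5, 1, tzinfo=timezone.utc)
start = end - timedelta(hours=1)

# Column identifiers follow the docstring's "SUM(trend1), MAX(trend2)" form.
rows = retrieve_aggregated(
    conn, datasource, 900, entitytype,
    ["SUM(power)", "MAX(temperature)"],
    (start, end),
    group_by=["entity_id"])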