def quote_ident(ident):
    # enquote_column_name and Sql are helpers from the surrounding project.
    if isinstance(ident, str):
        # Plain column or table name.
        return enquote_column_name(ident)
    elif hasattr(ident, "__iter__"):
        # Iterable of parts, e.g. (schema, table); quote each part and join with dots.
        return ".".join('"{}"'.format(part) for part in ident)
    elif isinstance(ident, Sql):
        # Pre-built SQL fragment; render it as-is.
        return ident.render()
    else:
        raise ValueError("invalid identifier '{}'".format(ident))
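
A minimal usage sketch; the stand-in below is hypothetical, since the real enquote_column_name and Sql come from the surrounding project and may apply different quoting rules:

# Hypothetical stand-in so the sketch runs on its own; the real
# enquote_column_name may behave differently.
def enquote_column_name(name):
    return '"{}"'.format(name)

print(quote_ident("counter_1"))            # "counter_1"
print(quote_ident(("relation", "node")))   # "relation"."node"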
Example #2
import logging
import re
from contextlib import closing
from itertools import chain

import psycopg2

# Helpers such as get_table_names_v4, column_exists, enquote_column_name,
# quote_ident, Sql and SCHEMA come from the surrounding project (not shown here).


def retrieve_aggregated(conn, datasource, granularity, entitytype,
        column_identifiers, interval, group_by, subquery_filter=None,
        relation_table_name=None):
    """
    Return aggregated data

    :param conn: psycopg2 database connection
    :param datasource: datasource object
    :param granularity: granularity in seconds
    :param entitytype: entitytype object
    :param column_identifiers: e.g. SUM(trend1), MAX(trend2)
    :param interval: (start, end) tuple with non-naive timestamps
    :param group_by: list of columns to GROUP BY
    :param subquery_filter: optional subquery for additional filtering
        by JOINing on field 'id' = entity_id
    :param relation_table_name: optional relation table name for converting
        entity ids to related ones
    """
    start, end = interval

    with closing(conn.cursor()) as cursor:
        # Source tables that cover the requested interval for this
        # datasource, granularity and entity type.
        source_table_names = get_table_names_v4(
            cursor, [datasource], granularity, entitytype, start, end)

    def get_trend_names(column_identifier):
        # Extract the bare trend (column) names from a column identifier
        # such as "SUM(trend1)" or an Sql object.
        if isinstance(column_identifier, Sql):
            return [a.name for a in column_identifier.args]
        else:
            trend_names_part = re.match(
                r".*\(([\w, ]+)\)", column_identifier).group(1)

            return [part.strip() for part in trend_names_part.split(",")]

    # All distinct trend names referenced by the column identifiers.
    trend_names = set(chain(*map(get_trend_names, column_identifiers)))

    # If the source table has a pre-aggregated 'samples' column, sum it;
    # otherwise fall back to counting rows.
    if column_exists(conn, SCHEMA, source_table_names[-1], "samples"):
        select_samples_part = "SUM(samples)"
        select_samples_column = "samples,"
    else:
        select_samples_part = "COUNT(*)"
        select_samples_column = ""

    args = {"start": start, "end": end}

    select_parts = []

    for source_table_name in source_table_names:
        join_parts = []

        return_id_field = "entity_id"

        if subquery_filter:
            # Restrict to the entities returned by the filter subquery.
            join_parts.append(
                "JOIN ({0}) AS filter "
                "ON filter.id = \"{1}\".{2}.entity_id".format(
                    subquery_filter, SCHEMA,
                    enquote_column_name(source_table_name)))

        if relation_table_name:
            # Map source entity ids to related (target) entity ids.
            return_id_field = "r.target_id AS entity_id"

            join_parts.append(
                "JOIN relation.\"{0}\" r "
                "ON r.source_id = \"{1}\".entity_id".format(
                    relation_table_name, source_table_name))

        select_parts.append(
            "SELECT {0}, %(end)s, {1} {2} FROM \"{3}\".\"{4}\" {5}"
            " WHERE timestamp > %(start)s AND timestamp <= %(end)s".format(
                return_id_field,
                select_samples_column,
                ", ".join(map(enquote_column_name, trend_names)),
                SCHEMA,
                source_table_name,
                " ".join(join_parts)))

    query = ("SELECT entity_id, %(end)s, {0}, {1} FROM( {2} ) "
        "AS sources GROUP BY {3}").format(
            select_samples_part,
            ",".join(map(quote_ident, column_identifiers)),
            " UNION ALL ".join(select_parts),
            ",".join(map(enquote_column_name, group_by)))

    all_rows = []

    with closing(conn.cursor()) as cursor:
        try:
            cursor.execute(query, args)
        except psycopg2.ProgrammingError:
            # Log the fully rendered query for debugging, roll back so the
            # connection stays usable, and return an empty result.
            logging.debug(cursor.mogrify(query, args))
            conn.rollback()
            # TODO: Check error code
        else:
            all_rows = cursor.fetchall()

    return all_rows
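
A hedged call sketch: conn, datasource and entitytype are placeholders for objects obtained from the surrounding project, and the trend names, 900-second granularity and grouping below are made up for illustration.

from datetime import datetime, timedelta, timezone

# conn, datasource and entitytype are assumed to come from the project's own
# connection and lookup functions; they are placeholders here.
end = datetime.now(timezone.utc)
start = end - timedelta(hours=1)

rows = retrieve_aggregated(
    conn, datasource, 900, entitytype,
    ["SUM(trend1)", "MAX(trend2)"],   # column_identifiers
    (start, end),                     # non-naive (start, end) interval
    ["entity_id"])                    # group_by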