Ejemplo n.º 1
0
class QualList(MetricGroupDef):
    """Datasource feeding the qual grid of a single query."""
    name = "query_quals"
    xaxis = "relname"
    axis_type = "category"
    data_url = r"/metrics/database/([^\/]+)/query/(\d+)/quals"
    filter_ratio = MetricDef(label="Avg filter_ratio (excluding index)", type="percent")
    execution_count = MetricDef(label="Execution count (excluding index)")

    def prepare(self):
        # Qual statistics only exist when pg_qualstats is installed.
        if not self.has_extension("pg_qualstats"):
            raise HTTPError(501, "PG qualstats is not installed")

    @property
    def query(self):
        # Restrict the generic qualstat datasource to the requested query.
        statdata = qualstat_getstatdata()
        inner = inner_cc(statdata)
        return statdata.where(inner.queryid == bindparam("query"))

    def post_process(self, data, database, query, **kwargs):
        # Resolve raw qual rows into readable quals, then attach a
        # drill-down URL to each of them.
        connection = self.connect(database=database)
        data["data"] = resolve_quals(connection, data["data"])
        for qual in data["data"]:
            qual.url = self.reverse_url('QualOverview', database, query,
                                        qual.qualid)
        return data
Ejemplo n.º 2
0
class GlobalDatabasesMetricGroup(MetricGroupDef):
    """Datasource behind the summarized, all-databases graphs."""
    name = "all_databases"
    data_url = r"/metrics/databases_globals/"
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    load = MetricDef(label="Runtime per sec", type="duration")
    total_blks_hit = MetricDef(label="Total hit", type="sizerate")
    total_blks_read = MetricDef(label="Total read", type="sizerate")

    @property
    def query(self):
        # Build the sampled statistics over every database.
        blocksize = block_size.c.block_size
        sampled = powa_getstatdata_sample("db").alias()
        cols = sampled.c

        # greatest(..., 1) guards all the divisions against zero.
        ts = extract("epoch", cols.ts).label("ts")
        avg_runtime = (sum(cols.runtime) /
                       greatest(sum(cols.calls), 1)).label("avg_runtime")
        load = (sum(cols.runtime) /
                greatest(extract("epoch", cols.mesure_interval),
                         1)).label("load")

        query = select([ts, avg_runtime, load,
                        total_read(cols), total_hit(cols)])
        query = query.where(cols.calls != None)
        query = query.group_by(cols.ts, blocksize, cols.mesure_interval)
        query = query.order_by(cols.ts)
        return query.params(samples=100)
Ejemplo n.º 3
0
class OverviewMetricGroup(MetricGroupDef):
    """Datasource backing the "all servers" grid."""
    name = "all_servers"
    xaxis = "srvid"
    axis_type = "category"
    data_url = r"/server/all_servers/"
    hostname = MetricDef(label="Hostname", type="text")
    port = MetricDef(label="Port", type="text")

    @property
    def query(self):
        # Server 0 is the local instance: substitute the host/port the UI
        # itself is connected to for that row.
        return text("""SELECT id AS srvid,
                CASE WHEN id = 0 THEN
                   '<local>'
                ELSE
                   COALESCE(alias, hostname || ':' || port)
                END AS alias,
                CASE WHEN id = 0 THEN :host ELSE hostname END as hostname,
                CASE WHEN id = 0 THEN :port ELSE port END AS port
                FROM powa_servers""").params(host=self.current_host,
                                             port=self.current_port)

    def process(self, val, **kwargs):
        # Each grid row links to the matching server overview page.
        row = dict(val)
        row["url"] = self.reverse_url("ServerOverview", row["srvid"])
        return row
Ejemplo n.º 4
0
class DatabaseOverviewMetricGroup(MetricGroupDef):
    """Metric group for the database global graphs."""
    name = "database_overview"
    xaxis = "ts"
    data_url = r"/metrics/database_overview/([^\/]+)/"
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    load = MetricDef(label="Runtime per sec", type="duration")
    total_blks_hit = MetricDef(label="Total hit", type="sizerate")
    total_blks_read = MetricDef(label="Total read", type="sizerate")

    @property
    def query(self):
        # Sampled per-database statistics, restricted to one database.
        blocksize = block_size.c.block_size
        # The database filter is applied inside the subquery itself.
        sampled = (powa_getstatdata_sample("db")
                   .where(column("datname") == bindparam("database"))
                   .alias())
        cols = sampled.c

        # greatest(..., 1) guards the divisions against zero.
        avg_runtime = (sum(cols.runtime) /
                       greatest(sum(cols.calls), 1.)).label("avg_runtime")
        load = (sum(cols.runtime) /
                greatest(extract("epoch", cols.mesure_interval),
                         1)).label("load")

        return (select([to_epoch(cols.ts), avg_runtime, load,
                        total_read(cols), total_hit(cols)])
                .where(cols.calls != None)
                .group_by(cols.ts, blocksize, cols.mesure_interval)
                .order_by(cols.ts)
                .params(samples=100))
Ejemplo n.º 5
0
class PgSettingsMetricGroup(MetricGroupDef):
    """
    Metric group for the pg_settings grid.
    """

    name = "pg_settings"
    xaxis = "setting_name"
    data_url = r"/config/pg_settings/"
    axis_type = "category"
    setting_value = MetricDef(label="Value", type="string")
    setting_unit = MetricDef(label="Unit", type="string")
    query = """
Ejemplo n.º 6
0
class ConfigChangesQuery(MetricGroupDef):
    """
    Datasource listing PostgreSQL configuration changes for a given query,
    exposing the setting name plus its previous and new values.
    """
    name = "Config Changes"
    data_url = r"/server/(\d+)/database/([^\/]+)/query/(-?\d+)/config_changes"
    xaxis = "ts"
    axis_type = "category"
    # One row per recorded change: setting name, old value, new value.
    setting = MetricDef(label="Name", type="string")
    previous = MetricDef(label="Previous", type="string")
    new = MetricDef(label="New", type="string")
    params = ["server", "database", "query"]

    @property
    def query(self):
        # NOTE(review): the boolean flag is assumed to select the
        # query-level variant of the timeline query -- confirm against
        # get_pgts_query's signature.
        return get_pgts_query(self, True)
Ejemplo n.º 7
0
class PgExtensionsMetricGroup(MetricGroupDef):
    """
    Metric group for the pg_settings grid.
    """

    name = "pg_extensions"
    xaxis = "extname"
    data_url = r"/config/pg_extensions/"
    axis_type = "category"
    available = MetricDef(label="Extension available", type="bool")
    installed = MetricDef(label="Extension installed", type="bool")
    handled = MetricDef(label="Extension handled", type="bool")
    query = """
Ejemplo n.º 8
0
class PgSettingsMetricGroup(MetricGroupDef):
    """Datasource for the pg_settings grid, local or remote server."""

    name = "pg_settings"
    xaxis = "setting_name"
    data_url = r"/config/(\d+)/pg_settings/"
    axis_type = "category"
    setting_value = MetricDef(label="Value", type="string")
    setting_unit = MetricDef(label="Unit", type="string")
    category_value = MetricDef(label="Category", type="string")
    __query = """
             SELECT name as setting_name, setting as setting_value,
             COALESCE(unit,'') AS setting_unit, category as category_value
             FROM pg_settings
             --WHERE name like 'powa%%'
             ORDER BY name"""
    params = ["server"]

    @property
    def query(self):
        # Only the local server (id 0) can be queried directly; remote
        # servers are handled afterwards, in post_process.
        if self.path_args[0] == '0':
            return self.__query
        return None

    def post_process(self, data, server, **kwargs):
        # For remote servers, run the settings query on the target itself.
        if server == '0':
            return data

        rows = None
        try:
            rows = self.execute(self.__query, srvid=server)
        except Exception:
            # best effort only: swallow any connection or remote
            # execution error and report it as an alert below
            pass

        if rows is None:
            return {
                "data": [],
                "messages": {
                    'alert': [
                        "Could not retrieve PostgreSQL settings "
                        "on remote server"
                    ]
                }
            }

        return {"data": [self.process(row) for row in rows]}
Ejemplo n.º 9
0
class WaitSamplingList(MetricGroupDef):
    """Datasource feeding the per-query wait events grid."""
    name = "query_wait_events"
    xaxis = "event"
    axis_type = "category"
    data_url = r"/server/(\d+)/metrics/database/([^\/]+)/query/(-?\d+)/wait_events"
    counts = MetricDef(label="# of events",
                       type="integer",
                       direction="descending")

    def prepare(self):
        # Wait events are collected by pg_wait_sampling on the target server.
        if not self.has_extension(self.path_args[0], "pg_wait_sampling"):
            raise HTTPError(501, "pg_wait_sampling is not installed")

    @property
    def query(self):
        # Start from the detailed per-database wait data, and join the
        # statements list to recover each query's text.
        waitdata = powa_getwaitdata_detailed_db(bindparam("server")).alias()
        wc = waitdata.c
        ps = powa_statements

        joined = waitdata.join(ps, (ps.c.queryid == wc.queryid) &
                               (ps.c.dbid == wc.dbid))
        cols = [wc.queryid, ps.c.query, wc.event_type, wc.event,
                sum(wc.count).label("counts")]

        return (select(cols)
                .select_from(joined)
                .where((wc.datname == bindparam("database")) &
                       (wc.queryid == bindparam("query")))
                .group_by(wc.queryid, ps.c.query, wc.event_type, wc.event)
                .order_by(sum(wc.count).desc()))
Ejemplo n.º 10
0
class ByDatabaseWaitSamplingMetricGroup(MetricGroupDef):
    """Grid datasource: wait sampling events aggregated per database."""
    name = "wait_sampling_by_database"
    xaxis = "datname"
    data_url = r"/metrics/wait_event_by_databases/"
    axis_type = "category"
    counts = MetricDef(label="# of events",
                       type="integer",
                       direction="descending")

    @property
    def query(self):
        # Join the raw wait data to powa_databases to resolve the
        # database names.
        waitdata = powa_getwaitdata_db().alias()
        wc = waitdata.c
        joined = waitdata.join(powa_databases,
                               wc.dbid == powa_databases.c.oid)

        cols = [powa_databases.c.datname,
                wc.event_type,
                wc.event,
                sum(wc.count).label("counts")]
        return (select(cols)
                .select_from(joined)
                .order_by(sum(wc.count).desc())
                .group_by(powa_databases.c.datname, wc.event_type, wc.event))

    def process(self, val, **kwargs):
        # Each grid row links to the matching database overview page.
        row = dict(val)
        row["url"] = self.reverse_url("DatabaseOverview", row["datname"])
        return row
Ejemplo n.º 11
0
class OtherQueriesMetricGroup(MetricGroupDef):
    """Datasource listing the other queries sharing this qual."""
    name = "other_queries"
    xaxis = "queryid"
    axis_type = "category"
    data_url = r"/server/(\d+)/metrics/database/([^\/]+)/query/(-?\d+)/qual/(\d+)/other_queries"
    query_str = MetricDef(label="Query", type="query", url_attr="url")

    @property
    def query(self):
        # Plain SQL: every query on the same server/database that shares
        # the given qual, excluding the query currently displayed.
        return text("""
            SELECT distinct queryid, query,
            query as query_str, pd.srvid
            FROM powa_qualstats_quals pqs
            JOIN powa_statements USING (queryid, dbid, srvid, userid)
            JOIN powa_databases pd ON pd.oid = pqs.dbid AND pd.srvid =
            pqs.srvid
            WHERE qualid = :qual
                AND pqs.queryid != :query
                AND pd.srvid = :server
                AND pd.datname = :database""")

    def process(self, val, database=None, **kwargs):
        # Each row links to the corresponding query overview page.
        row = dict(val)
        row["url"] = self.reverse_url("QueryOverview", row["srvid"], database,
                                      row["queryid"])
        return row
Ejemplo n.º 12
0
class Detail(MetricGroupDef):
    """
    Metric definitions for the per-query detail view: call counts,
    runtimes and block I/O figures.
    """
    calls = MetricDef(label="#Calls", type="string")
    runtime = MetricDef(label="Runtime", type="duration")
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    shared_blks_read = MetricDef(label="Blocks read", type="size")
    shared_blks_hit = MetricDef(label="Blocks hit", type="size")
    shared_blks_dirtied = MetricDef(label="Blocks dirtied", type="size")
    shared_blks_written = MetricDef(label="Blocks written", type="size")
    temp_blks_written = MetricDef(label="Temp Blocks written", type="size")
Ejemplo n.º 13
0
class ByDatabaseMetricGroup(MetricGroupDef):
    """
    Metric group used by the "by database" grid
    """
    name = "by_database"
    xaxis = "datname"
    data_url = r"/server/(\d+)/metrics/by_databases/"
    axis_type = "category"
    calls = MetricDef(label="#Calls", type="number", direction="descending")
    runtime = MetricDef(label="Runtime", type="duration")
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    shared_blks_read = MetricDef(label="Blocks read", type="size")
    shared_blks_hit = MetricDef(label="Blocks hit", type="size")
    shared_blks_dirtied = MetricDef(label="Blocks dirtied", type="size")
    shared_blks_written = MetricDef(label="Blocks written", type="size")
    temp_blks_written = MetricDef(label="Temp Blocks written", type="size")
    io_time = MetricDef(label="I/O time", type="duration")
    params = ["server"]

    @property
    def query(self):
        """Aggregate per-database statement statistics for one server,
        joined to powa_databases to resolve database names."""
        bs = block_size.c.block_size
        inner_query = powa_getstatdata_db(bindparam("server")).alias()
        c = inner_query.c
        from_clause = inner_query.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        return (select([
            powa_databases.c.srvid, powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            # greatest(..., 1) avoids a division by zero when no call
            # was recorded
            round(
                cast(sum(c.runtime), Numeric) / greatest(sum(c.calls), 1),
                2).label("avg_runtime"),
            # mulblock presumably scales block counts by block_size to get
            # sizes in bytes -- confirm against the helper's definition
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time), Numeric),
                  2).label("io_time")
        ]).select_from(from_clause).order_by(sum(c.calls).desc()).group_by(
            powa_databases.c.srvid, powa_databases.c.datname, bs))

    def process(self, val, **kwargs):
        # Each grid row links to the matching database overview page.
        val = dict(val)
        val["url"] = self.reverse_url("DatabaseOverview", val["srvid"],
                                      val["datname"])
        return val
Ejemplo n.º 14
0
class QualConstantsMetricGroup(MetricGroupDef):
    """
    Metric group used for the qual charts.
    """
    name = "QualConstants"
    data_url = r"/server/(\d+)/metrics/database/([^\/]+)/query/(-?\d+)/qual/(\d+)/constants"
    xaxis = "rownumber"
    occurences = MetricDef(label="<%=group%>")
    grouper = "constants"

    @property
    def query(self):
        # Top-10 most used constants for the given qual, with the overall
        # number of occurences appended as an extra correlated column.
        query = (qual_constants(bindparam("server"),
                                "most_used",
                                bindparam("from"),
                                bindparam("to"),
                                text("""
            datname = :database AND
            s.queryid = :query AND
            qn.qualid = :qual AND
            coalesce_range && tstzrange(:from, :to)"""),
                                top=10))
        base = qualstat_getstatdata(bindparam("server"))
        c = inner_cc(base)
        base = base.where(c.queryid == bindparam("query")).alias()
        totals = (
            base.select().where((c.qualid == bindparam("qual"))
                                & (c.queryid == bindparam("query")))).alias()
        return (query.alias().select().column(
            totals.c.occurences.label('total_occurences')).correlate(query))

    def add_params(self, params):
        # The underlying query expects the query id wrapped in a list.
        params['queryids'] = [int(params['query'])]
        return params

    def post_process(self, data, server, database, query, qual, **kwargs):
        """Append an "Others" row aggregating everything but the top-10."""
        if not data['data']:
            return data
        max_rownumber = 0
        total_top10 = 0
        total = None
        d = {'total_occurences': 0}
        for d in data['data']:
            max_rownumber = max(max_rownumber, d['rownumber'])
            total_top10 += d['occurences']
        else:
            # NOTE: this for/else always runs (the loop has no break); it
            # merely reads total_occurences from the last processed row,
            # each row carrying the same overall total.
            total = d['total_occurences']
        data['data'].append({
            'occurences': total - total_top10,
            'rownumber': max_rownumber + 1,
            'constants': 'Others'
        })
        return data
Ejemplo n.º 15
0
class QualConstantsMetricGroup(MetricGroupDef):
    """
    Metric group used for the qual charts.
    """
    name = "QualConstants"
    data_url = r"/metrics/database/(\w+)/query/(\w+)/qual/(\w+)/constants"
    xaxis = "rownumber"
    count = MetricDef(label="<%=group%>")
    grouper = "constants"

    @property
    def query(self):
        # Top-10 most executed constants for the given qual, with the
        # overall count and the queryid appended as extra columns.
        query = (qual_constants("most_executed",
                                text("""
            datname = :database AND
            s.queryid = :query AND
            qn.qualid = :qual AND
            coalesce_range && tstzrange(:from, :to)"""),
                                top=10))
        base = qualstat_getstatdata()
        c = inner_cc(base)
        base = base.where(c.queryid == bindparam("query")).alias()
        totals = (
            base.select().where((c.qualid == bindparam("qual"))
                                & (c.queryid == bindparam("query")))).alias()
        return (query.alias().select().column(
            totals.c.count.label('total_count')).column(
                base.c.queryid).correlate(query))

    def post_process(self, data, database, query, qual, **kwargs):
        """Append an "Others" row aggregating everything but the top-10."""
        if not data['data']:
            return data
        # NOTE(review): conn appears unused below -- confirm whether the
        # connect() call is needed for a side effect before removing it.
        conn = self.connect(database=database)
        max_rownumber = 0
        total_top10 = 0
        total = None
        d = {'total_count': 0}
        for d in data['data']:
            max_rownumber = max(max_rownumber, d['rownumber'])
            total_top10 += d['count']
        else:
            # NOTE: this for/else always runs (the loop has no break); it
            # merely reads total_count from the last processed row.
            total = d['total_count']
        data['data'].append({
            'count': total - total_top10,
            'rownumber': max_rownumber + 1,
            'constants': 'Others'
        })
        return data
Ejemplo n.º 16
0
class ByQueryWaitSamplingMetricGroup(MetricGroupDef):
    """Per-query wait event statistics, displayed on the grid."""
    name = "all_queries_waits"
    xaxis = "query"
    axis_type = "category"
    data_url = r"/server/(\d+)/metrics/database_all_queries_waits/([^\/]+)/"
    counts = MetricDef(label="# of events", type="number",
                       direction="descending")

    @property
    def query(self):
        # Start from the detailed per-database wait data and join the
        # statement list to recover each query's text.
        waitdata = powa_getwaitdata_detailed_db(bindparam("server")).alias()
        wc = waitdata.c
        ps = powa_statements

        joined = waitdata.join(ps,
                               (ps.c.queryid == wc.queryid) &
                               (ps.c.dbid == wc.dbid) &
                               (ps.c.srvid == wc.srvid))
        cols = [wc.srvid, wc.queryid, ps.c.query, wc.event_type, wc.event,
                sum(wc.count).label("counts")]
        return (select(cols)
                .select_from(joined)
                .where(wc.datname == bindparam("database"))
                .group_by(wc.srvid, wc.queryid, ps.c.query, wc.event_type,
                          wc.event)
                .order_by(sum(wc.count).desc()))

    def process(self, val, database=None, **kwargs):
        # Each row links to the corresponding query overview page.
        row = dict(val)
        row["url"] = self.reverse_url("QueryOverview", row["srvid"],
                                      database, row["queryid"])
        return row
Ejemplo n.º 17
0
class DatabaseWaitOverviewMetricGroup(MetricGroupDef):
    """Metric group for the database global wait events graphs."""
    name = "database_waits_overview"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/database_waits_overview/([^\/]+)/"
    # pg 9.6 only metrics
    count_lwlocknamed = MetricDef(label="Lightweight Named",
                                  desc="Number of named lightweight lock"
                                  " wait events")
    count_lwlocktranche = MetricDef(label="Lightweight Tranche",
                                    desc="Number of lightweight lock tranche"
                                    " wait events")
    # pg 10+ metrics
    count_lwlock = MetricDef(label="Lightweight Lock",
                             desc="Number of wait events due to lightweight"
                             " locks")
    count_lock = MetricDef(label="Lock",
                           desc="Number of wait events due to heavyweight"
                           " locks")
    count_bufferpin = MetricDef(label="Buffer pin",
                                desc="Number of wait events due to buffer pin")
    count_activity = MetricDef(label="Activity",
                               desc="Number of wait events due to postgres"
                               " internal processes activity")
    count_client = MetricDef(label="Client",
                             desc="Number of wait events due to client"
                             " activity")
    count_extension = MetricDef(label="Extension",
                                desc="Number wait events due to third-party"
                                " extensions")
    count_ipc = MetricDef(label="IPC",
                          desc="Number of wait events due to inter-process"
                          "communication")
    count_timeout = MetricDef(label="Timeout",
                              desc="Number of wait events due to timeouts")
    count_io = MetricDef(label="IO",
                         desc="Number of wait events due to IO operations")

    def prepare(self):
        # Wait event data is only available when pg_wait_sampling is set up
        # on the target server.
        if not self.has_extension(self.path_args[0], "pg_wait_sampling"):
            raise HTTPError(501, "pg_wait_sampling is not installed")

    @property
    def query(self):
        """Build the sampled wait-events query for a single database."""
        query = powa_getwaitdata_sample(bindparam("server"), "db")
        query = query.where(column("datname") == bindparam("database"))
        query = query.alias()
        c = query.c

        def wps(col):
            # Turn a wait event count into a per-second rate, clamping the
            # sample interval to at least one second.
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (col / ts).label(col.name)

        cols = [to_epoch(c.ts)]

        # The available wait event categories depend on the remote server's
        # major version: pre-10 servers expose a reduced set.
        pg_version_num = self.get_pg_version_num(self.path_args[0])
        if pg_version_num < 100000:
            cols += [
                wps(c.count_lwlocknamed),
                wps(c.count_lwlocktranche),
                wps(c.count_lock),
                wps(c.count_bufferpin)
            ]
        else:
            cols += [
                wps(c.count_lwlock),
                wps(c.count_lock),
                wps(c.count_bufferpin),
                wps(c.count_activity),
                wps(c.count_client),
                wps(c.count_extension),
                wps(c.count_ipc),
                wps(c.count_timeout),
                wps(c.count_io)
            ]

        from_clause = query

        return (select(cols).select_from(from_clause)
                #.where(c.count != None)
                .order_by(c.ts).params(samples=100))
Ejemplo n.º 18
0
class QueryOverviewMetricGroup(MetricGroupDef):
    """
    Metric Group for the graphs on the by query page.
    """
    name = "query_overview"
    xaxis = "ts"
    data_url = r"/metrics/database/([^\/]+)/query/(\d+)"
    rows = MetricDef(label="#Rows")
    calls = MetricDef(label="#Calls")
    shared_blks_read = MetricDef(label="Shared read", type="sizerate")
    shared_blks_hit = MetricDef(label="Shared hit", type="sizerate")
    shared_blks_dirtied = MetricDef(label="Shared dirtied", type="sizerate")
    shared_blks_written = MetricDef(label="Shared written", type="sizerate")
    local_blks_read = MetricDef(label="Local read", type="sizerate")
    local_blks_hit = MetricDef(label="Local hit", type="sizerate")
    local_blks_dirtied = MetricDef(label="Local dirtied", type="sizerate")
    local_blks_written = MetricDef(label="Local written", type="sizerate")
    temp_blks_read = MetricDef(label="Temp read", type="sizerate")
    temp_blks_written = MetricDef(label="Temp written", type="sizerate")
    blk_read_time = MetricDef(label="Read time", type="duration")
    blk_write_time = MetricDef(label="Write time", type="duration")
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    # The following metrics are only available with pg_stat_kcache
    reads = MetricDef(label="Physical read", type="sizerate")
    writes = MetricDef(label="Physical writes", type="sizerate")
    user_time = MetricDef(label="CPU user time / Query time", type="percent")
    system_time = MetricDef(label="CPU system time / Query time",
                            type="percent")
    other_time = MetricDef(label="CPU other time / Query time", type="percent")
    hit_ratio = MetricDef(label="Shared buffers hit ratio", type="percent")
    miss_ratio = MetricDef(label="Shared buffers miss ratio", type="percent")
    sys_hit_ratio = MetricDef(label="System cache hit ratio", type="percent")
    disk_hit_ratio = MetricDef(label="Disk hit ratio", type="percent")

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metric dict, trimmed to what the server can provide.

        Without pg_stat_kcache there is no system-level information, so only
        the plain miss ratio can be computed; with it, the detailed
        system/disk hit ratios replace the plain miss ratio.
        """
        base = cls.metrics.copy()
        if not handler.has_extension("pg_stat_kcache"):
            for key in ("reads", "writes", "user_time", "system_time",
                        "other_time",
                        "sys_hit_ratio", "disk_hit_ratio"):
                base.pop(key)
        else:
            base.pop("miss_ratio")

        return base

    @property
    def query(self):
        """Build the sampled per-query statistics query, optionally joined
        to pg_stat_kcache data when that extension is available."""
        query = powa_getstatdata_sample("query")
        query = query.where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        total_blocks = ((c.shared_blks_read + c.shared_blks_hit)
                        .label("total_blocks"))

        def bps(col):
            # Per-second block rate, clamping the sample interval to at
            # least one second.
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (mulblock(col) / ts).label(col.name)

        cols = [to_epoch(c.ts),
                c.rows,
                c.calls,
                # Guard the ratio against empty samples (no blocks at all).
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_hit, Numeric) * 100 /
                     total_blocks).label("hit_ratio"),
                bps(c.shared_blks_read),
                bps(c.shared_blks_hit),
                bps(c.shared_blks_dirtied),
                bps(c.shared_blks_written),
                bps(c.local_blks_read),
                bps(c.local_blks_hit),
                bps(c.local_blks_dirtied),
                bps(c.local_blks_written),
                bps(c.temp_blks_read),
                bps(c.temp_blks_written),
                c.blk_read_time,
                c.blk_write_time,
                (c.runtime / greatest(c.calls, 1)).label("avg_runtime")]

        from_clause = query
        if self.has_extension("pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample()
            kc = inner_cc(kcache_query)
            kcache_query = (
                kcache_query
                .where(kc.queryid == bindparam("query"))
                .alias())
            kc = kcache_query.c
            # Blocks read by postgres but not actually read from disk were
            # served by the OS cache.
            sys_hits = (greatest(mulblock(c.shared_blks_read) -
                                 kc.reads, 0)
                        .label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (kc.reads /
                              mulblock(total_blocks))
            total_time = greatest(c.runtime, 1)

            # Rusage can return values > real time due to sampling bias
            # aligned to kernel ticks. As such, we have to clamp values to
            # 100%
            def total_time_percent(x):
                return least(100, (x * 100) / total_time)

            cols.extend([
                kc.reads,
                kc.writes,
                total_time_percent(kc.user_time * 1000).label("user_time"),
                total_time_percent(kc.system_time * 1000).label("system_time"),
                greatest(total_time_percent(
                    c.runtime - (kc.user_time + kc.system_time) *
                    1000), 0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")])
            from_clause = from_clause.join(
                kcache_query,
                kcache_query.c.ts == c.ts)
        else:
            # Without kcache, only the plain miss ratio can be computed.
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_read, Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols)
                .select_from(from_clause)
                .where(c.calls != None)
                .order_by(c.ts)
                .params(samples=100))
Ejemplo n.º 19
0
class Totals(MetricGroupDef):
    """Metric definitions for the summarized (total) runtime and I/O graphs."""
    avg_runtime = MetricDef(label="Total runtime", type="duration")
    total_blks_hit = MetricDef(label="Total hit", type="sizerate")
    total_blks_read = MetricDef(label="Total read", type="sizerate")
Ejemplo n.º 20
0
class PowaServersMetricGroup(MetricGroupDef):
    """
    Metric group for the servers list grid.
    """

    name = "powa_servers"
    xaxis = "server_alias"
    data_url = r"/config/powa_servers/"
    axis_type = "category"
    hostname = MetricDef(label="Hostname", type="string")
    port = MetricDef(label="Port", type="string")
    username = MetricDef(label="User name", type="string")
    password = MetricDef(label="Password", type="string")
    dbname = MetricDef(label="Database name", type="string")
    frequency = MetricDef(label="Frequency", type="string")
    retention = MetricDef(label="Retention", type="string")
    powa_coalesce = MetricDef(label="Powa coalesce", type="string")
    allow_ui_connection = MetricDef(label="Allow UI connection", type="bool")
    snapts = MetricDef(label="Last snapshot", type="string")
    no_err = MetricDef(label="Error", type="bool")
    collector_status = MetricDef(label="Collector Status", type="string")

    # One row per registered server.  Server 0 is the local instance; its
    # password is masked, and the collector status is derived from
    # pg_stat_activity (remote statuses are filled in by post_process).
    query = """SELECT id,
     CASE WHEN s.id = 0 THEN
        '<local>'
     ELSE
        COALESCE(alias,
          s.username || '@' || s.hostname || ':' || s.port || '/' || s.dbname)
     END AS server_alias,
     s.hostname, s.port, s.username,
     CASE WHEN s.password IS NULL THEN '<NULL>' ELSE '********' END AS password,
     s.dbname, s.frequency, s.retention::text AS retention,
     s.powa_coalesce::text AS powa_coalesce, s.allow_ui_connection,
     CASE WHEN coalesce(m.snapts, '-infinity') = '-infinity'::timestamptz THEN
        NULL
     ELSE
        clock_timestamp() - m.snapts
     END::text AS last_snapshot,
     CASE WHEN coalesce(m.snapts, '-infinity') = '-infinity'::timestamptz THEN
        NULL
     ELSE
        m.snapts
     END AS snapts,
     errors IS NULL AS no_err,
     CASE WHEN s.id = 0 THEN
       coalesce(a.val, 'stopped')
     ELSE
       'unknown'
     END AS collector_status
     FROM powa_servers s
     LEFT JOIN powa_snapshot_metas m ON s.id = m.srvid
     LEFT JOIN (SELECT
        CASE WHEN current_setting('powa.frequency') = '-1' THEN 'disabled'
            ELSE 'running'
        END AS val, application_name
       FROM pg_stat_activity
     ) a ON a.application_name LIKE 'PoWA - %%'
     ORDER BY 2"""

    def process(self, val, **kwargs):
        # Each grid row links to the matching server configuration page.
        val = dict(val)
        val["url"] = self.reverse_url("RemoteConfigOverview", val["id"])
        return val

    def post_process(self, data, **kwargs):
        """Ask the collector for the remote workers' status and merge the
        answer into the grid rows, best effort."""
        if (len(data["data"])):
            raw = self.notify_collector('WORKERS_STATUS', timeout=1)
            if (not raw):
                return data

            line = None
            # get the first correct response only, if multiple answers were
            # returned
            while (line is None and len(raw) > 0):
                tmp = raw.pop(0)
                if ("OK" in tmp):
                    line = tmp["OK"]

            # nothing correct, give up
            if (line is None or line == {}):
                return data

            # The collector answers with a JSON mapping of srvid (as text)
            # to a status string.
            stats = json.loads(line)

            for row in data["data"]:
                srvid = str(row["id"])
                if srvid in stats:
                    row["collector_status"] = stats[srvid]

        return data
Ejemplo n.º 21
0
class QueryOverviewMetricGroup(MetricGroupDef):
    """
    Metric Group for the graphs on the by query page.

    Exposes per-query statistics sampled from pg_stat_statements, and, when
    the pg_stat_kcache extension is available on the target server, system
    level metrics (CPU time, physical reads/writes, page faults...).
    """
    name = "query_overview"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/database/([^\/]+)/query/(-?\d+)"
    rows = MetricDef(label="#Rows",
                     desc="Sum of the number of rows returned by the query"
                     " per second")
    calls = MetricDef(label="#Calls",
                      desc="Number of time the query has been executed"
                      " per second")
    shared_blks_read = MetricDef(label="Shared read",
                                 type="sizerate",
                                 desc="Amount of data found in OS cache or"
                                 " read from disk")
    shared_blks_hit = MetricDef(label="Shared hit",
                                type="sizerate",
                                desc="Amount of data found in shared buffers")
    shared_blks_dirtied = MetricDef(label="Shared dirtied",
                                    type="sizerate",
                                    desc="Amount of data modified in shared"
                                    " buffers")
    shared_blks_written = MetricDef(label="Shared written",
                                    type="sizerate",
                                    desc="Amount of shared buffers written to"
                                    " disk")
    local_blks_read = MetricDef(label="Local read",
                                type="sizerate",
                                desc="Amount of local buffers found from OS"
                                " cache or read from disk")
    local_blks_hit = MetricDef(label="Local hit",
                               type="sizerate",
                               desc="Amount of local buffers found in shared"
                               " buffers")
    local_blks_dirtied = MetricDef(label="Local dirtied",
                                   type="sizerate",
                                   desc="Amount of data modified in local"
                                   " buffers")
    local_blks_written = MetricDef(label="Local written",
                                   type="sizerate",
                                   desc="Amount of local buffers written to"
                                   " disk")
    temp_blks_read = MetricDef(label="Temp read",
                               type="sizerate",
                               desc="Amount of data read from temporary file")
    temp_blks_written = MetricDef(label="Temp written",
                                  type="sizerate",
                                  desc="Amount of data written to temporary"
                                  " file")
    blk_read_time = MetricDef(label="Read time",
                              type="duration",
                              desc="Time spent reading data")
    blk_write_time = MetricDef(label="Write time",
                               type="duration",
                               desc="Time spent writing data")
    avg_runtime = MetricDef(label="Avg runtime",
                            type="duration",
                            desc="Average query duration")
    hit_ratio = MetricDef(label="Shared buffers hit ratio",
                          type="percent",
                          desc="Percentage of data found in shared buffers")
    miss_ratio = MetricDef(label="Shared buffers miss ratio",
                           type="percent",
                           desc="Percentage of data found in OS cache or read"
                           " from disk")

    reads = MetricDef(label="Physical read",
                      type="sizerate",
                      desc="Amount of data read from disk")
    writes = MetricDef(label="Physical writes",
                       type="sizerate",
                       desc="Amount of data written to disk")
    user_time = MetricDef(label="CPU user time / Query time",
                          type="percent",
                          desc="CPU time spent executing the query")
    system_time = MetricDef(label="CPU system time / Query time",
                            type="percent",
                            desc="CPU time used by the OS")
    other_time = MetricDef(label="CPU other time / Query time",
                           type="percent",
                           desc="Time spent otherwise")
    sys_hit_ratio = MetricDef(label="System cache hit ratio",
                              type="percent",
                              desc="Percentage of data found in OS cache")
    disk_hit_ratio = MetricDef(label="Disk hit ratio",
                               type="percent",
                               desc="Percentage of data read from disk")
    minflts = MetricDef(label="Soft page faults",
                        type="number",
                        desc="Memory pages not found in the processor's MMU")
    majflts = MetricDef(label="Hard page faults",
                        type="number",
                        desc="Memory pages not found in memory and loaded"
                        " from storage")
    # not maintained on GNU/Linux, and not available on Windows
    # nswaps = MetricDef(label="Swaps", type="number")
    # msgsnds = MetricDef(label="IPC messages sent", type="number")
    # msgrcvs = MetricDef(label="IPC messages received", type="number")
    # nsignals = MetricDef(label="Signals received", type="number")
    nvcsws = MetricDef(label="Voluntary context switches",
                       type="number",
                       desc="Number of voluntary context switches")
    nivcsws = MetricDef(label="Involuntary context switches",
                        type="number",
                        desc="Number of involuntary context switches")

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metrics applicable to the target server.

        The pg_stat_kcache metrics are only exposed if the extension is
        installed on the server; otherwise only the plain shared-buffers
        miss_ratio is kept (the detailed sys/disk hit ratios require
        kcache's physical read counters).
        """
        base = cls.metrics.copy()
        if not handler.has_extension(params["server"], "pg_stat_kcache"):
            for key in (
                    "reads",
                    "writes",
                    "user_time",
                    "system_time",
                    "other_time",
                    "sys_hit_ratio",
                    "disk_hit_ratio",
                    "minflts",
                    "majflts",
                    # "nswaps", "msgsnds", "msgrcvs", "nsignals",
                    "nvcsws",
                    "nivcsws"):
                base.pop(key)
        else:
            base.pop("miss_ratio")

        return base

    @property
    def query(self):
        """Build the sampled per-query statistics query.

        All counters are normalized per second of measure interval, and
        block counters are multiplied by the block size so the UI can
        display sizes.  When pg_stat_kcache is available, its sampled data
        is joined on (ts, queryid, userid, dbid) to derive system-level
        metrics and a finer-grained hit-ratio breakdown.
        """
        query = powa_getstatdata_sample("query", bindparam("server"))
        query = query.where((column("datname") == bindparam("database"))
                            & (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        total_blocks = ((sum(c.shared_blks_read) +
                         sum(c.shared_blks_hit)).label("total_blocks"))

        def get_ts():
            # Clamp the interval to at least 1s to avoid division by zero.
            return extract("epoch", greatest(c.mesure_interval, '1 second'))

        def sumps(col):
            # Sum of a counter, normalized per second.
            return (sum(col) / get_ts()).label(col.name)

        def bps(col):
            # Sum of a block counter, converted to bytes, per second.
            return (mulblock(sum(col)) / get_ts()).label(col.name)

        cols = [
            to_epoch(c.ts),
            sumps(c.rows),
            sumps(c.calls),
            case([(total_blocks == 0, 0)],
                 else_=cast(sum(c.shared_blks_hit), Numeric) * 100 /
                 total_blocks).label("hit_ratio"),
            bps(c.shared_blks_read),
            bps(c.shared_blks_hit),
            bps(c.shared_blks_dirtied),
            bps(c.shared_blks_written),
            bps(c.local_blks_read),
            bps(c.local_blks_hit),
            bps(c.local_blks_dirtied),
            bps(c.local_blks_written),
            bps(c.temp_blks_read),
            bps(c.temp_blks_written),
            sumps(c.blk_read_time),
            sumps(c.blk_write_time),
            (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime")
        ]

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample("query")
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                (kc.srvid == bindparam("server"))
                & (kc.datname == bindparam("database"))
                & (kc.queryid == bindparam("query"))).alias())
            kc = kcache_query.c
            # Blocks found in shared buffers or the OS cache: anything read
            # at the PostgreSQL level that kcache did not see as a physical
            # read.  Clamped to 0 against sampling skew.
            sys_hits = (greatest(
                mulblock(sum(c.shared_blks_read)) - sum(kc.reads),
                0).label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (sum(kc.reads) * 100 / mulblock(total_blocks))
            total_time = greatest(sum(c.runtime), 1)

            def total_time_percent(x):
                # Rusage can return values > real time due to sampling bias
                # aligned to kernel ticks. As such, we have to clamp values
                # to 100%.
                return least(100, (x * 100) / total_time)

            cols.extend([
                sumps(kc.reads),
                sumps(kc.writes),
                sumps(kc.minflts),
                sumps(kc.majflts),
                # sumps(kc.nswaps),
                # sumps(kc.msgsnds),
                # sumps(kc.msgrcvs),
                # sumps(kc.nsignals),
                sumps(kc.nvcsws),
                sumps(kc.nivcsws),
                # kcache times are in seconds, runtime in milliseconds.
                total_time_percent(sum(kc.user_time) *
                                   1000).label("user_time"),
                total_time_percent(sum(kc.system_time) *
                                   1000).label("system_time"),
                greatest(
                    total_time_percent(
                        sum(c.runtime) -
                        ((sum(kc.user_time) + sum(kc.system_time)) * 1000)),
                    0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")
            ])
            from_clause = from_clause.join(
                kcache_query,
                and_(kcache_query.c.ts == c.ts,
                     kcache_query.c.queryid == c.queryid,
                     kcache_query.c.userid == c.userid,
                     kcache_query.c.dbid == c.dbid))
        else:
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(sum(c.shared_blks_read), Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols).select_from(from_clause).where(
            c.calls != '0').group_by(c.ts, block_size.c.block_size,
                                     c.mesure_interval).order_by(
                                         c.ts).params(samples=100))
Ejemplo n.º 22
0
class PgExtensionsMetricGroup(MetricGroupDef):
    """
    Metric group for the pg_extensions grid.

    Shows, for each extension PoWA knows about, whether it is available,
    installed and sampled on the selected server.
    """

    name = "pg_extensions"
    xaxis = "extname"
    data_url = r"/config/(\d+)/pg_extensions/"
    axis_type = "category"
    available = MetricDef(label="Available", type="bool")
    installed = MetricDef(label="Installed", type="bool")
    handled = MetricDef(label="Sampled", type="bool")
    extversion = MetricDef(label="Version", type="string")
    params = ["server"]

    @property
    def query(self):
        # Server id 0 is the local repository server: availability and
        # installed version can be checked directly on this connection.
        if (self.path_args[0] == '0'):
            return """
            SELECT DISTINCT s.extname,
              CASE WHEN avail.name IS NULL then false ELSE true END AS available,
              CASE WHEN ins.extname IS NULL then false ELSE true END AS installed,
              CASE WHEN f.module IS NULL then false ELSE true END AS handled,
              COALESCE(ins.extversion, '-') AS extversion
            FROM (
                 SELECT 'pg_stat_statements' AS extname
                 UNION SELECT 'pg_qualstats'
                 UNION SELECT 'pg_stat_kcache'
                 UNION SELECT 'pg_track_settings'
                 UNION SELECT 'hypopg'
                 UNION SELECT 'powa'
                 UNION SELECT 'pg_wait_sampling'
            ) s
            LEFT JOIN pg_available_extensions avail on s.extname = avail.name
            LEFT JOIN pg_extension ins on s.extname = ins.extname
            LEFT JOIN powa_functions f ON s.extname = f.module
                AND f.srvid = 0
            ORDER BY 1
             """
        else:
            # Remote server: only the sampling status is known locally.
            # Availability and versions are fetched live in post_process.
            return """
            SELECT DISTINCT s.extname,
              '-' AS extversion,
              CASE WHEN f.module IS NULL then false ELSE true END AS handled
            FROM (
                 SELECT 'pg_stat_statements' AS extname
                 UNION SELECT 'pg_qualstats'
                 UNION SELECT 'pg_stat_kcache'
                 UNION SELECT 'pg_track_settings'
                 UNION SELECT 'hypopg'
                 UNION SELECT 'powa'
                 UNION SELECT 'pg_wait_sampling'
            ) s
            LEFT JOIN powa_functions f ON s.extname = f.module
                AND f.srvid = %(server)s
            ORDER BY 1
             """

    def post_process(self, data, server, **kwargs):
        """Complete the rows for remote servers with live extension info.

        Connects to the remote server (best-effort) to fetch extension
        availability and installed versions, merges them into the rows, and
        raises an alert for any extension that is sampled but not installed.
        """
        # Local server rows are already complete.
        if (server == '0'):
            return data

        res = None
        try:
            res = self.execute("""
            SELECT DISTINCT s.extname,
              CASE WHEN avail.name IS NULL then false ELSE true END AS available,
              CASE WHEN ins.extname IS NULL then false ELSE true END AS installed,
              COALESCE(ins.extversion, '-') AS extversion
            FROM (
                 SELECT 'pg_stat_statements' AS extname
                 UNION SELECT 'pg_qualstats'
                 UNION SELECT 'pg_stat_kcache'
                 UNION SELECT 'pg_track_settings'
                 UNION SELECT 'hypopg'
                 UNION SELECT 'powa'
                 UNION SELECT 'pg_wait_sampling'
            ) s
            LEFT JOIN pg_available_extensions avail on s.extname = avail.name
            LEFT JOIN pg_extension ins on s.extname = ins.extname
            ORDER BY 1
                    """,
                               srvid=server)
        except Exception:
            # ignore any connection or remote execution error
            pass

        # if we couldn't connect to the remote server, send what we have
        if (res is None):
            data["messages"] = {
                'alert':
                ["Could not retrieve extensions" + " on remote server"]
            }
            return data

        remote_exts = res.fetchall()

        alerts = []
        for ext in data["data"]:
            # Merge the remote row matching this extension, if any.
            for r in remote_exts:
                if (r["extname"] == ext["extname"]):
                    ext["available"] = r["available"]
                    ext["installed"] = r["installed"]
                    ext["extversion"] = r["extversion"]
                    break

            # Sampled by PoWA but not installed remotely: worth an alert.
            if (ext["handled"] and not ext["installed"]):
                alerts.append(ext["extname"])

        if (len(alerts) > 0):
            data["messages"] = {
                'alert': [("%d extensions need to be installed:%s" %
                           (len(alerts), ' '.join(alerts)))]
            }

        return data
Ejemplo n.º 23
0
class GlobalBgwriterMetricGroup(MetricGroupDef):
    """
    Metric group used by bgwriter graphs.

    Exposes the sampled content of pg_stat_bgwriter, with counters
    normalized per second and buffer counts converted to sizes.
    """
    name = "bgwriter"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/bgwriter/"
    checkpoints_timed = MetricDef(label="# of scheduled checkpoints",
                                  type="number",
                                  desc="Number of scheduled checkpoints that"
                                  " have been performed")
    checkpoints_req = MetricDef(label="# of requested checkpoints",
                                type="number",
                                desc="Number of requested checkpoints that"
                                " have been performed")
    checkpoint_write_time = MetricDef(label="Write time",
                                      type="duration",
                                      desc="Total amount of time that has been"
                                      " spent in the portion of checkpoint"
                                      " processing where files are written to"
                                      " disk, in milliseconds")
    checkpoint_sync_time = MetricDef(label="Sync time",
                                     type="duration",
                                     desc="Total amount of time that has been"
                                     " spent in the portion of checkpoint"
                                     " processing where files are synchronized"
                                     " to disk, in milliseconds")
    buffers_checkpoint = MetricDef(label="Buffers checkpoint",
                                   type="sizerate",
                                   desc="Number of buffers written during"
                                   " checkpoints")
    buffers_clean = MetricDef(label="Buffers clean",
                              type="sizerate",
                              desc="Number of buffers written by the"
                              " background writer")
    maxwritten_clean = MetricDef(label="Maxwritten clean",
                                 type="number",
                                 desc="Number of times the background writer"
                                 " stopped a cleaning scan because it had"
                                 " written too many buffers")
    buffers_backend = MetricDef(label="Buffers backend",
                                type="sizerate",
                                desc="Number of buffers written directly by a"
                                " backend")
    buffers_backend_fsync = MetricDef(
        label="Buffers backend fsync",
        type="number",
        desc="Number of times a backend had to"
        " execute its own fsync call"
        " (normally the background writer handles"
        " those even when the backend does its"
        " own write)")
    buffers_alloc = MetricDef(label="Buffers alloc",
                              type="sizerate",
                              desc="Number of buffers allocated")

    @property
    def query(self):
        """Build the sampled pg_stat_bgwriter query, normalized per second."""
        bs = block_size.c.block_size
        query = powa_get_bgwriter_sample(bindparam("server"))
        query = query.alias()
        c = query.c

        def sum_per_sec(col):
            # Clamp the interval to at least 1s to avoid division by zero.
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (sum(col) / ts).label(col.name)

        cols = [
            c.srvid,
            extract("epoch", c.ts).label("ts"),
            # Checkpoint counters are displayed as plain sums, not rates.
            sum(c.checkpoints_timed).label("checkpoints_timed"),
            sum(c.checkpoints_req).label("checkpoints_req"),
            sum_per_sec(c.checkpoint_write_time),
            sum_per_sec(c.checkpoint_sync_time),
            # Buffer counters are multiplied by the block size so the UI
            # can display sizes rather than raw block counts.
            sum_per_sec(mulblock(c.buffers_checkpoint)),
            sum_per_sec(mulblock(c.buffers_clean)),
            sum_per_sec(c.maxwritten_clean),
            sum_per_sec(mulblock(c.buffers_backend)),
            sum_per_sec(c.buffers_backend_fsync),
            sum_per_sec(mulblock(c.buffers_alloc))
        ]

        return (select(cols).select_from(query).group_by(
            c.srvid, c.ts, bs,
            c.mesure_interval).order_by(c.ts).params(samples=100))
Ejemplo n.º 24
0
class ByQueryMetricGroup(MetricGroupDef):
    """Metric group for indivual query stats (displayed on the grid)."""
    name = "all_queries"
    xaxis = "queryid"
    axis_type = "category"
    data_url = r"/server/(\d+)/metrics/database_all_queries/([^\/]+)/"
    calls = MetricDef(label="#", type="integer")
    plantime = MetricDef(label="Plantime", type="duration")
    runtime = MetricDef(label="Time", type="duration", direction="descending")
    avg_runtime = MetricDef(label="Avg time", type="duration")
    blks_read_time = MetricDef(label="Read", type="duration")
    blks_write_time = MetricDef(label="Write", type="duration")
    shared_blks_read = MetricDef(label="Read", type="size")
    shared_blks_hit = MetricDef(label="Hit", type="size")
    shared_blks_dirtied = MetricDef(label="Dirtied", type="size")
    shared_blks_written = MetricDef(label="Written", type="size")
    temp_blks_read = MetricDef(label="Read", type="size")
    temp_blks_written = MetricDef(label="Written", type="size")
    wal_records = MetricDef(label="#Wal records", type="integer")
    wal_fpi = MetricDef(label="#Wal FPI", type="integer")
    wal_bytes = MetricDef(label="Wal bytes", type="size")

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metrics applicable to the target server.

        Planning time and WAL statistics only exist in pg_stat_statements
        1.8 and later; drop them for older versions.
        """
        base = cls.metrics.copy()

        if not handler.has_extension_version(handler.path_args[0],
                                             'pg_stat_statements', '1.8'):
            for key in ("plantime", "wal_records", "wal_fpi", "wal_bytes"):
                base.pop(key)
        return base

    # TODO: refactor with GlobalDatabasesMetricGroup
    @property
    def query(self):
        """Build the aggregated per-query statistics query for one database."""
        # Working from the statdata detailed_db base query
        inner_query = powa_getstatdata_detailed_db(bindparam("server"))
        inner_query = inner_query.alias()
        c = inner_query.c
        ps = powa_statements
        # Multiply each measure by the size of one block.
        columns = [
            c.srvid, c.queryid, ps.c.query,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
            sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
            sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
            sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
            sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
            sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
            (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
            sum(c.blk_read_time).label("blks_read_time"),
            sum(c.blk_write_time).label("blks_write_time")
        ]

        # Columns that only exist in pg_stat_statements >= 1.8; must match
        # the metrics kept by _get_metrics above.
        if self.has_extension_version(self.path_args[0], 'pg_stat_statements',
                                      '1.8'):
            columns.extend([
                sum(c.plantime).label("plantime"),
                sum(c.wal_records).label("wal_records"),
                sum(c.wal_fpi).label("wal_fpi"),
                sum(c.wal_bytes).label("wal_bytes")
            ])

        # Join on powa_statements to retrieve the query text.
        from_clause = inner_query.join(
            ps, (ps.c.srvid == c.srvid) & (ps.c.queryid == c.queryid) &
            (ps.c.userid == c.userid) & (ps.c.dbid == c.dbid))
        return (select(columns).select_from(from_clause).where(
            c.datname == bindparam("database")).group_by(
                c.srvid, c.queryid,
                ps.c.query).order_by(sum(c.runtime).desc()))

    def process(self, val, database=None, **kwargs):
        """Add a link to the per-query page for each grid row."""
        val = dict(val)
        val["url"] = self.reverse_url("QueryOverview", val["srvid"], database,
                                      val["queryid"])
        return val
Ejemplo n.º 25
0
class DatabaseAllRelMetricGroup(MetricGroupDef):
    """
    Metric group used by "Database objects" graphs.

    Per-database relation activity (scans, tuple churn, vacuum/analyze
    runs), sampled over time and normalized per second.
    """
    name = "all_relations"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/database_all_relations/([^\/]+)/"
    idx_ratio = MetricDef(label="Index scans ratio",
                          type="percent",
                          desc="Ratio of index scan / seq scan")
    idx_scan = MetricDef(label="Index scans",
                         type="number",
                         desc="Number of index scan per second")
    seq_scan = MetricDef(label="Sequential scans",
                         type="number",
                         desc="Number of sequential scan per second")
    n_tup_ins = MetricDef(label="Tuples inserted",
                          type="number",
                          desc="Number of tuples inserted per second")
    n_tup_upd = MetricDef(label="Tuples updated",
                          type="number",
                          desc="Number of tuples updated per second")
    n_tup_hot_upd = MetricDef(label="Tuples HOT updated",
                              type="number",
                              desc="Number of tuples HOT updated per second")
    n_tup_del = MetricDef(label="Tuples deleted",
                          type="number",
                          desc="Number of tuples deleted per second")
    vacuum_count = MetricDef(label="# Vacuum",
                             type="number",
                             desc="Number of vacuum per second")
    autovacuum_count = MetricDef(label="# Autovacuum",
                                 type="number",
                                 desc="Number of autovacuum per second")
    analyze_count = MetricDef(label="# Analyze",
                              type="number",
                              desc="Number of analyze per second")
    autoanalyze_count = MetricDef(label="# Autoanalyze",
                                  type="number",
                                  desc="Number of autoanalyze per second")

    @property
    def query(self):
        """Build the sampled relation statistics query for one database."""
        sampled = powa_get_all_tbl_sample(bindparam("server")).alias()
        c = sampled.c

        # Clamp the interval to at least 1s to avoid division by zero.
        interval = extract("epoch", greatest(c.mesure_interval, '1 second'))

        def rate(col):
            return (sum(col) / interval).label(col.name)

        total_scans = sum(c.idx_scan + c.seq_scan)
        idx_ratio = case([(total_scans == 0, 0)],
                         else_=cast(sum(c.idx_scan), Numeric) * 100 /
                         total_scans).label("idx_ratio")

        cols = [c.srvid, extract("epoch", c.ts).label("ts"), idx_ratio]
        cols += [rate(col) for col in (c.idx_scan,
                                       c.seq_scan,
                                       c.n_tup_ins,
                                       c.n_tup_upd,
                                       c.n_tup_hot_upd,
                                       c.n_tup_del,
                                       c.vacuum_count,
                                       c.autovacuum_count,
                                       c.analyze_count,
                                       c.autoanalyze_count)]

        return (select(cols)
                .select_from(sampled)
                .where(c.datname == bindparam("database"))
                .group_by(c.srvid, c.ts, c.mesure_interval)
                .order_by(c.ts)
                .params(samples=100))
Ejemplo n.º 26
0
class ByQueryMetricGroup(MetricGroupDef):
    """Per-query statistics shown on the database grid."""
    name = "all_queries"
    xaxis = "queryid"
    axis_type = "category"
    data_url = r"/server/(\d+)/metrics/database_all_queries/([^\/]+)/"
    calls = MetricDef(label="#", type="number")
    runtime = MetricDef(label="Time", type="duration", direction="descending")
    avg_runtime = MetricDef(label="Avg time", type="duration")
    blks_read_time = MetricDef(label="Read", type="duration")
    blks_write_time = MetricDef(label="Write", type="duration")
    shared_blks_read = MetricDef(label="Read", type="size")
    shared_blks_hit = MetricDef(label="Hit", type="size")
    shared_blks_dirtied = MetricDef(label="Dirtied", type="size")
    shared_blks_written = MetricDef(label="Written", type="size")
    temp_blks_read = MetricDef(label="Read", type="size")
    temp_blks_written = MetricDef(label="Written", type="size")

    # TODO: refactor with GlobalDatabasesMetricGroup
    @property
    def query(self):
        """Aggregate the detailed per-database statdata by query."""
        stats = powa_getstatdata_detailed_db(bindparam("server")).alias()
        c = stats.c
        ps = powa_statements

        def size_sum(col):
            # Block counters are scaled by the block size to yield bytes.
            return sum(mulblock(col)).label(col.name)

        columns = [
            c.srvid,
            c.queryid,
            ps.c.query,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            size_sum(c.shared_blks_read),
            size_sum(c.shared_blks_hit),
            size_sum(c.shared_blks_dirtied),
            size_sum(c.shared_blks_written),
            size_sum(c.temp_blks_read),
            size_sum(c.temp_blks_written),
            (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
            sum(c.blk_read_time).label("blks_read_time"),
            sum(c.blk_write_time).label("blks_write_time"),
        ]

        # Join on powa_statements to retrieve the query text.
        joined = stats.join(ps, (ps.c.queryid == c.queryid) &
                            (ps.c.userid == c.userid) &
                            (ps.c.dbid == c.dbid))

        return (select(columns)
                .select_from(joined)
                .where(c.datname == bindparam("database"))
                .group_by(c.srvid, c.queryid, ps.c.query)
                .order_by(sum(c.runtime).desc()))

    def process(self, val, database=None, **kwargs):
        """Add a link to the per-query page for each grid row."""
        row = dict(val)
        row["url"] = self.reverse_url("QueryOverview", row["srvid"], database,
                                      row["queryid"])
        return row
Ejemplo n.º 27
0
class DatabaseOverviewMetricGroup(MetricGroupDef):
    """Metric group for the database global graphs.

    Exposes sampled, per-timestamp aggregates for a single database.
    The pg_stat_kcache metrics are only exposed when the extension is
    available (see _get_metrics).
    """
    name = "database_overview"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/database_overview/([^\/]+)/"
    avg_runtime = MetricDef(label="Avg runtime",
                            type="duration",
                            desc="Average query duration")
    calls = MetricDef(label="Queries per sec",
                      type="number",
                      desc="Number of time the query has been executed, "
                      "per second")
    load = MetricDef(label="Runtime per sec",
                     type="duration",
                     desc="Total duration of queries executed, per second")
    total_blks_hit = MetricDef(label="Total shared buffers hit",
                               type="sizerate",
                               desc="Amount of data found in shared buffers")
    total_blks_read = MetricDef(label="Total shared buffers miss",
                                type="sizerate",
                                desc="Amount of data found in OS cache or"
                                " read from disk")

    total_sys_hit = MetricDef(label="Total system cache hit",
                              type="sizerate",
                              desc="Amount of data found in OS cache")
    total_disk_read = MetricDef(label="Total disk read",
                                type="sizerate",
                                desc="Amount of data read from disk")
    minflts = MetricDef(label="Soft page faults",
                        type="number",
                        desc="Memory pages not found in the processor's MMU")
    majflts = MetricDef(label="Hard page faults",
                        type="number",
                        desc="Memory pages not found in memory and loaded"
                        " from storage")
    # not maintained on GNU/Linux, and not available on Windows
    # nswaps = MetricDef(label="Swaps", type="number")
    # msgsnds = MetricDef(label="IPC messages sent", type="number")
    # msgrcvs = MetricDef(label="IPC messages received", type="number")
    # nsignals = MetricDef(label="Signals received", type="number")
    nvcsws = MetricDef(label="Voluntary context switches",
                       type="number",
                       desc="Number of voluntary context switches")
    nivcsws = MetricDef(label="Involuntary context switches",
                        type="number",
                        desc="Number of involuntary context switches")

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metrics to expose, depending on installed extensions.

        Without pg_stat_kcache the system-level metrics are removed;
        with it, the coarser shared-buffers miss metric is dropped in
        favor of the finer system cache hit / disk read split.
        """
        base = cls.metrics.copy()
        if not handler.has_extension(params["server"], "pg_stat_kcache"):
            for key in (
                    "total_sys_hit",
                    "total_disk_read",
                    "minflts",
                    "majflts",
                    # "nswaps", "msgsnds", "msgrcvs", "nsignals",
                    "nvcsws",
                    "nivcsws"):
                base.pop(key)
        else:
            base.pop("total_blks_read")

        return base

    @property
    def query(self):
        """Build the sampled overview query for the selected database.

        The base per-db sample query is filtered on the database name,
        aggregated per timestamp, and optionally joined with the
        pg_stat_kcache sample query to add system-level metrics.
        """
        # Fetch the base query for sample, and filter them on the database
        bs = block_size.c.block_size
        subquery = powa_getstatdata_sample("db", bindparam("server"))
        # Put the where clause inside the subquery
        subquery = subquery.where(column("datname") == bindparam("database"))
        query = subquery.alias()
        c = query.c

        cols = [
            c.srvid,
            to_epoch(c.ts),
            # Per-second rates: divide by the sample interval, guarding
            # against a zero-length interval with greatest(..., 1)
            (sum(c.calls) /
             greatest(extract("epoch", c.mesure_interval), 1)).label("calls"),
            (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
            (sum(c.runtime) /
             greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
            total_read(c),
            total_hit(c)
        ]

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            kcache_query = kcache_getstatdata_sample("db")
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                (kc.srvid == bindparam("server"))
                & (kc.datname == bindparam("database"))).alias())
            kc = kcache_query.c

            def sum_per_sec(col):
                # Convert a summed counter into a per-second rate,
                # keeping the original column name as the label
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            # System cache hits are everything read by postgres that did
            # not come from an actual disk read
            total_sys_hit = (total_read(c) - sum(kc.reads) /
                             greatest(extract("epoch", c.mesure_interval), 1.)
                             ).label("total_sys_hit")
            total_disk_read = (sum(kc.reads) /
                               greatest(extract("epoch", c.mesure_interval),
                                        1.)).label("total_disk_read")
            minflts = sum_per_sec(kc.minflts)
            majflts = sum_per_sec(kc.majflts)
            # nswaps = sum_per_sec(kc.nswaps)
            # msgsnds = sum_per_sec(kc.msgsnds)
            # msgrcvs = sum_per_sec(kc.msgrcvs)
            # nsignals = sum_per_sec(kc.nsignals)
            nvcsws = sum_per_sec(kc.nvcsws)
            nivcsws = sum_per_sec(kc.nivcsws)

            cols.extend([
                total_sys_hit,
                total_disk_read,
                minflts,
                majflts,
                # nswaps, msgsnds, msgrcvs, nsignals,
                nvcsws,
                nivcsws
            ])
            from_clause = from_clause.join(kcache_query,
                                           kcache_query.c.ts == c.ts)

        # BUGFIX: the previous code used ``c.calls is not None``, a Python
        # identity test on the Column object, which always evaluates to
        # True and therefore never emitted the intended SQL filter.  Use
        # the SQL-level IS NOT NULL comparison instead.
        return (select(cols).select_from(from_clause).where(
            c.calls.isnot(None)).group_by(c.srvid, c.ts, bs,
                                          c.mesure_interval).order_by(
                                              c.ts).params(samples=100))
Ejemplo n.º 28
0
class GlobalDatabasesMetricGroup(MetricGroupDef):
    """
    Metric group used by summarized graphs.
    """
    name = "all_databases"
    xaxis = "ts"
    data_url = r"/server/(\d+)/metrics/databases_globals/"
    avg_runtime = MetricDef(label="Avg runtime", type="duration",
                            desc="Average query duration")
    calls = MetricDef(label="Queries per sec", type="number",
                      desc="Number of time the query has been executed")
    planload = MetricDef(label="Plantime per sec", type="duration",
                         desc="Total planning duration")
    load = MetricDef(label="Runtime per sec", type="duration",
                     desc="Total duration of queries executed")
    total_blks_hit = MetricDef(label="Total hit", type="sizerate",
                               desc="Amount of data found in shared buffers")
    total_blks_read = MetricDef(label="Total read", type="sizerate",
                                desc="Amount of data found in OS cache or"
                                     " read from disk")
    wal_records = MetricDef(label="#Wal records", type="integer",
                            desc="Number of WAL records generated")
    wal_fpi = MetricDef(label="#Wal FPI", type="integer",
                        desc="Number of WAL full-page images generated")
    wal_bytes = MetricDef(label="Wal bytes", type="size",
                          desc="Amount of WAL bytes generated")

    total_sys_hit = MetricDef(label="Total system cache hit", type="sizerate",
                              desc="Amount of data found in OS cache")
    total_disk_read = MetricDef(label="Total disk read", type="sizerate",
                                desc="Amount of data read from disk")
    minflts = MetricDef(label="Soft page faults", type="number",
                        desc="Memory pages not found in the processor's MMU")
    majflts = MetricDef(label="Hard page faults", type="number",
                        desc="Memory pages not found in memory and loaded"
                             " from storage")
    # not maintained on GNU/Linux, and not available on Windows
    # nswaps = MetricDef(label="Swaps", type="number")
    # msgsnds = MetricDef(label="IPC messages sent", type="number")
    # msgrcvs = MetricDef(label="IPC messages received", type="number")
    # nsignals = MetricDef(label="Signals received", type="number")
    nvcsws = MetricDef(label="Voluntary context switches", type="number",
                       desc="Number of voluntary context switches")
    nivcsws = MetricDef(label="Involuntary context switches", type="number",
                        desc="Number of involuntary context switches")

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metrics to expose, depending on installed extensions.

        System-level metrics require pg_stat_kcache (when present, the
        coarser total_blks_read is dropped in favor of the system cache
        hit / disk read split); plan and WAL metrics require
        pg_stat_statements >= 1.8.
        """
        base = cls.metrics.copy()
        if not handler.has_extension(params["server"], "pg_stat_kcache"):
            for key in ("total_sys_hit", "total_disk_read", "minflts",
                        "majflts",
                        # "nswaps", "msgsnds", "msgrcvs", "nsignals",
                        "nvcsws", "nivcsws"):
                base.pop(key)
        else:
            base.pop("total_blks_read")

        if not handler.has_extension_version(params["server"],
                                             'pg_stat_statements', '1.8'):
            for key in ("planload", "wal_records", "wal_fpi", "wal_bytes"):
                base.pop(key)
        return base

    @property
    def query(self):
        """Build the sampled per-timestamp aggregate query over all
        databases of a server, optionally adding pg_stat_statements 1.8+
        plan/WAL counters and pg_stat_kcache system metrics.
        """
        bs = block_size.c.block_size
        query = powa_getstatdata_sample("db", bindparam("server"))
        query = query.alias()
        c = query.c

        # Per-second rates divide by the sample interval; greatest(..., 1)
        # guards against a zero-length interval
        cols = [c.srvid,
                extract("epoch", c.ts).label("ts"),
                (sum(c.calls) / greatest(extract("epoch", c.mesure_interval),
                                         1)).label("calls"),
                (sum(c.runtime) / greatest(sum(c.calls),
                                           1)).label("avg_runtime"),
                (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval),
                                           1)).label("load"),
                total_read(c),
                total_hit(c)
                ]

        # Planning time and WAL counters only exist in
        # pg_stat_statements 1.8 and later
        if self.has_extension_version(self.path_args[0],
                                      'pg_stat_statements', '1.8'):
            cols.extend([
                (sum(c.plantime) / greatest(extract("epoch", c.mesure_interval),
                                            1)).label("planload"),
                (sum(c.wal_records) / greatest(extract("epoch",
                                                       c.mesure_interval),
                                               1)).label("wal_records"),
                (sum(c.wal_fpi) / greatest(extract("epoch",
                                                   c.mesure_interval),
                                           1)).label("wal_fpi"),
                (sum(c.wal_bytes) / greatest(extract("epoch",
                                                     c.mesure_interval),
                                             1)).label("wal_bytes")
                ])

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            kcache_query = kcache_getstatdata_sample("db")
            kc = inner_cc(kcache_query)
            kcache_query = (
                kcache_query
                .where(
                    (kc.srvid == bindparam("server"))
                    )
                .alias())
            kc = kcache_query.c

            def sum_per_sec(col):
                # Convert a summed counter into a per-second rate,
                # keeping the original column name as the label
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            # System cache hits are everything read by postgres that did
            # not come from an actual disk read
            total_sys_hit = (total_read(c) - sum(kc.reads) /
                             greatest(extract("epoch", c.mesure_interval), 1.)
                             ).label("total_sys_hit")
            total_disk_read = (sum(kc.reads) /
                               greatest(extract("epoch", c.mesure_interval), 1.)
                               ).label("total_disk_read")
            minflts = sum_per_sec(kc.minflts)
            majflts = sum_per_sec(kc.majflts)
            # nswaps = sum_per_sec(kc.nswaps)
            # msgsnds = sum_per_sec(kc.msgsnds)
            # msgrcvs = sum_per_sec(kc.msgrcvs)
            # nsignals = sum_per_sec(kc.nsignals)
            nvcsws = sum_per_sec(kc.nvcsws)
            nivcsws = sum_per_sec(kc.nivcsws)

            cols.extend([total_sys_hit, total_disk_read, minflts, majflts,
                         # nswaps, msgsnds, msgrcvs, nsignals,
                         nvcsws, nivcsws])
            from_clause = from_clause.join(
                kcache_query,
                and_(kcache_query.c.dbid == c.dbid,
                     kcache_query.c.ts == c.ts))

        # NOTE(review): comparing calls to the string '0' relies on an
        # implicit cast server-side — consider the integer literal 0.
        return (select(cols)
                .select_from(from_clause)
                .where(c.calls != '0')
                .group_by(c.srvid, c.ts, bs, c.mesure_interval)
                .order_by(c.ts)
                .params(samples=100))
Ejemplo n.º 29
0
class ByDatabaseMetricGroup(MetricGroupDef):
    """
    Metric group used by the "by database" grid
    """
    name = "by_database"
    xaxis = "datname"
    data_url = r"/server/(\d+)/metrics/by_databases/"
    axis_type = "category"
    calls = MetricDef(label="#Calls", type="integer", direction="descending")
    plantime = MetricDef(label="Plantime", type="duration")
    runtime = MetricDef(label="Runtime", type="duration")
    avg_runtime = MetricDef(label="Avg runtime", type="duration")
    shared_blks_read = MetricDef(label="Blocks read", type="size")
    shared_blks_hit = MetricDef(label="Blocks hit", type="size")
    shared_blks_dirtied = MetricDef(label="Blocks dirtied", type="size")
    shared_blks_written = MetricDef(label="Blocks written", type="size")
    temp_blks_written = MetricDef(label="Temp Blocks written", type="size")
    io_time = MetricDef(label="I/O time", type="duration")
    wal_records = MetricDef(label="#Wal records", type="integer")
    wal_fpi = MetricDef(label="#Wal FPI", type="integer")
    wal_bytes = MetricDef(label="Wal bytes", type="size")
    params = ["server"]

    @classmethod
    def _get_metrics(cls, handler, **params):
        """Return the metrics to expose; plan and WAL counters require
        pg_stat_statements >= 1.8."""
        base = cls.metrics.copy()

        if not handler.has_extension_version(handler.path_args[0],
                                             'pg_stat_statements', '1.8'):
            for key in ("plantime", "wal_records", "wal_fpi", "wal_bytes"):
                base.pop(key)
        return base

    @property
    def query(self):
        """Build the per-database aggregate query for the grid.

        Joins the per-db statdata with powa_databases to resolve the
        database name, groups per (srvid, datname) and orders by total
        call count, descending.
        """
        bs = block_size.c.block_size
        inner_query = powa_getstatdata_db(bindparam("server")).alias()
        c = inner_query.c
        from_clause = inner_query.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        # NOTE(review): mulblock presumably multiplies by the block size
        # and labels the result with the inner column's name — confirm
        # against the helper's definition, since the .label() calls here
        # are applied before the multiplication.
        cols = [powa_databases.c.srvid,
                powa_databases.c.datname,
                sum(c.calls).label("calls"),
                sum(c.runtime).label("runtime"),
                round(cast(sum(c.runtime), Numeric) /
                      greatest(sum(c.calls), 1), 2).label("avg_runtime"),
                mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
                mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
                mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
                mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
                mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
                round(cast(sum(c.blk_read_time + c.blk_write_time),
                           Numeric), 2).label("io_time")
                ]

        # Plan and WAL counters only exist in pg_stat_statements 1.8+
        if self.has_extension_version(self.path_args[0], 'pg_stat_statements',
                                      '1.8'):
            cols.extend([
                sum(c.plantime).label("plantime"),
                sum(c.wal_records).label("wal_records"),
                sum(c.wal_fpi).label("wal_fpi"),
                sum(c.wal_bytes).label("wal_bytes")
                ])

        return (select(cols)
                .select_from(from_clause)
                .order_by(sum(c.calls).desc())
                .group_by(powa_databases.c.srvid,
                          powa_databases.c.datname, bs))

    def process(self, val, **kwargs):
        """Post-process one grid row: attach the per-database page URL."""
        val = dict(val)
        val["url"] = self.reverse_url("DatabaseOverview", val["srvid"],
                                      val["datname"])
        return val
Ejemplo n.º 30
0
class WaitsQueryOverviewMetricGroup(MetricGroupDef):
    """
    Metric Group for the wait event graph on the by query page.
    """
    name = "waits_query_overview"
    xaxis = "ts"
    data_url = r"/metrics/database/([^\/]+)/query/(-?\d+)/wait_events_sampled"
    # pg 9.6 only metrics
    count_lwlocknamed = MetricDef(label="Lightweight Named")
    count_lwlocktranche = MetricDef(label="Lightweight Tranche")
    # pg 10+ metrics
    count_lwlock = MetricDef(label="Lightweight Lock")
    count_lock = MetricDef(label="Lock")
    count_bufferpin = MetricDef(label="Buffer pin")
    count_activity = MetricDef(label="Activity")
    count_client = MetricDef(label="Client")
    count_extension = MetricDef(label="Extension")
    count_ipc = MetricDef(label="IPC")
    count_timeout = MetricDef(label="Timeout")
    count_io = MetricDef(label="IO")

    def prepare(self):
        """Reject the request with a 501 when pg_wait_sampling is not
        installed, since the data source depends on it."""
        if not self.has_extension("pg_wait_sampling"):
            raise HTTPError(501, "pg_wait_sampling is not installed")

    @property
    def query(self):
        """Build the sampled wait-event query for one (database, query)
        pair, selecting per-second rates for the wait-event classes
        available on the server's PostgreSQL version.
        """

        query = powa_getwaitdata_sample("query")
        query = query.where((column("datname") == bindparam("database"))
                            & (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c

        def wps(col):
            # Convert a wait count into a per-second rate, keeping the
            # original column name as the label
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (col / ts).label(col.name)

        cols = [to_epoch(c.ts)]

        # Wait-event classes differ between pre-10 and 10+ servers, so
        # only select the columns that exist for this version
        pg_version_num = self.get_pg_version_num()
        if pg_version_num < 100000:
            cols += [
                wps(c.count_lwlocknamed),
                wps(c.count_lwlocktranche),
                wps(c.count_lock),
                wps(c.count_bufferpin)
            ]
        else:
            cols += [
                wps(c.count_lwlock),
                wps(c.count_lock),
                wps(c.count_bufferpin),
                wps(c.count_activity),
                wps(c.count_client),
                wps(c.count_extension),
                wps(c.count_ipc),
                wps(c.count_timeout),
                wps(c.count_io)
            ]

        from_clause = query

        return (select(cols).select_from(from_clause)
                #.where(c.count != None)
                .order_by(c.ts).params(samples=100))