コード例 #1
0
ファイル: query.py プロジェクト: leorenc/powa-web
    def get(self, database, query):
        """Render the detail panel for a single query of a database."""
        bs = block_size.c.block_size
        # Base per-query statistics, restricted to the requested
        # database and query.
        inner = powa_getstatdata_detailed_db().where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query"))).alias()
        ic = inner.c
        join_cond = and_(powa_statements.c.queryid == inner.c.queryid,
                         powa_statements.c.dbid == inner.c.dbid)
        from_clause = outerjoin(powa_statements, inner, join_cond)
        # Block counters are scaled by the block size (bytes).
        read_blks = mulblock(sum(ic.shared_blks_read)
                             .label("shared_blks_read"))
        hit_blks = mulblock(sum(ic.shared_blks_hit)
                            .label("shared_blks_hit"))
        stmt = (select([
            column("query"),
            sum(ic.calls).label("calls"),
            sum(ic.runtime).label("runtime"),
            read_blks,
            hit_blks,
            (read_blks + hit_blks).label("total_blks")])
            .select_from(from_clause)
            .where(powa_statements.c.queryid == bindparam("query"))
            .group_by(column("query"), bs))

        value = self.execute(stmt, params={
            "query": query,
            "database": database,
            "from": self.get_argument("from"),
            "to": self.get_argument("to")
        })
        # No row means no activity recorded in the selected time range.
        if value.rowcount < 1:
            self.render("xhr.html", content="No data")
            return
        self.render("database/query/detail.html", stats=value.first())
コード例 #2
0
    def query(self):
        """Aggregate statistics per (server, database), ordered by calls."""
        bs = block_size.c.block_size
        stats = powa_getstatdata_db(bindparam("server")).alias()
        c = stats.c
        # Resolve the database name via its (oid, srvid) pair.
        from_clause = stats.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        cols = [
            powa_databases.c.srvid, powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            # greatest() guards against a zero call count.
            round(
                cast(sum(c.runtime), Numeric) / greatest(sum(c.calls), 1),
                2).label("avg_runtime"),
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time), Numeric),
                  2).label("io_time"),
        ]

        return (select(cols)
                .select_from(from_clause)
                .order_by(sum(c.calls).desc())
                .group_by(powa_databases.c.srvid,
                          powa_databases.c.datname, bs))
コード例 #3
0
    def query(self):
        """Aggregated background-writer activity, sampled per timestamp."""
        bs = block_size.c.block_size
        sampled = powa_get_bgwriter_sample(bindparam("server")).alias()
        c = sampled.c

        def per_second(col):
            # Normalize a summed counter by the sample interval,
            # clamped to at least one second.
            interval = extract("epoch", greatest(c.mesure_interval,
                                                 '1 second'))
            return (sum(col) / interval).label(col.name)

        cols = [
            c.srvid,
            extract("epoch", c.ts).label("ts"),
            sum(c.checkpoints_timed).label("checkpoints_timed"),
            sum(c.checkpoints_req).label("checkpoints_req"),
            per_second(c.checkpoint_write_time),
            per_second(c.checkpoint_sync_time),
            per_second(mulblock(c.buffers_checkpoint)),
            per_second(mulblock(c.buffers_clean)),
            per_second(c.maxwritten_clean),
            per_second(mulblock(c.buffers_backend)),
            per_second(c.buffers_backend_fsync),
            per_second(mulblock(c.buffers_alloc)),
        ]

        return (select(cols)
                .select_from(sampled)
                .group_by(c.srvid, c.ts, bs, c.mesure_interval)
                .order_by(c.ts)
                .params(samples=100))
コード例 #4
0
ファイル: database.py プロジェクト: balothia/powa-web
 def query(self):
     """Per-query aggregated statistics for one database of a server."""
     # Working from the statdata detailed_db base query.
     inner = powa_getstatdata_detailed_db(bindparam("server")).alias()
     c = inner.c
     ps = powa_statements
     # Join back to powa_statements to recover the query text.
     from_clause = inner.join(ps, (ps.c.queryid == c.queryid) &
                              (ps.c.userid == c.userid) &
                              (ps.c.dbid == c.dbid))
     # Multiply each block counter by the size of one block.
     cols = [
         c.srvid, c.queryid, ps.c.query,
         sum(c.calls).label("calls"),
         sum(c.runtime).label("runtime"),
         sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
         sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
         sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
         sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
         sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
         sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
         # greatest() guards against a zero call count.
         (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
         sum(c.blk_read_time).label("blks_read_time"),
         sum(c.blk_write_time).label("blks_write_time"),
     ]
     return (select(cols)
             .select_from(from_clause)
             .where(c.datname == bindparam("database"))
             .group_by(c.srvid, c.queryid, ps.c.query)
             .order_by(sum(c.runtime).desc()))
コード例 #5
0
ファイル: database.py プロジェクト: champeric/powa-web
 def query(self):
     """Per-query aggregated statistics for a single database."""
     # Working from the statdata detailed_db base query.
     inner = powa_getstatdata_detailed_db().alias()
     c = inner.c
     ps = powa_statements
     # Join back to powa_statements to recover the query text.
     from_clause = inner.join(ps,
                              (ps.c.queryid == c.queryid) &
                              (ps.c.userid == c.userid) &
                              (ps.c.dbid == c.dbid))
     # Multiply each block counter by the size of one block.
     cols = [
         c.queryid,
         ps.c.query,
         sum(c.calls).label("calls"),
         sum(c.runtime).label("runtime"),
         sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
         sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
         sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
         sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
         sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
         sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
         # greatest() guards against a zero call count.
         (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
         sum(c.blk_read_time).label("blks_read_time"),
         sum(c.blk_write_time).label("blks_write_time"),
     ]
     return (select(cols)
             .select_from(from_clause)
             .where(c.datname == bindparam("database"))
             .group_by(c.queryid, ps.c.query)
             .order_by(sum(c.runtime).desc()))
コード例 #6
0
    def query(self):
        """Aggregate statistics per (server, database).

        Extra planning/WAL columns are included when the server runs
        pg_stat_statements 1.8 or later.
        """
        bs = block_size.c.block_size
        stats = powa_getstatdata_db(bindparam("server")).alias()
        c = stats.c
        # Resolve the database name via its (oid, srvid) pair.
        from_clause = stats.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        cols = [
            powa_databases.c.srvid,
            powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            # greatest() guards against a zero call count.
            round(cast(sum(c.runtime), Numeric) /
                  greatest(sum(c.calls), 1), 2).label("avg_runtime"),
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time),
                       Numeric), 2).label("io_time"),
        ]

        # Planning time and WAL counters only exist in
        # pg_stat_statements >= 1.8.
        if self.has_extension_version(self.path_args[0], 'pg_stat_statements',
                                      '1.8'):
            cols += [
                sum(c.plantime).label("plantime"),
                sum(c.wal_records).label("wal_records"),
                sum(c.wal_fpi).label("wal_fpi"),
                sum(c.wal_bytes).label("wal_bytes"),
            ]

        return (select(cols)
                .select_from(from_clause)
                .order_by(sum(c.calls).desc())
                .group_by(powa_databases.c.srvid,
                          powa_databases.c.datname, bs))
コード例 #7
0
ファイル: overview.py プロジェクト: girgen/powa-web
    def query(self):
        """Aggregate statistics per database, ordered by call count."""
        bs = block_size.c.block_size
        stats = powa_getstatdata_db().alias()
        c = stats.c
        # Resolve the database name from its oid.
        from_clause = stats.join(
            powa_databases,
            c.dbid == powa_databases.c.oid)

        cols = [
            powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            # greatest() guards against a zero call count.
            round(cast(sum(c.runtime), Numeric) /
                  greatest(sum(c.calls), 1), 2).label("avg_runtime"),
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time),
                       Numeric), 2).label("io_time"),
        ]

        return (select(cols)
                .select_from(from_clause)
                .order_by(sum(c.calls).desc())
                .group_by(powa_databases.c.datname, bs))
コード例 #8
0
ファイル: query.py プロジェクト: leorenc/powa-web
 def bps(col):
     """Convert a block counter to bytes per second over the sample interval."""
     # Clamp the interval to one second to avoid dividing by zero.
     interval = extract("epoch", greatest(c.mesure_interval, '1 second'))
     return (mulblock(col) / interval).label(col.name)
コード例 #9
0
ファイル: query.py プロジェクト: leorenc/powa-web
    def query(self):
        """Per-query time series (100 samples) for one database/query.

        When pg_stat_kcache is installed, the series is enriched with
        system metrics and a detailed cache/disk hit breakdown.
        """
        query = powa_getstatdata_sample("query")
        query = query.where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        # Shared blocks touched overall (cache hits + reads).
        total_blocks = ((c.shared_blks_read + c.shared_blks_hit)
                        .label("total_blocks"))

        def bps(col):
            # Block counter converted to bytes per second; the interval
            # is clamped to one second to avoid dividing by zero.
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (mulblock(col) / ts).label(col.name)
        cols = [to_epoch(c.ts),
                c.rows,
                c.calls,
                # Percentage of shared blocks served from shared_buffers.
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_hit, Numeric) * 100 /
                     total_blocks).label("hit_ratio"),
                bps(c.shared_blks_read),
                bps(c.shared_blks_hit),
                bps(c.shared_blks_dirtied),
                bps(c.shared_blks_written),
                bps(c.local_blks_read),
                bps(c.local_blks_hit),
                bps(c.local_blks_dirtied),
                bps(c.local_blks_written),
                bps(c.temp_blks_read),
                bps(c.temp_blks_written),
                c.blk_read_time,
                c.blk_write_time,
                (c.runtime / greatest(c.calls, 1)).label("avg_runtime")]

        from_clause = query
        if self.has_extension("pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample()
            kc = inner_cc(kcache_query)
            kcache_query = (
                kcache_query
                .where(kc.queryid == bindparam("query"))
                .alias())
            kc = kcache_query.c
            # Blocks not found in shared_buffers but also not read from
            # disk according to kcache, i.e. served by the OS cache.
            sys_hits = (greatest(mulblock(c.shared_blks_read) -
                                 kc.reads, 0)
                        .label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            # BUGFIX: express as a percentage like sys_hitratio above
            # (the * 100 factor was missing).
            disk_hit_ratio = (kc.reads * 100 /
                              mulblock(total_blocks))
            total_time = greatest(c.runtime, 1)

            # Rusage can return values > real time due to sampling bias
            # aligned to kernel ticks. As such, we have to clamp values
            # to 100%.
            def total_time_percent(x):
                return least(100, (x * 100) / total_time)

            cols.extend([
                kc.reads,
                kc.writes,
                # kcache times are in seconds, runtime in milliseconds.
                total_time_percent(kc.user_time * 1000).label("user_time"),
                total_time_percent(kc.system_time * 1000).label("system_time"),
                greatest(total_time_percent(
                    c.runtime - (kc.user_time + kc.system_time) *
                    1000), 0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")])
            from_clause = from_clause.join(
                kcache_query,
                kcache_query.c.ts == c.ts)
        else:
            # Without kcache we can only report a shared_buffers miss
            # ratio.
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_read, Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols)
                .select_from(from_clause)
                # isnot(None) renders IS NOT NULL, same as the previous
                # `!= None` but without tripping linters.
                .where(c.calls.isnot(None))
                .order_by(c.ts)
                .params(samples=100))
コード例 #10
0
ファイル: query.py プロジェクト: frost242/powa-web
 def bps(col):
     """Sum a block counter and convert it to bytes per second."""
     total_bytes = mulblock(sum(col))
     return (total_bytes / get_ts()).label(col.name)
コード例 #11
0
ファイル: query.py プロジェクト: frost242/powa-web
    def query(self):
        """Per-query time series for one server/database/query.

        Returns a sampled select (100 samples) aggregated per timestamp,
        extended with pg_stat_kcache system metrics when that extension
        is available on the target server.
        """
        query = powa_getstatdata_sample("query", bindparam("server"))
        query = query.where((column("datname") == bindparam("database"))
                            & (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        # Shared blocks touched overall (cache hits + reads).
        total_blocks = ((sum(c.shared_blks_read) +
                         sum(c.shared_blks_hit)).label("total_blocks"))

        def get_ts():
            # Sample interval in seconds, clamped to at least one second
            # to avoid dividing by zero.
            return extract("epoch", greatest(c.mesure_interval, '1 second'))

        def sumps(col):
            # Summed counter, normalized per second.
            return (sum(col) / get_ts()).label(col.name)

        def bps(col):
            # Summed block counter, converted to bytes per second.
            return (mulblock(sum(col)) / get_ts()).label(col.name)

        cols = [
            to_epoch(c.ts),
            sumps(c.rows),
            sumps(c.calls),
            # Percentage of shared blocks served from shared_buffers;
            # 0 when no blocks were touched at all.
            case([(total_blocks == 0, 0)],
                 else_=cast(sum(c.shared_blks_hit), Numeric) * 100 /
                 total_blocks).label("hit_ratio"),
            bps(c.shared_blks_read),
            bps(c.shared_blks_hit),
            bps(c.shared_blks_dirtied),
            bps(c.shared_blks_written),
            bps(c.local_blks_read),
            bps(c.local_blks_hit),
            bps(c.local_blks_dirtied),
            bps(c.local_blks_written),
            bps(c.temp_blks_read),
            bps(c.temp_blks_written),
            sumps(c.blk_read_time),
            sumps(c.blk_write_time),
            # greatest() guards against a zero call count.
            (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime")
        ]

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample("query")
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                (kc.srvid == bindparam("server"))
                & (kc.datname == bindparam("database"))
                & (kc.queryid == bindparam("query"))).alias())
            kc = kcache_query.c
            # Blocks missed in shared_buffers but not read from disk per
            # kcache, i.e. presumably served by the OS cache.
            sys_hits = (greatest(
                mulblock(sum(c.shared_blks_read)) - sum(kc.reads),
                0).label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (sum(kc.reads) * 100 / mulblock(total_blocks))
            total_time = greatest(sum(c.runtime), 1)
            # Rusage can return values > real time due to sampling bias
            # aligned to kernel ticks. As such, we have to clamp values to 100%
            total_time_percent = lambda x: least(100, (x * 100) / total_time)

            def per_sec(col):
                # Summed kcache counter, normalized per second.
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            cols.extend([
                per_sec(kc.reads),
                per_sec(kc.writes),
                per_sec(kc.minflts),
                per_sec(kc.majflts),
                # per_sec(kc.nswaps),
                # per_sec(kc.msgsnds),
                # per_sec(kc.msgrcvs),
                # per_sec(kc.nsignals),
                per_sec(kc.nvcsws),
                per_sec(kc.nivcsws),
                # kcache times are in seconds, runtime in milliseconds.
                total_time_percent(sum(kc.user_time) *
                                   1000).label("user_time"),
                total_time_percent(sum(kc.system_time) *
                                   1000).label("system_time"),
                greatest(
                    total_time_percent(
                        sum(c.runtime) -
                        ((sum(kc.user_time) + sum(kc.system_time)) * 1000)),
                    0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")
            ])
            # Align kcache samples with the statement samples on the
            # full identity of a record.
            from_clause = from_clause.join(
                kcache_query,
                and_(kcache_query.c.ts == c.ts,
                     kcache_query.c.queryid == c.queryid,
                     kcache_query.c.userid == c.userid,
                     kcache_query.c.dbid == c.dbid))
        else:
            # Without kcache we can only report a shared_buffers miss
            # ratio.
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(sum(c.shared_blks_read), Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        # NOTE(review): calls is compared against the string '0' here,
        # relying on an implicit cast — confirm it shouldn't be the
        # integer 0 (or an IS NOT NULL test as in other handlers).
        return (select(cols).select_from(from_clause).where(
            c.calls != '0').group_by(c.ts, block_size.c.block_size,
                                     c.mesure_interval).order_by(
                                         c.ts).params(samples=100))