Example #1
 def query(self):
     bs = block_size.c.block_size
     query = powa_getstatdata_sample("db")
     query = query.alias()
     c = query.c
     return (select([
         extract("epoch", c.ts).label("ts"),
         (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
         (sum(c.runtime) /
          greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
         total_read(c),
         total_hit(c)
     ]).where(c.calls != None).group_by(
         c.ts, bs, c.mesure_interval).order_by(c.ts).params(samples=100))
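The query() methods in this listing all rely on names that are imported elsewhere in powa-web: SQLAlchemy constructs such as select, extract, cast, case, column and bindparam, SQL functions like sum, greatest and least, and powa's own helpers (powa_getstatdata_sample, block_size, total_read, total_hit, mulblock, to_epoch, ...). A minimal sketch of the kind of scaffolding the snippets assume; module paths and helper bodies here are illustrative guesses, not powa-web's actual definitions:

    # Sketch only: shows the names the query() methods expect in scope,
    # not powa-web's real imports.
    from sqlalchemy import extract
    from sqlalchemy.sql import func

    # Generic SQL functions used throughout the examples
    # (sum deliberately shadows the Python builtin, as the snippets do).
    sum = func.sum            # SQL SUM(...)
    greatest = func.greatest  # SQL GREATEST(...), guards against division by zero
    least = func.least        # SQL LEAST(...), clamps percentages

    def to_epoch(col):
        # Timestamp column -> UNIX epoch, keeping the column name as the label.
        return extract("epoch", col).label(col.name)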
Example #2
File: overview.py  Project: girgen/powa-web
 def query(self):
     bs = block_size.c.block_size
     query = powa_getstatdata_sample("db")
     query = query.alias()
     c = query.c
     return (select([
             extract("epoch", c.ts).label("ts"),
             (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
             (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval),1)).label("load"),
             total_read(c),
             total_hit(c)])
         .where(c.calls != None)
         .group_by(c.ts, bs, c.mesure_interval)
         .order_by(c.ts)
         .params(samples=100))
Example #3
    def query(self):
        bs = block_size.c.block_size
        inner_query = powa_getstatdata_db(bindparam("server")).alias()
        c = inner_query.c
        from_clause = inner_query.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        return (select([
            powa_databases.c.srvid, powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            round(
                cast(sum(c.runtime), Numeric) / greatest(sum(c.calls), 1),
                2).label("avg_runtime"),
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time), Numeric),
                  2).label("io_time")
        ]).select_from(from_clause).order_by(sum(c.calls).desc()).group_by(
            powa_databases.c.srvid, powa_databases.c.datname, bs))
Example #4
 def query(self):
     # Working from the statdata detailed_db base query
     inner_query = powa_getstatdata_detailed_db()
     inner_query = inner_query.alias()
     c = inner_query.c
     ps = powa_statements
     # Multiply each measure by the size of one block.
     columns = [c.queryid,
                ps.c.query,
                sum(c.calls).label("calls"),
                sum(c.runtime).label("runtime"),
                sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
                sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
                sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
                sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
                sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
                sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
                (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
                sum(c.blk_read_time).label("blks_read_time"),
                sum(c.blk_write_time).label("blks_write_time")]
     from_clause = inner_query.join(ps,
                                    (ps.c.queryid == c.queryid) &
                                    (ps.c.userid == c.userid) &
                                    (ps.c.dbid == c.dbid))
     return (select(columns)
             .select_from(from_clause)
             .where(c.datname == bindparam("database"))
             .group_by(c.queryid, ps.c.query)
             .order_by(sum(c.runtime).desc()))
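The comment above ("Multiply each measure by the size of one block") refers to the mulblock() helper, which converts PostgreSQL block counts into bytes so they can be compared with byte-based counters. A plausible sketch of mulblock() and the block_size relation it closes over; the real definitions live in powa-web's SQL utility module and may differ:

    # Sketch, not the actual powa-web code.
    from sqlalchemy import Numeric, cast, select
    from sqlalchemy.sql import column, table

    # One-row relation exposing the server's block size (normally 8192 bytes).
    pg_settings = table("pg_settings", column("name"), column("setting"))
    block_size = (select([cast(pg_settings.c.setting, Numeric)
                          .label("block_size")])
                  .where(pg_settings.c.name == "block_size")
                  .alias())

    def mulblock(col):
        # Block count -> bytes, keeping the original column name as the label.
        return (col * block_size.c.block_size).label(col.name)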
Example #5
 def query(self):
     # Working from the statdata detailed_db base query
     inner_query = powa_getstatdata_detailed_db(bindparam("server"))
     inner_query = inner_query.alias()
     c = inner_query.c
     ps = powa_statements
     # Multiply each measure by the size of one block.
     columns = [
         c.srvid, c.queryid, ps.c.query,
         sum(c.calls).label("calls"),
         sum(c.runtime).label("runtime"),
         sum(mulblock(c.shared_blks_read)).label("shared_blks_read"),
         sum(mulblock(c.shared_blks_hit)).label("shared_blks_hit"),
         sum(mulblock(c.shared_blks_dirtied)).label("shared_blks_dirtied"),
         sum(mulblock(c.shared_blks_written)).label("shared_blks_written"),
         sum(mulblock(c.temp_blks_read)).label("temp_blks_read"),
         sum(mulblock(c.temp_blks_written)).label("temp_blks_written"),
         (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime"),
         sum(c.blk_read_time).label("blks_read_time"),
         sum(c.blk_write_time).label("blks_write_time")
     ]
     from_clause = inner_query.join(ps, (ps.c.queryid == c.queryid) &
                                    (ps.c.userid == c.userid) &
                                    (ps.c.dbid == c.dbid))
     return (select(columns).select_from(from_clause).where(
         c.datname == bindparam("database")).group_by(
             c.srvid, c.queryid,
             ps.c.query).order_by(sum(c.runtime).desc()))
Example #6
 def query(self):
     # Fetch the base query for sample, and filter them on the database
     bs = block_size.c.block_size
     subquery = powa_getstatdata_sample("db")
     # Put the where clause inside the subquery
     subquery = subquery.where(column("datname") == bindparam("database"))
     query = subquery.alias()
     c = query.c
     return (select([
         to_epoch(c.ts),
         (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
         (sum(c.runtime) /
          greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
         total_read(c),
         total_hit(c)
     ]).where(c.calls != None).group_by(
         c.ts, bs, c.mesure_interval).order_by(c.ts).params(samples=100))
Example #7
 def query(self):
     # Fetch the base query for sample, and filter them on the database
     bs = block_size.c.block_size
     subquery = powa_getstatdata_sample("db")
     # Put the where clause inside the subquery
     subquery = subquery.where(column("datname") == bindparam("database"))
     query = subquery.alias()
     c = query.c
     return (select([
         to_epoch(c.ts),
         (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
         (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval),
                                    1)).label("load"),
         total_read(c),
         total_hit(c)])
             .where(c.calls != None)
             .group_by(c.ts, bs, c.mesure_interval)
             .order_by(c.ts)
             .params(samples=100))
Example #8
    def query(self):
        bs = block_size.c.block_size
        inner_query = powa_getstatdata_db(bindparam("server")).alias()
        c = inner_query.c
        from_clause = inner_query.join(
            powa_databases,
            and_(c.dbid == powa_databases.c.oid,
                 c.srvid == powa_databases.c.srvid))

        cols = [powa_databases.c.srvid,
                powa_databases.c.datname,
                sum(c.calls).label("calls"),
                sum(c.runtime).label("runtime"),
                round(cast(sum(c.runtime), Numeric) /
                      greatest(sum(c.calls), 1), 2).label("avg_runtime"),
                mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
                mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
                mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
                mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
                mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
                round(cast(sum(c.blk_read_time + c.blk_write_time),
                           Numeric), 2).label("io_time")
                ]

        if self.has_extension_version(self.path_args[0], 'pg_stat_statements',
                                      '1.8'):
            cols.extend([
                sum(c.plantime).label("plantime"),
                sum(c.wal_records).label("wal_records"),
                sum(c.wal_fpi).label("wal_fpi"),
                sum(c.wal_bytes).label("wal_bytes")
                ])

        return (select(cols)
                .select_from(from_clause)
                .order_by(sum(c.calls).desc())
                .group_by(powa_databases.c.srvid,
                          powa_databases.c.datname, bs))
Example #9
File: overview.py  Project: girgen/powa-web
    def query(self):
        bs = block_size.c.block_size
        inner_query = powa_getstatdata_db().alias()
        c = inner_query.c
        from_clause = inner_query.join(
            powa_databases,
            c.dbid == powa_databases.c.oid)

        return (select([
            powa_databases.c.datname,
            sum(c.calls).label("calls"),
            sum(c.runtime).label("runtime"),
            round(cast(sum(c.runtime), Numeric) / greatest(sum(c.calls), 1), 2).label("avg_runtime"),
            mulblock(sum(c.shared_blks_read).label("shared_blks_read")),
            mulblock(sum(c.shared_blks_hit).label("shared_blks_hit")),
            mulblock(sum(c.shared_blks_dirtied).label("shared_blks_dirtied")),
            mulblock(sum(c.shared_blks_written).label("shared_blks_written")),
            mulblock(sum(c.temp_blks_written).label("temp_blks_written")),
            round(cast(sum(c.blk_read_time + c.blk_write_time), Numeric), 2).label("io_time")
        ])
            .select_from(from_clause)
            .order_by(sum(c.calls).desc())
            .group_by(powa_databases.c.datname, bs))
Example #10
 def sum_per_sec(col):
     ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
     return (sum(col) / ts).label(col.name)
Example #11
 def wps(col):
     ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
     return (col / ts).label(col.name)
Example #12
File: query.py  Project: champeric/powa-web
 def bps(col):
     ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
     return (mulblock(col) / ts).label(col.name)
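Examples #10, #11 and #12 (and #14 and #18 below) are small closures defined inside a query() method: c is the column collection of the aliased sample subquery, and mesure_interval is the time elapsed between two consecutive snapshots. The helpers differ only in whether they aggregate with SUM and whether they convert blocks to bytes with mulblock(); in every case the counter is normalised to a per-second rate, with GREATEST(..., '1 second') guarding against a zero-length interval. A self-contained restatement of the summing variant (sketch; c becomes an explicit parameter here):

    from sqlalchemy import extract
    from sqlalchemy.sql import func

    def sum_per_sec(col, c):
        # Aggregated counter -> per-second rate over the sampling interval.
        interval = extract("epoch", func.greatest(c.mesure_interval, '1 second'))
        return (func.sum(col) / interval).label(col.name)

wps() drops the SUM (its input is already a per-row value) and bps() additionally multiplies by the block size through mulblock().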
Example #13
File: query.py  Project: leorenc/powa-web
    def query(self):
        query = powa_getstatdata_sample("query")
        query = query.where(
            (column("datname") == bindparam("database")) &
            (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        total_blocks = ((c.shared_blks_read + c.shared_blks_hit)
                        .label("total_blocks"))

        def bps(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (mulblock(col) / ts).label(col.name)
        cols = [to_epoch(c.ts),
                c.rows,
                c.calls,
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_hit, Numeric) * 100 /
                     total_blocks).label("hit_ratio"),
                bps(c.shared_blks_read),
                bps(c.shared_blks_hit),
                bps(c.shared_blks_dirtied),
                bps(c.shared_blks_written),
                bps(c.local_blks_read),
                bps(c.local_blks_hit),
                bps(c.local_blks_dirtied),
                bps(c.local_blks_written),
                bps(c.temp_blks_read),
                bps(c.temp_blks_written),
                c.blk_read_time,
                c.blk_write_time,
                (c.runtime / greatest(c.calls, 1)).label("avg_runtime")]

        from_clause = query
        if self.has_extension("pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample()
            kc = inner_cc(kcache_query)
            kcache_query = (
                kcache_query
                .where(kc.queryid == bindparam("query"))
                .alias())
            kc = kcache_query.c
            sys_hits = (greatest(mulblock(c.shared_blks_read) -
                                 kc.reads, 0)
                        .label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (kc.reads /
                              mulblock(total_blocks))
            total_time = greatest(c.runtime, 1)
            # Rusage can return values > real time due to sampling bias
            # aligned to kernel ticks. As such, we have to clamp values to 100%
            total_time_percent = lambda x: least(100, (x * 100) /
                                                 total_time)
            cols.extend([
                kc.reads,
                kc.writes,
                total_time_percent(kc.user_time * 1000).label("user_time"),
                total_time_percent(kc.system_time * 1000).label("system_time"),
                greatest(total_time_percent(
                    c.runtime - (kc.user_time + kc.system_time) *
                    1000), 0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")])
            from_clause = from_clause.join(
                kcache_query,
                kcache_query.c.ts == c.ts)
        else:
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(c.shared_blks_read, Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols)
                .select_from(from_clause)
                .where(c.calls != None)
                .order_by(c.ts)
                .params(samples=100))
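In the pg_stat_kcache branch above, user_time and system_time are CPU times in seconds (from getrusage) while runtime is in milliseconds, so total_time_percent scales the CPU time by 1000 before expressing it as a share of the total runtime; least(100, ...) clamps the result because rusage's tick-based sampling can report more CPU time than the measured wall-clock time. The same arithmetic in plain Python (a sketch mirroring the SQL expression, not code from the project):

    def time_percent(cpu_seconds, runtime_ms):
        # CPU time (s) -> ms, as a percentage of total runtime, clamped to 100.
        return min(100, (cpu_seconds * 1000) * 100 / max(runtime_ms, 1))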
Example #14
File: query.py  Project: frost242/powa-web
 def get_ts():
     return extract("epoch", greatest(c.mesure_interval, '1 second'))
Example #15
File: query.py  Project: frost242/powa-web
    def query(self):
        query = powa_getstatdata_sample("query", bindparam("server"))
        query = query.where((column("datname") == bindparam("database"))
                            & (column("queryid") == bindparam("query")))
        query = query.alias()
        c = query.c
        total_blocks = ((sum(c.shared_blks_read) +
                         sum(c.shared_blks_hit)).label("total_blocks"))

        def get_ts():
            return extract("epoch", greatest(c.mesure_interval, '1 second'))

        def sumps(col):
            return (sum(col) / get_ts()).label(col.name)

        def bps(col):
            return (mulblock(sum(col)) / get_ts()).label(col.name)

        cols = [
            to_epoch(c.ts),
            sumps(c.rows),
            sumps(c.calls),
            case([(total_blocks == 0, 0)],
                 else_=cast(sum(c.shared_blks_hit), Numeric) * 100 /
                 total_blocks).label("hit_ratio"),
            bps(c.shared_blks_read),
            bps(c.shared_blks_hit),
            bps(c.shared_blks_dirtied),
            bps(c.shared_blks_written),
            bps(c.local_blks_read),
            bps(c.local_blks_hit),
            bps(c.local_blks_dirtied),
            bps(c.local_blks_written),
            bps(c.temp_blks_read),
            bps(c.temp_blks_written),
            sumps(c.blk_read_time),
            sumps(c.blk_write_time),
            (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime")
        ]

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            # and detailed hit ratio.
            kcache_query = kcache_getstatdata_sample("query")
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                (kc.srvid == bindparam("server"))
                & (kc.datname == bindparam("database"))
                & (kc.queryid == bindparam("query"))).alias())
            kc = kcache_query.c
            sys_hits = (greatest(
                mulblock(sum(c.shared_blks_read)) - sum(kc.reads),
                0).label("kcache_hitblocks"))
            sys_hitratio = (cast(sys_hits, Numeric) * 100 /
                            mulblock(total_blocks))
            disk_hit_ratio = (sum(kc.reads) * 100 / mulblock(total_blocks))
            total_time = greatest(sum(c.runtime), 1)
            # Rusage can return values > real time due to sampling bias
            # aligned to kernel ticks. As such, we have to clamp values to 100%
            total_time_percent = lambda x: least(100, (x * 100) / total_time)

            def per_sec(col):
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            cols.extend([
                per_sec(kc.reads),
                per_sec(kc.writes),
                per_sec(kc.minflts),
                per_sec(kc.majflts),
                # per_sec(kc.nswaps),
                # per_sec(kc.msgsnds),
                # per_sec(kc.msgrcvs),
                # per_sec(kc.nsignals),
                per_sec(kc.nvcsws),
                per_sec(kc.nivcsws),
                total_time_percent(sum(kc.user_time) *
                                   1000).label("user_time"),
                total_time_percent(sum(kc.system_time) *
                                   1000).label("system_time"),
                greatest(
                    total_time_percent(
                        sum(c.runtime) -
                        ((sum(kc.user_time) + sum(kc.system_time)) * 1000)),
                    0).label("other_time"),
                case([(total_blocks == 0, 0)],
                     else_=disk_hit_ratio).label("disk_hit_ratio"),
                case([(total_blocks == 0, 0)],
                     else_=sys_hitratio).label("sys_hit_ratio")
            ])
            from_clause = from_clause.join(
                kcache_query,
                and_(kcache_query.c.ts == c.ts,
                     kcache_query.c.queryid == c.queryid,
                     kcache_query.c.userid == c.userid,
                     kcache_query.c.dbid == c.dbid))
        else:
            cols.extend([
                case([(total_blocks == 0, 0)],
                     else_=cast(sum(c.shared_blks_read), Numeric) * 100 /
                     total_blocks).label("miss_ratio")
            ])

        return (select(cols).select_from(from_clause).where(
            c.calls != '0').group_by(c.ts, block_size.c.block_size,
                                     c.mesure_interval).order_by(
                                         c.ts).params(samples=100))
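Example #15 splits block accesses three ways: shared_blks_hit covers blocks found in PostgreSQL's shared buffers, kc.reads is what pg_stat_kcache saw actually being read at the kernel level (reported in bytes, which is why the PostgreSQL block counts go through mulblock() before being compared), and whatever remains of shared_blks_read is attributed to the OS page cache (sys_hits). A plain-Python restatement of the three ratios (sketch; assumes all arguments are in bytes):

    def hit_ratios(shared_read, shared_hit, disk_read):
        # Returns the (shared_buffers, OS cache, disk) shares of all accesses.
        total = shared_read + shared_hit
        if total == 0:
            return 0, 0, 0
        buffer_hit = shared_hit * 100 / total
        disk_hit = disk_read * 100 / total
        os_hit = max(shared_read - disk_read, 0) * 100 / total
        return buffer_hit, os_hit, disk_hit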
Example #16
    def query(self):
        bs = block_size.c.block_size
        query = powa_getstatdata_sample("db", bindparam("server"))
        query = query.alias()
        c = query.c

        cols = [c.srvid,
                extract("epoch", c.ts).label("ts"),
                (sum(c.calls) / greatest(extract("epoch", c.mesure_interval),
                                         1)).label("calls"),
                (sum(c.runtime) / greatest(sum(c.calls),
                                           1)).label("avg_runtime"),
                (sum(c.runtime) / greatest(extract("epoch", c.mesure_interval),
                                           1)).label("load"),
                total_read(c),
                total_hit(c)
                ]

        if self.has_extension_version(self.path_args[0],
                                      'pg_stat_statements', '1.8'):
            cols.extend([
                (sum(c.plantime) / greatest(extract("epoch", c.mesure_interval),
                                            1)).label("planload"),
                (sum(c.wal_records) / greatest(extract("epoch",
                                                       c.mesure_interval),
                                               1)).label("wal_records"),
                (sum(c.wal_fpi) / greatest(extract("epoch",
                                                   c.mesure_interval),
                                           1)).label("wal_fpi"),
                (sum(c.wal_bytes) / greatest(extract("epoch",
                                                     c.mesure_interval),
                                             1)).label("wal_bytes")
                ])

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            kcache_query = kcache_getstatdata_sample("db")
            kc = inner_cc(kcache_query)
            kcache_query = (
                kcache_query
                .where(
                    (kc.srvid == bindparam("server"))
                    )
                .alias())
            kc = kcache_query.c

            def sum_per_sec(col):
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            total_sys_hit = (total_read(c) - sum(kc.reads) /
                             greatest(extract("epoch", c.mesure_interval), 1.)
                             ).label("total_sys_hit")
            total_disk_read = (sum(kc.reads) /
                               greatest(extract("epoch", c.mesure_interval), 1.)
                               ).label("total_disk_read")
            minflts = sum_per_sec(kc.minflts)
            majflts = sum_per_sec(kc.majflts)
            # nswaps = sum_per_sec(kc.nswaps)
            # msgsnds = sum_per_sec(kc.msgsnds)
            # msgrcvs = sum_per_sec(kc.msgrcvs)
            # nsignals = sum_per_sec(kc.nsignals)
            nvcsws = sum_per_sec(kc.nvcsws)
            nivcsws = sum_per_sec(kc.nivcsws)

            cols.extend([total_sys_hit, total_disk_read, minflts, majflts,
                         # nswaps, msgsnds, msgrcvs, nsignals,
                         nvcsws, nivcsws])
            from_clause = from_clause.join(
                kcache_query,
                and_(kcache_query.c.dbid == c.dbid,
                     kcache_query.c.ts == c.ts))

        return (select(cols)
                .select_from(from_clause)
                .where(c.calls != '0')
                .group_by(c.srvid, c.ts, bs, c.mesure_interval)
                .order_by(c.ts)
                .params(samples=100))
Example #17
    def query(self):
        # Fetch the base query for sample, and filter them on the database
        bs = block_size.c.block_size
        subquery = powa_getstatdata_sample("db", bindparam("server"))
        # Put the where clause inside the subquery
        subquery = subquery.where(column("datname") == bindparam("database"))
        query = subquery.alias()
        c = query.c

        cols = [
            c.srvid,
            to_epoch(c.ts),
            (sum(c.calls) /
             greatest(extract("epoch", c.mesure_interval), 1)).label("calls"),
            (sum(c.runtime) / greatest(sum(c.calls), 1.)).label("avg_runtime"),
            (sum(c.runtime) /
             greatest(extract("epoch", c.mesure_interval), 1)).label("load"),
            total_read(c),
            total_hit(c)
        ]

        from_clause = query
        if self.has_extension(self.path_args[0], "pg_stat_kcache"):
            # Add system metrics from pg_stat_kcache,
            kcache_query = kcache_getstatdata_sample("db")
            kc = inner_cc(kcache_query)
            kcache_query = (kcache_query.where(
                (kc.srvid == bindparam("server"))
                & (kc.datname == bindparam("database"))).alias())
            kc = kcache_query.c

            def sum_per_sec(col):
                ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
                return (sum(col) / ts).label(col.name)

            total_sys_hit = (total_read(c) - sum(kc.reads) /
                             greatest(extract("epoch", c.mesure_interval), 1.)
                             ).label("total_sys_hit")
            total_disk_read = (sum(kc.reads) /
                               greatest(extract("epoch", c.mesure_interval),
                                        1.)).label("total_disk_read")
            minflts = sum_per_sec(kc.minflts)
            majflts = sum_per_sec(kc.majflts)
            # nswaps = sum_per_sec(kc.nswaps)
            # msgsnds = sum_per_sec(kc.msgsnds)
            # msgrcvs = sum_per_sec(kc.msgrcvs)
            # nsignals = sum_per_sec(kc.nsignals)
            nvcsws = sum_per_sec(kc.nvcsws)
            nivcsws = sum_per_sec(kc.nivcsws)

            cols.extend([
                total_sys_hit,
                total_disk_read,
                minflts,
                majflts,
                # nswaps, msgsnds, msgrcvs, nsignals,
                nvcsws,
                nivcsws
            ])
            from_clause = from_clause.join(kcache_query,
                                           kcache_query.c.ts == c.ts)

        return (select(cols)
                .select_from(from_clause)
                .where(c.calls != None)
                .group_by(c.srvid, c.ts, bs, c.mesure_interval)
                .order_by(c.ts)
                .params(samples=100))
Example #18
 def bps(col):
     ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
     return (mulblock(sum(col)) / ts).label(col.name)
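Each query() method returns an un-executed SQLAlchemy selectable whose named bind parameters (server, database, query, plus the samples count set via .params(samples=100)) still have to be supplied; inside powa-web this is done by the request handlers. As a standalone, hedged usage sketch (the connection URL, the metric_group instance and the parameter values are placeholders):

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://powa@localhost/powa")  # placeholder URL

    stmt = metric_group.query()  # hypothetical instance of one of the classes above
    stmt = stmt.params(server=1, database="postgres", query=123456789)

    with engine.connect() as conn:
        for row in conn.execute(stmt):
            print(dict(row))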