def get(self, database, query, qual):
    stmt = qualstat_getstatdata()
    c = inner_cc(stmt)
    stmt = stmt.alias()
    stmt = (stmt.select()
            .where((c.qualid == bindparam("qualid")) &
                   (c.queryid == bindparam("query")))
            .where(stmt.c.count > 0)
            .column((c.queryid == bindparam("query")).label("is_my_query")))
    quals = list(self.execute(
        stmt,
        params={"query": query,
                "from": self.get_argument("from"),
                "to": self.get_argument("to"),
                "qualid": qual}))
    quals = resolve_quals(self.connect(database=database), quals)
    my_qual = None
    other_queries = {}
    for qual in quals:
        if qual['is_my_query']:
            my_qual = qual
        else:
            other_queries[qual['queryid']] = qual['query']
    if my_qual is None:
        self.render("xhr.html", content="nodata")
        return

    self.render("database/query/qualdetail.html",
                qual=my_qual,
                database=database,
                other_queries=other_queries)

def get(self, database, query, qual):
    stmt = qualstat_getstatdata()
    c = inner_cc(stmt)
    stmt = stmt.alias()
    stmt = (stmt.select()
            .where(c.qualid == bindparam("qualid"))
            .where(stmt.c.occurences > 0)
            .column((c.queryid == bindparam("query")).label("is_my_query")))
    quals = list(self.execute(
        stmt,
        params={"query": query,
                "from": self.get_argument("from"),
                "to": self.get_argument("to"),
                "qualid": qual}))
    my_qual = None
    other_queries = {}
    for qual in quals:
        if qual['is_my_query']:
            my_qual = resolve_quals(self.connect(database=database),
                                    [qual])[0]
        else:
            other_queries[qual['queryid']] = qual['query']
    if my_qual is None:
        self.render("xhr.html", content="nodata")
        return

    self.render("database/query/qualdetail.html",
                qual=my_qual,
                database=database,
                other_queries=other_queries)

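# A minimal, standalone sketch of the "is_my_query" pattern used by the
# handlers above: comparing a column to a bound parameter yields a boolean
# SQL expression that can be selected and labeled like any other column, so
# one round-trip separates "my" qual from the other queries sharing it.
# The table and column names here are illustrative, not part of powa-web;
# the legacy select([...]) call style matches the snippets above.
from sqlalchemy import Column, Integer, MetaData, Table, bindparam, select

_metadata = MetaData()
_quals = Table("quals", _metadata, Column("queryid", Integer))

_stmt = select([
    _quals.c.queryid,
    # Rendered as "queryid = :query" and returned per-row as true/false
    (_quals.c.queryid == bindparam("query")).label("is_my_query"),
])
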
def query(self):
    query = qual_constants("most_used",
                           text("""
        datname = :database AND s.queryid = :query
        AND qn.qualid = :qual
        AND coalesce_range && tstzrange(:from, :to)"""),
                           top=10)
    base = qualstat_getstatdata()
    c = inner_cc(base)
    base = base.where(c.queryid == bindparam("query")).alias()
    totals = (base.select()
              .where((c.qualid == bindparam("qual")) &
                     (c.queryid == bindparam("query")))).alias()
    return (query.alias().select()
            .column(totals.c.occurences.label('total_occurences'))
            .correlate(query))

def get(self, database, query):
    if not self.has_extension("pg_qualstats"):
        raise HTTPError(501, "PG qualstats is not installed")
    base_query = qualstat_getstatdata()
    c = inner_cc(base_query)
    base_query.append_from(text("""LATERAL unnest(quals) as qual"""))
    base_query = (base_query
                  .where(c.queryid == query)
                  .having(func.bool_or(column('eval_type') == 'f'))
                  .having(c.execution_count > 1000)
                  .having(c.occurences > 0)
                  .having(c.filter_ratio > 0.5)
                  .params(**{'from': '-infinity', 'to': 'infinity'}))
    optimizable = list(self.execute(base_query, params={'query': query}))
    optimizable = resolve_quals(self.connect(database=database),
                                optimizable, 'quals')
    hypoplan = None
    indexes = {}
    for qual in optimizable:
        indexes[qual.where_clause] = possible_indexes(qual)
    hypo_version = self.has_extension("hypopg", database=database)
    if indexes and hypo_version and hypo_version >= "0.0.3":
        # identify the candidate indexes and create them as
        # hypothetical indexes
        allindexes = [ind for indcollection in indexes.values()
                      for ind in indcollection]
        for ind in allindexes:
            ddl = ind.hypo_ddl
            if ddl is not None:
                ind.name = self.execute(ddl, database=database).scalar()
        # Build the query and fetch the plans
        querystr = get_any_sample_query(self, database, query,
                                        self.get_argument("from"),
                                        self.get_argument("to"))
        try:
            hypoplan = get_hypoplans(self.connect(database=database),
                                     querystr, allindexes)
        except Exception:
            # TODO: offer the possibility to fill in parameters from the UI
            self.flash("We couldn't get plans for this query, presumably "
                       "because some parameters are missing")
    self.render("database/query/indexes.html",
                indexes=indexes,
                hypoplan=hypoplan)

def get(self, database, query):
    if not self.has_extension("pg_qualstats"):
        raise HTTPError(501, "PG qualstats is not installed")
    base_query = qualstat_getstatdata()
    c = inner_cc(base_query)
    base_query.append_from(text("""LATERAL unnest(quals) as qual"""))
    base_query = (base_query
                  .where(c.queryid == query)
                  .having(func.bool_or(column('eval_type') == 'f'))
                  .having(c.execution_count > 1000)
                  .having(c.occurences > 0)
                  .having(c.filter_ratio > 0.5)
                  .params(**{'from': '-infinity', 'to': 'infinity'}))
    optimizable = list(self.execute(base_query, params={'query': query}))
    optimizable = resolve_quals(self.connect(database=database),
                                optimizable, 'quals')
    hypoplan = None
    indexes = {}
    for qual in optimizable:
        indexes[qual.where_clause] = possible_indexes(qual)
    hypo_version = self.has_extension("hypopg", database=database)
    # Only try hypothetical plans if we found candidate indexes and a
    # recent enough hypopg is available
    if indexes and hypo_version and hypo_version >= "0.0.3":
        # identify the candidate indexes and create them as
        # hypothetical indexes
        allindexes = [ind for indcollection in indexes.values()
                      for ind in indcollection]
        for ind in allindexes:
            ddl = ind.hypo_ddl
            if ddl is not None:
                ind.name = self.execute(ddl, database=database).scalar()
        # Build the query and fetch the plans
        querystr = get_any_sample_query(self, database, query,
                                        self.get_argument("from"),
                                        self.get_argument("to"))
        try:
            hypoplan = get_hypoplans(self.connect(database=database),
                                     querystr, allindexes)
        except Exception:
            # TODO: offer the possibility to fill in parameters from the UI
            self.flash("We couldn't get plans for this query, presumably "
                       "because some parameters are missing")
    self.render("database/query/indexes.html",
                indexes=indexes, hypoplan=hypoplan)

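# For reference, the raw hypopg calls that ind.hypo_ddl and get_hypoplans()
# wrap in the handlers above, sketched against a plain psycopg2 connection.
# The table "t", column "val" and DSN are illustrative; only the hypopg
# functions themselves are real.
import psycopg2

conn = psycopg2.connect("dbname=mydb")
cur = conn.cursor()
# hypopg_create_index() registers a session-local hypothetical index: it
# costs no disk space or build time and returns (indexrelid, indexname)
cur.execute("SELECT indexname FROM hypopg_create_index("
            "'CREATE INDEX ON t (val)')")
print("hypothetical index:", cur.fetchone()[0])
# A plain EXPLAIN (without ANALYZE) now considers the hypothetical index
cur.execute("EXPLAIN SELECT * FROM t WHERE val = 42")
for (line,) in cur.fetchall():
    print(line)
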
def get(self, server, database, query, qual):
    try:
        # Check remote access first
        remote_conn = self.connect(server, database=database,
                                   remote_access=True)
    except Exception as e:
        raise HTTPError(501, "Could not connect to remote server: %s"
                             % str(e))
    stmt = qualstat_getstatdata(server)
    c = inner_cc(stmt)
    stmt = stmt.alias()
    stmt = (stmt.select()
            .where(c.qualid == bindparam("qualid"))
            .where(stmt.c.occurences > 0)
            .column((c.queryid == bindparam("query")).label("is_my_query")))
    quals = list(self.execute(
        stmt,
        params={"server": server,
                "query": query,
                "from": self.get_argument("from"),
                "to": self.get_argument("to"),
                "queryids": [query],
                "qualid": qual}))
    my_qual = None
    for qual in quals:
        if qual['is_my_query']:
            my_qual = resolve_quals(remote_conn, [qual])[0]
    if my_qual is None:
        self.render("xhr.html", content="No data")
        return

    self.render("database/query/qualdetail.html",
                qual=my_qual,
                database=database,
                server=server)

def get(self, database, query):
    if not self.has_extension("pg_qualstats"):
        raise HTTPError(501, "PG qualstats is not installed")
    base_query = qualstat_getstatdata()
    c = inner_cc(base_query)
    base_query.append_from(text("""LATERAL unnest(quals) as qual"""))
    base_query = (base_query
                  .where(c.queryid == query)
                  .having(func.bool_or(column('eval_type') == 'f'))
                  .having(c.count > 1000)
                  .having(c.filter_ratio > 0.5)
                  .params(**{'from': '-infinity', 'to': 'infinity'}))
    optimizable = list(self.execute(base_query, params={'query': query}))
    optimizable = resolve_quals(self.connect(database=database),
                                optimizable, 'quals')
    qual_indexes = {}
    for line in optimizable:
        qual_indexes[line['where_clause']] = possible_indexes(
            line['quals'])
    self.render("database/query/indexes.html", indexes=qual_indexes)

def query(self):
    # Fetch the base query for samples, and filter them on the database
    bs = block_size.c.block_size
    subquery = powa_getstatdata_sample("db", bindparam("server"))
    # Put the where clause inside the subquery
    subquery = subquery.where(column("datname") == bindparam("database"))
    query = subquery.alias()
    c = query.c

    cols = [c.srvid,
            to_epoch(c.ts),
            (sum(c.calls) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("calls"),
            (sum(c.runtime) / greatest(sum(c.calls), 1.))
            .label("avg_runtime"),
            (sum(c.runtime) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("load"),
            total_read(c),
            total_hit(c)]

    from_clause = query
    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # Add system metrics from pg_stat_kcache
        kcache_query = kcache_getstatdata_sample("db")
        kc = inner_cc(kcache_query)
        kcache_query = (kcache_query
                        .where((kc.srvid == bindparam("server")) &
                               (kc.datname == bindparam("database")))
                        .alias())
        kc = kcache_query.c

        def sum_per_sec(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (sum(col) / ts).label(col.name)

        total_sys_hit = (total_read(c) - sum(kc.reads) /
                         greatest(extract("epoch", c.mesure_interval), 1.)
                         ).label("total_sys_hit")
        total_disk_read = (sum(kc.reads) /
                           greatest(extract("epoch", c.mesure_interval), 1.)
                           ).label("total_disk_read")
        minflts = sum_per_sec(kc.minflts)
        majflts = sum_per_sec(kc.majflts)
        # nswaps = sum_per_sec(kc.nswaps)
        # msgsnds = sum_per_sec(kc.msgsnds)
        # msgrcvs = sum_per_sec(kc.msgrcvs)
        # nsignals = sum_per_sec(kc.nsignals)
        nvcsws = sum_per_sec(kc.nvcsws)
        nivcsws = sum_per_sec(kc.nivcsws)

        cols.extend([total_sys_hit, total_disk_read, minflts, majflts,
                     # nswaps, msgsnds, msgrcvs, nsignals,
                     nvcsws, nivcsws])
        from_clause = from_clause.join(kcache_query,
                                       kcache_query.c.ts == c.ts)

    return (select(cols)
            .select_from(from_clause)
            # "c.calls is not None" would be evaluated by Python, not SQL;
            # isnot() emits the intended IS NOT NULL clause
            .where(c.calls.isnot(None))
            .group_by(c.srvid, c.ts, bs, c.mesure_interval)
            .order_by(c.ts)
            .params(samples=100))

def query(self): query = powa_getstatdata_sample("query") query = query.where( (column("datname") == bindparam("database")) & (column("queryid") == bindparam("query"))) query = query.alias() c = query.c total_blocks = ((c.shared_blks_read + c.shared_blks_hit) .label("total_blocks")) def bps(col): ts = extract("epoch", greatest(c.mesure_interval, '1 second')) return (mulblock(col) / ts).label(col.name) cols = [to_epoch(c.ts), c.rows, c.calls, case([(total_blocks == 0, 0)], else_=cast(c.shared_blks_hit, Numeric) * 100 / total_blocks).label("hit_ratio"), bps(c.shared_blks_read), bps(c.shared_blks_hit), bps(c.shared_blks_dirtied), bps(c.shared_blks_written), bps(c.local_blks_read), bps(c.local_blks_hit), bps(c.local_blks_dirtied), bps(c.local_blks_written), bps(c.temp_blks_read), bps(c.temp_blks_written), c.blk_read_time, c.blk_write_time, (c.runtime / greatest(c.calls, 1)).label("avg_runtime")] from_clause = query if self.has_extension("pg_stat_kcache"): # Add system metrics from pg_stat_kcache, # and detailed hit ratio. kcache_query = kcache_getstatdata_sample() kc = inner_cc(kcache_query) kcache_query = ( kcache_query .where(kc.queryid == bindparam("query")) .alias()) kc = kcache_query.c sys_hits = (greatest(mulblock(c.shared_blks_read) - kc.reads, 0) .label("kcache_hitblocks")) sys_hitratio = (cast(sys_hits, Numeric) * 100 / mulblock(total_blocks)) disk_hit_ratio = (kc.reads / mulblock(total_blocks)) total_time = greatest(c.runtime, 1); # Rusage can return values > real time due to sampling bias # aligned to kernel ticks. As such, we have to clamp values to 100% total_time_percent = lambda x: least(100, (x * 100) / total_time) cols.extend([ kc.reads, kc.writes, total_time_percent(kc.user_time * 1000).label("user_time"), total_time_percent(kc.system_time * 1000).label("system_time"), greatest(total_time_percent( c.runtime - (kc.user_time + kc.system_time) * 1000), 0).label("other_time"), case([(total_blocks == 0, 0)], else_=disk_hit_ratio).label("disk_hit_ratio"), case([(total_blocks == 0, 0)], else_=sys_hitratio).label("sys_hit_ratio")]) from_clause = from_clause.join( kcache_query, kcache_query.c.ts == c.ts) else: cols.extend([ case([(total_blocks == 0, 0)], else_=cast(c.shared_blks_read, Numeric) * 100 / total_blocks).label("miss_ratio") ]) return (select(cols) .select_from(from_clause) .where(c.calls != None) .order_by(c.ts) .params(samples=100))
def query(self):
    base = qualstat_getstatdata()
    c = inner_cc(base)
    return base.where(c.queryid == bindparam("query"))

def query(self): query = powa_getstatdata_sample("query", bindparam("server")) query = query.where((column("datname") == bindparam("database")) & (column("queryid") == bindparam("query"))) query = query.alias() c = query.c total_blocks = ((sum(c.shared_blks_read) + sum(c.shared_blks_hit)).label("total_blocks")) def get_ts(): return extract("epoch", greatest(c.mesure_interval, '1 second')) def sumps(col): return (sum(col) / get_ts()).label(col.name) def bps(col): return (mulblock(sum(col)) / get_ts()).label(col.name) cols = [ to_epoch(c.ts), sumps(c.rows), sumps(c.calls), case([(total_blocks == 0, 0)], else_=cast(sum(c.shared_blks_hit), Numeric) * 100 / total_blocks).label("hit_ratio"), bps(c.shared_blks_read), bps(c.shared_blks_hit), bps(c.shared_blks_dirtied), bps(c.shared_blks_written), bps(c.local_blks_read), bps(c.local_blks_hit), bps(c.local_blks_dirtied), bps(c.local_blks_written), bps(c.temp_blks_read), bps(c.temp_blks_written), sumps(c.blk_read_time), sumps(c.blk_write_time), (sum(c.runtime) / greatest(sum(c.calls), 1)).label("avg_runtime") ] from_clause = query if self.has_extension(self.path_args[0], "pg_stat_kcache"): # Add system metrics from pg_stat_kcache, # and detailed hit ratio. kcache_query = kcache_getstatdata_sample("query") kc = inner_cc(kcache_query) kcache_query = (kcache_query.where( (kc.srvid == bindparam("server")) & (kc.datname == bindparam("database")) & (kc.queryid == bindparam("query"))).alias()) kc = kcache_query.c sys_hits = (greatest( mulblock(sum(c.shared_blks_read)) - sum(kc.reads), 0).label("kcache_hitblocks")) sys_hitratio = (cast(sys_hits, Numeric) * 100 / mulblock(total_blocks)) disk_hit_ratio = (sum(kc.reads) * 100 / mulblock(total_blocks)) total_time = greatest(sum(c.runtime), 1) # Rusage can return values > real time due to sampling bias # aligned to kernel ticks. As such, we have to clamp values to 100% total_time_percent = lambda x: least(100, (x * 100) / total_time) def per_sec(col): ts = extract("epoch", greatest(c.mesure_interval, '1 second')) return (sum(col) / ts).label(col.name) cols.extend([ per_sec(kc.reads), per_sec(kc.writes), per_sec(kc.minflts), per_sec(kc.majflts), # per_sec(kc.nswaps), # per_sec(kc.msgsnds), # per_sec(kc.msgrcvs), # per_sec(kc.nsignals), per_sec(kc.nvcsws), per_sec(kc.nivcsws), total_time_percent(sum(kc.user_time) * 1000).label("user_time"), total_time_percent(sum(kc.system_time) * 1000).label("system_time"), greatest( total_time_percent( sum(c.runtime) - ((sum(kc.user_time) + sum(kc.system_time)) * 1000)), 0).label("other_time"), case([(total_blocks == 0, 0)], else_=disk_hit_ratio).label("disk_hit_ratio"), case([(total_blocks == 0, 0)], else_=sys_hitratio).label("sys_hit_ratio") ]) from_clause = from_clause.join( kcache_query, and_(kcache_query.c.ts == c.ts, kcache_query.c.queryid == c.queryid, kcache_query.c.userid == c.userid, kcache_query.c.dbid == c.dbid)) else: cols.extend([ case([(total_blocks == 0, 0)], else_=cast(sum(c.shared_blks_read), Numeric) * 100 / total_blocks).label("miss_ratio") ]) return (select(cols).select_from(from_clause).where( c.calls != '0').group_by(c.ts, block_size.c.block_size, c.mesure_interval).order_by( c.ts).params(samples=100))
def query(self):
    bs = block_size.c.block_size
    query = powa_getstatdata_sample("db", bindparam("server"))
    query = query.alias()
    c = query.c

    cols = [c.srvid,
            extract("epoch", c.ts).label("ts"),
            (sum(c.calls) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("calls"),
            (sum(c.runtime) / greatest(sum(c.calls), 1))
            .label("avg_runtime"),
            (sum(c.runtime) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("load"),
            total_read(c),
            total_hit(c)]

    if self.has_extension_version(self.path_args[0],
                                  'pg_stat_statements', '1.8'):
        cols.extend([
            (sum(c.plantime) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("planload"),
            (sum(c.wal_records) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("wal_records"),
            (sum(c.wal_fpi) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("wal_fpi"),
            (sum(c.wal_bytes) /
             greatest(extract("epoch", c.mesure_interval), 1))
            .label("wal_bytes")])

    from_clause = query
    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # Add system metrics from pg_stat_kcache
        kcache_query = kcache_getstatdata_sample("db")
        kc = inner_cc(kcache_query)
        kcache_query = (kcache_query
                        .where(kc.srvid == bindparam("server"))
                        .alias())
        kc = kcache_query.c

        def sum_per_sec(col):
            ts = extract("epoch", greatest(c.mesure_interval, '1 second'))
            return (sum(col) / ts).label(col.name)

        total_sys_hit = (total_read(c) - sum(kc.reads) /
                         greatest(extract("epoch", c.mesure_interval), 1.)
                         ).label("total_sys_hit")
        total_disk_read = (sum(kc.reads) /
                           greatest(extract("epoch", c.mesure_interval), 1.)
                           ).label("total_disk_read")
        minflts = sum_per_sec(kc.minflts)
        majflts = sum_per_sec(kc.majflts)
        # nswaps = sum_per_sec(kc.nswaps)
        # msgsnds = sum_per_sec(kc.msgsnds)
        # msgrcvs = sum_per_sec(kc.msgrcvs)
        # nsignals = sum_per_sec(kc.nsignals)
        nvcsws = sum_per_sec(kc.nvcsws)
        nivcsws = sum_per_sec(kc.nivcsws)

        cols.extend([total_sys_hit, total_disk_read, minflts, majflts,
                     # nswaps, msgsnds, msgrcvs, nsignals,
                     nvcsws, nivcsws])
        from_clause = from_clause.join(
            kcache_query,
            and_(kcache_query.c.dbid == c.dbid,
                 kcache_query.c.ts == c.ts))

    return (select(cols)
            .select_from(from_clause)
            .where(c.calls != '0')
            .group_by(c.srvid, c.ts, bs, c.mesure_interval)
            .order_by(c.ts)
            .params(samples=100))
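
# A minimal sketch of the sum_per_sec() pattern shared by the samplers above:
# counters are cumulative per sample, so dividing the summed deltas by the
# sample interval (in seconds) yields a rate, with greatest(..., '1 second')
# guarding against division by zero on degenerate intervals. Table and column
# names are illustrative; the legacy select([...]) style matches the
# snippets above.
from sqlalchemy import (BigInteger, Column, DateTime, Interval, MetaData,
                        Table, extract, func, select)

_metadata = MetaData()
_samples = Table("samples", _metadata,
                 Column("ts", DateTime),
                 Column("calls", BigInteger),
                 Column("mesure_interval", Interval))

# Interval length in seconds, never less than one second
_seconds = extract("epoch",
                   func.greatest(_samples.c.mesure_interval, '1 second'))
# Summed counter divided by elapsed seconds gives calls per second
_calls_per_sec = (func.sum(_samples.c.calls) / _seconds).label("calls")
_stmt = (select([_samples.c.ts, _calls_per_sec])
         .group_by(_samples.c.ts, _samples.c.mesure_interval))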