def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard(
        "Configuration overview",
        # [[ServerDetails],
        #  [Grid("Extensions",
        [[Grid("Extensions",
               columns=[{
                   "name": "extname",
                   "label": "Extension",
               }],
               metrics=PgExtensionsMetricGroup.all())],
         [Grid("PostgreSQL settings",
               columns=[{
                   "name": "setting_name",
                   "label": "Setting",
               }],
               metrics=PgSettingsMetricGroup.all())]])
    return self._dashboard
class ConfigOverview(DashboardPage):
    """
    Dashboard page for the configuration overview.
    """
    base_url = r"/config/"
    datasources = [PgSettingsMetricGroup, PgExtensionsMetricGroup]
    dashboard = Dashboard(
        "Configuration overview",
        [[Grid("Extensions",
               columns=[{
                   "name": "extname",
                   "label": "Extensions",
                   "url_attr": "url"
               }],
               metrics=PgExtensionsMetricGroup.all()),
          Grid("PostgreSQL settings",
               columns=[{
                   "name": "setting_name",
                   "label": "Setting",
                   "url_attr": "url"
               }],
               metrics=PgSettingsMetricGroup.all())]])

    @classmethod
    def get_menutitle(cls, handler, params):
        return "Configuration overview"
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    dashes = [[Graph("Query runtime per second (all databases)",
                     metrics=[GlobalDatabasesMetricGroup.avg_runtime,
                              GlobalDatabasesMetricGroup.load]),
               Graph("Block access in Bps",
                     metrics=[GlobalDatabasesMetricGroup.total_blks_hit,
                              GlobalDatabasesMetricGroup.total_blks_read],
                     color_scheme=['#73c03a', '#cb513a'])],
              [Grid("Details for all databases",
                    columns=[{
                        "name": "datname",
                        "label": "Database",
                        "url_attr": "url"
                    }],
                    metrics=ByDatabaseMetricGroup.all())]]

    if self.has_extension("pg_wait_sampling"):
        dashes.append([Grid("Wait events for all databases",
                            columns=[{
                                "name": "datname",
                                "label": "Database",
                                "url_attr": "url"
                            }, {
                                "name": "event_type",
                                "label": "Event Type",
                            }, {
                                "name": "event",
                                "label": "Event",
                            }],
                            metrics=ByDatabaseWaitSamplingMetricGroup.all())])

    self._dashboard = Dashboard("All databases", dashes)
    return self._dashboard
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard("Database overview for %(database)s")
    self._dashboard.widgets.extend(
        [[Graph("Calls (On database %(database)s)",
                metrics=[DatabaseOverviewMetricGroup.avg_runtime,
                         DatabaseOverviewMetricGroup.load]),
          Graph("Blocks (On database %(database)s)",
                metrics=[DatabaseOverviewMetricGroup.total_blks_read,
                         DatabaseOverviewMetricGroup.total_blks_hit])],
         [Grid("Details for all queries",
               toprow=[{
                   'merge': True
               }, {
                   'name': 'Execution',
                   'merge': False,
                   'colspan': 3
               }, {
                   'name': 'I/O Time',
                   'merge': False,
                   'colspan': 2
               }, {
                   'name': 'Blocks',
                   'merge': False,
                   'colspan': 4,
               }, {
                   'name': 'Temp blocks',
                   'merge': False,
                   'colspan': 2
               }],
               columns=[{
                   "name": "query",
                   "label": "Query",
                   "type": "query",
                   "url_attr": "url",
                   "max_length": 70
               }],
               metrics=ByQueryMetricGroup.all())]])
    self._dashboard.widgets.extend([[Wizard("Index suggestions")]])
    return self._dashboard
class DatabaseOverview(DashboardPage):
    """DatabaseOverview Dashboard."""
    base_url = r"/database/(\w+)/overview"
    datasources = [DatabaseOverviewMetricGroup, ByQueryMetricGroup]
    params = ["database"]
    parent = Overview
    dashboard = Dashboard(
        "Database overview for %(database)s",
        [[Graph("Calls (On database %(database)s)",
                metrics=[DatabaseOverviewMetricGroup.avg_runtime]),
          Graph("Blocks (On database %(database)s)",
                metrics=[DatabaseOverviewMetricGroup.total_blks_read,
                         DatabaseOverviewMetricGroup.total_blks_hit])],
         [Grid("Details for all queries",
               toprow=[{
                   'merge': True
               }, {
                   'name': 'Execution',
                   'merge': False,
                   'colspan': 3
               }, {
                   'name': 'I/O Time',
                   'merge': False,
                   'colspan': 2
               }, {
                   'name': 'Blocks',
                   'merge': False,
                   'colspan': 4,
               }, {
                   'name': 'Temp blocks',
                   'merge': False,
                   'colspan': 2
               }],
               columns=[{
                   "name": "query",
                   "label": "Query",
                   "type": "query",
                   "url_attr": "url",
                   "max_length": 70
               }],
               metrics=ByQueryMetricGroup.all())]])

    @classmethod
    def get_menutitle(cls, handler, params):
        return params.get("database")
class ConfigOverview(DashboardPage):
    """
    Dashboard page for the configuration overview.
    """
    base_url = r"/config/"
    datasources = [PgSettingsMetricGroup, PgExtensionsMetricGroup]
    title = 'Configuration'
    dashboard = Dashboard(
        "Configuration overview",
        [[Grid("Extensions",
               columns=[{
                   "name": "extname",
                   "label": "Extension",
               }],
               metrics=PgExtensionsMetricGroup.all()),
          Grid("PostgreSQL settings",
               columns=[{
                   "name": "setting_name",
                   "label": "Setting",
               }],
               metrics=PgSettingsMetricGroup.all())]])
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    dashes = [[Grid("All servers",
                    columns=[{
                        "name": "alias",
                        "label": "Instance",
                        "url_attr": "url",
                        "direction": "descending"
                    }],
                    metrics=OverviewMetricGroup.all())]]

    self._dashboard = Dashboard("All servers", dashes)
    return self._dashboard
class Overview(DashboardPage):
    """
    Overview dashboard page.
    """
    base_url = r"/overview/"
    datasources = [GlobalDatabasesMetricGroup, ByDatabaseMetricGroup]
    dashboard = Dashboard(
        "All databases",
        [[Graph("Query runtime per second (all databases)",
                metrics=[GlobalDatabasesMetricGroup.avg_runtime,
                         GlobalDatabasesMetricGroup.load]),
          Graph("Block access in Bps",
                metrics=[GlobalDatabasesMetricGroup.total_blks_hit,
                         GlobalDatabasesMetricGroup.total_blks_read],
                color_scheme=['#73c03a', '#cb513a'])],
         [Grid("Details for all databases",
               columns=[{
                   "name": "datname",
                   "label": "Database",
                   "url_attr": "url"
               }],
               metrics=ByDatabaseMetricGroup.all())]])

    @classmethod
    def get_menutitle(cls, handler, params):
        return "All databases"

    @classmethod
    def get_childmenu(cls, handler, params):
        from powa.database import DatabaseOverview
        children = []
        for d in list(handler.databases):
            new_params = params.copy()
            new_params["database"] = d
            children.append(DatabaseOverview.get_selfmenu(handler, new_params))
        return children
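# --- Illustrative sketch (not part of powa-web): the two declaration styles
# seen in the snippets above.  Overview declares a static, class-level
# `dashboard` attribute that is built once at import time, while the
# `dashboard()` methods elsewhere build it lazily on first access so that
# optional widgets can depend on the monitored server (installed extensions,
# version, ...).  The classes and plain-dict "dashboards" below are
# hypothetical stand-ins, not the real Dashboard/DashboardPage API.

class StaticStylePage(object):
    # Built once at import time; every request sees the same layout.
    dashboard = {"title": "All databases",
                 "widgets": [["Details for all databases"]]}


class LazyStylePage(object):
    def has_extension(self, extname):
        # Hypothetical capability probe standing in for the real handler method.
        return extname == "pg_wait_sampling"

    @property
    def dashboard(self):
        # Same memoization idiom as the dashboard() methods above: cache the
        # built dashboard on the instance and reuse it on later accesses.
        if getattr(self, '_dashboard', None) is not None:
            return self._dashboard
        widgets = [["Details for all databases"]]
        if self.has_extension("pg_wait_sampling"):
            widgets.append(["Wait events for all databases"])
        self._dashboard = {"title": "All databases", "widgets": widgets}
        return self._dashboard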
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard(
        "Server list",
        [[AllCollectorsDetail],
         [Grid("Servers",
               columns=[{
                   "name": "server_alias",
                   "label": "Server",
                   "url_attr": "url",
                   "direction": "ascending"
               }],
               metrics=PowaServersMetricGroup.all())],
         [ServersErrors]])
    return self._dashboard
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard(
        "Qual %(qual)s",
        [[QualDetail],
         [Grid("Other queries",
               metrics=OtherQueriesMetricGroup.all(),
               columns=[])],
         [Graph("Most executed values",
                metrics=[QualConstantsMetricGroup.occurences],
                x_label_attr="constants",
                renderer="pie")]])
    return self._dashboard
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard("Database overview for %(database)s")

    block_graph = Graph("Blocks (On database %(database)s)",
                        metrics=[DatabaseOverviewMetricGroup.total_blks_hit],
                        color_scheme=None)

    db_graphs = [Graph("Calls (On database %(database)s)",
                       metrics=[DatabaseOverviewMetricGroup.avg_runtime,
                                DatabaseOverviewMetricGroup.load,
                                DatabaseOverviewMetricGroup.calls]),
                 block_graph]

    graphs_dash = [Dashboard("General Overview", [db_graphs])]
    graphs = [TabContainer("All databases", graphs_dash)]

    # Add powa_stat_all_relations graphs
    all_rel_graphs = [Graph("Access pattern",
                            metrics=[DatabaseAllRelMetricGroup.seq_scan,
                                     DatabaseAllRelMetricGroup.idx_scan,
                                     DatabaseAllRelMetricGroup.idx_ratio]),
                      Graph("DML activity",
                            metrics=[DatabaseAllRelMetricGroup.n_tup_del,
                                     DatabaseAllRelMetricGroup.n_tup_hot_upd,
                                     DatabaseAllRelMetricGroup.n_tup_upd,
                                     DatabaseAllRelMetricGroup.n_tup_ins]),
                      Graph("Vacuum activity",
                            metrics=[DatabaseAllRelMetricGroup.autoanalyze_count,
                                     DatabaseAllRelMetricGroup.analyze_count,
                                     DatabaseAllRelMetricGroup.autovacuum_count,
                                     DatabaseAllRelMetricGroup.vacuum_count])]
    graphs_dash.append(Dashboard("Database Objects", [all_rel_graphs]))

    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        block_graph.metrics.insert(0, DatabaseOverviewMetricGroup.total_sys_hit)
        block_graph.metrics.insert(0, DatabaseOverviewMetricGroup.total_disk_read)
        block_graph.color_scheme = ['#cb513a', '#65b9ac', '#73c03a']

        sys_graphs = [Graph(
            "System resources (events per sec)",
            url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_stat_kcache.html",
            metrics=[DatabaseOverviewMetricGroup.majflts,
                     DatabaseOverviewMetricGroup.minflts,
                     # DatabaseOverviewMetricGroup.nswaps,
                     # DatabaseOverviewMetricGroup.msgsnds,
                     # DatabaseOverviewMetricGroup.msgrcvs,
                     # DatabaseOverviewMetricGroup.nsignals,
                     DatabaseOverviewMetricGroup.nvcsws,
                     DatabaseOverviewMetricGroup.nivcsws])]
        graphs_dash.append(Dashboard("System resources", [sys_graphs]))
    else:
        block_graph.metrics.insert(0, DatabaseOverviewMetricGroup.total_blks_read)
        block_graph.color_scheme = ['#cb513a', '#73c03a']

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        metrics = None
        if self.get_pg_version_num(self.path_args[0]) < 100000:
            metrics = [DatabaseWaitOverviewMetricGroup.count_lwlocknamed,
                       DatabaseWaitOverviewMetricGroup.count_lwlocktranche,
                       DatabaseWaitOverviewMetricGroup.count_lock,
                       DatabaseWaitOverviewMetricGroup.count_bufferpin]
        else:
            metrics = [DatabaseWaitOverviewMetricGroup.count_lwlock,
                       DatabaseWaitOverviewMetricGroup.count_lock,
                       DatabaseWaitOverviewMetricGroup.count_bufferpin,
                       DatabaseWaitOverviewMetricGroup.count_activity,
                       DatabaseWaitOverviewMetricGroup.count_client,
                       DatabaseWaitOverviewMetricGroup.count_extension,
                       DatabaseWaitOverviewMetricGroup.count_ipc,
                       DatabaseWaitOverviewMetricGroup.count_timeout,
                       DatabaseWaitOverviewMetricGroup.count_io]

        graphs_dash.append(Dashboard(
            "Wait Events",
            [[Graph(
                "Wait Events (per second)",
                url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_wait_sampling.html",
                metrics=metrics)]]))

    self._dashboard.widgets.extend(
        [graphs,
         [Grid("Details for all queries",
               toprow=[{
                   'merge': True
               }, {
                   'name': 'Execution',
                   'merge': False,
                   'colspan': 3
               }, {
                   'name': 'I/O Time',
                   'merge': False,
                   'colspan': 2
               }, {
                   'name': 'Blocks',
                   'merge': False,
                   'colspan': 4,
               }, {
                   'name': 'Temp blocks',
                   'merge': False,
                   'colspan': 2
               }],
               columns=[{
                   "name": "query",
                   "label": "Query",
                   "type": "query",
                   "url_attr": "url",
                   "max_length": 70
               }],
               metrics=ByQueryMetricGroup.all())]])

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        self._dashboard.widgets.extend([[Grid(
            "Wait events for all queries",
            url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_wait_sampling.html",
            columns=[{
                "name": "query",
                "label": "Query",
                "type": "query",
                "url_attr": "url",
                "max_length": 70
            }, {
                "name": "event_type",
                "label": "Event Type",
            }, {
                "name": "event",
                "label": "Event",
            }],
            metrics=ByQueryWaitSamplingMetricGroup.all())]])

    self._dashboard.widgets.extend([[Wizard("Index suggestions")]])
    return self._dashboard
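# --- Illustrative note (assumption: standard PostgreSQL version numbering,
# not a powa-web helper): the `< 100000` comparisons above separate
# pre-PostgreSQL-10 servers, whose wait events only expose the
# LWLockNamed/LWLockTranche/Lock/BufferPin classes, from newer servers with
# the full set of wait event types.  server_version_num packs the version as
# an integer, so every release before PostgreSQL 10 is strictly below 100000.

def is_pre_pg10(version_num):
    # 9.6.5 -> 90605, 10.4 -> 100004, 15.2 -> 150002
    return version_num is not None and version_num < 100000


assert is_pre_pg10(90605)        # PostgreSQL 9.6.5
assert not is_pre_pg10(100004)   # PostgreSQL 10.4
assert not is_pre_pg10(150002)   # PostgreSQL 15.2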
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    hit_ratio_graph = Graph("Hit ratio",
                            metrics=[QueryOverviewMetricGroup.hit_ratio],
                            renderer="bar",
                            stack=True,
                            color_scheme=['#73c03a', '#65b9ac', '#cb513a'])

    dashes = []
    dashes.append(Dashboard(
        "Query detail",
        [[Graph("General",
                metrics=[QueryOverviewMetricGroup.avg_runtime,
                         QueryOverviewMetricGroup.rows,
                         QueryOverviewMetricGroup.calls])]]))
    dashes.append(Dashboard(
        "PG Cache",
        [[Graph("Shared block (in Bps)",
                metrics=[QueryOverviewMetricGroup.shared_blks_read,
                         QueryOverviewMetricGroup.shared_blks_hit,
                         QueryOverviewMetricGroup.shared_blks_dirtied,
                         QueryOverviewMetricGroup.shared_blks_written]),
          Graph("Local block (in Bps)",
                metrics=[QueryOverviewMetricGroup.local_blks_read,
                         QueryOverviewMetricGroup.local_blks_hit,
                         QueryOverviewMetricGroup.local_blks_dirtied,
                         QueryOverviewMetricGroup.local_blks_written]),
          Graph("Temp block (in Bps)",
                metrics=[QueryOverviewMetricGroup.temp_blks_read,
                         QueryOverviewMetricGroup.temp_blks_written])]]))

    iodash = Dashboard(
        "IO",
        [[hit_ratio_graph,
          Graph("Read / Write time",
                metrics=[QueryOverviewMetricGroup.blk_read_time,
                         QueryOverviewMetricGroup.blk_write_time])]])
    dashes.append(iodash)

    if self.has_extension("pg_stat_kcache"):
        iodash.widgets.extend([[
            Graph("Physical block (in Bps)",
                  metrics=[QueryOverviewMetricGroup.reads,
                           QueryOverviewMetricGroup.writes]),
            Graph("CPU Time repartition",
                  metrics=[QueryOverviewMetricGroup.user_time,
                           QueryOverviewMetricGroup.system_time,
                           QueryOverviewMetricGroup.other_time],
                  renderer="bar",
                  stack=True,
                  color_scheme=['#73c03a', '#cb513a', '#65b9ac'])]])
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.sys_hit_ratio)
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.disk_hit_ratio)
    else:
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.miss_ratio)

    if self.has_extension("pg_qualstats"):
        dashes.append(Dashboard(
            "Predicates",
            [[Grid("Predicates used by this query",
                   columns=[{
                       "name": "where_clause",
                       "label": "Predicate",
                       "type": "query",
                       "max_length": 60,
                       "url_attr": "url"
                   }],
                   metrics=QualList.all())],
             [QueryIndexes],
             [QueryExplains]]))

    self._dashboard = Dashboard(
        "Query %(query)s on database %(database)s",
        [[QueryDetail],
         [TabContainer("Query %(query)s on database %(database)s", dashes)]])
    return self._dashboard
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    hit_ratio_graph = Graph("Hit ratio",
                            metrics=[QueryOverviewMetricGroup.hit_ratio],
                            renderer="bar",
                            stack=True,
                            color_scheme=['#73c03a', '#65b9ac', '#cb513a'])

    dashes = []
    dashes.append(Dashboard(
        "Query detail",
        [[Graph("General",
                metrics=[QueryOverviewMetricGroup.avg_runtime,
                         QueryOverviewMetricGroup.rows,
                         QueryOverviewMetricGroup.calls])]]))
    dashes.append(Dashboard(
        "PG Cache",
        [[Graph("Shared block (in Bps)",
                metrics=[QueryOverviewMetricGroup.shared_blks_read,
                         QueryOverviewMetricGroup.shared_blks_hit,
                         QueryOverviewMetricGroup.shared_blks_dirtied,
                         QueryOverviewMetricGroup.shared_blks_written]),
          Graph("Local block (in Bps)",
                metrics=[QueryOverviewMetricGroup.local_blks_read,
                         QueryOverviewMetricGroup.local_blks_hit,
                         QueryOverviewMetricGroup.local_blks_dirtied,
                         QueryOverviewMetricGroup.local_blks_written]),
          Graph("Temp block (in Bps)",
                metrics=[QueryOverviewMetricGroup.temp_blks_read,
                         QueryOverviewMetricGroup.temp_blks_written])]]))

    iodash = Dashboard(
        "IO",
        [[hit_ratio_graph,
          Graph("Read / Write time",
                url=self.docs_stats_url + "pg_stat_kcache.html",
                metrics=[QueryOverviewMetricGroup.blk_read_time,
                         QueryOverviewMetricGroup.blk_write_time])]])
    dashes.append(iodash)

    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        iodash.widgets.extend([[
            Graph("Physical block (in Bps)",
                  url=self.docs_stats_url + "pg_stat_kcache.html",
                  metrics=[QueryOverviewMetricGroup.reads,
                           QueryOverviewMetricGroup.writes]),
            Graph("CPU Time repartition",
                  url=self.docs_stats_url + "pg_stat_kcache.html",
                  metrics=[QueryOverviewMetricGroup.user_time,
                           QueryOverviewMetricGroup.system_time,
                           QueryOverviewMetricGroup.other_time],
                  renderer="bar",
                  stack=True,
                  color_scheme=['#73c03a', '#cb513a', '#65b9ac'])]])
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.sys_hit_ratio)
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.disk_hit_ratio)

        sys_graphs = [Graph("System resources (events per sec)",
                            url=self.docs_stats_url + "pg_stat_kcache.html",
                            metrics=[QueryOverviewMetricGroup.majflts,
                                     QueryOverviewMetricGroup.minflts,
                                     # QueryOverviewMetricGroup.nswaps,
                                     # QueryOverviewMetricGroup.msgsnds,
                                     # QueryOverviewMetricGroup.msgrcvs,
                                     # QueryOverviewMetricGroup.nsignals,
                                     QueryOverviewMetricGroup.nvcsws,
                                     QueryOverviewMetricGroup.nivcsws])]
        dashes.append(Dashboard("System resources", [sys_graphs]))
    else:
        hit_ratio_graph.metrics.append(QueryOverviewMetricGroup.miss_ratio)

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        # Get the metrics depending on the pg server version
        metrics = None
        if self.get_pg_version_num(self.path_args[0]) < 100000:
            metrics = [WaitsQueryOverviewMetricGroup.count_lwlocknamed,
                       WaitsQueryOverviewMetricGroup.count_lwlocktranche,
                       WaitsQueryOverviewMetricGroup.count_lock,
                       WaitsQueryOverviewMetricGroup.count_bufferpin]
        else:
            metrics = [WaitsQueryOverviewMetricGroup.count_lwlock,
                       WaitsQueryOverviewMetricGroup.count_lock,
                       WaitsQueryOverviewMetricGroup.count_bufferpin,
                       WaitsQueryOverviewMetricGroup.count_activity,
                       WaitsQueryOverviewMetricGroup.count_client,
                       WaitsQueryOverviewMetricGroup.count_extension,
                       WaitsQueryOverviewMetricGroup.count_ipc,
                       WaitsQueryOverviewMetricGroup.count_timeout,
                       WaitsQueryOverviewMetricGroup.count_io]

        dashes.append(Dashboard(
            "Wait Events",
            [[Graph("Wait Events (per second)",
                    url=self.docs_stats_url + "pg_wait_sampling.html",
                    metrics=metrics),
              Grid("Wait events summary",
                   url=self.docs_stats_url + "pg_wait_sampling.html",
                   columns=[{
                       "name": "event_type",
                       "label": "Event Type",
                   }, {
                       "name": "event",
                       "label": "Event",
                   }],
                   metrics=WaitSamplingList.all())]]))

    if self.has_extension(self.path_args[0], "pg_qualstats"):
        dashes.append(Dashboard(
            "Predicates",
            [[Grid("Predicates used by this query",
                   columns=[{
                       "name": "where_clause",
                       "label": "Predicate",
                       "type": "query",
                       "max_length": 60,
                       "url_attr": "url"
                   }],
                   metrics=QualList.all())],
             [QueryIndexes],
             [QueryExplains]]))

    self._dashboard = Dashboard(
        "Query %(query)s on database %(database)s",
        [[QueryDetail],
         [TabContainer("Query %(query)s on database %(database)s", dashes)]])
    return self._dashboard
def dashboard(self):
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    block_graph = Graph("Block access in Bps",
                        metrics=[GlobalDatabasesMetricGroup.total_blks_hit],
                        color_scheme=None)

    all_db_graphs = [Graph("Query runtime per second (all databases)",
                           metrics=[GlobalDatabasesMetricGroup.avg_runtime,
                                    GlobalDatabasesMetricGroup.load,
                                    GlobalDatabasesMetricGroup.calls]),
                     block_graph]

    graphs_dash = [Dashboard("General Overview", [all_db_graphs])]
    graphs = [TabContainer("All databases", graphs_dash)]

    # Add pg_stat_bgwriter graphs
    bgw_graphs = [[Graph("Checkpointer scheduling",
                         metrics=[GlobalBgwriterMetricGroup.checkpoints_timed,
                                  GlobalBgwriterMetricGroup.checkpoints_req]),
                   Graph("Checkpointer activity",
                         metrics=[GlobalBgwriterMetricGroup.checkpoint_write_time,
                                  GlobalBgwriterMetricGroup.checkpoint_sync_time,
                                  GlobalBgwriterMetricGroup.buffers_checkpoint,
                                  GlobalBgwriterMetricGroup.buffers_alloc])],
                  [Graph("Background writer",
                         metrics=[GlobalBgwriterMetricGroup.buffers_clean,
                                  GlobalBgwriterMetricGroup.maxwritten_clean]),
                   Graph("Backends",
                         metrics=[GlobalBgwriterMetricGroup.buffers_backend,
                                  GlobalBgwriterMetricGroup.buffers_backend_fsync])]]
    graphs_dash.append(Dashboard("Background Writer", bgw_graphs))

    # Add powa_stat_all_relations graphs
    all_rel_graphs = [Graph("Access pattern",
                            metrics=[GlobalAllRelMetricGroup.seq_scan,
                                     GlobalAllRelMetricGroup.idx_scan,
                                     GlobalAllRelMetricGroup.idx_ratio]),
                      Graph("DML activity",
                            metrics=[GlobalAllRelMetricGroup.n_tup_del,
                                     GlobalAllRelMetricGroup.n_tup_hot_upd,
                                     GlobalAllRelMetricGroup.n_tup_upd,
                                     GlobalAllRelMetricGroup.n_tup_ins]),
                      Graph("Vacuum activity",
                            metrics=[GlobalAllRelMetricGroup.autoanalyze_count,
                                     GlobalAllRelMetricGroup.analyze_count,
                                     GlobalAllRelMetricGroup.autovacuum_count,
                                     GlobalAllRelMetricGroup.vacuum_count])]
    graphs_dash.append(Dashboard("Database Objects", [all_rel_graphs]))

    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        block_graph.metrics.insert(0, GlobalDatabasesMetricGroup.total_sys_hit)
        block_graph.metrics.insert(0, GlobalDatabasesMetricGroup.total_disk_read)
        block_graph.color_scheme = ['#cb513a', '#65b9ac', '#73c03a']

        sys_graphs = [Graph("System resources (events per sec)",
                            url=self.docs_stats_url + "pg_stat_kcache.html",
                            metrics=[GlobalDatabasesMetricGroup.majflts,
                                     GlobalDatabasesMetricGroup.minflts,
                                     # GlobalDatabasesMetricGroup.nswaps,
                                     # GlobalDatabasesMetricGroup.msgsnds,
                                     # GlobalDatabasesMetricGroup.msgrcvs,
                                     # GlobalDatabasesMetricGroup.nsignals,
                                     GlobalDatabasesMetricGroup.nvcsws,
                                     GlobalDatabasesMetricGroup.nivcsws])]
        graphs_dash.append(Dashboard("System resources", [sys_graphs]))
    else:
        block_graph.metrics.insert(0, GlobalDatabasesMetricGroup.total_blks_read)
        block_graph.color_scheme = ['#cb513a', '#73c03a']

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        metrics = None
        pg_version_num = self.get_pg_version_num(self.path_args[0])
        # if we can't connect to the remote server, assume pg10 or above
        if pg_version_num is not None and pg_version_num < 100000:
            metrics = [GlobalWaitsMetricGroup.count_lwlocknamed,
                       GlobalWaitsMetricGroup.count_lwlocktranche,
                       GlobalWaitsMetricGroup.count_lock,
                       GlobalWaitsMetricGroup.count_bufferpin]
        else:
            metrics = [GlobalWaitsMetricGroup.count_lwlock,
                       GlobalWaitsMetricGroup.count_lock,
                       GlobalWaitsMetricGroup.count_bufferpin,
                       GlobalWaitsMetricGroup.count_activity,
                       GlobalWaitsMetricGroup.count_client,
                       GlobalWaitsMetricGroup.count_extension,
                       GlobalWaitsMetricGroup.count_ipc,
                       GlobalWaitsMetricGroup.count_timeout,
                       GlobalWaitsMetricGroup.count_io]

        graphs_dash.append(Dashboard(
            "Wait Events",
            [[Graph("Wait Events (per second)",
                    url=self.docs_stats_url + "pg_wait_sampling.html",
                    metrics=metrics)]]))

    dashes = [graphs,
              [Grid("Details for all databases",
                    columns=[{
                        "name": "datname",
                        "label": "Database",
                        "url_attr": "url"
                    }],
                    metrics=ByDatabaseMetricGroup.all())]]

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        dashes.append([Grid("Wait events for all databases",
                            url=self.docs_stats_url + "pg_wait_sampling.html",
                            columns=[{
                                "name": "datname",
                                "label": "Database",
                                "url_attr": "url"
                            }, {
                                "name": "event_type",
                                "label": "Event Type",
                            }, {
                                "name": "event",
                                "label": "Event",
                            }],
                            metrics=ByDatabaseWaitSamplingMetricGroup.all())])

    self._dashboard = Dashboard("All databases", dashes)
    return self._dashboard