def dashboard(self):
    """Build and memoize the per-database overview dashboard.

    Assembles the calls/blocks graphs, the powa_stat_all_relations graphs,
    the optional pg_stat_kcache and pg_wait_sampling widgets (depending on
    which extensions are installed on the target database), the per-query
    detail grids and the index-suggestion wizard.

    Returns:
        Dashboard: the (cached) dashboard for this database.
    """
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    self._dashboard = Dashboard("Database overview for %(database)s")

    # color_scheme stays None here: it is set below once we know which
    # block metrics are available (pg_stat_kcache or not)
    block_graph = Graph("Blocks (On database %(database)s)",
                        metrics=[DatabaseOverviewMetricGroup.total_blks_hit],
                        color_scheme=None)

    db_graphs = [Graph("Calls (On database %(database)s)",
                       metrics=[DatabaseOverviewMetricGroup.avg_runtime,
                                DatabaseOverviewMetricGroup.load,
                                DatabaseOverviewMetricGroup.calls]),
                 block_graph]

    graphs_dash = [Dashboard("General Overview", [db_graphs])]
    graphs = [TabContainer("All databases", graphs_dash)]

    # Add powa_stat_all_relations graphs
    all_rel_graphs = [
        Graph("Access pattern",
              metrics=[DatabaseAllRelMetricGroup.seq_scan,
                       DatabaseAllRelMetricGroup.idx_scan,
                       DatabaseAllRelMetricGroup.idx_ratio]),
        Graph("DML activity",
              metrics=[DatabaseAllRelMetricGroup.n_tup_del,
                       DatabaseAllRelMetricGroup.n_tup_hot_upd,
                       DatabaseAllRelMetricGroup.n_tup_upd,
                       DatabaseAllRelMetricGroup.n_tup_ins]),
        Graph("Vacuum activity",
              metrics=[DatabaseAllRelMetricGroup.autoanalyze_count,
                       DatabaseAllRelMetricGroup.analyze_count,
                       DatabaseAllRelMetricGroup.autovacuum_count,
                       DatabaseAllRelMetricGroup.vacuum_count])]
    graphs_dash.append(Dashboard("Database Objects", [all_rel_graphs]))

    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # pg_stat_kcache lets us split reads into OS-cache hits and real
        # disk reads; prepend them so they stack before the shared hits
        block_graph.metrics.insert(
            0, DatabaseOverviewMetricGroup.total_sys_hit)
        block_graph.metrics.insert(
            0, DatabaseOverviewMetricGroup.total_disk_read)
        block_graph.color_scheme = ['#cb513a', '#65b9ac', '#73c03a']

        sys_graphs = [Graph(
            "System resources (events per sec)",
            url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_stat_kcache.html",
            metrics=[DatabaseOverviewMetricGroup.majflts,
                     DatabaseOverviewMetricGroup.minflts,
                     # DatabaseOverviewMetricGroup.nswaps,
                     # DatabaseOverviewMetricGroup.msgsnds,
                     # DatabaseOverviewMetricGroup.msgrcvs,
                     # DatabaseOverviewMetricGroup.nsignals,
                     DatabaseOverviewMetricGroup.nvcsws,
                     DatabaseOverviewMetricGroup.nivcsws])]
        graphs_dash.append(Dashboard("System resources", [sys_graphs]))
    else:
        block_graph.metrics.insert(
            0, DatabaseOverviewMetricGroup.total_blks_read)
        block_graph.color_scheme = ['#cb513a', '#73c03a']

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        pg_version_num = self.get_pg_version_num(self.path_args[0])
        # BUGFIX: get_pg_version_num() can return None when the remote
        # server is unreachable, and "None < 100000" raises TypeError on
        # Python 3.  Guard against it and assume pg10 or above in that
        # case, as the all-databases dashboard does.
        if pg_version_num is not None and pg_version_num < 100000:
            # before pg10, lwlocks were reported as named/tranche
            metrics = [DatabaseWaitOverviewMetricGroup.count_lwlocknamed,
                       DatabaseWaitOverviewMetricGroup.count_lwlocktranche,
                       DatabaseWaitOverviewMetricGroup.count_lock,
                       DatabaseWaitOverviewMetricGroup.count_bufferpin]
        else:
            metrics = [DatabaseWaitOverviewMetricGroup.count_lwlock,
                       DatabaseWaitOverviewMetricGroup.count_lock,
                       DatabaseWaitOverviewMetricGroup.count_bufferpin,
                       DatabaseWaitOverviewMetricGroup.count_activity,
                       DatabaseWaitOverviewMetricGroup.count_client,
                       DatabaseWaitOverviewMetricGroup.count_extension,
                       DatabaseWaitOverviewMetricGroup.count_ipc,
                       DatabaseWaitOverviewMetricGroup.count_timeout,
                       DatabaseWaitOverviewMetricGroup.count_io]

        graphs_dash.append(Dashboard("Wait Events", [[
            Graph("Wait Events (per second)",
                  url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_wait_sampling.html",
                  metrics=metrics)]]))

    self._dashboard.widgets.extend([
        graphs,
        [Grid("Details for all queries",
              toprow=[{'merge': True},
                      {'name': 'Execution', 'merge': False, 'colspan': 3},
                      {'name': 'I/O Time', 'merge': False, 'colspan': 2},
                      {'name': 'Blocks', 'merge': False, 'colspan': 4},
                      {'name': 'Temp blocks', 'merge': False, 'colspan': 2}],
              columns=[{"name": "query",
                        "label": "Query",
                        "type": "query",
                        "url_attr": "url",
                        "max_length": 70}],
              metrics=ByQueryMetricGroup.all())]])

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        self._dashboard.widgets.extend([[
            Grid("Wait events for all queries",
                 url="https://powa.readthedocs.io/en/latest/stats_extensions/pg_wait_sampling.html",
                 columns=[{"name": "query",
                           "label": "Query",
                           "type": "query",
                           "url_attr": "url",
                           "max_length": 70},
                          {"name": "event_type",
                           "label": "Event Type"},
                          {"name": "event",
                           "label": "Event"}],
                 metrics=ByQueryWaitSamplingMetricGroup.all())]])

    self._dashboard.widgets.extend([[Wizard("Index suggestions")]])
    return self._dashboard
def dashboard(self):
    """Build and memoize the all-databases overview dashboard.

    Assembles the aggregated runtime/block graphs, the pg_stat_bgwriter
    and powa_stat_all_relations graphs, the optional pg_stat_kcache and
    pg_wait_sampling widgets (depending on which extensions are installed
    on the target server), and the per-database detail grids.

    Returns:
        Dashboard: the (cached) "All databases" dashboard.
    """
    # This COULD be initialized in the constructor, but tornado < 3 doesn't
    # call it
    if getattr(self, '_dashboard', None) is not None:
        return self._dashboard

    # color_scheme stays None here: it is set below once we know which
    # block metrics are available (pg_stat_kcache or not)
    block_graph = Graph("Block access in Bps",
                        metrics=[GlobalDatabasesMetricGroup.total_blks_hit],
                        color_scheme=None)

    all_db_graphs = [Graph("Query runtime per second (all databases)",
                           metrics=[GlobalDatabasesMetricGroup.avg_runtime,
                                    GlobalDatabasesMetricGroup.load,
                                    GlobalDatabasesMetricGroup.calls]),
                     block_graph]

    graphs_dash = [Dashboard("General Overview", [all_db_graphs])]
    graphs = [TabContainer("All databases", graphs_dash)]

    # Add pg_stat_bgwriter graphs
    bgw_graphs = [
        [Graph("Checkpointer scheduling",
               metrics=[GlobalBgwriterMetricGroup.checkpoints_timed,
                        GlobalBgwriterMetricGroup.checkpoints_req]),
         Graph("Checkpointer activity",
               metrics=[GlobalBgwriterMetricGroup.checkpoint_write_time,
                        GlobalBgwriterMetricGroup.checkpoint_sync_time,
                        GlobalBgwriterMetricGroup.buffers_checkpoint,
                        GlobalBgwriterMetricGroup.buffers_alloc])],
        [Graph("Background writer",
               metrics=[GlobalBgwriterMetricGroup.buffers_clean,
                        GlobalBgwriterMetricGroup.maxwritten_clean]),
         Graph("Backends",
               metrics=[GlobalBgwriterMetricGroup.buffers_backend,
                        GlobalBgwriterMetricGroup.buffers_backend_fsync])]]
    graphs_dash.append(Dashboard("Background Writer", bgw_graphs))

    # Add powa_stat_all_relations graphs
    all_rel_graphs = [
        Graph("Access pattern",
              metrics=[GlobalAllRelMetricGroup.seq_scan,
                       GlobalAllRelMetricGroup.idx_scan,
                       GlobalAllRelMetricGroup.idx_ratio]),
        Graph("DML activity",
              metrics=[GlobalAllRelMetricGroup.n_tup_del,
                       GlobalAllRelMetricGroup.n_tup_hot_upd,
                       GlobalAllRelMetricGroup.n_tup_upd,
                       GlobalAllRelMetricGroup.n_tup_ins]),
        Graph("Vacuum activity",
              metrics=[GlobalAllRelMetricGroup.autoanalyze_count,
                       GlobalAllRelMetricGroup.analyze_count,
                       GlobalAllRelMetricGroup.autovacuum_count,
                       GlobalAllRelMetricGroup.vacuum_count])]
    graphs_dash.append(Dashboard("Database Objects", [all_rel_graphs]))

    if self.has_extension(self.path_args[0], "pg_stat_kcache"):
        # pg_stat_kcache lets us split reads into OS-cache hits and real
        # disk reads; prepend them so they stack before the shared hits
        block_graph.metrics.insert(
            0, GlobalDatabasesMetricGroup.total_sys_hit)
        block_graph.metrics.insert(
            0, GlobalDatabasesMetricGroup.total_disk_read)
        block_graph.color_scheme = ['#cb513a', '#65b9ac', '#73c03a']

        sys_graphs = [Graph(
            "System resources (events per sec)",
            url=self.docs_stats_url + "pg_stat_kcache.html",
            metrics=[GlobalDatabasesMetricGroup.majflts,
                     GlobalDatabasesMetricGroup.minflts,
                     # GlobalDatabasesMetricGroup.nswaps,
                     # GlobalDatabasesMetricGroup.msgsnds,
                     # GlobalDatabasesMetricGroup.msgrcvs,
                     # GlobalDatabasesMetricGroup.nsignals,
                     GlobalDatabasesMetricGroup.nvcsws,
                     GlobalDatabasesMetricGroup.nivcsws])]
        graphs_dash.append(Dashboard("System resources", [sys_graphs]))
    else:
        block_graph.metrics.insert(
            0, GlobalDatabasesMetricGroup.total_blks_read)
        block_graph.color_scheme = ['#cb513a', '#73c03a']

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        pg_version_num = self.get_pg_version_num(self.path_args[0])
        # if we can't connect to the remote server, assume pg10 or above.
        # BUGFIX: the previous test ("pg_version_num is None or ...") sent
        # the unknown-version case to the pre-pg10 metric set, contradicting
        # that intent; only a known version below 100000 should use it.
        if pg_version_num is not None and pg_version_num < 100000:
            # before pg10, lwlocks were reported as named/tranche
            metrics = [GlobalWaitsMetricGroup.count_lwlocknamed,
                       GlobalWaitsMetricGroup.count_lwlocktranche,
                       GlobalWaitsMetricGroup.count_lock,
                       GlobalWaitsMetricGroup.count_bufferpin]
        else:
            metrics = [GlobalWaitsMetricGroup.count_lwlock,
                       GlobalWaitsMetricGroup.count_lock,
                       GlobalWaitsMetricGroup.count_bufferpin,
                       GlobalWaitsMetricGroup.count_activity,
                       GlobalWaitsMetricGroup.count_client,
                       GlobalWaitsMetricGroup.count_extension,
                       GlobalWaitsMetricGroup.count_ipc,
                       GlobalWaitsMetricGroup.count_timeout,
                       GlobalWaitsMetricGroup.count_io]

        graphs_dash.append(Dashboard("Wait Events", [[
            Graph("Wait Events (per second)",
                  url=self.docs_stats_url + "pg_wait_sampling.html",
                  metrics=metrics)]]))

    dashes = [graphs,
              [Grid("Details for all databases",
                    columns=[{"name": "datname",
                              "label": "Database",
                              "url_attr": "url"}],
                    metrics=ByDatabaseMetricGroup.all())]]

    if self.has_extension(self.path_args[0], "pg_wait_sampling"):
        dashes.append([Grid(
            "Wait events for all databases",
            url=self.docs_stats_url + "pg_wait_sampling.html",
            columns=[{"name": "datname",
                      "label": "Database",
                      "url_attr": "url"},
                     {"name": "event_type",
                      "label": "Event Type"},
                     {"name": "event",
                      "label": "Event"}],
            metrics=ByDatabaseWaitSamplingMetricGroup.all())])

    self._dashboard = Dashboard("All databases", dashes)
    return self._dashboard