def all(self, hostId, graph=False):
    """Render the 'all sprocs' page for a host.

    When `graph` is falsy, only the list of active sproc names is fetched;
    when truthy, a time graph of the average runtime over the last 4 days
    is rendered for every sproc returned by self.get_data(hostId).

    Fixes: removed leftover debug print statements; renamed the per-sproc
    graph object so it no longer shadows the `graph` boolean parameter.
    """
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    graph_list = None
    all_sprocs = None
    if not graph:
        all_sprocs = sprocdata.getAllActiveSprocNames(hostId)
    else:
        sprocs = self.get_data(hostId)
        graph_list = []
        i = 0
        for s in sprocs:
            d = sprocdata.getSingleSprocData(hostId, s, "('now'::timestamp - '4 days'::interval)")
            i += 1
            # distinct name so the `graph` parameter is not shadowed
            avg_graph = flotgraph.TimeGraph("graph" + str(i))
            avg_graph.addSeries('Avg.', 'avg')
            for p in d['avg_time']:
                # x axis is unix epoch in milliseconds
                avg_graph.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000), p[1])
            graph_list.append({'graph': avg_graph.render(), 'name': s[0:s.find("(")], 'i': i})
    tpl = tplE.env.get_template('all_sprocs.html')
    return tpl.render(graphs=graph_list,
                      hostuiname=hostUiName,
                      hostname=hosts.getHostData()[int(hostId)]['uilongname'],
                      all_sprocs=all_sprocs)
def allgraph(self, hostId):
    """Render the 'all sprocs' page with a 4-day average-runtime graph per sproc.

    Fix: local variable was named `list`, shadowing the builtin; renamed
    to `graph_list`.
    """
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    sprocs = self.get_data(hostId)
    tpl = tplE.env.get_template('all_sprocs.html')
    graph_list = []
    i = 0
    for s in sprocs:
        d = sprocdata.getSingleSprocData(
            hostId, s, "('now'::timestamp - '4 days'::interval)")
        i += 1
        graph = flotgraph.TimeGraph("graph" + str(i))
        graph.addSeries('Avg.', 'avg')
        for p in d['avg_time']:
            # x axis is unix epoch in milliseconds
            graph.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_list.append({
            'graph': graph.render(),
            'name': s[0:s.find("(")],
            'i': i
        })
    return tpl.render(
        graphs=graph_list,
        hostuiname=hostUiName,
        hostname=hosts.getHostData()[int(hostId)]['uilongname'],
        all_sprocs=None)
def raw(self, host, limit=10):
    """Collect the raw per-host datasets used to build the overview page dynamically."""
    host_id, host_ui_name = hosts.ensureHostIdAndUIShortname(host)
    # gather each dataset in the same order the page consumes them
    cpu = topsprocs.getCpuLoad(host_id)
    sproc_load = topsprocs.getLoad(host_id)
    wal = topsprocs.getWalVolumes(host_id)
    blocked = topsprocs.getBlockedProcessesCounts(host_id)
    db_sizes = tabledata.getDatabaseSizes(host_id)
    db_stats = reportdata.getDatabaseStatistics(host_id)
    return {'load': sproc_load,
            'cpuload': cpu,
            'walvolumes': wal,
            'blocked_processes': blocked,
            'sizes': db_sizes,
            'dbstats': db_stats}
def graph(self, hostuiname, query_id, **params):
    """Render detailed pg_stat_statements graphs for one query on one host.

    Optional params 'from_date'/'to_date' (YYYY-MM-DD strings) default to
    yesterday and tomorrow. The stored counters are cumulative, so each
    plotted point is the delta between two consecutive snapshot rows.

    Fix: runtime and avg-runtime series hold durations, so they are built
    with flotgraph.TimeGraph for consistency with the sibling
    implementation of this page (plain Graph was used before).
    """
    hostid, hostuiname = hosts.ensureHostIdAndUIShortname(hostuiname)
    from_date = params.get('from_date', (datetime.datetime.now() - datetime.timedelta(1)).strftime('%Y-%m-%d'))
    to_date = params.get('to_date', (datetime.datetime.now() + datetime.timedelta(1)).strftime('%Y-%m-%d'))
    data = reportdata.getStatStatementsGraph(hostid, query_id, from_date, to_date)

    graphcalls = flotgraph.Graph("graphcalls")
    graphcalls.addSeries('Number of calls', 'calls')
    graphavgruntime = flotgraph.TimeGraph("graphavgruntime")
    graphavgruntime.addSeries('Avg. runtime', 'avgruntime')
    graphruntime = flotgraph.TimeGraph("graphruntime")
    graphruntime.addSeries('Runtime', 'runtime')
    graphblocksread = flotgraph.Graph("graphblocksread")
    graphblocksread.addSeries('Blocks read', 'blocksread')
    graphblocksread.addSeries('Temp blocks read', 'tempblocksread', '#FFFF00')
    graphblockswritten = flotgraph.Graph("graphblockswritten")
    graphblockswritten.addSeries('Blocks written', 'blockswritten')
    graphblockswritten.addSeries('Temp blocks written', 'tempblockswritten', '#FFFF00')

    prev_row = None
    for d in data:
        if prev_row is None:
            prev_row = d
            continue
        # x axis is unix epoch in milliseconds; computed once per row
        ts = int(time.mktime(d['timestamp'].timetuple()) * 1000)
        calls = d['calls'] - prev_row['calls']
        graphcalls.addPoint('calls', ts, calls)
        runtime = d['total_time'] - prev_row['total_time']
        graphruntime.addPoint('runtime', ts, runtime)
        avg_runtime = round(runtime / float(calls), 2) if calls > 0 else 0
        graphavgruntime.addPoint('avgruntime', ts, avg_runtime)
        graphblocksread.addPoint('blocksread', ts, d['blks_read'] - prev_row['blks_read'])
        graphblocksread.addPoint('tempblocksread', ts, d['temp_blks_read'] - prev_row['temp_blks_read'])
        graphblockswritten.addPoint('blockswritten', ts, d['blks_written'] - prev_row['blks_written'])
        graphblockswritten.addPoint('tempblockswritten', ts, d['temp_blks_written'] - prev_row['temp_blks_written'])
        prev_row = d

    # NOTE(review): if `data` is empty, prev_row stays None and the
    # subscriptions below raise TypeError — presumably callers always pass a
    # query_id with at least one snapshot; confirm before hardening.
    table = tplE.env.get_template('perf_stat_statements_detailed.html')
    return table.render(hostuiname=hostuiname,
                        query=prev_row['query'],
                        query_id=prev_row['query_id'],
                        graphcalls=graphcalls.render(),
                        graphavgruntime=graphavgruntime.render(),
                        graphruntime=graphruntime.render(),
                        graphblocksread=graphblocksread.render(),
                        graphblockswritten=graphblockswritten.render(),
                        from_date=from_date,
                        to_date=to_date)
def graph(self, hostuiname, query_id, **params):
    """Per-query pg_stat_statements detail page: calls, runtime and block-I/O graphs.

    'from_date'/'to_date' params (YYYY-MM-DD) default to a yesterday..tomorrow
    window. Counters are cumulative, so consecutive snapshot rows are diffed.
    """
    hostid, hostuiname = hosts.ensureHostIdAndUIShortname(hostuiname)
    default_from = (datetime.datetime.now() - datetime.timedelta(1)).strftime('%Y-%m-%d')
    default_to = (datetime.datetime.now() + datetime.timedelta(1)).strftime('%Y-%m-%d')
    from_date = params.get('from_date', default_from)
    to_date = params.get('to_date', default_to)
    data = reportdata.getStatStatementsGraph(hostid, query_id, from_date, to_date)

    graphcalls = flotgraph.Graph("graphcalls")
    graphcalls.addSeries('Number of calls', 'calls')
    graphavgruntime = flotgraph.TimeGraph("graphavgruntime")
    graphavgruntime.addSeries('Avg. runtime', 'avgruntime')
    graphruntime = flotgraph.TimeGraph("graphruntime")
    graphruntime.addSeries('Runtime', 'runtime')
    graphblocksread = flotgraph.Graph("graphblocksread")
    graphblocksread.addSeries('Blocks read', 'blocksread')
    graphblocksread.addSeries('Temp blocks read', 'tempblocksread', '#FFFF00')
    graphblockswritten = flotgraph.Graph("graphblockswritten")
    graphblockswritten.addSeries('Blocks written', 'blockswritten')
    graphblockswritten.addSeries('Temp blocks written', 'tempblockswritten', '#FFFF00')

    prev = None
    for row in data:
        if prev is not None:
            # epoch milliseconds for the x axis, computed once per row
            ts = int(time.mktime(row['timestamp'].timetuple()) * 1000)
            calls = row['calls'] - prev['calls']
            runtime = row['total_time'] - prev['total_time']
            graphcalls.addPoint('calls', ts, calls)
            graphruntime.addPoint('runtime', ts, runtime)
            avg_runtime = round(runtime / float(calls), 2) if calls > 0 else 0
            graphavgruntime.addPoint('avgruntime', ts, avg_runtime)
            graphblocksread.addPoint('blocksread', ts, row['blks_read'] - prev['blks_read'])
            graphblocksread.addPoint('tempblocksread', ts, row['temp_blks_read'] - prev['temp_blks_read'])
            graphblockswritten.addPoint('blockswritten', ts, row['blks_written'] - prev['blks_written'])
            graphblockswritten.addPoint('tempblockswritten', ts, row['temp_blks_written'] - prev['temp_blks_written'])
        prev = row

    table = tplE.env.get_template('perf_stat_statements_detailed.html')
    return table.render(hostuiname=hostuiname,
                        query=prev['query'],
                        query_id=prev['query_id'],
                        graphcalls=graphcalls.render(),
                        graphavgruntime=graphavgruntime.render(),
                        graphruntime=graphruntime.render(),
                        graphblocksread=graphblocksread.render(),
                        graphblockswritten=graphblockswritten.render(),
                        from_date=from_date,
                        to_date=to_date)
def all(self, hostId, active_days=4, graph=False):
    """Show the sprocs active on a host during the last `active_days` days.

    Without `graph`, only the list of active sproc names is fetched; with
    `graph`, an average-runtime time graph is rendered for every sproc
    returned by self.get_data(hostId).
    """
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    graph_list = []
    all_sprocs = None
    if graph:
        interval_sql = "('now'::timestamp - '{} days'::interval)".format(active_days)
        for i, s in enumerate(self.get_data(hostId), start=1):
            sproc_data = sprocdata.getSingleSprocData(hostId, s, interval_sql)
            avg_graph = flotgraph.TimeGraph("graph" + str(i))
            avg_graph.addSeries('Avg.', 'avg')
            for point in sproc_data['avg_time']:
                # x axis is unix epoch in milliseconds
                avg_graph.addPoint('avg', int(time.mktime(point[0].timetuple()) * 1000), point[1])
            graph_list.append({'graph': avg_graph.render(),
                               'name': s[0:s.find("(")],
                               'i': i})
    else:
        all_sprocs = sprocdata.getAllActiveSprocNames(hostId, active_days)
    tpl = tplE.env.get_template('all_sprocs.html')
    return tpl.render(graphs=graph_list,
                      hostuiname=hostUiName,
                      hostname=hosts.getHostData()[int(hostId)]['uilongname'],
                      active_days=active_days,
                      all_sprocs=all_sprocs)
def raw(self, host, limit=10):
    """Return every dataset needed to build the overview page dynamically, as one dict."""
    hostId, host_ui_name = hosts.ensureHostIdAndUIShortname(host)
    # fetch in a fixed order, then assemble the response mapping
    cpuload = topsprocs.getCpuLoad(hostId)
    load = topsprocs.getLoad(hostId)
    walvolumes = topsprocs.getWalVolumes(hostId)
    blocked_processes = topsprocs.getBlockedProcessesCounts(hostId)
    sizes = tabledata.getDatabaseSizes(hostId)
    dbstats = reportdata.getDatabaseStatistics(hostId)
    result = dict(load=load,
                  cpuload=cpuload,
                  walvolumes=walvolumes,
                  blocked_processes=blocked_processes,
                  sizes=sizes,
                  dbstats=dbstats)
    return result
def default(self, hostId = None):
    """Render the per-host overview page: load, WAL, DB-size and DB-stats
    graphs plus top-sproc tables, gated by the tplE._settings toggles.
    Graph window (days) and table row limit come from cookies.
    """
    # NOTE(review): max(hostId, self.hostId) presumably prefers the explicit
    # URL argument over the instance default; with hostId=None this relies
    # on None being comparable (Python-2 semantics) -- confirm.
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(max(hostId, self.hostId))
    days = cherrypy.request.cookie['days'].value if 'days' in cherrypy.request.cookie else '8'
    sprocs_to_show = int(cherrypy.request.cookie['sprocs_to_show'].value) if 'sprocs_to_show' in cherrypy.request.cookie else 10
    # each graph_* variable holds None when its section is disabled,
    # otherwise the rendered markup string
    graph_load = None
    graph_wal = None
    graph_size = None
    graph_dbstats = None
    top_sprocs = None
    if tplE._settings['show_load']:
        graph_load = flotgraph.Graph("graph_load","left",30)
        graph_load.addSeries('CPU Load 15min avg','acpu_15min_avg','#FF0000')
        cpuload = topsprocs.getCpuLoad(hostId, days)
        for p in cpuload['load_15min_avg']:
            # x axis: unix epoch milliseconds
            graph_load.addPoint('acpu_15min_avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        load = topsprocs.getLoad(hostId, days)
        graph_load.addSeries('Sproc Load 15 min', 'load_15min')
        for p in load['load_15min']:
            graph_load.addPoint('load_15min', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        graph_load = graph_load.render()  # graph object replaced by its rendered markup
    if tplE._settings['show_wal']:
        graph_wal = flotgraph.Graph("graph_wal", "left", 30)
        graph_wal.addSeries('WAL vol. 15 min (in MB)', 'wal_15min')
        walvolumes = topsprocs.getWalVolumes(hostId, days)
        for p in walvolumes['wal_15min_growth']:
            graph_wal.addPoint('wal_15min', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        if hosts.isHostFeatureEnabled(hostId, 'blockingStatsGatherInterval'):
            # overlay blocked-process counts only when that feature gathers data
            blocked_processes = topsprocs.getBlockedProcessesCounts(hostId, days)
            graph_wal.addSeries('#Blocked processes (> 5s)', 'blocked_processes', '#FF0000', None, 2)
            for p in blocked_processes:
                # skip points that predate the first WAL sample
                if len(walvolumes['wal_15min_growth']) > 0 \
                        and p[0].timetuple() >= walvolumes['wal_15min_growth'][0][0].timetuple():  # aligning timeline with WAL data
                    graph_wal.addPoint('blocked_processes', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_wal = graph_wal.render()
    if tplE._settings['show_db_size']:
        graph_size = flotgraph.SizeGraph("graph_size")
        sizes = tabledata.getDatabaseSizes(hostId, days)
        if hostId in sizes:
            tabledata.fillGraph(graph_size,sizes[hostId])
        graph_size = graph_size.render()
    if tplE._settings['show_db_stats']:
        dbstats = reportdata.getDatabaseStatistics(hostId, days)
        if len(dbstats) > 0:
            graph_dbstats = flotgraph.SizeGraph("graph_dbstats")
            graph_dbstats.addSeries('Temp bytes written', 'temp_files_bytes')
            graph_dbstats.addSeries('#Backends / 10', 'numbackends', '#C0C0C0', None, 2)
            graph_dbstats.addSeries('#Deadlocks', 'deadlocks', '#FF0000', None, 2)
            graph_dbstats.addSeries('#Rollbacks [incl. exceptions]', 'rollbacks', '#FFFF00', None, 2)
            for d in dbstats:
                timestamp = int(time.mktime(d['timestamp'].timetuple()) * 1000)
                graph_dbstats.addPoint('temp_files_bytes', timestamp, d['temp_files_bytes'])
                graph_dbstats.addPoint('deadlocks', timestamp, d['deadlocks'])
                # backend count scaled by 10 to share a y axis (see series label)
                graph_dbstats.addPoint('numbackends', timestamp, d['numbackends'] / 10.0)
                graph_dbstats.addPoint('rollbacks', timestamp, d['rollbacks'])
            graph_dbstats = graph_dbstats.render()
    if tplE._settings['show_top_sprocs']:
        # top-N tables for 1h/3h windows, ordered by avg runtime, total runtime and call count
        top_sprocs = {}
        top_sprocs['hours1avg'] = self.renderTop10LastHours(topsprocs.avgRuntimeOrder,1, hostId, sprocs_to_show)
        top_sprocs['hours3avg'] = self.renderTop10LastHours(topsprocs.avgRuntimeOrder,3, hostId, sprocs_to_show)
        top_sprocs['hours1total'] = self.renderTop10LastHours(topsprocs.totalRuntimeOrder,1, hostId,sprocs_to_show)
        top_sprocs['hours3total'] = self.renderTop10LastHours(topsprocs.totalRuntimeOrder,3, hostId,sprocs_to_show)
        top_sprocs['hours1calls'] = self.renderTop10LastHours(topsprocs.totalCallsOrder,1, hostId,sprocs_to_show)
        top_sprocs['hours3calls'] = self.renderTop10LastHours(topsprocs.totalCallsOrder,3, hostId,sprocs_to_show)
    tmpl = tplE.env.get_template('index.html')
    return tmpl.render(hostid=hostId,
                       hostname=hosts.getHostnameByHostId(hostId),
                       hostuiname=hostUiName,
                       graph_load=graph_load,
                       graph_wal=graph_wal,
                       graph_size=graph_size,
                       graph_dbstats=graph_dbstats,
                       top_sprocs=top_sprocs,
                       limit=sprocs_to_show,
                       features=hosts.getActiveFeatures(hostId),
                       target='World')
def default(self, *p, **params):
    """Sproc detail page: /sprocs/show/<host>[/<sprocname>].

    Renders call-count and total/avg/self runtime graphs for one sproc over
    the 'from'/'to' interval params (default: last 8 days). A 'search'
    submit redirects to the canonical URL of the searched sproc.
    """
    if len(p) == 0:
        return """Error: Not enough URL parameters. Hostname needed"""
    hostId, hostName = hosts.ensureHostIdAndUIShortname(p[0])
    sprocName = None
    if len(p) > 1:
        sprocName = p[1]
    if params.get('search'):
        # search-box submit: redirect to that sproc's own page
        sprocName = params.get('sproc_search')
        url = '/sprocs/show/' + hostName + '/' + sprocName
        raise cherrypy.HTTPRedirect(cherrypy.url(url))
    interval = {}
    interval['from'] = params.get('from',(datetime.datetime.now() - datetime.timedelta(days=8)).strftime('%Y-%m-%d'))
    interval['to'] = params.get('to',(datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
    graphcalls= flotgraph.Graph("graphcalls")
    graphcalls.addSeries('Number of calls', 'calls')
    graphtime= flotgraph.TimeGraph("graphruntime")
    graphtime.addSeries('Total run time', 'runtime')
    graphavg= flotgraph.TimeGraph("graphavg")
    graphavg.addSeries('Average run time', 'avg')
    graphavgself= flotgraph.TimeGraph("graphselfavg")
    graphavgself.addSeries('Average self time', 'avgself')
    data = sprocdata.getSingleSprocData(hostId, sprocName, interval)
    if data['name']: # None if no data for sproc found
        # NOTE: the loop variable p shadows the *p positionals (not used past here)
        for p in data['total_time']:
            graphtime.addPoint('runtime', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        for p in data['calls']:
            graphcalls.addPoint('calls', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        for p in data['avg_time']:
            graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])
        for p in data['avg_self_time']:
            graphavgself.addPoint('avgself', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        # split "name(arg, ...)" into bare name and parameter list; the
        # render call below only evaluates these when data['name'] is truthy
        sproc_name_wo_params = data['name'] if data['name'].find('(') == -1 else data['name'][0:data['name'].find('(')]
        sproc_params = "" if data['name'].find('(') == -1 else data['name'][data['name'].find('(')+1:-1]
    all_sprocs = sprocdata.getAllActiveSprocNames(hostId)
    table = tplE.env.get_template('sproc_detail.html')
    return table.render(hostid = hostId,
                        hostname = hosts.getHostData()[int(hostId)]['uilongname'],
                        hostuiname = hostName,
                        name_w_params = data['name'] ,
                        params = sproc_params if data['name'] else None,
                        name_wo_params = sproc_name_wo_params if data['name'] else None,
                        interval = interval,
                        sproc_name = sprocName,
                        all_sprocs = all_sprocs,
                        all_sprocs_json = json.dumps(all_sprocs),
                        graphavg = graphavg.render(),
                        graphselfavg = graphavgself.render(),
                        graphcalls = graphcalls.render(),
                        graphruntime = graphtime.render())
def default(self, *p, **params):
    """Show one sproc's detail page (URL: /sprocs/show/<host>[/<sprocname>]).

    Builds four graphs (calls, total run time, average run time, average
    self time) for the interval in the 'from'/'to' params, defaulting to
    the last 8 days. A 'search' param redirects to the searched sproc.
    """
    if len(p) == 0:
        return """Error: Not enough URL parameters. Hostname needed"""
    hostId, hostName = hosts.ensureHostIdAndUIShortname(p[0])
    sprocName = None
    if len(p) > 1:
        sprocName = p[1]
    if params.get('search'):
        # redirect so the searched sproc gets its canonical URL
        sprocName = params.get('sproc_search')
        url = '/sprocs/show/' + hostName + '/' + sprocName
        raise cherrypy.HTTPRedirect(cherrypy.url(url))
    interval = {}
    interval['from'] = params.get(
        'from',
        (datetime.datetime.now() - datetime.timedelta(days=8)).strftime('%Y-%m-%d'))
    interval['to'] = params.get(
        'to',
        (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
    graphcalls = flotgraph.Graph("graphcalls")
    graphcalls.addSeries('Number of calls', 'calls')
    graphtime = flotgraph.TimeGraph("graphruntime")
    graphtime.addSeries('Total run time', 'runtime')
    graphavg = flotgraph.TimeGraph("graphavg")
    graphavg.addSeries('Average run time', 'avg')
    graphavgself = flotgraph.TimeGraph("graphselfavg")
    graphavgself.addSeries('Average self time', 'avgself')
    data = sprocdata.getSingleSprocData(hostId, sprocName, interval)
    if data['name']:  # None if no data for sproc found
        # loop variable p intentionally reuses the exhausted *p name
        for p in data['total_time']:
            graphtime.addPoint('runtime', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        for p in data['calls']:
            graphcalls.addPoint('calls', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        for p in data['avg_time']:
            graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        for p in data['avg_self_time']:
            graphavgself.addPoint(
                'avgself', int(time.mktime(p[0].timetuple()) * 1000), p[1])
        # derive "name" / "(params)" parts; only referenced by the render
        # call when data['name'] is truthy, so defining them here is safe
        sproc_name_wo_params = data['name'] if data['name'].find(
            '(') == -1 else data['name'][0:data['name'].find('(')]
        sproc_params = "" if data['name'].find(
            '(') == -1 else data['name'][data['name'].find('(') + 1:-1]
    all_sprocs = sprocdata.getAllActiveSprocNames(hostId)
    table = tplE.env.get_template('sproc_detail.html')
    return table.render(
        hostid=hostId,
        hostname=hosts.getHostData()[int(hostId)]['uilongname'],
        hostuiname=hostName,
        name_w_params=data['name'],
        params=sproc_params if data['name'] else None,
        name_wo_params=sproc_name_wo_params if data['name'] else None,
        interval=interval,
        sproc_name=sprocName,
        all_sprocs=all_sprocs,
        all_sprocs_json=json.dumps(all_sprocs),
        graphavg=graphavg.render(),
        graphselfavg=graphavgself.render(),
        graphcalls=graphcalls.render(),
        graphruntime=graphtime.render())
def default(self, hostId):
    """Render the per-host overview page.

    Sections (load, WAL, DB size, DB stats, top sprocs, top statements,
    bgwriter/checkpoint graph) are individually gated by tplE._settings
    toggles. The graph window in days and the table row limit come from
    the 'days' and 'sprocs_to_show' cookies, defaulting to 8 and 10.

    Fix: the argument to ensureHostIdAndUIShortname was the no-op
    expression max(hostId, hostId); simplified to hostId.
    """
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    days = (cherrypy.request.cookie['days'].value
            if 'days' in cherrypy.request.cookie else '8')
    sprocs_to_show = (int(cherrypy.request.cookie['sprocs_to_show'].value)
                      if 'sprocs_to_show' in cherrypy.request.cookie else 10)
    # each variable stays None when its section is disabled; otherwise it
    # ends up holding rendered markup (or a dict of rendered tables)
    graph_load = None
    graph_wal = None
    graph_size = None
    graph_dbstats = None
    top_sprocs = None
    top_statements = None
    graph_checkpoint = None
    global_announcement = reportdata.getGetActiveFrontendAnnouncementIfAny(
    )  # fyi - no escaping is performed deliberately
    if tplE._settings.get('show_load', True):
        graph_load = flotgraph.Graph('graph_load', 'left', 30)
        graph_load.addSeries('CPU Load 15min avg', 'acpu_15min_avg', '#FF0000')
        cpuload = topsprocs.getCpuLoad(hostId, days)
        for p in cpuload['load_15min_avg']:
            # x axis: unix epoch milliseconds
            graph_load.addPoint('acpu_15min_avg',
                                int(time.mktime(p[0].timetuple()) * 1000), p[1])
        load = topsprocs.getLoad(hostId, days)
        graph_load.addSeries('Sproc Load 15 min', 'load_15min')
        for p in load['load_15min']:
            graph_load.addPoint('load_15min',
                                int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_load = graph_load.render()
    if tplE._settings.get('show_wal', True):
        graph_wal = flotgraph.Graph('graph_wal', 'left', 30)
        graph_wal.addSeries('WAL vol. 15 min (in MB)', 'wal_15min')
        walvolumes = topsprocs.getWalVolumes(hostId, days)
        for p in walvolumes['wal_15min_growth']:
            graph_wal.addPoint('wal_15min',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])
        if hosts.isHostFeatureEnabled(hostId, 'blockingStatsGatherInterval'):
            blocked_processes = topsprocs.getBlockedProcessesCounts(hostId, days)
            graph_wal.addSeries('#Blocked processes (> 5s)',
                                'blocked_processes', '#FF0000', None, 2)
            for p in blocked_processes:
                # aligning timeline with WAL data: skip points before the first WAL sample
                if len(walvolumes['wal_15min_growth']) > 0 \
                        and p[0].timetuple() >= walvolumes['wal_15min_growth'][0][0].timetuple():
                    graph_wal.addPoint('blocked_processes',
                                       int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_wal = graph_wal.render()
    if tplE._settings.get('show_db_size', True):
        graph_size = flotgraph.SizeGraph('graph_size')
        sizes = tabledata.getDatabaseSizes(hostId, days)
        if hostId in sizes:
            tabledata.fillGraph(graph_size, sizes[hostId])
        graph_size = graph_size.render()
    if tplE._settings.get('show_db_stats', True):
        dbstats = reportdata.getDatabaseStatistics(hostId, days)
        if len(dbstats) > 0:
            graph_dbstats = flotgraph.SizeGraph('graph_dbstats')
            graph_dbstats.addSeries('Temp bytes written', 'temp_files_bytes')
            graph_dbstats.addSeries('#Backends / 10', 'numbackends', '#C0C0C0', None, 2)
            graph_dbstats.addSeries('#Deadlocks', 'deadlocks', '#FF0000', None, 2)
            graph_dbstats.addSeries('#Rollbacks [incl. exceptions]', 'rollbacks', '#FFFF00', None, 2)
            for d in dbstats:
                timestamp = int(time.mktime(d['timestamp'].timetuple()) * 1000)
                graph_dbstats.addPoint('temp_files_bytes', timestamp, d['temp_files_bytes'])
                graph_dbstats.addPoint('deadlocks', timestamp, d['deadlocks'])
                # scaled by 10 so backends share a y axis with byte counts
                graph_dbstats.addPoint('numbackends', timestamp, d['numbackends'] / 10.0)
                graph_dbstats.addPoint('rollbacks', timestamp, d['rollbacks'])
            graph_dbstats = graph_dbstats.render()
    if tplE._settings.get('show_top_sprocs', True):
        # top-N sproc tables for the 1h and 3h windows, by avg/total runtime and calls
        top_sprocs = {}
        top_sprocs['hours1avg'] = self.renderTop10LastHours(
            topsprocs.avgRuntimeOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3avg'] = self.renderTop10LastHours(
            topsprocs.avgRuntimeOrder, 3, hostId, sprocs_to_show)
        top_sprocs['hours1total'] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3total'] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 3, hostId, sprocs_to_show)
        top_sprocs['hours1calls'] = self.renderTop10LastHours(
            topsprocs.totalCallsOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3calls'] = self.renderTop10LastHours(
            topsprocs.totalCallsOrder, 3, hostId, sprocs_to_show)
    if tplE._settings.get('show_top_statements', False):
        top_statements = {}
        tsd = topstatements.getTopStatementsData(hostId,
                                                 interval1='3hours',
                                                 interval2='1hours',
                                                 limit=sprocs_to_show)
        if tsd:
            # int2 is the 1-hour window, int1 the 3-hour window (see call above)
            top_statements['hours1avg'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('avg_int2', []))
            top_statements['hours3avg'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('avg_int1', []))
            top_statements['hours1total'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('total_int2', []))
            top_statements['hours3total'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('total_int1', []))
            top_statements['hours1calls'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('calls_int2', []))
            top_statements['hours3calls'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('calls_int1', []))
    if tplE._settings.get('show_bgwriter_stats', True):
        graph_checkpoint = self.get_rendered_bgwriter_graph(hostId, int(days))
    tmpl = tplE.env.get_template('index.html')
    return tmpl.render(
        hostid=hostId,
        hostname=hosts.getHostnameByHostId(hostId),
        hostuiname=hostUiName,
        graph_load=graph_load,
        graph_wal=graph_wal,
        graph_size=graph_size,
        graph_dbstats=graph_dbstats,
        graph_checkpoint=graph_checkpoint,
        top_sprocs=top_sprocs,
        top_statements=top_statements,
        limit=sprocs_to_show,
        features=hosts.getActiveFeatures(hostId),
        global_announcement=global_announcement,
        target='World',
    )
def default(self, hostId):
    """Build the host overview page from the sections enabled in tplE._settings.

    Reads the 'days' (graph window) and 'sprocs_to_show' (table row limit)
    cookies, defaulting to '8' and 10, then renders load, WAL, DB-size,
    DB-stats, top-sproc, top-statement and checkpoint sections as enabled.

    Fix: ensureHostIdAndUIShortname was called with max(hostId, hostId),
    which is a no-op; pass hostId directly.
    """
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    days = cherrypy.request.cookie["days"].value if "days" in cherrypy.request.cookie else "8"
    sprocs_to_show = (
        int(cherrypy.request.cookie["sprocs_to_show"].value)
        if "sprocs_to_show" in cherrypy.request.cookie
        else 10
    )
    # None means "section disabled"; enabled sections overwrite these with
    # rendered markup (or a dict of rendered tables)
    graph_load = None
    graph_wal = None
    graph_size = None
    graph_dbstats = None
    top_sprocs = None
    top_statements = None
    graph_checkpoint = None
    global_announcement = (
        reportdata.getGetActiveFrontendAnnouncementIfAny()
    )  # fyi - no escaping is performed deliberately
    if tplE._settings.get("show_load", True):
        graph_load = flotgraph.Graph("graph_load", "left", 30)
        graph_load.addSeries("CPU Load 15min avg", "acpu_15min_avg", "#FF0000")
        cpuload = topsprocs.getCpuLoad(hostId, days)
        for p in cpuload["load_15min_avg"]:
            # x axis: unix epoch milliseconds
            graph_load.addPoint("acpu_15min_avg", int(time.mktime(p[0].timetuple()) * 1000), p[1])
        load = topsprocs.getLoad(hostId, days)
        graph_load.addSeries("Sproc Load 15 min", "load_15min")
        for p in load["load_15min"]:
            graph_load.addPoint("load_15min", int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_load = graph_load.render()
    if tplE._settings.get("show_wal", True):
        graph_wal = flotgraph.Graph("graph_wal", "left", 30)
        graph_wal.addSeries("WAL vol. 15 min (in MB)", "wal_15min")
        walvolumes = topsprocs.getWalVolumes(hostId, days)
        for p in walvolumes["wal_15min_growth"]:
            graph_wal.addPoint("wal_15min", int(time.mktime(p[0].timetuple()) * 1000), p[1])
        if hosts.isHostFeatureEnabled(hostId, "blockingStatsGatherInterval"):
            blocked_processes = topsprocs.getBlockedProcessesCounts(hostId, days)
            graph_wal.addSeries("#Blocked processes (> 5s)", "blocked_processes", "#FF0000", None, 2)
            for p in blocked_processes:
                if (
                    len(walvolumes["wal_15min_growth"]) > 0
                    and p[0].timetuple() >= walvolumes["wal_15min_growth"][0][0].timetuple()
                ):  # aligning timeline with WAL data
                    graph_wal.addPoint("blocked_processes", int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_wal = graph_wal.render()
    if tplE._settings.get("show_db_size", True):
        graph_size = flotgraph.SizeGraph("graph_size")
        sizes = tabledata.getDatabaseSizes(hostId, days)
        if hostId in sizes:
            tabledata.fillGraph(graph_size, sizes[hostId])
        graph_size = graph_size.render()
    if tplE._settings.get("show_db_stats", True):
        dbstats = reportdata.getDatabaseStatistics(hostId, days)
        if len(dbstats) > 0:
            graph_dbstats = flotgraph.SizeGraph("graph_dbstats")
            graph_dbstats.addSeries("Temp bytes written", "temp_files_bytes")
            graph_dbstats.addSeries("#Backends / 10", "numbackends", "#C0C0C0", None, 2)
            graph_dbstats.addSeries("#Deadlocks", "deadlocks", "#FF0000", None, 2)
            graph_dbstats.addSeries("#Rollbacks [incl. exceptions]", "rollbacks", "#FFFF00", None, 2)
            for d in dbstats:
                timestamp = int(time.mktime(d["timestamp"].timetuple()) * 1000)
                graph_dbstats.addPoint("temp_files_bytes", timestamp, d["temp_files_bytes"])
                graph_dbstats.addPoint("deadlocks", timestamp, d["deadlocks"])
                # scaled down by 10 to share a y axis (see series label)
                graph_dbstats.addPoint("numbackends", timestamp, d["numbackends"] / 10.0)
                graph_dbstats.addPoint("rollbacks", timestamp, d["rollbacks"])
            graph_dbstats = graph_dbstats.render()
    if tplE._settings.get("show_top_sprocs", True):
        # top-N sproc tables for 1h/3h windows by avg/total runtime and call count
        top_sprocs = {}
        top_sprocs["hours1avg"] = self.renderTop10LastHours(topsprocs.avgRuntimeOrder, 1, hostId, sprocs_to_show)
        top_sprocs["hours3avg"] = self.renderTop10LastHours(topsprocs.avgRuntimeOrder, 3, hostId, sprocs_to_show)
        top_sprocs["hours1total"] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 1, hostId, sprocs_to_show
        )
        top_sprocs["hours3total"] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 3, hostId, sprocs_to_show
        )
        top_sprocs["hours1calls"] = self.renderTop10LastHours(topsprocs.totalCallsOrder, 1, hostId, sprocs_to_show)
        top_sprocs["hours3calls"] = self.renderTop10LastHours(topsprocs.totalCallsOrder, 3, hostId, sprocs_to_show)
    if tplE._settings.get("show_top_statements", False):
        top_statements = {}
        tsd = topstatements.getTopStatementsData(
            hostId, interval1="3hours", interval2="1hours", limit=sprocs_to_show
        )
        if tsd:
            # int2 is the 1-hour window, int1 the 3-hour window (see call above)
            top_statements["hours1avg"] = self.renderTop10StatementsLastHours(hostId, tsd.get("avg_int2", []))
            top_statements["hours3avg"] = self.renderTop10StatementsLastHours(hostId, tsd.get("avg_int1", []))
            top_statements["hours1total"] = self.renderTop10StatementsLastHours(hostId, tsd.get("total_int2", []))
            top_statements["hours3total"] = self.renderTop10StatementsLastHours(hostId, tsd.get("total_int1", []))
            top_statements["hours1calls"] = self.renderTop10StatementsLastHours(hostId, tsd.get("calls_int2", []))
            top_statements["hours3calls"] = self.renderTop10StatementsLastHours(hostId, tsd.get("calls_int1", []))
    if tplE._settings.get("show_bgwriter_stats", True):
        graph_checkpoint = self.get_rendered_bgwriter_graph(hostId, int(days))
    tmpl = tplE.env.get_template("index.html")
    return tmpl.render(
        hostid=hostId,
        hostname=hosts.getHostnameByHostId(hostId),
        hostuiname=hostUiName,
        graph_load=graph_load,
        graph_wal=graph_wal,
        graph_size=graph_size,
        graph_dbstats=graph_dbstats,
        graph_checkpoint=graph_checkpoint,
        top_sprocs=top_sprocs,
        top_statements=top_statements,
        limit=sprocs_to_show,
        features=hosts.getActiveFeatures(hostId),
        global_announcement=global_announcement,
        target="World",
    )