def get_all_databases_size(self):
    """Build one DB-size graph plus rendered size table per known host.

    Returns:
        list of dicts with keys 'id' (graph element id), 't' (size table
        HTML), 'g' (rendered flot graph) and 'h' (the host record).
    """
    sizes = tabledata.getDatabaseSizes()
    systems = []
    for h in hosts.getHostData().values():
        host_id = h['host_id']
        # Skip hosts with no recorded size data instead of raising
        # KeyError (same guard the host index page applies).
        if host_id not in sizes:
            continue
        g = flotgraph.SizeGraph("s" + str(host_id))
        tabledata.fillGraph(g, sizes[host_id])
        s = self.renderSizeTable(host_id)  # TODO(review): renderSizeTable not visible in this file — confirm it exists
        systems.append({'id': "s" + str(host_id),
                        't': s,
                        'g': g.render(),
                        'h': h})
    return systems
def get_rendered_bgwriter_graph(self, hostId, days):
    """Render the bgwriter/checkpoint statistics graph for one host.

    Args:
        hostId: host identifier passed through to the stats query.
        days: how many days back from now to include.

    Returns:
        Rendered flot graph HTML, or '' when no checkpoint data exists
        for the requested period.
    """
    since = datetime.now() - timedelta(days)
    stats = tabledata.retrieve_bgwriter_stats(hostId, since)
    if not stats['avgWritesPerCheckpoint']:
        return ''

    graph = flotgraph.SizeGraph('graph_bgwriter')
    # (label, series key, color, data, y-axis) for each plotted series
    series_specs = [
        ('Avg. Size written per checkpoint', 'avgcheckpoint', '#00FF00',
         stats['avgWritesPerCheckpoint'], 1),
        ('Checkpoint Request Count %', 'checkpointReqPct', '#FF0000',
         stats['checkpointRequestPercentage'], 2),
        ('Checkpoint Write Size %', 'chkpWritePct', '#FF5500',
         stats['checkpoint_write_percentage'], 2),
        ('Backend Write Size %', 'backWritePct', '#00C000',
         stats['backend_write_percentage'], 2),
    ]
    for label, key, color, data, axis in series_specs:
        graph.addSeries(label, key, color, data, axis)
    return graph.render()
def index(self, hostId=None):
    """Render the basic load report page, optionally scoped to one host.

    When hostId is given (id or UI shortname), daily-average CPU load
    and WAL graphs are rendered for it; otherwise the graphs are empty.
    """
    if hostId is not None and not hostId.isdigit():
        hostId = hosts.uiShortnameToHostId(hostId)
    weeks = 10
    data = reportdata.getLoadReportData(hostId, weeks - 1)

    graph_load = []
    graph_wal = []
    if hostId:
        daily_rows = reportdata.getLoadReportDataDailyAvg(hostId, weeks - 1)

        load_graph = flotgraph.Graph('graph_load', 'left', 30)
        load_graph.addSeries('CPU Load daily avg.', 'cpu')
        wal_graph = flotgraph.SizeGraph('graph_wal')
        wal_graph.addSeries('WAL daily avg.', 'wal')

        for row in daily_rows:
            # flot expects epoch milliseconds
            ts = int(time.mktime(row['date'].timetuple()) * 1000)
            load_graph.addPoint('cpu', ts, row['cpu_15'])
            wal_graph.addPoint('wal', ts, row['wal_b'])

        graph_load = load_graph.render()
        graph_wal = wal_graph.render()

    table = tplE.env.get_template('report_basic.html')
    return table.render(hosts=hosts.hosts,
                        data=data,
                        graph_load=graph_load,
                        graph_wal=graph_wal,
                        weeks=weeks)
def index(self):
    """Render the tables overview page: one size graph + table per host.

    Hosts are sorted by their UI short name before rendering.
    """
    sizes = tabledata.getDatabaseSizes()
    systems = []
    for h in hosts.getHostData().values():
        host_id = h['host_id']
        # Skip hosts with no recorded size data instead of raising
        # KeyError (same guard the host index page applies).
        if host_id not in sizes:
            continue
        g = flotgraph.SizeGraph("s" + str(host_id))
        tabledata.fillGraph(g, sizes[host_id])
        systems.append({'id': "s" + str(host_id),
                        't': self.renderSizeTable(host_id),
                        'g': g.render(),
                        'h': h})
    tmpl = tplE.env.get_template('tables.html')
    return tmpl.render(systems=sorted(
        systems, key=lambda x: x['h']['settings']['uiShortName']),
        target='World')
def default(self, *p, **params):
    """Render the table detail page: size, scan, tuple and I/O graphs.

    Args:
        p[0]: host id (digits) or UI shortname.
        p[1]: full table name, 'schema.table'.
        params: optional 'interval' (number of days) or 'from'/'to'
            date bounds; defaults to the 'days' cookie (8 days) ending
            tomorrow.

    Returns:
        Rendered table_detail.html, or "" when fewer than two path
        segments were supplied.
    """
    if len(p) < 2:
        return ""
    hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
    hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
    name = p[1]

    interval = {}
    if 'interval' in params:
        interval['interval'] = str(params['interval']) + ' days'
    elif 'from' in params and 'to' in params:
        interval['from'] = params['from']
        interval['to'] = params['to']
    else:
        days = int(cherrypy.request.cookie['days'].value) if 'days' in cherrypy.request.cookie else 8
        interval['from'] = (datetime.datetime.now()
                            - datetime.timedelta(days=days)).strftime('%Y-%m-%d')
        interval['to'] = (datetime.datetime.now()
                          + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    def fill(graph, series, rows):
        # One point per (timestamp, value) row; flot expects epoch millis.
        for ts, value in rows:
            graph.addPoint(series, int(time.mktime(ts.timetuple()) * 1000), value)

    data = tabledata.getTableData(hostId, name, interval)

    graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
    graph_table_size.addSeries("Table Size", "size")
    fill(graph_table_size, "size", data['table_size'])

    graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
    graph_index_size.addSeries("Index Size", "size")
    fill(graph_index_size, "size", data['index_size'])

    graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
    graph_seq_scans.addSeries("Sequential Scans", "count")
    fill(graph_seq_scans, "count", data['seq_scans'])

    graph_index_scans = flotgraph.Graph("graphindexscans", "right")
    graph_index_scans.addSeries("Index Scans", "count")
    fill(graph_index_scans, "count", data['index_scans'])

    graph_t_ins = flotgraph.Graph("gtupins", "right")
    graph_t_ins.addSeries("Inserts", "count", '#FF0000')
    fill(graph_t_ins, "count", data['ins'])

    graph_t_upd = flotgraph.Graph("gtupupd", "right")
    graph_t_upd.addSeries("Updates", "count", '#FF8800')
    graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
    fill(graph_t_upd, "count", data['upd'])
    fill(graph_t_upd, "hotcount", data['hot'])

    graph_t_del = flotgraph.Graph("gtupdel", "right")
    graph_t_del.addSeries("Deletes", "count")
    fill(graph_t_del, "count", data['del'])

    data = tabledata.getTableIOData(hostId, name, interval)

    graph_index_iob = flotgraph.Graph("graphindexiob", "right")
    graph_index_iob.addSeries("Index_hit", "ihit")
    fill(graph_index_iob, "ihit", data['index_hit'])

    graph_index_iod = flotgraph.Graph("graphindexiod", "right")
    graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
    fill(graph_index_iod, "iread", data['index_read'])

    graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
    graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
    fill(graph_heap_iod, "hread", data['heap_read'])

    graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
    graph_heap_iob.addSeries("Heap_hit", "hhit")
    fill(graph_heap_iob, "hhit", data['heap_hit'])

    tpl = tplE.env.get_template('table_detail.html')
    return tpl.render(name=name,
                      schema_name=name[:name.find('.')],
                      host=hostId,
                      interval=interval,
                      hostuiname=hostUiName,
                      hostname=hosts.getHosts()[hostId]['uilongname'],
                      graphtablesize=graph_table_size.render(),
                      graphindexsize=graph_index_size.render(),
                      graphseqscans=graph_seq_scans.render(),
                      graphindexscans=graph_index_scans.render(),
                      graphindexiod=graph_index_iod.render(),
                      graphindexiob=graph_index_iob.render(),
                      graphheapiod=graph_heap_iod.render(),
                      graphheapiob=graph_heap_iob.render(),
                      gtupins=graph_t_ins.render(),
                      gtupupd=graph_t_upd.render(),
                      gtupdel=graph_t_del.render(),
                      target='World')
def default(self, *p, **params):
    """Render the per-table index overview page.

    Args:
        p[0]: host id (digits) or UI shortname.
        p[1]: full table name, 'schema.table'.
        params: optional 'from'/'to' date bounds; default is the last
            14 days ending tomorrow.

    Returns:
        Rendered table_indexes.html, or "" when fewer than two path
        segments were supplied.

    Raises:
        Exception: when the table name is not schema-qualified.
    """
    if len(p) < 2:
        return ""
    hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
    hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
    table_name = p[1]
    if table_name.find('.') == -1:
        raise Exception('Full table name needed, e.g. schema_x.table_y')
    schema = table_name.split('.')[0]

    interval = {}
    if 'from' in params and 'to' in params:
        interval['from'] = params['from']
        interval['to'] = params['to']
    else:
        interval['from'] = (datetime.datetime.now()
                            - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
        interval['to'] = (datetime.datetime.now()
                          + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    data = indexdata.getIndexesDataForTable(hostId, table_name,
                                            interval['from'], interval['to'])

    all_graphs = []
    i = 0
    for x in data:
        one_index_graphs = []
        # items() instead of the Python-2-only iteritems() so this also
        # runs under Python 3; iteration behavior is identical.
        for k, v in x['data'].items():
            i += 1
            if k == 'size':
                graph = flotgraph.SizeGraph("index" + str(i), "right")
            else:
                graph = flotgraph.Graph("index" + str(i), "right")
            graph.addSeries(k, k)
            for point in v:
                graph.addPoint(k, int(time.mktime(point[0].timetuple()) * 1000),
                               point[1])
            one_index_graphs.append({'data': graph.render(), 'i': i, 'type': k})
        one_index_graphs.sort(key=lambda g: g['type'])
        all_graphs.append({'name': x['index_name'],
                           'graphs': one_index_graphs,
                           'last_index_size': x['last_index_size'],
                           'total_end_size': x['total_end_size'],
                           'pct_of_total_end_size': x['pct_of_total_end_size']})

    # Biggest indexes first
    all_graphs = sorted(all_graphs, key=lambda g: g['last_index_size'],
                        reverse=True)

    tpl = tplE.env.get_template('table_indexes.html')
    return tpl.render(table_name=table_name,
                      host=hostId,
                      schema=schema,
                      interval=interval,
                      hostuiname=hostUiName,
                      hostname=hosts.getHosts()[hostId]['uilongname'],
                      all_graphs=all_graphs,
                      target='World')
def default(self, hostId):
    """Render the main per-host dashboard (index.html).

    Each section (load, WAL, DB size, DB stats, top sprocs, top
    statements, bgwriter) is rendered only when enabled in
    tplE._settings; disabled or empty sections stay None so the
    template can omit them.

    Args:
        hostId: host id or UI shortname; resolved to both forms.
    """
    # max(hostId, hostId) in the original was a no-op — pass hostId directly.
    hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
    days = (cherrypy.request.cookie['days'].value
            if 'days' in cherrypy.request.cookie else '8')
    sprocs_to_show = (int(cherrypy.request.cookie['sprocs_to_show'].value)
                      if 'sprocs_to_show' in cherrypy.request.cookie else 10)

    graph_load = None
    graph_wal = None
    graph_size = None
    graph_dbstats = None
    top_sprocs = None
    top_statements = None
    graph_checkpoint = None
    global_announcement = reportdata.getGetActiveFrontendAnnouncementIfAny()  # fyi - no escaping is performed deliberately

    if tplE._settings.get('show_load', True):
        graph_load = flotgraph.Graph('graph_load', 'left', 30)
        graph_load.addSeries('CPU Load 15min avg', 'acpu_15min_avg', '#FF0000')
        cpuload = topsprocs.getCpuLoad(hostId, days)
        for p in cpuload['load_15min_avg']:
            graph_load.addPoint('acpu_15min_avg',
                                int(time.mktime(p[0].timetuple()) * 1000), p[1])
        load = topsprocs.getLoad(hostId, days)
        graph_load.addSeries('Sproc Load 15 min', 'load_15min')
        for p in load['load_15min']:
            graph_load.addPoint('load_15min',
                                int(time.mktime(p[0].timetuple()) * 1000), p[1])
        graph_load = graph_load.render()

    if tplE._settings.get('show_wal', True):
        graph_wal = flotgraph.Graph('graph_wal', 'left', 30)
        graph_wal.addSeries('WAL vol. 15 min (in MB)', 'wal_15min')
        walvolumes = topsprocs.getWalVolumes(hostId, days)
        for p in walvolumes['wal_15min_growth']:
            graph_wal.addPoint('wal_15min',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])
        if hosts.isHostFeatureEnabled(hostId, 'blockingStatsGatherInterval'):
            blocked_processes = topsprocs.getBlockedProcessesCounts(hostId, days)
            graph_wal.addSeries('#Blocked processes (> 5s)',
                                'blocked_processes', '#FF0000', None, 2)
            for p in blocked_processes:
                # aligning timeline with WAL data
                if (len(walvolumes['wal_15min_growth']) > 0
                        and p[0].timetuple() >= walvolumes['wal_15min_growth'][0][0].timetuple()):
                    graph_wal.addPoint('blocked_processes',
                                       int(time.mktime(p[0].timetuple()) * 1000),
                                       p[1])
        graph_wal = graph_wal.render()

    if tplE._settings.get('show_db_size', True):
        graph_size = flotgraph.SizeGraph('graph_size')
        sizes = tabledata.getDatabaseSizes(hostId, days)
        if hostId in sizes:
            tabledata.fillGraph(graph_size, sizes[hostId])
        graph_size = graph_size.render()

    if tplE._settings.get('show_db_stats', True):
        dbstats = reportdata.getDatabaseStatistics(hostId, days)
        if len(dbstats) > 0:
            graph_dbstats = flotgraph.SizeGraph('graph_dbstats')
            graph_dbstats.addSeries('Temp bytes written', 'temp_files_bytes')
            graph_dbstats.addSeries('#Backends / 10', 'numbackends',
                                    '#C0C0C0', None, 2)
            graph_dbstats.addSeries('#Deadlocks', 'deadlocks',
                                    '#FF0000', None, 2)
            graph_dbstats.addSeries('#Rollbacks [incl. exceptions]',
                                    'rollbacks', '#FFFF00', None, 2)
            for d in dbstats:
                timestamp = int(time.mktime(d['timestamp'].timetuple()) * 1000)
                graph_dbstats.addPoint('temp_files_bytes', timestamp,
                                       d['temp_files_bytes'])
                graph_dbstats.addPoint('deadlocks', timestamp, d['deadlocks'])
                # backends plotted at 1/10 scale to share the axis
                graph_dbstats.addPoint('numbackends', timestamp,
                                       d['numbackends'] / 10.0)
                graph_dbstats.addPoint('rollbacks', timestamp, d['rollbacks'])
            graph_dbstats = graph_dbstats.render()

    if tplE._settings.get('show_top_sprocs', True):
        top_sprocs = {}
        top_sprocs['hours1avg'] = self.renderTop10LastHours(
            topsprocs.avgRuntimeOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3avg'] = self.renderTop10LastHours(
            topsprocs.avgRuntimeOrder, 3, hostId, sprocs_to_show)
        top_sprocs['hours1total'] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3total'] = self.renderTop10LastHours(
            topsprocs.totalRuntimeOrder, 3, hostId, sprocs_to_show)
        top_sprocs['hours1calls'] = self.renderTop10LastHours(
            topsprocs.totalCallsOrder, 1, hostId, sprocs_to_show)
        top_sprocs['hours3calls'] = self.renderTop10LastHours(
            topsprocs.totalCallsOrder, 3, hostId, sprocs_to_show)

    if tplE._settings.get('show_top_statements', False):
        top_statements = {}
        tsd = topstatements.getTopStatementsData(hostId,
                                                 interval1='3hours',
                                                 interval2='1hours',
                                                 limit=sprocs_to_show)
        if tsd:
            top_statements['hours1avg'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('avg_int2', []))
            top_statements['hours3avg'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('avg_int1', []))
            top_statements['hours1total'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('total_int2', []))
            top_statements['hours3total'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('total_int1', []))
            top_statements['hours1calls'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('calls_int2', []))
            top_statements['hours3calls'] = self.renderTop10StatementsLastHours(
                hostId, tsd.get('calls_int1', []))

    if tplE._settings.get('show_bgwriter_stats', True):
        graph_checkpoint = self.get_rendered_bgwriter_graph(hostId, int(days))

    tmpl = tplE.env.get_template('index.html')
    return tmpl.render(
        hostid=hostId,
        hostname=hosts.getHostnameByHostId(hostId),
        hostuiname=hostUiName,
        graph_load=graph_load,
        graph_wal=graph_wal,
        graph_size=graph_size,
        graph_dbstats=graph_dbstats,
        graph_checkpoint=graph_checkpoint,
        top_sprocs=top_sprocs,
        top_statements=top_statements,
        limit=sprocs_to_show,
        features=hosts.getActiveFeatures(hostId),
        global_announcement=global_announcement,
        target='World',
    )
def default(self, *p, **params):
    """Render the table detail page (legacy variant keyed by raw host id).

    Args:
        p[0]: host id (used verbatim in data queries, int-converted for
            the hostname lookup).
        p[1]: table name.
        params: optional 'interval' (number of days) or 'from'/'to'
            date bounds; default is the last 14 days ending tomorrow.

    Returns:
        Rendered table_detail.html, or "" when fewer than two path
        segments were supplied.
    """
    if len(p) < 2:
        return ""
    host = p[0]
    name = p[1]

    interval = {}
    if 'interval' in params:
        interval['interval'] = str(params['interval']) + ' days'
    elif 'from' in params and 'to' in params:
        interval['from'] = params['from']
        interval['to'] = params['to']
    else:
        interval['from'] = (datetime.datetime.now()
                            - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
        interval['to'] = (datetime.datetime.now()
                          + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    def fill(graph, series, rows):
        # One point per (timestamp, value) row; flot expects epoch millis.
        for ts, value in rows:
            graph.addPoint(series, int(time.mktime(ts.timetuple()) * 1000), value)

    data = tabledata.getTableData(host, name, interval)

    graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
    graph_table_size.addSeries("Table Size", "size")
    fill(graph_table_size, "size", data['table_size'])

    graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
    graph_index_size.addSeries("Index Size", "size")
    fill(graph_index_size, "size", data['index_size'])

    graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
    graph_seq_scans.addSeries("Sequential Scans", "count")
    fill(graph_seq_scans, "count", data['seq_scans'])

    graph_index_scans = flotgraph.Graph("graphindexscans", "right")
    graph_index_scans.addSeries("Index Scans", "count")
    fill(graph_index_scans, "count", data['index_scans'])

    graph_t_ins = flotgraph.Graph("gtupins", "right")
    graph_t_ins.addSeries("Inserts", "count", '#FF0000')
    fill(graph_t_ins, "count", data['ins'])

    graph_t_upd = flotgraph.Graph("gtupupd", "right")
    graph_t_upd.addSeries("Updates", "count", '#FF8800')
    graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
    fill(graph_t_upd, "count", data['upd'])
    fill(graph_t_upd, "hotcount", data['hot'])

    graph_t_del = flotgraph.Graph("gtupdel", "right")
    graph_t_del.addSeries("Deletes", "count")
    fill(graph_t_del, "count", data['del'])

    # NOTE: this variant queries I/O data without the interval filter
    data = tabledata.getTableIOData(host, name)

    graph_index_iob = flotgraph.Graph("graphindexiob", "right")
    graph_index_iob.addSeries("Index_hit", "ihit")
    fill(graph_index_iob, "ihit", data['index_hit'])

    graph_index_iod = flotgraph.Graph("graphindexiod", "right")
    graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
    fill(graph_index_iod, "iread", data['index_read'])

    graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
    graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
    fill(graph_heap_iod, "hread", data['heap_read'])

    graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
    graph_heap_iob.addSeries("Heap_hit", "hhit")
    fill(graph_heap_iob, "hhit", data['heap_hit'])

    tpl = tplE.env.get_template('table_detail.html')
    return tpl.render(
        name=name,
        host=host,
        interval=interval,
        hostname=hosts.getHostData()[int(host)]['settings']['uiLongName'],
        graphtablesize=graph_table_size.render(),
        graphindexsize=graph_index_size.render(),
        graphseqscans=graph_seq_scans.render(),
        graphindexscans=graph_index_scans.render(),
        graphindexiod=graph_index_iod.render(),
        graphindexiob=graph_index_iob.render(),
        graphheapiod=graph_heap_iod.render(),
        graphheapiob=graph_heap_iob.render(),
        gtupins=graph_t_ins.render(),
        gtupupd=graph_t_upd.render(),
        gtupdel=graph_t_del.render(),
        target='World')