def raw(self, host, table, from_date=None, to_date=None):
    # accept either a numeric host id or a UI shortname
    host = int(host) if host.isdigit() else hosts.uiShortnameToHostId(host)

    # default window: the last 14 days up to tomorrow
    if not from_date:
        from_date = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
    if not to_date:
        to_date = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    return indexdata.getIndexesDataForTable(host, table, from_date, to_date)

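# The handlers in this module repeatedly resolve a host argument that can be either
# a numeric id or a UI shortname. The sketch below is a minimal, hypothetical helper
# illustrating that pattern; resolve_host_id is not a name from this module, and it
# assumes (as the guard clauses in these handlers suggest) that
# hosts.uiShortnameToHostId() returns None for an unknown shortname.
def resolve_host_id(host):
    """Hypothetical sketch: map a numeric id or UI shortname to an int host id, or None."""
    if host is None:
        return None
    host = str(host)
    if host.isdigit():
        return int(host)
    host_id = hosts.uiShortnameToHostId(host)  # relies on the module's existing 'hosts' import
    return int(host_id) if host_id is not None else None
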
def index(self, hostId=None):
    # accept either a numeric host id or a UI shortname
    if hostId is not None and not hostId.isdigit():
        hostId = hosts.uiShortnameToHostId(hostId)

    weeks = 10
    data = reportdata.getLoadReportData(hostId, weeks - 1)
    graph_load = []
    graph_wal = []

    if hostId:
        graph_data = reportdata.getLoadReportDataDailyAvg(hostId, weeks - 1)

        graph_load = flotgraph.Graph('graph_load', 'left', 30)
        graph_load.addSeries('CPU Load daily avg.', 'cpu')
        graph_wal = flotgraph.SizeGraph('graph_wal')
        graph_wal.addSeries('WAL daily avg.', 'wal')

        for p in graph_data:
            # Flot expects x values as epoch milliseconds
            graph_load.addPoint('cpu', int(time.mktime(p['date'].timetuple()) * 1000), p['cpu_15'])
            graph_wal.addPoint('wal', int(time.mktime(p['date'].timetuple()) * 1000), p['wal_b'])

        graph_load = graph_load.render()
        graph_wal = graph_wal.render()

    table = tplE.env.get_template('report_basic.html')
    return table.render(hosts=hosts.hosts, data=data, graph_load=graph_load,
                        graph_wal=graph_wal, weeks=weeks)

def raw(self, host=None):
    if host is None:
        host_id = None
    elif host.isdigit():
        host_id = host
    else:
        host_id = hosts.uiShortnameToHostId(host)
    return reportdata.getLoadReportData(host_id)

def index(self, hostId=None):
    if hostId is not None and not hostId.isdigit():
        hostId = hosts.uiShortnameToHostId(hostId)

    data = reportdata.getLoadReportData(hostId)

    table = tplE.env.get_template("report_basic.html")
    return table.render(hosts=hosts.hosts, data=data)

def default(self, *p, **params):
    graphcalls = flotgraph.Graph("graphcalls")
    graphcalls.addSeries('Number of calls', 'calls')

    graphtime = flotgraph.TimeGraph("graphruntime")
    graphtime.addSeries('Total run time', 'runtime')

    graphavg = flotgraph.TimeGraph("graphavg")
    graphavg.addSeries('Average run time', 'avg')

    graphavgself = flotgraph.TimeGraph("graphselfavg")
    graphavgself.addSeries('Average self time', 'avgself')

    if len(p) <= 1:
        return 'Error: Not enough URL parameters'

    hostId = p[0] if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
    name = p[1]
    sprocNr = p[2] if len(p) > 2 else None

    if 'from' in params and 'to' in params:
        interval = {'from': params['from'], 'to': params['to']}
    else:
        # default window: the last 14 days up to tomorrow
        interval = {
            'from': (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d'),
            'to': (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
        }

    data = sprocdata.getSingleSprocData(name, hostId, interval, sprocNr)

    # each series is a list of (timestamp, value) pairs
    for point in data['total_time']:
        graphtime.addPoint('runtime', int(time.mktime(point[0].timetuple()) * 1000), point[1])

    for point in data['calls']:
        graphcalls.addPoint('calls', int(time.mktime(point[0].timetuple()) * 1000), point[1])

    for point in data['avg_time']:
        graphavg.addPoint('avg', int(time.mktime(point[0].timetuple()) * 1000), point[1])

    for point in data['avg_self_time']:
        graphavgself.addPoint('avgself', int(time.mktime(point[0].timetuple()) * 1000), point[1])

    table = tplE.env.get_template('sproc_detail.html')
    return table.render(hostid=int(hostId),
                        hostname=hosts.getHostData()[int(hostId)]['uilongname'],
                        hostuiname=hosts.getHostData()[int(hostId)]['uishortname'],
                        name=data['name'],
                        interval=interval,
                        graphavg=graphavg.render(),
                        graphselfavg=graphavgself.render(),
                        graphcalls=graphcalls.render(),
                        graphruntime=graphtime.render())

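# The graph-point loops above, and in the other handlers, all repeat the same
# conversion from a Python datetime to the epoch-millisecond timestamps Flot
# expects on the x axis. Below is a minimal sketch of that conversion as a
# standalone helper; to_flot_ts is a hypothetical name, and the usage comment
# assumes the data layer returns lists of (datetime, value) tuples, as the
# loops above imply.
import time
import datetime

def to_flot_ts(dt):
    """Hypothetical sketch: naive local datetime -> epoch milliseconds for Flot."""
    return int(time.mktime(dt.timetuple()) * 1000)

# usage sketch (assumed data shape: list of (datetime, value) tuples):
#   for ts, value in data['total_time']:
#       graphtime.addPoint('runtime', to_flot_ts(ts), value)
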
def get_alltables_data(self, hostId, date_from, date_to, order=None, pattern=None):
    hostId = hostId if hostId.isdigit() else hosts.uiShortnameToHostId(hostId)
    if hostId is None:
        return 'valid hostId/hostUiShortname expected'
    if order is None:
        order = 2

    hostname = hosts.getHostData()[int(hostId)]['uilongname']
    top_tables = tabledata.getTopTables(hostId, date_from, date_to, order, None, pattern)

    return hostId, hostname, top_tables, order

def default(self, *p, **params):
    if len(p) < 2:
        return ""

    hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
    hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])

    table_name = p[1]
    if table_name.find('.') == -1:
        raise Exception('Full table name needed, e.g. schema_x.table_y')
    schema = table_name.split('.')[0]

    if 'from' in params and 'to' in params:
        interval = {'from': params['from'], 'to': params['to']}
    else:
        # default window: the last 14 days up to tomorrow
        interval = {
            'from': (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d'),
            'to': (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
        }

    data = indexdata.getIndexesDataForTable(hostId, table_name, interval['from'], interval['to'])

    all_graphs = []
    i = 0
    for x in data:
        one_index_graphs = []
        for k, v in x['data'].iteritems():
            i += 1
            # index size gets a size-formatted y axis, all other metrics a plain one
            if k == 'size':
                graph = flotgraph.SizeGraph("index" + str(i), "right")
            else:
                graph = flotgraph.Graph("index" + str(i), "right")
            graph.addSeries(k, k)
            for point in v:
                graph.addPoint(k, int(time.mktime(point[0].timetuple()) * 1000), point[1])
            one_index_graphs.append({'data': graph.render(), 'i': i, 'type': k})

        one_index_graphs.sort(key=lambda g: g['type'])
        all_graphs.append({'name': x['index_name'],
                           'graphs': one_index_graphs,
                           'last_index_size': x['last_index_size'],
                           'total_end_size': x['total_end_size'],
                           'pct_of_total_end_size': x['pct_of_total_end_size']})

    # biggest indexes first
    all_graphs = sorted(all_graphs, key=lambda g: g['last_index_size'], reverse=True)

    tpl = tplE.env.get_template('table_indexes.html')
    return tpl.render(table_name=table_name,
                      host=hostId,
                      schema=schema,
                      interval=interval,
                      hostuiname=hostUiName,
                      hostname=hosts.getHosts()[hostId]['uilongname'],
                      all_graphs=all_graphs,
                      target='World')

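# Several handlers rebuild the same fallback interval (the last 14 days up to
# tomorrow, formatted as YYYY-MM-DD) when no 'from'/'to' parameters are given.
# The sketch below factors that default out; default_interval is a hypothetical
# name, and the 14-day window is simply the value these handlers hard-code, not
# a configurable setting in the source.
import datetime

def default_interval(days_back=14):
    """Hypothetical sketch: the 'from'/'to' dict used when no range is passed."""
    now = datetime.datetime.now()
    return {
        'from': (now - datetime.timedelta(days=days_back)).strftime('%Y-%m-%d'),
        'to': (now + datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
    }
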
def alltables(self, hostId, order=None):
    table = tplE.env.get_template('tables_size_table_all.html')
    tpl = tplE.env.get_template('all_tables.html')

    hostUiName = hostId if not hostId.isdigit() else hosts.hostIdToUiShortname(hostId)
    hostId = hostId if hostId.isdigit() else hosts.uiShortnameToHostId(hostId)
    if hostId is None:
        return 'valid hostId/hostUiShortname expected'
    if order is None:
        order = 2

    return tpl.render(hostname=hosts.getHostData()[int(hostId)]['settings']['uiLongName'],
                      table=table.render(hostid=hostId,
                                         hostuiname=hostUiName,
                                         order=int(order),
                                         list=tabledata.getTopTables(hostId, None, order)))

def all(self, hostId=None):
    if hostId is not None and not hostId.isdigit():  # ui shortname
        hostId = hosts.uiShortnameToHostId(hostId)
    if hostId is None:
        return 'Needs valid hostId/uiShortName'

    sprocs = sprocdata.getSprocsOrderedBy(hostId)

    graph_list = []
    i = 0
    for s in sprocs:
        d = sprocdata.getSingleSprocData(s, hostId, "('now'::timestamp - '4 days'::interval)")
        i += 1
        graph = flotgraph.TimeGraph("graph" + str(i))
        graph.addSeries('Avg.', 'avg')
        for point in d['avg_time']:
            graph.addPoint('avg', int(time.mktime(point[0].timetuple()) * 1000), point[1])

        # strip the parameter list from the sproc signature for display
        graph_list.append({'graph': graph.render(), 'name': s[0:s.find("(")], 'i': i})

    tpl = tplE.env.get_template('all_sprocs.html')
    return tpl.render(graphs=graph_list)

def default(self, *p, **params):
    if len(p) < 2:
        return ""

    hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
    hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
    name = p[1]

    interval = {}
    if 'interval' in params:
        interval['interval'] = str(params['interval']) + ' days'
    elif 'from' in params and 'to' in params:
        interval['from'] = params['from']
        interval['to'] = params['to']
    else:
        # fall back to the 'days' cookie (default 8 days back), up to tomorrow
        days = int(cherrypy.request.cookie['days'].value) if 'days' in cherrypy.request.cookie else 8
        interval['from'] = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y-%m-%d')
        interval['to'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

    data = tabledata.getTableData(hostId, name, interval)

    graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
    graph_table_size.addSeries("Table Size", "size")
    for point in data['table_size']:
        graph_table_size.addPoint("size", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
    graph_index_size.addSeries("Index Size", "size")
    for point in data['index_size']:
        graph_index_size.addPoint("size", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
    graph_seq_scans.addSeries("Sequential Scans", "count")
    for point in data['seq_scans']:
        graph_seq_scans.addPoint("count", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_index_scans = flotgraph.Graph("graphindexscans", "right")
    graph_index_scans.addSeries("Index Scans", "count")
    for point in data['index_scans']:
        graph_index_scans.addPoint("count", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_t_ins = flotgraph.Graph("gtupins", "right")
    graph_t_ins.addSeries("Inserts", "count", '#FF0000')
    for point in data['ins']:
        graph_t_ins.addPoint("count", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_t_upd = flotgraph.Graph("gtupupd", "right")
    graph_t_upd.addSeries("Updates", "count", '#FF8800')
    graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
    for point in data['upd']:
        graph_t_upd.addPoint("count", int(time.mktime(point[0].timetuple()) * 1000), point[1])
    for point in data['hot']:
        graph_t_upd.addPoint("hotcount", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_t_del = flotgraph.Graph("gtupdel", "right")
    graph_t_del.addSeries("Deletes", "count")
    for point in data['del']:
        graph_t_del.addPoint("count", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    # buffer cache hit/read statistics for the table and its indexes
    data = tabledata.getTableIOData(hostId, name, interval)

    graph_index_iob = flotgraph.Graph("graphindexiob", "right")
    graph_index_iob.addSeries("Index_hit", "ihit")
    for point in data['index_hit']:
        graph_index_iob.addPoint("ihit", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_index_iod = flotgraph.Graph("graphindexiod", "right")
    graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
    for point in data['index_read']:
        graph_index_iod.addPoint("iread", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
    graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
    for point in data['heap_read']:
        graph_heap_iod.addPoint("hread", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
    graph_heap_iob.addSeries("Heap_hit", "hhit")
    for point in data['heap_hit']:
        graph_heap_iob.addPoint("hhit", int(time.mktime(point[0].timetuple()) * 1000), point[1])

    tpl = tplE.env.get_template('table_detail.html')
    return tpl.render(name=name,
                      schema_name=name[:name.find('.')],
                      host=hostId,
                      interval=interval,
                      hostuiname=hostUiName,
                      hostname=hosts.getHosts()[hostId]['uilongname'],
                      graphtablesize=graph_table_size.render(),
                      graphindexsize=graph_index_size.render(),
                      graphseqscans=graph_seq_scans.render(),
                      graphindexscans=graph_index_scans.render(),
                      graphindexiod=graph_index_iod.render(),
                      graphindexiob=graph_index_iob.render(),
                      graphheapiod=graph_heap_iod.render(),
                      graphheapiob=graph_heap_iob.render(),
                      gtupins=graph_t_ins.render(),
                      gtupupd=graph_t_upd.render(),
                      gtupdel=graph_t_del.render(),
                      target='World')

def default(self, hostId=None, limit=10):
    hostUiName = None
    if hostId is None:
        hostId = self.hostId

    # accept either a numeric host id or a UI shortname
    if str(hostId).isdigit():
        hostId = int(hostId)
        hostUiName = hosts.hostIdToUiShortname(hostId)
    else:
        hostUiName = hostId
        hostId = int(hosts.uiShortnameToHostId(hostId))

    load = topsprocs.getLoad(hostId)
    cpuload = topsprocs.getCpuLoad(hostId)
    walvolumes = topsprocs.getWalVolumes(hostId)

    graph1 = flotgraph.Graph("graph1", "left", 30)
    graph_wal = flotgraph.Graph("graph_wal", "left", 30)

    graph1.addSeries('CPU Load 15min avg', 'acpu_15min_avg', '#FF0000')
    for p in cpuload['load_15min_avg']:
        graph1.addPoint('acpu_15min_avg', int(time.mktime(p[0].timetuple()) * 1000), p[1])

    graph1.addSeries('Sproc Load 15 min', 'load_15min')
    for p in load['load_15min']:
        graph1.addPoint('load_15min', int(time.mktime(p[0].timetuple()) * 1000), p[1])

    graph_wal.addSeries('WAL vol. 15 min (in MB)', 'wal_15min')
    for p in walvolumes['wal_15min_growth']:
        graph_wal.addPoint('wal_15min', int(time.mktime(p[0].timetuple()) * 1000), p[1])

    sizes = tabledata.getDatabaseSizes(hostId)
    graph_size = flotgraph.SizeGraph("graph_size")
    if hostId in sizes:
        tabledata.fillGraph(graph_size, sizes[hostId])

    # per-tag sproc load (hard-coded tag series)
    taggedload = sprocdata.getSprocDataByTags()
    graphT = flotgraph.BarGraph("graphtag", "left", 30)
    graphT.addSeries('Articles', 'article', '#FF0000')
    graphT.addSeries('Stock', 'stock', '#0000FF')
    graphT.addSeries('Export', 'export', '#00FF00')
    graphT.addSeries('get_catalog_article', 'get_article', '#00FFFF')

    for p in taggedload[1]:
        graphT.addPoint('article', int(time.mktime(p[0].timetuple()) * 1000), p[1])
    for p in taggedload[2]:
        graphT.addPoint('stock', int(time.mktime(p[0].timetuple()) * 1000), p[1])
    for p in taggedload[3]:
        graphT.addPoint('export', int(time.mktime(p[0].timetuple()) * 1000), p[1])
    for p in taggedload[4]:
        graphT.addPoint('get_article', int(time.mktime(p[0].timetuple()) * 1000), p[1])

    tmpl = tplE.env.get_template('index.html')
    return tmpl.render(hostid=hostId,
                       hostuiname=hostUiName,
                       graph1=graph1.render(),
                       graph_wal=graph_wal.render(),
                       graph_size=graph_size.render(),
                       graphtag=graphT.render(),
                       limit=limit,
                       #top10alltimesavg=self.renderTop10AllTime(topsprocs.avgRuntimeOrder),
                       top10hours1avg=self.renderTop10LastHours(topsprocs.avgRuntimeOrder, 1, hostId, limit),
                       top10hours3avg=self.renderTop10LastHours(topsprocs.avgRuntimeOrder, 3, hostId, limit),
                       #top10alltimestotal=self.renderTop10AllTime(topsprocs.totalRuntimeOrder),
                       top10hours1total=self.renderTop10LastHours(topsprocs.totalRuntimeOrder, 1, hostId, limit),
                       top10hours3total=self.renderTop10LastHours(topsprocs.totalRuntimeOrder, 3, hostId, limit),
                       #top10alltimescalls=self.renderTop10AllTime(topsprocs.totalCallsOrder),
                       top10hours1calls=self.renderTop10LastHours(topsprocs.totalCallsOrder, 1, hostId, limit),
                       top10hours3calls=self.renderTop10LastHours(topsprocs.totalCallsOrder, 3, hostId, limit),
                       target='World')
