コード例 #1
0
    def detailed(self, selected_hostname=None, **params):
        """Render per-schema usage graphs (sproc calls / IUD / scans) for one host.

        params may contain 'from_date' and 'to_date' (YYYY-MM-DD strings,
        defaulting to one week ago .. tomorrow) and 'filter' (schema-name
        filter passed through to the report query).
        """
        schemagraphs = []
        from_date = params.get('from_date', (datetime.datetime.now() - datetime.timedelta(7)).strftime('%Y-%m-%d'))
        to_date = params.get('to_date', (datetime.datetime.now() + datetime.timedelta(1)).strftime('%Y-%m-%d'))
        schema_filter = params.get('filter', '')  # local renamed: 'filter' shadowed the builtin

        if selected_hostname:
            if selected_hostname not in hosts.getAllHostNames():
                # accept UI shortnames as well as full host names
                selected_hostname = hosts.uiShortNameToHostName(selected_hostname)
            usage = reportdata.get_schema_usage_for_host(selected_hostname, from_date, to_date, schema_filter)
            # NB: original code rebound the dict being iterated ('data') as the
            # loop variable - renamed to keep the two meanings apart
            for schema_name, points in usage.iteritems():
                g_calls = flotgraph.Graph(schema_name + "_calls")
                g_calls.addSeries('Sproc calls', 'calls')
                g_iud = flotgraph.Graph(schema_name + "_iud")
                g_iud.addSeries('IUD', 'iud')
                g_scans = flotgraph.Graph(schema_name + "_scans")
                g_scans.addSeries('Seq+Ind Scans', 'scans')
                for p in points:
                    ts = int(time.mktime(p[0].timetuple()) * 1000)  # flot expects ms epoch
                    g_calls.addPoint('calls', ts, p[1][0])
                    g_iud.addPoint('iud', ts, p[1][2] + p[1][3] + p[1][4])
                    g_scans.addPoint('scans', ts, p[1][1])
                schemagraphs.append((schema_name, [g_calls.render(), g_iud.render(), g_scans.render()]))

        table = tplE.env.get_template('perf_schemas_detailed.html')
        return table.render(schemagraphs=schemagraphs, from_date=from_date, to_date=to_date,
                            selected_hostname=selected_hostname, host_names=hosts.getAllHostNames(),
                            filter=schema_filter)
コード例 #2
0
    def graph(self, hostuiname, query_id, **params):
        """Render pg_stat_statements detail graphs for one normalized query.

        The source counters are cumulative, so each plotted point is the delta
        between two consecutive samples; the first sample only seeds prev_row.
        Optional 'from_date'/'to_date' params (YYYY-MM-DD) default to
        yesterday .. tomorrow.
        """
        hostid, hostuiname = hosts.ensureHostIdAndUIShortname(hostuiname)
        from_date = params.get('from_date', (datetime.datetime.now() - datetime.timedelta(1)).strftime('%Y-%m-%d'))
        to_date = params.get('to_date', (datetime.datetime.now() + datetime.timedelta(1)).strftime('%Y-%m-%d'))

        data = reportdata.getStatStatementsGraph(hostid, query_id, from_date, to_date)

        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')
        graphavgruntime = flotgraph.TimeGraph("graphavgruntime")
        graphavgruntime.addSeries('Avg. runtime', 'avgruntime')
        graphruntime = flotgraph.TimeGraph("graphruntime")
        graphruntime.addSeries('Runtime', 'runtime')
        graphblocksread = flotgraph.Graph("graphblocksread")
        graphblocksread.addSeries('Blocks read', 'blocksread')
        graphblocksread.addSeries('Temp blocks read', 'tempblocksread', '#FFFF00')
        graphblockswritten = flotgraph.Graph("graphblockswritten")
        graphblockswritten.addSeries('Blocks written', 'blockswritten')
        graphblockswritten.addSeries('Temp blocks written', 'tempblockswritten', '#FFFF00')

        prev_row = None
        for d in data:
            if prev_row is None:
                prev_row = d
                continue
            ts = int(time.mktime(d['timestamp'].timetuple()) * 1000)  # flot expects ms epoch
            calls = d['calls'] - prev_row['calls']
            graphcalls.addPoint('calls', ts, calls)
            runtime = d['total_time'] - prev_row['total_time']
            graphruntime.addPoint('runtime', ts, runtime)
            # guard against div-by-zero when no calls happened in the interval
            avg_runtime = round(runtime / float(calls), 2) if calls > 0 else 0
            graphavgruntime.addPoint('avgruntime', ts, avg_runtime)
            graphblocksread.addPoint('blocksread', ts, d['blks_read'] - prev_row['blks_read'])
            graphblocksread.addPoint('tempblocksread', ts, d['temp_blks_read'] - prev_row['temp_blks_read'])
            graphblockswritten.addPoint('blockswritten', ts, d['blks_written'] - prev_row['blks_written'])
            graphblockswritten.addPoint('tempblockswritten', ts, d['temp_blks_written'] - prev_row['temp_blks_written'])

            prev_row = d

        if prev_row is None:
            # bugfix: original crashed with TypeError on prev_row['query']
            # below when the query returned no rows for the interval
            return 'No pg_stat_statements data found for this query in the given interval'

        table = tplE.env.get_template('perf_stat_statements_detailed.html')
        return table.render(hostuiname=hostuiname,
                            query=prev_row['query'],
                            query_id=prev_row['query_id'],
                            graphcalls=graphcalls.render(),
                            graphavgruntime=graphavgruntime.render(),
                            graphruntime=graphruntime.render(),
                            graphblocksread=graphblocksread.render(),
                            graphblockswritten=graphblockswritten.render(),
                            from_date=from_date, to_date=to_date)
コード例 #3
0
ファイル: report.py プロジェクト: zloader/PGObserver
    def index(self, hostId=None):
        """Render the basic load report; when a host is selected, additionally
        plot daily-average CPU load and WAL volume graphs for it."""
        if hostId is not None and not hostId.isdigit():
            # translate a UI shortname into a numeric host id
            hostId = hosts.uiShortnameToHostId(hostId)

        weeks = 10
        data = reportdata.getLoadReportData(hostId, weeks - 1)

        # defaults when no host is selected: templates get empty lists
        graph_load = []
        graph_wal = []

        if hostId:
            daily_avgs = reportdata.getLoadReportDataDailyAvg(hostId, weeks - 1)

            load_graph = flotgraph.Graph('graph_load', 'left', 30)
            load_graph.addSeries('CPU Load daily avg.', 'cpu')
            wal_graph = flotgraph.SizeGraph('graph_wal')
            wal_graph.addSeries('WAL daily avg.', 'wal')

            for row in daily_avgs:
                ts = int(time.mktime(row['date'].timetuple()) * 1000)  # ms epoch for flot
                load_graph.addPoint('cpu', ts, row['cpu_15'])
                wal_graph.addPoint('wal', ts, row['wal_b'])

            graph_load = load_graph.render()
            graph_wal = wal_graph.render()

        table = tplE.env.get_template('report_basic.html')
        return table.render(hosts=hosts.hosts,
                            data=data,
                            graph_load=graph_load,
                            graph_wal=graph_wal,
                            weeks=weeks)
コード例 #4
0
    @staticmethod
    def _add_points(graph, series, points):
        """Append (date, value) rows to one flot series, converting dates to ms epoch."""
        for row in points:
            graph.addPoint(series, int(time.mktime(row[0].timetuple()) * 1000), row[1])

    def default(self, *p, **params):
        """Render the table-detail page with size, scan, tuple-IUD and IO graphs.

        URL parts: p[0] = host id or UI shortname, p[1] = full table name.
        The query interval comes from an 'interval' param (N days), explicit
        'from'/'to' params, or falls back to the 'days' cookie (default 8
        days back .. tomorrow).
        """
        if len(p) < 2:
            return ""
        hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
        hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
        name = p[1]
        interval = {}

        if 'interval' in params:
            interval['interval'] = str(params['interval']) + ' days'
        elif 'from' in params and 'to' in params:
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            days = int(cherrypy.request.cookie['days'].value) if 'days' in cherrypy.request.cookie else 8
            interval['from'] = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        data = tabledata.getTableData(hostId, name, interval)

        graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
        graph_table_size.addSeries("Table Size", "size")
        self._add_points(graph_table_size, "size", data['table_size'])

        graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
        graph_index_size.addSeries("Index Size", "size")
        self._add_points(graph_index_size, "size", data['index_size'])

        graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
        graph_seq_scans.addSeries("Sequential Scans", "count")
        self._add_points(graph_seq_scans, "count", data['seq_scans'])

        graph_index_scans = flotgraph.Graph("graphindexscans", "right")
        graph_index_scans.addSeries("Index Scans", "count")
        self._add_points(graph_index_scans, "count", data['index_scans'])

        graph_t_ins = flotgraph.Graph("gtupins", "right")
        graph_t_ins.addSeries("Inserts", "count", '#FF0000')
        self._add_points(graph_t_ins, "count", data['ins'])

        graph_t_upd = flotgraph.Graph("gtupupd", "right")
        graph_t_upd.addSeries("Updates", "count", '#FF8800')
        graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
        self._add_points(graph_t_upd, "count", data['upd'])
        self._add_points(graph_t_upd, "hotcount", data['hot'])

        graph_t_del = flotgraph.Graph("gtupdel", "right")
        graph_t_del.addSeries("Deletes", "count")
        self._add_points(graph_t_del, "count", data['del'])

        # second query: buffer-cache hit/read statistics for the same table
        data = tabledata.getTableIOData(hostId, name, interval)

        graph_index_iob = flotgraph.Graph("graphindexiob", "right")
        graph_index_iob.addSeries("Index_hit", "ihit")
        self._add_points(graph_index_iob, "ihit", data['index_hit'])

        graph_index_iod = flotgraph.Graph("graphindexiod", "right")
        graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
        self._add_points(graph_index_iod, "iread", data['index_read'])

        graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
        graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
        self._add_points(graph_heap_iod, "hread", data['heap_read'])

        graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
        graph_heap_iob.addSeries("Heap_hit", "hhit")
        self._add_points(graph_heap_iob, "hhit", data['heap_hit'])

        tpl = tplE.env.get_template('table_detail.html')
        return tpl.render(name=name,
                          schema_name=name[:name.find('.')],
                          host=hostId,
                          interval=interval,
                          hostuiname=hostUiName,
                          hostname=hosts.getHosts()[hostId]['uilongname'],
                          graphtablesize=graph_table_size.render(),
                          graphindexsize=graph_index_size.render(),
                          graphseqscans=graph_seq_scans.render(),
                          graphindexscans=graph_index_scans.render(),

                          graphindexiod=graph_index_iod.render(),
                          graphindexiob=graph_index_iob.render(),
                          graphheapiod=graph_heap_iod.render(),
                          graphheapiob=graph_heap_iob.render(),

                          gtupins=graph_t_ins.render(),
                          gtupupd=graph_t_upd.render(),
                          gtupdel=graph_t_del.render(),

                          target='World')
コード例 #5
0
ファイル: indexesfrontend.py プロジェクト: zloader/PGObserver
    def default(self, *p, **params):
        """Render per-index graphs (size, scans, ...) for one table.

        URL parts: p[0] = host id or UI shortname, p[1] = full table name
        ('schema.table'). Optional 'from'/'to' params select the interval,
        defaulting to the last 14 days .. tomorrow.
        """
        if len(p) < 2:
            return ""

        hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
        hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
        table_name = p[1]
        if table_name.find('.') == -1:
            raise Exception('Full table name needed, e.g. schema_x.table_y')
        schema = table_name.split('.')[0]

        interval = {}
        if 'from' in params and 'to' in params:
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            interval['from'] = (datetime.datetime.now() -
                                datetime.timedelta(days=14)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() +
                              datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        data = indexdata.getIndexesDataForTable(hostId, table_name,
                                                interval['from'],
                                                interval['to'])

        all_graphs = []
        i = 0  # running counter so every graph div gets a unique id
        for index_info in data:
            one_index_graphs = []
            for kind, points in index_info['data'].iteritems():
                i += 1
                # index sizes get a byte-formatted axis, other metrics a plain one
                if kind == 'size':
                    graph = flotgraph.SizeGraph("index" + str(i), "right")
                else:
                    graph = flotgraph.Graph("index" + str(i), "right")
                graph.addSeries(kind, kind)
                for point in points:
                    graph.addPoint(kind,
                                   int(time.mktime(point[0].timetuple()) * 1000),
                                   point[1])
                one_index_graphs.append({'data': graph.render(), 'i': i, 'type': kind})
            # perf fix: original re-sorted the list inside the loop after every
            # append; sorting once afterwards yields the same final order
            one_index_graphs.sort(key=lambda g: g['type'])
            all_graphs.append({
                'name': index_info['index_name'],
                'graphs': one_index_graphs,
                'last_index_size': index_info['last_index_size'],
                'total_end_size': index_info['total_end_size'],
                'pct_of_total_end_size': index_info['pct_of_total_end_size']
            })

        # biggest indexes first
        all_graphs = sorted(all_graphs,
                            key=lambda g: g['last_index_size'],
                            reverse=True)

        tpl = tplE.env.get_template('table_indexes.html')
        return tpl.render(table_name=table_name,
                          host=hostId,
                          schema=schema,
                          interval=interval,
                          hostuiname=hostUiName,
                          hostname=hosts.getHosts()[hostId]['uilongname'],
                          all_graphs=all_graphs,
                          target='World')
コード例 #6
0
    def default(self, *p, **params):
        """Render the sproc detail page with call-count and runtime graphs.

        URL parts: p[0] = host id or UI shortname, p[1] (optional) = sproc
        name. A 'search' param redirects to the canonical /sprocs/show/ URL.
        Interval comes from 'from'/'to' params, defaulting to 8 days back ..
        tomorrow.
        """
        if len(p) == 0:
            return """Error: Not enough URL parameters. Hostname needed"""

        hostId, hostName = hosts.ensureHostIdAndUIShortname(p[0])
        sprocName = None

        if len(p) > 1:
            sprocName = p[1]

        if params.get('search'):
            sprocName = params.get('sproc_search')
            url = '/sprocs/show/' + hostName + '/' + sprocName
            raise cherrypy.HTTPRedirect(cherrypy.url(url))

        interval = {}
        interval['from'] = params.get(
            'from', (datetime.datetime.now() -
                     datetime.timedelta(days=8)).strftime('%Y-%m-%d'))
        interval['to'] = params.get(
            'to', (datetime.datetime.now() +
                   datetime.timedelta(days=1)).strftime('%Y-%m-%d'))

        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')

        graphtime = flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries('Total run time', 'runtime')

        graphavg = flotgraph.TimeGraph("graphavg")
        graphavg.addSeries('Average run time', 'avg')

        graphavgself = flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries('Average self time', 'avgself')

        # robustness: defined up-front so the template arguments below never
        # depend on names that only exist when sproc data was found
        sproc_name_wo_params = None
        sproc_params = ""

        data = sprocdata.getSingleSprocData(hostId, sprocName, interval)
        if data['name']:  # None if no data for sproc found
            for point in data['total_time']:
                graphtime.addPoint('runtime',
                                   int(time.mktime(point[0].timetuple()) * 1000),
                                   point[1])

            for point in data['calls']:
                graphcalls.addPoint('calls',
                                    int(time.mktime(point[0].timetuple()) * 1000),
                                    point[1])

            for point in data['avg_time']:
                graphavg.addPoint('avg',
                                  int(time.mktime(point[0].timetuple()) * 1000),
                                  point[1])

            for point in data['avg_self_time']:
                graphavgself.addPoint(
                    'avgself', int(time.mktime(point[0].timetuple()) * 1000), point[1])

            # split "name(param_list)" into the bare name and the param list
            paren = data['name'].find('(')
            sproc_name_wo_params = data['name'] if paren == -1 else data['name'][0:paren]
            sproc_params = "" if paren == -1 else data['name'][paren + 1:-1]

        all_sprocs = sprocdata.getAllActiveSprocNames(hostId)

        table = tplE.env.get_template('sproc_detail.html')
        return table.render(
            hostid=hostId,
            hostname=hosts.getHostData()[int(hostId)]['uilongname'],
            hostuiname=hostName,
            name_w_params=data['name'],
            params=sproc_params if data['name'] else None,
            name_wo_params=sproc_name_wo_params if data['name'] else None,
            interval=interval,
            sproc_name=sprocName,
            all_sprocs=all_sprocs,
            all_sprocs_json=json.dumps(all_sprocs),
            graphavg=graphavg.render(),
            graphselfavg=graphavgself.render(),
            graphcalls=graphcalls.render(),
            graphruntime=graphtime.render())
コード例 #7
0
    def default(self, hostId):
        """Render the host overview page.

        Builds, depending on tplE feature toggles: CPU/sproc load, WAL volume
        (plus blocked-process counts when the feature is enabled), DB size,
        DB statistics and bgwriter/checkpoint graphs, and the top-sprocs /
        top-statements tables. 'days' and 'sprocs_to_show' come from cookies.
        """
        # bugfix: original called ensureHostIdAndUIShortname(max(hostId, hostId)),
        # where max() of one value twice is a pointless no-op
        hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
        days = (cherrypy.request.cookie['days'].value
                if 'days' in cherrypy.request.cookie else '8')
        sprocs_to_show = (int(cherrypy.request.cookie['sprocs_to_show'].value)
                          if 'sprocs_to_show' in cherrypy.request.cookie else
                          10)
        graph_load = None
        graph_wal = None
        graph_size = None
        graph_dbstats = None
        top_sprocs = None
        top_statements = None
        graph_checkpoint = None
        global_announcement = reportdata.getGetActiveFrontendAnnouncementIfAny(
        )  # fyi - no escaping is performed deliberately

        if tplE._settings.get('show_load', True):
            graph_load = flotgraph.Graph('graph_load', 'left', 30)
            graph_load.addSeries('CPU Load 15min avg', 'acpu_15min_avg',
                                 '#FF0000')
            cpuload = topsprocs.getCpuLoad(hostId, days)
            for p in cpuload['load_15min_avg']:
                graph_load.addPoint('acpu_15min_avg',
                                    int(time.mktime(p[0].timetuple()) * 1000),
                                    p[1])

            load = topsprocs.getLoad(hostId, days)
            graph_load.addSeries('Sproc Load 15 min', 'load_15min')
            for p in load['load_15min']:
                graph_load.addPoint('load_15min',
                                    int(time.mktime(p[0].timetuple()) * 1000),
                                    p[1])
            graph_load = graph_load.render()

        if tplE._settings.get('show_wal', True):
            graph_wal = flotgraph.Graph('graph_wal', 'left', 30)
            graph_wal.addSeries('WAL vol. 15 min (in MB)', 'wal_15min')
            walvolumes = topsprocs.getWalVolumes(hostId, days)
            for p in walvolumes['wal_15min_growth']:
                graph_wal.addPoint('wal_15min',
                                   int(time.mktime(p[0].timetuple()) * 1000),
                                   p[1])

            if hosts.isHostFeatureEnabled(hostId,
                                          'blockingStatsGatherInterval'):
                blocked_processes = topsprocs.getBlockedProcessesCounts(
                    hostId, days)
                graph_wal.addSeries('#Blocked processes (> 5s)',
                                    'blocked_processes', '#FF0000', None, 2)
                for p in blocked_processes:
                    # only plot points that fall inside the WAL timeline so
                    # both series share the same x-range
                    if len(walvolumes['wal_15min_growth']
                           ) > 0 and p[0].timetuple(
                           ) >= walvolumes['wal_15min_growth'][0][0].timetuple(
                           ):
                        graph_wal.addPoint(
                            'blocked_processes',
                            int(time.mktime(p[0].timetuple()) * 1000), p[1])
            graph_wal = graph_wal.render()

        if tplE._settings.get('show_db_size', True):
            graph_size = flotgraph.SizeGraph('graph_size')
            sizes = tabledata.getDatabaseSizes(hostId, days)
            if hostId in sizes:
                tabledata.fillGraph(graph_size, sizes[hostId])
            graph_size = graph_size.render()

        if tplE._settings.get('show_db_stats', True):
            dbstats = reportdata.getDatabaseStatistics(hostId, days)
            if len(dbstats) > 0:
                graph_dbstats = flotgraph.SizeGraph('graph_dbstats')
                graph_dbstats.addSeries('Temp bytes written',
                                        'temp_files_bytes')
                graph_dbstats.addSeries('#Backends / 10', 'numbackends',
                                        '#C0C0C0', None, 2)
                graph_dbstats.addSeries('#Deadlocks', 'deadlocks', '#FF0000',
                                        None, 2)
                graph_dbstats.addSeries('#Rollbacks [incl. exceptions]',
                                        'rollbacks', '#FFFF00', None, 2)
                for d in dbstats:
                    timestamp = int(
                        time.mktime(d['timestamp'].timetuple()) * 1000)
                    graph_dbstats.addPoint('temp_files_bytes', timestamp,
                                           d['temp_files_bytes'])
                    graph_dbstats.addPoint('deadlocks', timestamp,
                                           d['deadlocks'])
                    # scaled by 1/10 to match the '#Backends / 10' series label
                    graph_dbstats.addPoint('numbackends', timestamp,
                                           d['numbackends'] / 10.0)
                    graph_dbstats.addPoint('rollbacks', timestamp,
                                           d['rollbacks'])
                graph_dbstats = graph_dbstats.render()

        if tplE._settings.get('show_top_sprocs', True):
            top_sprocs = {}
            top_sprocs['hours1avg'] = self.renderTop10LastHours(
                topsprocs.avgRuntimeOrder, 1, hostId, sprocs_to_show)
            top_sprocs['hours3avg'] = self.renderTop10LastHours(
                topsprocs.avgRuntimeOrder, 3, hostId, sprocs_to_show)

            top_sprocs['hours1total'] = self.renderTop10LastHours(
                topsprocs.totalRuntimeOrder, 1, hostId, sprocs_to_show)
            top_sprocs['hours3total'] = self.renderTop10LastHours(
                topsprocs.totalRuntimeOrder, 3, hostId, sprocs_to_show)

            top_sprocs['hours1calls'] = self.renderTop10LastHours(
                topsprocs.totalCallsOrder, 1, hostId, sprocs_to_show)
            top_sprocs['hours3calls'] = self.renderTop10LastHours(
                topsprocs.totalCallsOrder, 3, hostId, sprocs_to_show)

        if tplE._settings.get('show_top_statements', False):
            top_statements = {}
            tsd = topstatements.getTopStatementsData(hostId,
                                                     interval1='3hours',
                                                     interval2='1hours',
                                                     limit=sprocs_to_show)
            if tsd:
                # int2 = the shorter interval (1h), int1 = the longer one (3h)
                top_statements[
                    'hours1avg'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('avg_int2', []))
                top_statements[
                    'hours3avg'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('avg_int1', []))

                top_statements[
                    'hours1total'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('total_int2', []))
                top_statements[
                    'hours3total'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('total_int1', []))

                top_statements[
                    'hours1calls'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('calls_int2', []))
                top_statements[
                    'hours3calls'] = self.renderTop10StatementsLastHours(
                        hostId, tsd.get('calls_int1', []))

        if tplE._settings.get('show_bgwriter_stats', True):
            graph_checkpoint = self.get_rendered_bgwriter_graph(
                hostId, int(days))

        tmpl = tplE.env.get_template('index.html')
        return tmpl.render(
            hostid=hostId,
            hostname=hosts.getHostnameByHostId(hostId),
            hostuiname=hostUiName,
            graph_load=graph_load,
            graph_wal=graph_wal,
            graph_size=graph_size,
            graph_dbstats=graph_dbstats,
            graph_checkpoint=graph_checkpoint,
            top_sprocs=top_sprocs,
            top_statements=top_statements,
            limit=sprocs_to_show,
            features=hosts.getActiveFeatures(hostId),
            global_announcement=global_announcement,
            target='World',
        )
コード例 #8
0
ファイル: logfrontend.py プロジェクト: abdoni/PGObserver
    def show(self):
        """Render the logfiles page: temp-file, error, timeout and wait graphs.

        For the error/user/timeout series, whenever two consecutive samples
        are more than 10 minutes apart two zero points are inserted so flot
        draws the series dropping to zero instead of interpolating a straight
        line across the gap.
        """
        tmpl = env.get_template('logfiles.html')

        graphtemp = flotgraph.Graph("tempfilesgraph")
        graphtemp.addSeries('Temporary files', 'temp_files', '#FF0000')
        tempfile_data = logdata.load_temporary_lines(self.hostId)

        for p in tempfile_data:
            graphtemp.addPoint('temp_files',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])

        grapherror = flotgraph.Graph("errorgraph")
        grapherror.addSeries('Errors', 'errors', '#FF0000')
        grapherror.addSeries('User', 'users', '#FF9900')
        error_data = logdata.load_error_lines(self.hostId)
        last_x = 0
        for p in error_data:
            cur_x = int(time.mktime(p[0].timetuple()))
            if last_x != 0 and cur_x - last_x > 10 * 60:
                grapherror.addPoint('errors', (last_x + 60) * 1000, 0)
                grapherror.addPoint('errors', (cur_x - 60) * 1000, 0)
            # bugfix: last_x was never updated here (only in the timeout loop
            # below), so the gap-fill branch above was dead code
            last_x = cur_x
            grapherror.addPoint('errors', cur_x * 1000, p[1])

        error_data = logdata.load_user_error_lines(self.hostId)

        last_x = 0
        for p in error_data:
            cur_x = int(time.mktime(p[0].timetuple()))
            if last_x != 0 and cur_x - last_x > 10 * 60:
                grapherror.addPoint('users', (last_x + 60) * 1000, 0)
                grapherror.addPoint('users', (cur_x - 60) * 1000, 0)
            # bugfix: same missing last_x update as the 'errors' loop above
            last_x = cur_x
            grapherror.addPoint('users', cur_x * 1000, p[1])

        graphtimeout = flotgraph.Graph("timeoutgraph")
        graphtimeout.addSeries('Timeouts', 'timeout', '#FF0000')
        graphtimeout.addSeries('User', 'users', '#FF9900')
        timeout_data = logdata.load_timeout_lines(self.hostId)

        last_x = 0
        for p in timeout_data:
            cur_x = int(time.mktime(p[0].timetuple()))
            if last_x != 0 and cur_x - last_x > 10 * 60:
                graphtimeout.addPoint('timeout', (last_x + 60) * 1000, 0)
                graphtimeout.addPoint('timeout', (cur_x - 60) * 1000, 0)

            last_x = cur_x

            graphtimeout.addPoint('timeout', cur_x * 1000, p[1])

        timeout_data = logdata.load_user_timeout_lines(self.hostId)
        for p in timeout_data:
            graphtimeout.addPoint('users',
                                  int(time.mktime(p[0].timetuple()) * 1000),
                                  p[1])

        graphwait = flotgraph.Graph("waitgraph")
        graphwait.addSeries('Waits', 'waits', '#FF0000')
        wait_data = logdata.load_wait_lines(self.hostId)

        for p in wait_data:
            graphwait.addPoint('waits',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])

        return tmpl.render(hostid=self.hostId,
                           tempfilesgraph=graphtemp.render(),
                           errorgraph=grapherror.render(),
                           timeoutgraph=graphtimeout.render(),
                           waitgraph=graphwait.render(),
                           target='World')
コード例 #9
0
    def default(self, *p, **params):
        """Render the sproc detail page (legacy URL scheme).

        URL parts: p[0] = host id, p[1] = sproc name, p[2] (optional) = sproc
        number. Interval comes from 'from'/'to' params, defaulting to 14 days
        back .. tomorrow.
        """
        # validate the URL before doing any work (original built four graph
        # objects first and threw them away on error)
        if len(p) <= 1:
            return """Error: Not enough URL parameters"""  # fixed typo: 'paramter'

        hostId = p[0]
        name = p[1]
        sprocNr = p[2] if len(p) > 2 else None

        interval = {}
        if 'from' in params and 'to' in params:
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            interval['from'] = (
                datetime.datetime.now() -
                datetime.timedelta(days=14)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() +
                              datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')

        graphtime = flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries('Total run time', 'runtime')

        graphavg = flotgraph.TimeGraph("graphavg")
        graphavg.addSeries('Average run time', 'avg')

        graphavgself = flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries('Average self time', 'avgself')

        data = sprocdata.getSingleSprocData(name, hostId, interval, sprocNr)

        for point in data['total_time']:
            graphtime.addPoint('runtime',
                               int(time.mktime(point[0].timetuple()) * 1000),
                               point[1])

        for point in data['calls']:
            graphcalls.addPoint('calls',
                                int(time.mktime(point[0].timetuple()) * 1000),
                                point[1])

        for point in data['avg_time']:
            graphavg.addPoint('avg',
                              int(time.mktime(point[0].timetuple()) * 1000),
                              point[1])

        for point in data['avg_self_time']:
            graphavgself.addPoint('avgself',
                                  int(time.mktime(point[0].timetuple()) * 1000),
                                  point[1])

        table = tplE.env.get_template('sproc_detail.html')

        return table.render(hostid=int(hostId),
                            hostname=hosts.getHostData()[int(hostId)]
                            ['settings']['uiLongName'],
                            name=data['name'],
                            interval=interval,
                            graphavg=graphavg.render(),
                            graphselfavg=graphavgself.render(),
                            graphcalls=graphcalls.render(),
                            graphruntime=graphtime.render())
コード例 #10
0
ファイル: tablesfrontend.py プロジェクト: abdoni/PGObserver
    def default(self, *p, **params):
        """Render the detail page for a single table.

        Positional path segments (CherryPy-style dispatch):
            p[0] -- host id (must be convertible via int() for the
                    hosts.getHostData() lookup)
            p[1] -- table name

        Optional keyword params select the reporting interval:
            interval   -- look-back window as a number of days
            from, to   -- explicit 'YYYY-MM-DD' date range (both required)
            (neither)  -- defaults to the last 14 days

        Returns the rendered 'table_detail.html' template, or "" when
        fewer than two path segments are supplied.
        """
        if len(p) < 2:
            return ""

        host = p[0]
        name = p[1]

        # Build the reporting interval: explicit day count, explicit range,
        # or a default window of the last 14 days.  The default 'to' bound
        # is tomorrow so that today's data points fall inside the range.
        if 'interval' in params:
            interval = {'interval': str(params['interval']) + ' days'}
        elif 'from' in params and 'to' in params:
            interval = {'from': params['from'], 'to': params['to']}
        else:
            interval = {
                'from': (datetime.datetime.now() -
                         datetime.timedelta(days=14)).strftime('%Y-%m-%d'),
                'to': (datetime.datetime.now() +
                       datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
            }

        def to_flot_ms(timestamp):
            # flot expects x coordinates as milliseconds since the epoch.
            return int(time.mktime(timestamp.timetuple()) * 1000)

        def add_points(graph, series, points):
            # points is a sequence whose items are (datetime, value) pairs;
            # indexed access (not unpacking) tolerates longer tuples.
            for point in points:
                graph.addPoint(series, to_flot_ms(point[0]), point[1])

        data = tabledata.getTableData(host, name, interval)

        graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
        graph_table_size.addSeries("Table Size", "size")
        add_points(graph_table_size, "size", data['table_size'])

        graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
        graph_index_size.addSeries("Index Size", "size")
        add_points(graph_index_size, "size", data['index_size'])

        graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
        graph_seq_scans.addSeries("Sequential Scans", "count")
        add_points(graph_seq_scans, "count", data['seq_scans'])

        graph_index_scans = flotgraph.Graph("graphindexscans", "right")
        graph_index_scans.addSeries("Index Scans", "count")
        add_points(graph_index_scans, "count", data['index_scans'])

        graph_t_ins = flotgraph.Graph("gtupins", "right")
        graph_t_ins.addSeries("Inserts", "count", '#FF0000')
        add_points(graph_t_ins, "count", data['ins'])

        # Updates and hot updates share one graph as two series.
        graph_t_upd = flotgraph.Graph("gtupupd", "right")
        graph_t_upd.addSeries("Updates", "count", '#FF8800')
        graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
        add_points(graph_t_upd, "count", data['upd'])
        add_points(graph_t_upd, "hotcount", data['hot'])

        graph_t_del = flotgraph.Graph("gtupdel", "right")
        graph_t_del.addSeries("Deletes", "count")
        add_points(graph_t_del, "count", data['del'])

        # Buffer cache hit/read statistics come from a separate query.
        data = tabledata.getTableIOData(host, name)

        graph_index_iob = flotgraph.Graph("graphindexiob", "right")
        graph_index_iob.addSeries("Index_hit", "ihit")
        add_points(graph_index_iob, "ihit", data['index_hit'])

        graph_index_iod = flotgraph.Graph("graphindexiod", "right")
        graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
        add_points(graph_index_iod, "iread", data['index_read'])

        graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
        graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
        add_points(graph_heap_iod, "hread", data['heap_read'])

        graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
        graph_heap_iob.addSeries("Heap_hit", "hhit")
        add_points(graph_heap_iob, "hhit", data['heap_hit'])

        tpl = tplE.env.get_template('table_detail.html')
        return tpl.render(
            name=name,
            host=host,
            interval=interval,
            hostname=hosts.getHostData()[int(host)]['settings']['uiLongName'],
            graphtablesize=graph_table_size.render(),
            graphindexsize=graph_index_size.render(),
            graphseqscans=graph_seq_scans.render(),
            graphindexscans=graph_index_scans.render(),
            graphindexiod=graph_index_iod.render(),
            graphindexiob=graph_index_iob.render(),
            graphheapiod=graph_heap_iod.render(),
            graphheapiob=graph_heap_iob.render(),
            gtupins=graph_t_ins.render(),
            gtupupd=graph_t_upd.render(),
            gtupdel=graph_t_del.render(),
            # NOTE(review): 'target' looks like leftover template
            # boilerplate; kept for behavior compatibility — confirm
            # whether table_detail.html actually references it.
            target='World')