Example #1
    def default(self, *p, **params):
            graphcalls= flotgraph.Graph("graphcalls")
            graphcalls.addSeries('Number of calls', 'calls')

            graphtime= flotgraph.TimeGraph("graphruntime")
            graphtime.addSeries('Total run time', 'runtime')

            graphavg= flotgraph.TimeGraph("graphavg")
            graphavg.addSeries('Average run time', 'avg')

            graphavgself= flotgraph.TimeGraph("graphselfavg")
            graphavgself.addSeries('Average self time', 'avgself')

            if len(p) <= 1:
                return """Error: Not enough URL parameters"""
            hostId = p[0] if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
            name = p[1]

            if len(p) > 2:
                sprocNr = p[2]
            else:
                sprocNr = None

            if 'from' in params and 'to' in params:
                interval = {}
                interval['from'] = params['from']
                interval['to'] = params['to']
            else:
                interval = {}
                interval['from'] = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
                interval['to'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

            data = sprocdata.getSingleSprocData( name, hostId , interval, sprocNr)

            for p in data['total_time']:
                    graphtime.addPoint('runtime', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['calls']:
                    graphcalls.addPoint('calls', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_time']:
                    graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_self_time']:
                    graphavgself.addPoint('avgself', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            table = tplE.env.get_template('sproc_detail.html')

            return table.render(hostid = int(hostId),                
                                hostname = hosts.getHostData()[int(hostId)]['uilongname'],
                                hostuiname = hosts.getHostData()[int(hostId)]['uishortname'],
                                name = data['name'],
                                interval = interval,
                                graphavg = graphavg.render(),
                                graphselfavg = graphavgself.render(),
                                graphcalls = graphcalls.render(),
                                graphruntime = graphtime.render())
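
The flotgraph module itself does not appear in these examples, so below is a minimal sketch of the interface the code above relies on (Graph/TimeGraph/SizeGraph with addSeries, addPoint and render). The class internals and the render() output format are assumptions for illustration, not the actual library implementation.

import json

class Graph(object):
    # Sketch only: collects named series of [timestamp_ms, value] points for a
    # flot chart bound to the <div> with id target_id.
    def __init__(self, target_id, legend_position='left'):
        self.target_id = target_id
        self.legend_position = legend_position
        self.series = {}

    def addSeries(self, label, key, color=None):
        self.series[key] = {'label': label, 'color': color, 'data': []}

    def addPoint(self, key, timestamp_ms, value):
        self.series[key]['data'].append([timestamp_ms, value])

    def render(self):
        # The real module presumably emits the JavaScript needed to draw the chart;
        # this sketch just returns the collected data as JSON.
        return json.dumps({'target': self.target_id, 'series': list(self.series.values())})

class TimeGraph(Graph):
    pass  # presumably formats the y-axis as durations

class SizeGraph(Graph):
    pass  # presumably formats the y-axis as byte sizes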
Example #2
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to config file. (default: %s)' % DEFAULT_CONF_FILE, dest='config',
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='server port', dest='port', type=int)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    print 'Setting connection string to ... ' + conn_string

    DataDB.setConnectionString(conn_string)

    if 'logfiles' in settings:
        logdata.setFilter(settings['logfiles']['liveuserfilter'])

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {'global': {'server.socket_host': '0.0.0.0', 'server.socket_port': args.port or settings.get('frontend',
            {}).get('port') or 8080}, '/': {'tools.staticdir.root': current_dir},
            '/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True}}

    tplE.setup(settings)

    root = None

    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root, h['uishortname'], mf)

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root, config=conf)
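
For reference, a config file accepted by this main() would need roughly the following shape; only the keys actually read above are included, and every value is a placeholder.

import json

# Hypothetical PGObserver frontend config; all values are placeholders.
example_config = {
    "database": {
        "name": "pgobserver",
        "host": "localhost",
        "port": 5432,
        "frontend_user": "pgobserver_frontend",
        "frontend_password": "secret",
    },
    "frontend": {"port": 8080},          # optional, overridden by the -p/--port flag
    "logfiles": {"liveuserfilter": []},  # optional, enables logdata.setFilter(); value type is a guess
}

with open('pgobserver_frontend.json', 'w') as fd:
    json.dump(example_config, fd, indent=4)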
Example #3
    def allgraph(self, hostId):
        hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
        sprocs = self.get_data(hostId)
        tpl = tplE.env.get_template('all_sprocs.html')
        list = []
        i = 0
        for s in sprocs:
            d = sprocdata.getSingleSprocData(
                hostId, s, "('now'::timestamp - '4 days'::interval)")
            i += 1

            graph = flotgraph.TimeGraph("graph" + str(i))
            graph.addSeries('Avg.', 'avg')

            for p in d['avg_time']:
                graph.addPoint('avg',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])

            list.append({
                'graph': graph.render(),
                'name': s[0:s.find("(")],
                'i': i
            })
        return tpl.render(
            graphs=list,
            hostuiname=hostUiName,
            hostname=hosts.getHostData()[int(hostId)]['uilongname'],
            all_sprocs=None)
Example #4
    def all(self, hostId, graph=False):
        hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
        graph_list = None
        all_sprocs = None

        if not graph:
           all_sprocs = sprocdata.getAllActiveSprocNames(hostId)
        else:
            sprocs = self.get_data(hostId)
            graph_list = []
            i = 0
            for s in sprocs:
                print ('s')
                print (s)
                d = sprocdata.getSingleSprocData(hostId, s, "('now'::timestamp - '4 days'::interval)")
                i += 1

                graph= flotgraph.TimeGraph("graph"+str(i))
                graph.addSeries('Avg.', 'avg')

                for p in d['avg_time']:
                    graph.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

                graph_list.append( {'graph': graph.render() , 'name': s[0:s.find("(")] , 'i': i } )

        tpl = tplE.env.get_template('all_sprocs.html')
        return tpl.render(graphs=graph_list,
                          hostuiname = hostUiName,
                          hostname = hosts.getHostData()[int(hostId)]['uilongname'],
                          all_sprocs = all_sprocs)
Example #5
    def default(self, *p, **params):
        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries("Number of calls", "calls")

        graphtime = flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries("Total run time", "runtime")

        graphavg = flotgraph.TimeGraph("graphavg")
        graphavg.addSeries("Average run time", "avg")

        graphavgself = flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries("Average self time", "avgself")

        if len(p) <= 1:
            return """Error: Not enough URL paramter"""

        hostId = p[0]
        name = p[1]

        if len(p) > 2:
            sprocNr = p[2]
        else:
            sprocNr = None

        if "from" in params and "to" in params:
            interval = {}
            interval["from"] = params["from"]
            interval["to"] = params["to"]
        else:
            interval = {}
            interval["from"] = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime("%Y-%m-%d")
            interval["to"] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")

        data = sprocdata.getSingleSprocData(name, hostId, interval, sprocNr)

        for p in data["total_time"]:
            graphtime.addPoint("runtime", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        for p in data["calls"]:
            graphcalls.addPoint("calls", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        for p in data["avg_time"]:
            graphavg.addPoint("avg", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        for p in data["avg_self_time"]:
            graphavgself.addPoint("avgself", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        table = tplE.env.get_template("sproc_detail.html")

        return table.render(
            hostid=int(hostId),
            hostname=hosts.getHostData()[int(hostId)]["settings"]["uiLongName"],
            name=data["name"],
            interval=interval,
            graphavg=graphavg.render(),
            graphselfavg=graphavgself.render(),
            graphcalls=graphcalls.render(),
            graphruntime=graphtime.render(),
        )
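
Every example converts datetime points to flot's x-axis format with int(time.mktime(p[0].timetuple()) * 1000). A small helper capturing that conversion (a sketch, not part of the original code) looks like this:

import time
import datetime

def to_flot_ms(dt):
    # flot expects x values as milliseconds since the Unix epoch; time.mktime()
    # interprets the naive datetime in local time, matching the examples above.
    return int(time.mktime(dt.timetuple()) * 1000)

# Example usage; the exact value depends on the local timezone.
print(to_flot_ms(datetime.datetime(2014, 1, 1)))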
Example #6
    def alltables(self, hostId , order=None):
        table = tplE.env.get_template('tables_size_table_all.html')
        tpl = tplE.env.get_template('all_tables.html')

        if order==None:
            order=2

        return tpl.render(hostname=hosts.getHostData()[int(hostId)]['settings']['uiLongName'],
                          table=table.render(hostid=hostId,
                                             order=int(order),
                                             list=tabledata.getTopTables(hostId, None, order)))
Example #7
    def get_alltables_data(self, hostId, date_from, date_to, order=None, pattern=None):
        hostId = hostId if hostId.isdigit() else hosts.uiShortnameToHostId(hostId)

        if hostId is None:
            return 'valid hostId/hostUiShortname expected'
        if order==None:
            order=2
        hostname = hosts.getHostData()[int(hostId)]['uilongname']
        top_tables = tabledata.getTopTables(hostId, date_from, date_to, order, None, pattern)
        return hostId, hostname, top_tables, order
Example #8
def main():
    parser = ArgumentParser(description = 'PGObserver Frontend')
    parser.add_argument('-c', '--config', help = 'Path to config file. (default: %s)' % DEFAULT_CONF_FILE, dest="config" , default = DEFAULT_CONF_FILE)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join( ( "dbname=" + settings['database']['name'],
                              "host="+settings['database']['host'],
                              "user="******"password="******"port="+ str(settings['database']['port']) ) )

    print "Setting connection string to ... " + conn_string 

    DataDB.setConnectionString ( conn_string )

    if 'logfiles' in settings:
        logdata.setFilter( settings['logfiles']['liveuserfilter'] )

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = ( { 'global': { 'server.socket_host': '0.0.0.0',
                           'server.socket_port': int(settings['frontend']['port']) } ,
               '/' :     {'tools.staticdir.root' : current_dir },
               '/static' : {'tools.staticdir.dir' : 'static' ,
                            'tools.staticdir.on' : True } } )

    tplE.setup( settings )

    root = None

    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root , h['settings']['uiShortName'].lower().replace('-','') , mf)

    root.report = report.Report()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root,config=conf)
Example #9
    def alltables(self, hostId, order=None):
        table = tplE.env.get_template('tables_size_table_all.html')
        tpl = tplE.env.get_template('all_tables.html')

        if order == None:
            order = 2

        return tpl.render(hostname=hosts.getHostData()[int(hostId)]['settings']
                          ['uiLongName'],
                          table=table.render(hostid=hostId,
                                             order=int(order),
                                             list=tabledata.getTopTables(
                                                 hostId, None, order)))
Example #10
    def alltables(self, hostId , order=None):
        table = tplE.env.get_template('tables_size_table_all.html')
        tpl = tplE.env.get_template('all_tables.html')

        hostUiName = hostId if not hostId.isdigit() else hosts.hostIdToUiShortname(hostId)
        hostId = hostId if hostId.isdigit() else hosts.uiShortnameToHostId(hostId)

        if hostId is None:
            return 'valid hostId/hostUiShortname expected'
        if order==None:
            order=2

        return tpl.render(hostname=hosts.getHostData()[int(hostId)]['settings']['uiLongName'],
                          table=table.render(hostid=hostId,
                                             hostuiname=hostUiName,
                                             order=int(order),
                                             list=tabledata.getTopTables(hostId, None, order)))
Example #11
    def get_data(self):
        size = tabledata.getDatabaseSizes()

        systems = []

        hs = hosts.getHostData().values()

        for h in hs:
            g = flotgraph.SizeGraph("s" + str(h['host_id']))
            tabledata.fillGraph(g,size[h['host_id']])

            s = self.renderSizeTable(h['host_id'])
            systems.append({ 'id' : "s"+str(h['host_id']) , 't' : s , 'g' : g.render() , 'h' : h })
        return systems
Example #12
    def get_all_databases_size(self):   # show a graph of all db sizes?
        size = tabledata.getDatabaseSizes()

        systems = []

        hs = hosts.getHostData().values()

        for h in hs:
            g = flotgraph.SizeGraph("s" + str(h['host_id']))
            tabledata.fillGraph(g,size[h['host_id']])

            s = self.renderSizeTable(h['host_id'])  # TODO doesn't exist
            systems.append({ 'id' : "s"+str(h['host_id']) , 't' : s , 'g' : g.render() , 'h' : h })
        return systems
Example #13
def setup(settings = None):
    global env, _settings

    if env != None:
        return

    if settings == None:
        _settings = {"tags":False,"sizeoverview":False,"logfiles":False}
    else:
        _settings = settings['features']

    env = Environment(loader=FileSystemLoader('templates'))

    env.globals['hosts'] = hosts.getHostData()
    env.globals['hosts_json'] = json.dumps(env.globals['hosts'])
    env.globals['settings'] = _settings
    hl = sorted( env.globals['hosts'].values() , key = lambda h : h['settings']['uiShortName'] )

    gs = {}
    hlf = []
    for h in hl:
        if h['host_group_id'] == None:
            hlf.append(h)
        else:
            if h['host_group_id'] in gs:
                continue
            else:
                gs[h['host_group_id']] = True
                hlf.append(h)

    env.globals['hostlist'] = hlf

    groups = {}

    for h in hosts.getHosts().values():
        if h['host_group_id'] > 0:
            if not (h['host_group_id'] in groups):
                groups[h['host_group_id']] = []

            groups[h['host_group_id']].append(h)

    for g in groups.keys():
        groups[g] = sorted(groups[g], key = lambda x : x['settings']['uiShortName'])

    env.globals['hostgroups'] = groups
    env.globals['groups'] = hosts.getGroups()
    env.globals['groups_json'] = json.dumps(hosts.getGroups())
Example #14
    def default(self, *p, **params):
            graphcalls= flotgraph.Graph("graphcalls")
            graphcalls.addSeries('Number of calls', 'calls')

            graphtime= flotgraph.TimeGraph("graphruntime")
            graphtime.addSeries('Total run time', 'runtime')

            graphavg= flotgraph.TimeGraph("graphavg")
            graphavg.addSeries('Average run time', 'avg')

            graphavgself= flotgraph.TimeGraph("graphselfavg")
            graphavgself.addSeries('Average self time', 'avgself')

            if len(p) <= 1:
                return """Error: Not enough URL parameters"""

            hostId = p[0]
            name = p[1]

            if len(p) > 2:
                sprocNr = p[2]
            else:
                sprocNr = None

            data = sprocdata.getSingleSprocData( name, hostId , None, sprocNr)

            for p in data['total_time']:
                    graphtime.addPoint('runtime', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['calls']:
                    graphcalls.addPoint('calls', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_time']:
                    graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_self_time']:
                    graphavgself.addPoint('avgself', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            table = tplE.env.get_template('sproc_detail.html')

            return table.render(hostid=int(hostId),
                                hostname=hosts.getHostData()[int(hostId)]['settings']['uiLongName'],
                                name = data['name'],
                                graphavg=graphavg.render(),
                                graphselfavg=graphavgself.render(),
                                graphcalls=graphcalls.render(),
                                graphruntime=graphtime.render())
Example #15
def setup(settings=None):
    global env, _settings

    if env != None:
        return

    if settings == None:
        _settings = {"tags": False, "sizeoverview": False, "logfiles": False}
    else:
        _settings = settings['features']

    env = Environment(loader=FileSystemLoader('templates'))

    env.globals['hosts'] = hosts.getHostData()
    env.globals['settings'] = _settings
    hl = sorted(env.globals['hosts'].values(),
                key=lambda h: h['settings']['uiShortName'])

    gs = {}
    hlf = []
    for h in hl:
        if h['host_group_id'] == None:
            hlf.append(h)
        else:
            if h['host_group_id'] in gs:
                continue
            else:
                gs[h['host_group_id']] = True
                hlf.append(h)

    env.globals['hostlist'] = hlf

    groups = {}

    for h in hosts.getHosts().values():
        if h['host_group_id'] > 0:
            if not (h['host_group_id'] in groups):
                groups[h['host_group_id']] = []

            groups[h['host_group_id']].append(h)

    for g in groups.keys():
        groups[g] = sorted(groups[g],
                           key=lambda x: x['settings']['uiShortName'])

    env.globals['hostgroups'] = groups
    env.globals['groups'] = hosts.getGroups()
Example #16
    def index(self):

        size = tabledata.getDatabaseSizes()

        systems = []

        hs = hosts.getHostData().values()

        for h in hs:
            g = flotgraph.SizeGraph("s" + str(h["host_id"]))
            tabledata.fillGraph(g, size[h["host_id"]])

            s = self.renderSizeTable(h["host_id"])
            systems.append({"id": "s" + str(h["host_id"]), "t": s, "g": g.render(), "h": h})

        tmpl = tplE.env.get_template("tables.html")
        return tmpl.render(systems=sorted(systems, key=lambda x: x["h"]["settings"]["uiShortName"]), target="World")
Example #17
def setup(settings = None):
    global env, _settings
    hosts.resetHostsAndGroups()

    if settings == None:
        _settings = {"show_load":True, "show_wal":True, "show_top_sprocs":True, "show_db_size":True, "show_db_stats":True}
    else:
        _settings = settings['features']

    if env is None:
        env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))

    env.globals['hosts'] = hosts.getHostData()
    env.globals['hosts_json'] = json.dumps(env.globals['hosts'])
    env.globals['settings'] = _settings
    hl = sorted( env.globals['hosts'].values() , key = lambda h : h['uishortname'] )

    gs = {}
    hlf = []
    for h in hl:
        if h['host_group_id'] == None:
            hlf.append(h)
        else:
            if h['host_group_id'] in gs:
                continue
            else:
                gs[h['host_group_id']] = True
                hlf.append(h)

    env.globals['hostlist'] = hlf

    groups = {}

    for h in hosts.getHosts().values():
        if not (h['host_group_id'] in groups):
            groups[h['host_group_id']] = []

        groups[h['host_group_id']].append(h)

    for g in groups.keys(): # TODO remove?
        groups[g] = sorted(groups[g], key = lambda x : x['uishortname'])

    env.globals['hostgroups'] = groups
    env.globals['groups'] = hosts.getGroups()
    env.globals['groups_json'] = json.dumps(hosts.getGroups())
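
setup() registers host and group data (hosts, hostlist, hostgroups, groups, settings and their JSON forms) as Jinja2 globals, which makes them visible in every template without being passed to render(). Below is a self-contained sketch of that mechanism; the inline template and host data are made up for illustration and are not the project's actual markup.

from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    'hostnav.html':
        '{% for h in hostlist %}<a href="/{{ h["uishortname"] }}">'
        '{{ h["uishortname"] }}</a> {% endfor %}'
}))

# Globals are available in every template rendered by this environment.
env.globals['hostlist'] = [{'uishortname': 'db1'}, {'uishortname': 'db2'}]  # placeholder hosts

print(env.get_template('hostnav.html').render())
# -> <a href="/db1">db1</a> <a href="/db2">db2</a>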
Example #18
    def index(self):

        size = tabledata.getDatabaseSizes()

        systems = []

        hs = hosts.getHostData().values()

        for h in hs:
            g = flotgraph.SizeGraph("s" + str(h['host_id']))
            tabledata.fillGraph(g,size[h['host_id']])

            s = self.renderSizeTable(h['host_id'])
            systems.append({ 'id' : "s"+str(h['host_id']) , 't' : s , 'g' : g.render() , 'h' : h })

        tmpl = tplE.env.get_template('tables.html')
        return tmpl.render(systems=sorted(systems,key=lambda x : x['h']['settings']['uiShortName']),
                           target='World')
Example #19
    def all(self, hostId, active_days=4, graph=False):
        hostId, hostUiName = hosts.ensureHostIdAndUIShortname(hostId)
        graph_list = []
        all_sprocs = None

        if not graph:
            all_sprocs = sprocdata.getAllActiveSprocNames(hostId, active_days)
        else:
            sprocs = self.get_data(hostId)

            i = 0
            for s in sprocs:
                d = sprocdata.getSingleSprocData(
                    hostId, s,
                    "('now'::timestamp - '{} days'::interval)".format(
                        active_days))
                i += 1

                graph = flotgraph.TimeGraph("graph" + str(i))
                graph.addSeries('Avg.', 'avg')

                for p in d['avg_time']:
                    graph.addPoint('avg',
                                   int(time.mktime(p[0].timetuple()) * 1000),
                                   p[1])

                graph_list.append({
                    'graph': graph.render(),
                    'name': s[0:s.find("(")],
                    'i': i
                })

        tpl = tplE.env.get_template('all_sprocs.html')
        return tpl.render(
            graphs=graph_list,
            hostuiname=hostUiName,
            hostname=hosts.getHostData()[int(hostId)]['uilongname'],
            active_days=active_days,
            all_sprocs=all_sprocs)
Example #20
    def index(self):

        size = tabledata.getDatabaseSizes()

        systems = []

        hs = hosts.getHostData().values()

        for h in hs:
            g = flotgraph.SizeGraph("s" + str(h['host_id']))
            tabledata.fillGraph(g, size[h['host_id']])

            s = self.renderSizeTable(h['host_id'])
            systems.append({
                'id': "s" + str(h['host_id']),
                't': s,
                'g': g.render(),
                'h': h
            })

        tmpl = tplE.env.get_template('tables.html')
        return tmpl.render(systems=sorted(
            systems, key=lambda x: x['h']['settings']['uiShortName']),
                           target='World')
Example #21
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c',
                        '--config',
                        help='Path to config file. (default: %s)' %
                        DEFAULT_CONF_FILE,
                        dest="config",
                        default=DEFAULT_CONF_FILE)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join(
        ("dbname=" + settings['database']['name'],
         "host=" + settings['database']['host'],
         "user="******"password="******"port=" + str(settings['database']['port'])))

    print "Setting connection string to ... " + conn_string

    DataDB.setConnectionString(conn_string)

    if 'logfiles' in settings:
        logdata.setFilter(settings['logfiles']['liveuserfilter'])

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = ({
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': int(settings['frontend']['port'])
        },
        '/': {
            'tools.staticdir.root': current_dir
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True
        }
    })

    tplE.setup(settings)

    root = None

    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root, h['settings']['uiShortName'].lower().replace('-', ''),
                mf)

    root.report = report.Report()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root, config=conf)
Example #22
def main():
    parser = ArgumentParser(description="PGObserver Frontend")
    parser.add_argument(
        "-c",
        "--config",
        help="Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
        Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]",
    )
    parser.add_argument(
        "--s3-config-path",
        help="Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var",
        metavar="https://s3-region.amazonaws.com/x/y/file.yaml",
        default=os.getenv("PGOBS_CONFIG_S3_BUCKET"),
    )
    parser.add_argument("-p", "--port", help="Web server port. Overrides value from config file", type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:  # S3 has precedence if specified
        import aws_s3_configreader

        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print "WARNING. Config file {} not found! exiting...".format(args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, "rb") as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings["database"]["host"] = os.getenv("PGOBS_HOST") or settings["database"].get("host")
    settings["database"]["port"] = os.getenv("PGOBS_PORT") or settings["database"].get("port") or 5432
    settings["database"]["name"] = os.getenv("PGOBS_DATABASE") or settings["database"].get("name")
    settings["database"]["frontend_user"] = os.getenv("PGOBS_USER") or settings["database"].get("frontend_user")
    settings["database"]["password"] = os.getenv("PGOBS_PASSWORD") or settings["database"].get("frontend_password")

    if not (
        settings["database"].get("host")
        and settings["database"].get("name")
        and settings["database"].get("frontend_user")
    ):
        print "Mandatory datastore connect details missing!"
        print "Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]"
        print ""
        parser.print_help()
        return

    conn_string = " ".join(
        (
            "dbname=" + settings["database"]["name"],
            "host=" + settings["database"]["host"],
            "user="******"database"]["frontend_user"],
            "port=" + str(settings["database"]["port"]),
        )
    )
    print "Setting connection string to ... " + conn_string
    # finished print conn_string to the world, password can be added
    conn_string = conn_string + " password="******"database"]["frontend_password"]

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        "global": {
            "server.socket_host": "0.0.0.0",
            "server.socket_port": args.port or settings.get("frontend", {}).get("port") or 8080,
        },
        "/": {"tools.staticdir.root": current_dir},
        "/healthcheck": {"tools.sessions.on": False},
        "/static": {"tools.staticdir.dir": "static", "tools.staticdir.on": True, "tools.sessions.on": False},
        "/manifest.info": {
            "tools.staticfile.on": True,
            "tools.staticfile.filename": os.path.join(current_dir, "..", "MANIFEST.MF"),
            "tools.auth_basic.on": False,
            "tools.sessions.on": False,
        },
    }

    tplE.setup(settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h["host_id"])

        setattr(root, h["uishortname"], mf)
        setattr(root, str(h["host_id"]), mf)  # allowing host_id's for backwards comp

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON api exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get("oauth", {}).get("enable_oauth", False):
        print "switching on oauth ..."
        import oauth

        root.oauth = oauth.Oauth(settings["oauth"])
        cherrypy.config.update(
            {
                "tools.oauthtool.on": True,
                "tools.sessions.on": True,
                "tools.sessions.timeout": settings["oauth"].get("session_timeout", 43200),
            }
        )

    cherrypy.quickstart(root, config=conf)
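
The YAML config this version of main() expects would look roughly like the following (only the keys read above, with placeholder values). When run e.g. under Docker, the PGOBS_HOST, PGOBS_PORT, PGOBS_DATABASE, PGOBS_USER and PGOBS_PASSWORD environment variables shown in the code override the corresponding database entries.

import yaml

# Hypothetical pgobserver_frontend.example.yaml contents; values are placeholders.
example_yaml = """
database:
  host: localhost
  port: 5432
  name: pgobserver
  frontend_user: pgobserver_frontend
  frontend_password: secret
frontend:
  port: 8080
oauth:
  enable_oauth: false
  session_timeout: 43200
"""

settings = yaml.safe_load(example_yaml)
print(settings['database']['host'])  # -> localhost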
Example #23
    def default(self, *p, **params):
        if len(p) < 2:
            return ""

        host = p[0]
        name = p[1]

        if 'interval' in params:
            interval = {}
            interval['interval'] = str(params['interval'])+' days'
        elif 'from' in params and 'to' in params:
            interval = {}
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            interval = {}
            interval['from'] = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        data = tabledata.getTableData(host, name, interval)

        graph_table_size = flotgraph.SizeGraph ("graphtablesize","right")
        graph_table_size.addSeries("Table Size","size")
        for p in data['table_size']:
            graph_table_size.addPoint("size", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_index_size = flotgraph.SizeGraph ("graphindexsize","right")
        graph_index_size.addSeries("Index Size", "size")
        for p in data['index_size']:
            graph_index_size.addPoint("size", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_seq_scans = flotgraph.Graph ("graphseqscans","right")
        graph_seq_scans.addSeries("Sequential Scans","count")
        for p in data['seq_scans']:
            graph_seq_scans.addPoint("count", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_index_scans = flotgraph.Graph ("graphindexscans","right")
        graph_index_scans.addSeries("Index Scans","count")
        for p in data['index_scans']:
            graph_index_scans.addPoint("count", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_t_ins = flotgraph.Graph ("gtupins","right")
        graph_t_ins.addSeries("Inserts","count",'#FF0000')
        for p in data['ins']:
            graph_t_ins.addPoint("count", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_t_upd = flotgraph.Graph ("gtupupd","right")
        graph_t_upd.addSeries("Updates","count",'#FF8800')
        graph_t_upd.addSeries("Hot Updates","hotcount",'#885500')
        for p in data['upd']:
            graph_t_upd.addPoint("count", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        for p in data['hot']:
            graph_t_upd.addPoint("hotcount", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_t_del = flotgraph.Graph ("gtupdel","right")
        graph_t_del.addSeries("Deletes","count")
        for p in data['del']:
            graph_t_del.addPoint("count", int(time.mktime(p[0].timetuple()) * 1000) , p[1])


        data = tabledata.getTableIOData(host, name)

        graph_index_iob = flotgraph.Graph ("graphindexiob","right")
        graph_index_iob.addSeries("Index_hit","ihit")
        for p in data['index_hit']:
            graph_index_iob.addPoint("ihit", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_index_iod = flotgraph.Graph ("graphindexiod","right")
        graph_index_iod.addSeries("Index_read","iread",'#FF0000')
        for p in data['index_read']:
            graph_index_iod.addPoint("iread", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_heap_iod = flotgraph.Graph ("graphheapiod","right")
        graph_heap_iod.addSeries("Heap_read","hread",'#FF0000')
        for p in data['heap_read']:
            graph_heap_iod.addPoint("hread", int(time.mktime(p[0].timetuple()) * 1000) , p[1])

        graph_heap_iob = flotgraph.Graph ("graphheapiob","right")
        graph_heap_iob.addSeries("Heap_hit","hhit")
        for p in data['heap_hit']:
            graph_heap_iob.addPoint("hhit", int(time.mktime(p[0].timetuple()) * 1000) , p[1])


        tpl = tplE.env.get_template('table_detail.html')
        return tpl.render(name=name,host=host,interval=interval,
                          hostname = hosts.getHostData()[int(host)]['settings']['uiLongName'],
                          graphtablesize=graph_table_size.render(),
                          graphindexsize=graph_index_size.render(),
                          graphseqscans=graph_seq_scans.render(),
                          graphindexscans=graph_index_scans.render(),

                          graphindexiod=graph_index_iod.render(),
                          graphindexiob=graph_index_iob.render(),
                          graphheapiod=graph_heap_iod.render(),
                          graphheapiob=graph_heap_iob.render(),

                          gtupins=graph_t_ins.render(),
                          gtupupd=graph_t_upd.render(),
                          gtupdel=graph_t_del.render(),

                          target='World')
Example #24
    def default(self, *p, **params):
        if len(p) < 2:
            return ""

        host = p[0]
        name = p[1]

        if 'interval' in params:
            interval = {}
            interval['interval'] = str(params['interval']) + ' days'
        elif 'from' in params and 'to' in params:
            interval = {}
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            interval = {}
            interval['from'] = (
                datetime.datetime.now() -
                datetime.timedelta(days=14)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() +
                              datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        data = tabledata.getTableData(host, name, interval)

        graph_table_size = flotgraph.SizeGraph("graphtablesize", "right")
        graph_table_size.addSeries("Table Size", "size")
        for p in data['table_size']:
            graph_table_size.addPoint(
                "size", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        graph_index_size = flotgraph.SizeGraph("graphindexsize", "right")
        graph_index_size.addSeries("Index Size", "size")
        for p in data['index_size']:
            graph_index_size.addPoint(
                "size", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        graph_seq_scans = flotgraph.Graph("graphseqscans", "right")
        graph_seq_scans.addSeries("Sequential Scans", "count")
        for p in data['seq_scans']:
            graph_seq_scans.addPoint("count",
                                     int(time.mktime(p[0].timetuple()) * 1000),
                                     p[1])

        graph_index_scans = flotgraph.Graph("graphindexscans", "right")
        graph_index_scans.addSeries("Index Scans", "count")
        for p in data['index_scans']:
            graph_index_scans.addPoint(
                "count", int(time.mktime(p[0].timetuple()) * 1000), p[1])

        graph_t_ins = flotgraph.Graph("gtupins", "right")
        graph_t_ins.addSeries("Inserts", "count", '#FF0000')
        for p in data['ins']:
            graph_t_ins.addPoint("count",
                                 int(time.mktime(p[0].timetuple()) * 1000),
                                 p[1])

        graph_t_upd = flotgraph.Graph("gtupupd", "right")
        graph_t_upd.addSeries("Updates", "count", '#FF8800')
        graph_t_upd.addSeries("Hot Updates", "hotcount", '#885500')
        for p in data['upd']:
            graph_t_upd.addPoint("count",
                                 int(time.mktime(p[0].timetuple()) * 1000),
                                 p[1])

        for p in data['hot']:
            graph_t_upd.addPoint("hotcount",
                                 int(time.mktime(p[0].timetuple()) * 1000),
                                 p[1])

        graph_t_del = flotgraph.Graph("gtupdel", "right")
        graph_t_del.addSeries("Deletes", "count")
        for p in data['del']:
            graph_t_del.addPoint("count",
                                 int(time.mktime(p[0].timetuple()) * 1000),
                                 p[1])

        data = tabledata.getTableIOData(host, name)

        graph_index_iob = flotgraph.Graph("graphindexiob", "right")
        graph_index_iob.addSeries("Index_hit", "ihit")
        for p in data['index_hit']:
            graph_index_iob.addPoint("ihit",
                                     int(time.mktime(p[0].timetuple()) * 1000),
                                     p[1])

        graph_index_iod = flotgraph.Graph("graphindexiod", "right")
        graph_index_iod.addSeries("Index_read", "iread", '#FF0000')
        for p in data['index_read']:
            graph_index_iod.addPoint("iread",
                                     int(time.mktime(p[0].timetuple()) * 1000),
                                     p[1])

        graph_heap_iod = flotgraph.Graph("graphheapiod", "right")
        graph_heap_iod.addSeries("Heap_read", "hread", '#FF0000')
        for p in data['heap_read']:
            graph_heap_iod.addPoint("hread",
                                    int(time.mktime(p[0].timetuple()) * 1000),
                                    p[1])

        graph_heap_iob = flotgraph.Graph("graphheapiob", "right")
        graph_heap_iob.addSeries("Heap_hit", "hhit")
        for p in data['heap_hit']:
            graph_heap_iob.addPoint("hhit",
                                    int(time.mktime(p[0].timetuple()) * 1000),
                                    p[1])

        tpl = tplE.env.get_template('table_detail.html')
        return tpl.render(
            name=name,
            host=host,
            interval=interval,
            hostname=hosts.getHostData()[int(host)]['settings']['uiLongName'],
            graphtablesize=graph_table_size.render(),
            graphindexsize=graph_index_size.render(),
            graphseqscans=graph_seq_scans.render(),
            graphindexscans=graph_index_scans.render(),
            graphindexiod=graph_index_iod.render(),
            graphindexiob=graph_index_iob.render(),
            graphheapiod=graph_heap_iod.render(),
            graphheapiob=graph_heap_iob.render(),
            gtupins=graph_t_ins.render(),
            gtupupd=graph_t_upd.render(),
            gtupdel=graph_t_del.render(),
            target='World')
Example #25
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument(
        '-c',
        '--config',
        help=
        'Path to yaml config file with datastore connect details. See pgobserver_frontend.example.yaml for a sample file. \
        Certain values can be overridden by ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
    )
    parser.add_argument(
        '--s3-config-path',
        help=
        'Path style S3 URL to a key that holds the config file. Or PGOBS_CONFIG_S3_BUCKET env. var',
        metavar='https://s3-region.amazonaws.com/x/y/file.yaml',
        default=os.getenv('PGOBS_CONFIG_S3_BUCKET'))
    parser.add_argument(
        '-p',
        '--port',
        help='Web server port. Overrides value from config file',
        type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.s3_config_path:  # S3 has precedence if specified
        import aws_s3_configreader
        settings = aws_s3_configreader.get_config_as_dict_from_s3_file(
            args.s3_config_path)
    elif args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print 'WARNING. Config file {} not found! exiting...'.format(
                args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST')
                                    or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT')
                                    or settings['database'].get('port')
                                    or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE')
                                    or settings['database'].get('name'))
    settings['database']['frontend_user'] = (
        os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['password'] = (
        os.getenv('PGOBS_PASSWORD')
        or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host')
            and settings['database'].get('name')
            and settings['database'].get('frontend_user')):
        print 'Mandatory datastore connect details missing!'
        print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
        print ''
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print 'Setting connection string to ... ' + conn_string
    # finished print conn_string to the world, password can be added
    conn_string = conn_string + ' password=' + settings['database'][
        'frontend_password']

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {
            'server.socket_host':
            '0.0.0.0',
            'server.socket_port':
            args.port or settings.get('frontend', {}).get('port') or 8080
        },
        '/': {
            'tools.staticdir.root': current_dir
        },
        '/healthcheck': {
            'tools.sessions.on': False
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True,
            'tools.sessions.on': False
        },
        '/manifest.info': {
            'tools.staticfile.on':
            True,
            'tools.staticfile.filename':
            os.path.join(current_dir, '..', 'MANIFEST.MF'),
            'tools.auth_basic.on':
            False,
            'tools.sessions.on':
            False
        },
    }

    tplE.setup(
        settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h['host_id'])

        setattr(root, h['uishortname'], mf)
        setattr(root, str(h['host_id']),
                mf)  # allowing host_id's for backwards comp

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(
        root
    )  # JSON api exposure, enabling integration with other monitoring tools
    root.healthcheck = Healthcheck()

    if settings.get('oauth', {}).get('enable_oauth', False):
        print 'switching on oauth ...'
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({
            'tools.oauthtool.on':
            True,
            'tools.sessions.on':
            True,
            'tools.sessions.timeout':
            settings['oauth'].get('session_timeout', 43200)
        })

    cherrypy.quickstart(root, config=conf)
Example #26
    def default(self, *p, **params):
        if len(p) == 0:
            return """Error: Not enough URL parameters. Hostname needed"""

        hostId, hostName = hosts.ensureHostIdAndUIShortname(p[0])
        sprocName = None

        if len(p) > 1:
            sprocName = p[1]

        if params.get('search'):
            sprocName = params.get('sproc_search')
            url = '/sprocs/show/' + hostName + '/' + sprocName
            raise cherrypy.HTTPRedirect(cherrypy.url(url))

        interval = {}
        interval['from'] = params.get('from',(datetime.datetime.now() - datetime.timedelta(days=8)).strftime('%Y-%m-%d'))
        interval['to'] = params.get('to',(datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))

        graphcalls= flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')

        graphtime= flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries('Total run time', 'runtime')

        graphavg= flotgraph.TimeGraph("graphavg")
        graphavg.addSeries('Average run time', 'avg')

        graphavgself= flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries('Average self time', 'avgself')

        data = sprocdata.getSingleSprocData(hostId, sprocName, interval)
        if data['name']:    # None if no data for sproc found
            for p in data['total_time']:
                graphtime.addPoint('runtime', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['calls']:
                graphcalls.addPoint('calls', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_time']:
                graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000) , p[1])

            for p in data['avg_self_time']:
                graphavgself.addPoint('avgself', int(time.mktime(p[0].timetuple()) * 1000), p[1])

            sproc_name_wo_params = data['name'] if data['name'].find('(') == -1 else data['name'][0:data['name'].find('(')]
            sproc_params = "" if data['name'].find('(') == -1 else data['name'][data['name'].find('(')+1:-1]

        all_sprocs = sprocdata.getAllActiveSprocNames(hostId)

        table = tplE.env.get_template('sproc_detail.html')
        return table.render(hostid = hostId,
                            hostname = hosts.getHostData()[int(hostId)]['uilongname'],
                            hostuiname = hostName,
                            name_w_params = data['name'] ,
                            params = sproc_params if data['name'] else None,
                            name_wo_params = sproc_name_wo_params if data['name'] else None,
                            interval = interval,
                            sproc_name = sprocName,
                            all_sprocs = all_sprocs,
                            all_sprocs_json = json.dumps(all_sprocs),
                            graphavg = graphavg.render(),
                            graphselfavg = graphavgself.render(),
                            graphcalls = graphcalls.render(),
                            graphruntime = graphtime.render())
Example #27
    def default(self, *p, **params):
        if len(p) == 0:
            return """Error: Not enough URL parameters. Hostname needed"""

        hostId, hostName = hosts.ensureHostIdAndUIShortname(p[0])
        sprocName = None

        if len(p) > 1:
            sprocName = p[1]

        if params.get('search'):
            sprocName = params.get('sproc_search')
            url = '/sprocs/show/' + hostName + '/' + sprocName
            raise cherrypy.HTTPRedirect(cherrypy.url(url))

        interval = {}
        interval['from'] = params.get(
            'from', (datetime.datetime.now() -
                     datetime.timedelta(days=8)).strftime('%Y-%m-%d'))
        interval['to'] = params.get(
            'to', (datetime.datetime.now() +
                   datetime.timedelta(days=1)).strftime('%Y-%m-%d'))

        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')

        graphtime = flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries('Total run time', 'runtime')

        graphavg = flotgraph.TimeGraph("graphavg")
        graphavg.addSeries('Average run time', 'avg')

        graphavgself = flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries('Average self time', 'avgself')

        data = sprocdata.getSingleSprocData(hostId, sprocName, interval)
        if data['name']:  # None if no data for sproc found
            for p in data['total_time']:
                graphtime.addPoint('runtime',
                                   int(time.mktime(p[0].timetuple()) * 1000),
                                   p[1])

            for p in data['calls']:
                graphcalls.addPoint('calls',
                                    int(time.mktime(p[0].timetuple()) * 1000),
                                    p[1])

            for p in data['avg_time']:
                graphavg.addPoint('avg',
                                  int(time.mktime(p[0].timetuple()) * 1000),
                                  p[1])

            for p in data['avg_self_time']:
                graphavgself.addPoint(
                    'avgself', int(time.mktime(p[0].timetuple()) * 1000), p[1])

            sproc_name_wo_params = data['name'] if data['name'].find(
                '(') == -1 else data['name'][0:data['name'].find('(')]
            sproc_params = "" if data['name'].find(
                '(') == -1 else data['name'][data['name'].find('(') + 1:-1]

        all_sprocs = sprocdata.getAllActiveSprocNames(hostId)

        table = tplE.env.get_template('sproc_detail.html')
        return table.render(
            hostid=hostId,
            hostname=hosts.getHostData()[int(hostId)]['uilongname'],
            hostuiname=hostName,
            name_w_params=data['name'],
            params=sproc_params if data['name'] else None,
            name_wo_params=sproc_name_wo_params if data['name'] else None,
            interval=interval,
            sproc_name=sprocName,
            all_sprocs=all_sprocs,
            all_sprocs_json=json.dumps(all_sprocs),
            graphavg=graphavg.render(),
            graphselfavg=graphavgself.render(),
            graphcalls=graphcalls.render(),
            graphruntime=graphtime.render())
Example #28
    def default(self, *p, **params):
        graphcalls = flotgraph.Graph("graphcalls")
        graphcalls.addSeries('Number of calls', 'calls')

        graphtime = flotgraph.TimeGraph("graphruntime")
        graphtime.addSeries('Total run time', 'runtime')

        graphavg = flotgraph.TimeGraph("graphavg")
        graphavg.addSeries('Average run time', 'avg')

        graphavgself = flotgraph.TimeGraph("graphselfavg")
        graphavgself.addSeries('Average self time', 'avgself')

        if (len(p) <= 1):
            return """Error: Not enough URL paramter"""

        hostId = p[0]
        name = p[1]

        if len(p) > 2:
            sprocNr = p[2]
        else:
            sprocNr = None

        if 'from' in params and 'to' in params:
            interval = {}
            interval['from'] = params['from']
            interval['to'] = params['to']
        else:
            interval = {}
            interval['from'] = (
                datetime.datetime.now() -
                datetime.timedelta(days=14)).strftime('%Y-%m-%d')
            interval['to'] = (datetime.datetime.now() +
                              datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        data = sprocdata.getSingleSprocData(name, hostId, interval, sprocNr)

        for p in data['total_time']:
            graphtime.addPoint('runtime',
                               int(time.mktime(p[0].timetuple()) * 1000), p[1])

        for p in data['calls']:
            graphcalls.addPoint('calls',
                                int(time.mktime(p[0].timetuple()) * 1000),
                                p[1])

        for p in data['avg_time']:
            graphavg.addPoint('avg', int(time.mktime(p[0].timetuple()) * 1000),
                              p[1])

        for p in data['avg_self_time']:
            graphavgself.addPoint('avgself',
                                  int(time.mktime(p[0].timetuple()) * 1000),
                                  p[1])

        table = tplE.env.get_template('sproc_detail.html')

        return table.render(hostid=int(hostId),
                            hostname=hosts.getHostData()[int(hostId)]
                            ['settings']['uiLongName'],
                            name=data['name'],
                            interval=interval,
                            graphavg=graphavg.render(),
                            graphselfavg=graphavgself.render(),
                            graphcalls=graphcalls.render(),
                            graphruntime=graphtime.render())
Example #29
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to yaml config file with datastore connect details. Default location - {} \
        If not found then ENV vars PGOBS_HOST, PGOBS_DBNAME, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]  will be used'.format(DEFAULT_CONF_FILE),
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='Web server port. Overrides value from config file', dest='port', type=int)

    args = parser.parse_args()

    settings = collections.defaultdict(dict)

    if args.config:
        args.config = os.path.expanduser(args.config)

        if not os.path.exists(args.config):
            print 'WARNING. Config file {} not found! exiting...'.format(args.config)
            return
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = (os.getenv('PGOBS_HOST') or settings['database'].get('host'))
    settings['database']['port'] = (os.getenv('PGOBS_PORT') or settings['database'].get('port') or 5432)
    settings['database']['name'] = (os.getenv('PGOBS_DATABASE') or settings['database'].get('name'))
    settings['database']['frontend_user'] = (os.getenv('PGOBS_USER') or settings['database'].get('frontend_user'))
    settings['database']['frontend_password'] = (os.getenv('PGOBS_PASSWORD') or settings['database'].get('frontend_password'))

    if not (settings['database'].get('host') and settings['database'].get('name') and settings['database'].get('frontend_user')):
        print 'Mandatory datastore connect details missing!'
        print 'Check --config input or environment variables: PGOBS_HOST, PGOBS_DATABASE, PGOBS_USER, PGOBS_PASSWORD [, PGOBS_PORT]'
        print ''
        parser.print_help()
        return

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))
    print 'Setting connection string to ... ' + conn_string
    # the password is appended only after printing, so it never ends up in the logs
    conn_string = conn_string + ' password=' + settings['database']['frontend_password']
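    # (assumption) conn_string is a libpq-style 'key=value' DSN; datadb presumably
    # passes it on to psycopg2.connect(), which accepts this format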

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': args.port or settings.get('frontend', {}).get('port') or 8080,
        },
        '/': {'tools.staticdir.root': current_dir},
        '/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True, 'tools.sessions.on': False},
        '/manifest.info': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': os.path.join(current_dir, '..', 'MANIFEST.MF'),
            'tools.auth_basic.on': False,
            'tools.sessions.on': False,
        },
    }
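    # CherryPy config above: the 'global' section applies server-wide, while the
    # path keys ('/', '/static', '/manifest.info') attach per-path tool settings
    # such as static file serving and session handling.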

    tplE.setup(settings)  # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h['host_id'])

        setattr(root, h['uishortname'], mf)
        setattr(root, str(h['host_id']), mf)  # also allow numeric host_id paths for backwards compatibility

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)  # JSON api exposure, enabling integration with other monitoring tools

    if settings.get('oauth', {}).get('enable_oauth', False):
        print 'switching on oauth ...'
        import oauth
        root.oauth = oauth.Oauth(settings['oauth'])
        cherrypy.config.update({'tools.oauthtool.on': True, 'tools.sessions.on': True,
                                'tools.sessions.timeout': settings['oauth'].get('session_timeout', 43200)})

    cherrypy.quickstart(root, config=conf)
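
# The module presumably ends with the standard entrypoint guard (assumed, not part
# of the example above), so that main() runs when the file is executed directly:
if __name__ == '__main__':
    main()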
Example #30
0
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to config file. (default: {})'.format(DEFAULT_CONF_FILE), dest='config',
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='server port', dest='port', type=int)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    settings = None
    if os.path.exists(args.config):
        print "trying to read config file from {}".format(args.config)
        with open(args.config, 'rb') as fd:
            settings = yaml.load(fd)

    if settings is None:
        print 'Config file missing - no YAML config found at {}'.format(args.config)
        parser.print_help()
        return

    # Make env vars overwrite yaml file, to run via docker without changing config file
    settings['database']['host'] = os.getenv('PGOBS_HOST', settings['database']['host'])
    settings['database']['port'] = os.getenv('PGOBS_PORT', settings['database']['port'])
    settings['database']['name'] = os.getenv('PGOBS_DATABASE', settings['database']['name'])
    settings['database']['frontend_user'] = os.getenv('PGOBS_USER', settings['database']['frontend_user'])
    settings['database']['frontend_password'] = os.getenv('PGOBS_PASSWORD', settings['database']['frontend_password'])
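
    # The connection string is assembled twice below: first without the password so
    # that it can be printed safely, then again with the password for the actual connection.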

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'port=' + str(settings['database']['port']),
    ))

    print 'Setting connection string to ... ' + conn_string

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    datadb.setConnectionString(conn_string)

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = {
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': args.port or settings.get('frontend', {}).get('port') or 8080,
        },
        '/': {
            'tools.staticdir.root': current_dir,
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True,
        },
        '/manifest.info': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': os.path.join(current_dir, '..', 'MANIFEST.MF'),
            'tools.auth_basic.on': False,
        },
    }

    tplE.setup(settings)    # setup of global variables and host data for usage in views

    root = welcomefrontend.WelcomeFrontend()

    for h in hosts.getHostData().values():
        mf = monitorfrontend.MonitorFrontend(h['host_id'])

        setattr(root, h['uishortname'], mf)
        setattr(root, str(h['host_id']), mf)  # also allow numeric host_id paths for backwards compatibility

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.perfschemas = performance.PerfUnusedSchemas()
    root.perflocks = performance.PerfLocksReport()
    root.perfstatstatements = performance.PerfStatStatementsReport()
    root.perfbloat = performance.PerfBloat()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()
    root.indexes = indexesfrontend.IndexesFrontend()
    root.hosts = hostsfrontend.HostsFrontend()
    root.api = api.Root(root)   # JSON api exposure, enabling integration with other monitoring tools

    cherrypy.quickstart(root, config=conf)
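
# A minimal usage sketch (hypothetical paths/values, not taken from the examples above):
#
#   python frontend.py --config ~/.pgobserver.yaml --port 8080
#
# or, keeping the config file untouched, override single connection settings via
# environment variables (the examples above read PGOBS_HOST, PGOBS_PORT, etc.):
#
#   PGOBS_PASSWORD=secret python frontend.py --config /etc/pgobserver.yaml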