Example #1
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c', '--config', help='Path to config file. (default: %s)' % DEFAULT_CONF_FILE, dest='config',
                        default=DEFAULT_CONF_FILE)
    parser.add_argument('-p', '--port', help='server port', dest='port', type=int)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join((
        'dbname=' + settings['database']['name'],
        'host=' + settings['database']['host'],
        'user=' + settings['database']['frontend_user'],
        'password=' + settings['database']['frontend_password'],
        'port=' + str(settings['database']['port']),
    ))

    print 'Setting connection string to ... ' + conn_string

    DataDB.setConnectionString(conn_string)

    if 'logfiles' in settings:
        logdata.setFilter(settings['logfiles']['liveuserfilter'])

    current_dir = os.path.dirname(os.path.abspath(__file__))

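    # CherryPy config: listen on all interfaces; the port comes from the -p flag, else the config file, else 8080.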
    conf = {
        'global': {'server.socket_host': '0.0.0.0',
                   'server.socket_port': args.port or settings.get('frontend', {}).get('port') or 8080},
        '/': {'tools.staticdir.root': current_dir},
        '/static': {'tools.staticdir.dir': 'static', 'tools.staticdir.on': True},
    }

    tplE.setup(settings)

    root = None

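    # Mount one MonitorFrontend page per monitored host; the first host also becomes the site root.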
    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root, h['uishortname'], mf)

    root.report = report.Report()
    root.export = export.Export()
    root.perftables = performance.PerfTables()
    root.perfapi = performance.PerfApi()
    root.perfindexes = performance.PerfIndexes()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root, config=conf)
Example #2
def getSingleSprocData(name, hostId=1, interval=None, sprocNr=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleSprocSQL(name, hostId, interval, sprocNr))

    data = {
        'calls': [],
        'self_time': [],
        'total_time': [],
        'avg_time': [],
        'avg_self_time': [],
        'name': name
    }

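    # Build (timestamp, value) series for calls, total/self time and averages from the result rows.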
    for r in cur:
        data['calls'].append((r['xaxis'], r['d_calls']))
        data['total_time'].append((r['xaxis'], r['d_total_time']))
        data['self_time'].append((r['xaxis'], r['d_self_time']))
        data['avg_time'].append((r['xaxis'], r['d_avg_time']))
        data['avg_self_time'].append((r['xaxis'], r['d_avg_self_time']))

    cur.close()
    DataDB.closeDataConnection(conn)

    return data
Example #3
def getSprocDataByTags():
    sql = """select tm_tag_id , sum("yaxis") AS "yaxis_t" , sum("yaxis2") AS "yaxis_c", "xaxis"  from (
 select group_date(sp_timestamp,30) as "xaxis",
        sp_sproc_id,
        max(sp_self_time) - min(sp_self_time) as "yaxis",
        max(sp_calls) - min(sp_calls) as "yaxis2"
   from monitor_data.sproc_performance_data
  where sp_timestamp > 'now'::timestamp - '9 days'::interval
  group by sp_sproc_id , group_date(sp_timestamp,30) ) data,
  monitor_data.sprocs,
  monitor_data.tag_members
  where sprocs.sproc_id = sp_sproc_id
    and tm_sproc_name = sproc_name
    and tm_schema = get_noversion_name(sproc_schema)
  group by tm_tag_id , "xaxis" order by 4 asc;"""


    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    data = collections.defaultdict(list)

    for r in cur:
        data[r['tm_tag_id']].append((r['xaxis'], r['yaxis_t'], r['yaxis_c']))

    cur.close()
    DataDB.closeDataConnection(conn)

    return data
Example #4
def getSprocDataByTags():
    sql = """select tm_tag_id , sum("yaxis") AS "yaxis_t" , sum("yaxis2") AS "yaxis_c", "xaxis"  from (
 select group_date(sp_timestamp,30) as "xaxis",
        sp_sproc_id,
        max(sp_self_time) - min(sp_self_time) as "yaxis",
        max(sp_calls) - min(sp_calls) as "yaxis2"
   from monitor_data.sproc_performance_data
  where sp_timestamp > 'now'::timestamp - '9 days'::interval
  group by sp_sproc_id , group_date(sp_timestamp,30) ) data,
  monitor_data.sprocs,
  monitor_data.tag_members
  where sprocs.sproc_id = sp_sproc_id
    and tm_sproc_name = sproc_name
    and tm_schema = get_noversion_name(sproc_schema)
  group by tm_tag_id , "xaxis" order by 4 asc;"""

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    data = collections.defaultdict(list)

    for r in cur:
        data[r['tm_tag_id']].append((r['xaxis'], r['yaxis_t'], r['yaxis_c']))

    cur.close()
    DataDB.closeDataConnection(conn)

    return data
Example #5
def main():
    parser = ArgumentParser(description = 'PGObserver Frontend')
    parser.add_argument('-c', '--config', help = 'Path to config file. (default: %s)' % DEFAULT_CONF_FILE, dest="config" , default = DEFAULT_CONF_FILE)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join( ( "dbname=" + settings['database']['name'],
                              "host="+settings['database']['host'],
                              "user="******"password="******"port="+ str(settings['database']['port']) ) )

    print "Setting connection string to ... " + conn_string 

    DataDB.setConnectionString ( conn_string )

    if 'logfiles' in settings:
        logdata.setFilter( settings['logfiles']['liveuserfilter'] )

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = ( { 'global': { 'server.socket_host': '0.0.0.0',
                           'server.socket_port': int(settings['frontend']['port']) } ,
               '/' :     {'tools.staticdir.root' : current_dir },
               '/static' : {'tools.staticdir.dir' : 'static' ,
                            'tools.staticdir.on' : True } } )

    tplE.setup( settings )

    root = None

    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root , h['settings']['uiShortName'].lower().replace('-','') , mf)

    root.report = report.Report()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root,config=conf)
Example #6
def getTableData(host, name, interval = None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableSql(host,name,interval))

    d = { 'table_size' : [], 'index_size' : [], 'seq_scans': [], 'index_scans' : [], 'ins':[], 'upd':[], 'del':[], 'hot':[] }

    last_is = None
    last_ss = None

    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d['table_size'].append ( ( r['tsd_timestamp'] , r['tsd_table_size'] ) )
        d['index_size'].append ( ( r['tsd_timestamp'] , r['tsd_index_size'] ) )

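        # Compute deltas only when the previous sample is at most 15 minutes old (timestamps compared in milliseconds).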
        if int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000) - last_timestamp <= ( 15*60*1000 ):
            if last_ss != None:
                d['seq_scans'].append  ( ( r['tsd_timestamp'] , r['tsd_seq_scans']-last_ss ) )

            if last_is != None:
                d['index_scans'].append( ( r['tsd_timestamp'] , r['tsd_index_scans'] - last_is ) )

            if last_ins != None and last_ins != 0:
                d['ins'].append( ( r['tsd_timestamp'] , r['tsd_tup_ins'] - last_ins ) )

            if last_del != None and last_del != 0:
                d['del'].append( ( r['tsd_timestamp'] , r['tsd_tup_del'] - last_del ) )

            if last_upd != None and last_upd != 0:
                d['upd'].append( ( r['tsd_timestamp'] , r['tsd_tup_upd'] - last_upd ) )

            if last_hot != None and last_hot != 0:
                d['hot'].append( ( r['tsd_timestamp'] , r['tsd_tup_hot_upd'] - last_hot ) )

        last_is = r['tsd_index_scans']
        last_ss = r['tsd_seq_scans']

        last_ins = r['tsd_tup_ins']
        last_del = r['tsd_tup_del']
        last_upd = r['tsd_tup_upd']
        last_hot = r['tsd_tup_hot_upd']

        last_timestamp = int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return d
Example #7
def getTableData(host, name, interval = None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableSql(host,name,interval))

    d = { 'table_size' : [], 'index_size' : [], 'seq_scans': [], 'index_scans' : [], 'ins':[], 'upd':[], 'del':[], 'hot':[] }

    last_is = None
    last_ss = None

    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d['table_size'].append ( ( r['tsd_timestamp'] , r['tsd_table_size'] ) )
        d['index_size'].append ( ( r['tsd_timestamp'] , r['tsd_index_size'] ) )

        if int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000) - last_timestamp <= ( 15*60*1000 ):
            if last_ss != None:
                d['seq_scans'].append  ( ( r['tsd_timestamp'] , r['tsd_seq_scans']-last_ss ) )

            if last_is != None:
                d['index_scans'].append( ( r['tsd_timestamp'] , r['tsd_index_scans'] - last_is ) )

            if last_ins != None and last_ins != 0:
                d['ins'].append( ( r['tsd_timestamp'] , r['tsd_tup_ins'] - last_ins ) )

            if last_del != None and last_del != 0:
                d['del'].append( ( r['tsd_timestamp'] , r['tsd_tup_del'] - last_del ) )

            if last_upd != None and last_upd != 0:
                d['upd'].append( ( r['tsd_timestamp'] , r['tsd_tup_upd'] - last_upd ) )

            if last_hot != None and last_hot != 0:
                d['hot'].append( ( r['tsd_timestamp'] , r['tsd_tup_hot_upd'] - last_hot ) )

        last_is = r['tsd_index_scans']
        last_ss = r['tsd_seq_scans']

        last_ins = r['tsd_tup_ins']
        last_del = r['tsd_tup_del']
        last_upd = r['tsd_tup_upd']
        last_hot = r['tsd_tup_hot_upd']

        last_timestamp = int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return d
Example #8
def getTop10Interval(order=avgRuntimeOrder,interval=None,hostId = 1, limit = 10):

    sql = """select regexp_replace("name", E'(\\\\(.*\\\\))','()') AS "name",
                    round( sum(d_calls) , 0 ) AS "calls",
                    round( sum(d_total_time) , 0 ) AS "totalTime",
                    round( sum(d_total_time) / sum(d_calls) , 0 ) AS "avgTime"
               from ( """ + getSQL(interval, hostId) + """) tt
              where d_calls > 0
              group by "name"
              order by """+order+"""  limit """ + str(adapt(limit))

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    sprocs = []

    for record in cur:
        record['avgTime'] = makeTimeIntervalReadable(record['avgTime'])
        record['totalTime'] = makeTimeIntervalReadable(record['totalTime'])
        sprocs.append(record)

    conn.close()

    return sprocs
Example #9
def getTop10Interval(order=avgRuntimeOrder,interval=None,hostId = 1, limit = 10):

    sql = """select regexp_replace("name", E'(\\\\(.*\\\\))','()') AS "name",
                    round( sum(d_calls) , 0 ) AS "calls",
                    round( sum(d_total_time) , 0 ) AS "totalTime",
                    round( sum(d_total_time) / sum(d_calls) , 0 ) AS "avgTime"
               from ( """ + getSQL(interval, hostId) + """) tt
              where d_calls > 0
              group by "name"
              order by """+order+"""  limit """ + str(adapt(limit))

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    sprocs = []

    for record in cur:
        record['avgTime'] = makeTimeIntervalReadable(record['avgTime'])
        record['totalTime'] = makeTimeIntervalReadable(record['totalTime'])
        sprocs.append(record)

    conn.close()

    return sprocs
Example #10
def getTablePerformanceIssues(hostname, date_from, date_to):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
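    # Call the get_table_threshold_sinners_for_period() DB function and repackage each result row as a plain dict.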
    cur.execute(
        """select * from monitor_data.get_table_threshold_sinners_for_period(%s,%s,%s)""",
        (hostname, date_from, date_to))
    data = []  # cur.fetchall()
    for r in cur:
        row = {
            'host_name': r['host_name'],
            'host_id': r['host_id'],
            'schema_name': r['schema_name'],
            'table_name': r['table_name'],
            'day': r['day'],
            'scan_change_pct': r['scan_change_pct'],
            'scans1': r['scans1'],
            'scans2': r['scans2'],
            'size1': r['size1'],
            'size2': r['size2'],
            'size_change_pct': r['size_change_pct'],
            'allowed_seq_scan_pct': r['allowed_seq_scan_pct'],
        }
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #11
 def topsprocsbyruntime(self):
     q = """
         select
             host_db_export_name as db,
             sproc_name,
             total_runtime
         from (
             select
                 *,
                 row_number() over(partition by host_db_export_name order by total_runtime desc)
             from (
                 select
                     host_db_export_name,
                     substring(sproc_name, 1, position ('(' in sproc_name)-1) as sproc_name,
                     max(sp_total_time)-min(sp_total_time) as total_runtime
                 from sprocs
                 join sproc_performance_data on sp_sproc_id = sproc_id
                 join hosts on host_id = sproc_host_id 
                 where sp_timestamp > now() - '7days'::interval
                 and host_db_export_name is not null
                 group by 1, 2
             ) a
         ) b
         where row_number <= 10
         order by host_db_export_name, total_runtime desc
     """
     topbyruntime = DataDB.execute(q)
     retdict=defaultdict(list)
     for r in topbyruntime:
         retdict[r['db']].append(r['sproc_name'])
     return json.dumps(retdict)
Example #12
def getTableIOData(host, name, interval=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableIOSql(host, name, interval))

    d = {'heap_read': [], 'heap_hit': [], 'index_read': [], 'index_hit': []}

    last_hr = None
    last_hh = None
    last_ir = None
    last_ih = None
    last_timestamp = 0

    for r in cur:

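        # Emit I/O deltas only when consecutive samples are within 15 minutes of each other.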
        if int(time.mktime(r['tio_timestamp'].timetuple()) *
               1000) - last_timestamp <= (15 * 60 * 1000):
            if last_hr != None:
                d['heap_read'].append(
                    (r['tio_timestamp'], r['tio_heap_read'] - last_hr))

            if last_hh != None:
                d['heap_hit'].append(
                    (r['tio_timestamp'], r['tio_heap_hit'] - last_hh))

            if last_ir != None:
                d['index_read'].append(
                    (r['tio_timestamp'], r['tio_idx_read'] - last_ir))

            if last_ih != None:
                d['index_hit'].append(
                    (r['tio_timestamp'], r['tio_idx_hit'] - last_ih))

        last_hr = r['tio_heap_read']
        last_hh = r['tio_heap_hit']
        last_ir = r['tio_idx_read']
        last_ih = r['tio_idx_hit']

        last_timestamp = int(
            time.mktime(r['tio_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return d
Example #13
def getLoadReportData():
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("""select load_host_id AS id,
                          extract(week from load_timestamp)::text AS kw,
                          round(avg(load_1min_value)/100,2) AS avg,
                          round(max(load_1min_value)/100,2) AS max,
                          to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                          to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                          min(load_timestamp::date) AS sort_date
                     from monitor_data.host_load , monitor_data.hosts
                    where host_id = load_host_id
                      and host_enabled
                      and load_timestamp > ('now'::timestamp - '9 weeks'::interval)
                      and extract(dow from load_timestamp) IN(1,2,3,4,5)
                    group by load_host_id, extract(week from load_timestamp)
                    order by 1 ASC,7 DESC""")

    data = defaultdict(list)

    lastAvg = None
    lastMax = None
    lastId = 0

    lastRR = None

    for r in cur:

        rr = {'id' : r['id'],
              'avg' : r['avg'],
              'max' : r['max'],
              'min_date' : r['min_date'],
              'max_date' : r['max_date'],
              'trendAvg': 0,
              'trendMax': 0,
              'kw' : r['kw']
              }

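        # Once the next week for the same host appears, annotate the previous week's row with trend flags.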
        if lastRR != None and lastRR['id']==rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(data.values(), key = lambda x : hosts.hosts[x[0]['id']]['settings']['uiShortName'])
Example #14
def getLoadReportData():
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("""select load_host_id AS id,
                          extract(week from load_timestamp)::text AS kw,
                          round(avg(load_1min_value)/100,2) AS avg,
                          round(max(load_1min_value)/100,2) AS max,
                          to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                          to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                          min(load_timestamp::date) AS sort_date
                     from monitor_data.host_load , monitor_data.hosts
                    where host_id = load_host_id
                      and host_enabled
                      and load_timestamp > ('now'::timestamp - '9 weeks'::interval)
                      and extract(dow from load_timestamp) IN(1,2,3,4,5)
                    group by load_host_id, extract(week from load_timestamp)
                    order by 1 ASC,7 DESC""")

    data = defaultdict(list)

    lastRR = None

    for r in cur:

        rr = {
            'id': r['id'],
            'avg': r['avg'],
            'max': r['max'],
            'min_date': r['min_date'],
            'max_date': r['max_date'],
            'trendAvg': 0,
            'trendMax': 0,
            'kw': r['kw']
        }

        if lastRR != None and lastRR['id'] == rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(
        data.values(),
        key=lambda x: hosts.hosts[x[0]['id']]['settings']['uiShortName'])
Example #15
def getSingleSprocData(name, hostId=1, interval=None, sprocNr = None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute( getSingleSprocSQL(name, hostId, interval, sprocNr ) )

    data = { 'calls' : [], 'self_time': [], 'total_time' : [] , 'avg_time' : [] , 'avg_self_time': [] , 'name' : name }

    for r in cur:
        data['calls'].append( ( r['xaxis'] , r['d_calls'] ) )
        data['total_time'].append ( ( r['xaxis'] , r['d_total_time'] ) )
        data['self_time'].append ( ( r['xaxis'] , r['d_self_time'] ) )
        data['avg_time'].append ( ( r['xaxis'] , r['d_avg_time'] ) )
        data['avg_self_time'].append ( ( r['xaxis'] , r['d_avg_self_time'] ) )

    cur.close()
    DataDB.closeDataConnection(conn)

    return data
Example #16
def load_filter_lines(host_id, _filter=None, interval=None):

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(get_filted_query(host_id, _filter, interval))
    l = []
    for row in cur:
        l.append((row['xaxis'], row['yaxis']))

    return l
Example #17
def load_filter_lines(host_id, _filter = None, interval = None):

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(get_filted_query(host_id,_filter,interval))
    l = []
    for row in cur:
        l.append ( ( row['xaxis'], row['yaxis'] ) )

    return l
Example #18
def getSprocsOrderedBy( hostId, order = " ORDER BY SUM(delta_total_time) DESC"):
    sql = """SELECT sproc_name
               FROM ( """ + viewSprocs() + """ ) t JOIN monitor_data.sprocs ON sp_sproc_id = sproc_id
               WHERE sproc_host_id = """ + str(hostId) + """
               GROUP BY sproc_name
             """ + order + """;
          """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    list= []
    cur.execute( sql )

    for r in cur:
        list.append ( r['sproc_name'] )

    cur.close()
    DataDB.closeDataConnection(conn)
    return list
Example #19
def getTableIOData(host, name, interval = None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableIOSql(host,name,interval))

    d = { 'heap_read' : [], 'heap_hit' : [], 'index_read' : [], 'index_hit': [] }

    last_hr = None
    last_hh = None
    last_ir = None
    last_ih = None
    last_timestamp = 0

    for r in cur:

        if int(time.mktime(r['tio_timestamp'].timetuple()) * 1000) - last_timestamp <= ( 15*60*1000 ):
            if last_hr != None:
                d['heap_read'].append(( r['tio_timestamp'] , r['tio_heap_read'] - last_hr ))

            if last_hh != None:
                d['heap_hit'].append(( r['tio_timestamp'] , r['tio_heap_hit'] - last_hh ))

            if last_ir != None:
                d['index_read'].append(( r['tio_timestamp'] , r['tio_idx_read'] - last_ir ))

            if last_ih != None:
                d['index_hit'].append(( r['tio_timestamp'] , r['tio_idx_hit'] - last_ih ))

        last_hr = r['tio_heap_read']
        last_hh = r['tio_heap_hit']
        last_ir = r['tio_idx_read']
        last_ih = r['tio_idx_hit']

        last_timestamp = int(time.mktime(r['tio_timestamp'].timetuple()) * 1000)


    cur.close()
    DataDB.closeDataConnection(conn)

    return d
Example #20
def getGroupsData():
    conn = DataDB.getDataConnection()
    groups = {}
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("SELECT * FROM monitor_data.host_groups;")
    for g in cur:
        groups[g['group_id']] = g['group_name']

    cur.close()
    conn.close()
    return groups
Example #21
def getSprocsOrderedBy(hostId, order=" ORDER BY SUM(delta_total_time) DESC"):
    sql = """SELECT sproc_name
               FROM ( """ + viewSprocs(
    ) + """ ) t JOIN monitor_data.sprocs ON sp_sproc_id = sproc_id
               WHERE sproc_host_id = """ + str(adapt(hostId)) + """
               GROUP BY sproc_name
             """ + order + """;
          """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    list = []
    cur.execute(sql)

    for r in cur:
        list.append(r['sproc_name'])

    cur.close()
    DataDB.closeDataConnection(conn)
    return list
Example #22
def getHostData():
    conn = DataDB.getDataConnection()
    hosts = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("SELECT * FROM monitor_data.hosts WHERE host_enabled = true ORDER BY host_id ASC;")
    for r in cur:
        rr = dict(r)
        rr['settings'] = json.loads(rr['host_settings'])
        hosts[rr['host_id']] = rr

    cur.close()
    conn.close()
    return hosts
Example #23
def getHostData():
    conn = DataDB.getDataConnection()
    hosts = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("SELECT * FROM monitor_data.hosts WHERE host_enabled = true ORDER BY host_id ASC;")
    for r in cur:
        rr = dict(r)
        rr['settings'] = json.loads(rr['host_settings'])
        rr['uishortname'] = rr['settings']['uiShortName'].lower().replace('-','')
        rr['uilongname'] = rr['settings']['uiLongName']
        hosts[rr['host_id']] = rr

    cur.close()
    conn.close()
    return hosts
Example #24
def getCpuLoad(hostId=1):
    load = { "load_15min_avg" : [] , "load_15min_max" : [] }

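    # Bucket 1-minute load samples into 15-minute intervals (avg and max) over the last 9 days.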
    sql = """ SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                     AVG(load_1min_value) AS load_15min_avg,
                     MAX(load_1min_value) AS load_15min_max
                FROM monitor_data.host_load WHERE load_host_id = """ + str(adapt(hostId)) + """ AND load_timestamp > ('now'::timestamp - '9 days'::interval)
                GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
                ORDER BY 1 ASC """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)
    for record in cur:
        load['load_15min_avg'].append( (record['load_timestamp'] , round( float(record['load_15min_avg'])/100,2) ) )
        load['load_15min_max'].append( (record['load_timestamp'] , round( float(record['load_15min_max'])/100,2) ) )

    return load
Example #25
def getCpuLoad(hostId=1):
    load = { "load_15min_avg" : [] , "load_15min_max" : [] }

    sql = """ SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                     AVG(load_1min_value) AS load_15min_avg,
                     MAX(load_1min_value) AS load_15min_max
                FROM monitor_data.host_load WHERE load_host_id = """ + str(adapt(hostId)) + """ AND load_timestamp > ('now'::timestamp - '9 days'::interval)
                GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
                ORDER BY 1 ASC """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)
    for record in cur:
        load['load_15min_avg'].append( (record['load_timestamp'] , round( float(record['load_15min_avg'])/100,2) ) )
        load['load_15min_max'].append( (record['load_timestamp'] , round( float(record['load_15min_max'])/100,2) ) )

    return load
Example #26
def getWalVolumes(hostId=1):
    load = { "wal_15min_growth" : []}

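    # WAL written per 15-minute bucket, computed as max minus min xlog location (MB) within the bucket.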
    sql = """
            SELECT 
                date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                coalesce(max(xlog_location_mb)-min(xlog_location_mb),0)  AS wal_15min_growth
            FROM monitor_data.host_load WHERE load_host_id = """ + str(adapt(hostId)) + """ AND load_timestamp > ('now'::timestamp - '9 days'::interval)
            GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
            ORDER BY 1 ASC
            """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)
    for record in cur:
        load['wal_15min_growth'].append( (record['load_timestamp'] , record['wal_15min_growth'] ) )

    return load
Example #27
def getLoad(hostId=1):
    sql = """select xaxis, sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000) AS load_15min,
                           sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) / (4*15*60*1000) AS load_1hour
               from ( select xaxis,sum(d_self_time) d_self_time from (""" + getSQL("('now'::timestamp - '9 days'::interval)" ,hostId) + """) dataTabel group by xaxis ) loadTable """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    load = { 'load_15min' : [], 'load_1hour': [] }

    print ( sql )

    cur.execute(sql)
    lastTime = None
    skip15min=0
    skip1h=0

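    # After a gap of more than 15 minutes, skip the next few points so the windowed loads are not computed across the gap.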
    for record in cur:
        currentTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)
        if lastTime != None:
            if currentTime - lastTime > ( 15 * 60 * 1000):
                skip15min = 2
                skip1h=5

        if skip15min>0:
            skip15min -= 1
        else:
            load['load_15min'].append((record['xaxis'], round ( record['load_15min'], 2 ) ) )

        if skip1h > 0:
            skip1h -= 1
        else:
            load['load_1hour'].append((record['xaxis'], round ( record['load_1hour'] , 2 )))

        lastTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)

    cur.close()
    conn.close()

    return load
Example #28
def getApiPerformanceIssues(hostname, api_from, api_to):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(
        """select * from monitor_data.get_sproc_threshold_sinners_for_release(%s,%s,%s)""",
        (hostname, api_from, api_to))
    data = []  # cur.fetchall()
    for r in cur:
        row = {
            'host_name': r['host_name'],
            'host_id': r['host_id'],
            'sproc_schema': r['sproc_schema'],
            'sproc_name': r['sproc_name'],
            'calltime_change_pct': r['calltime_change_pct'],
            'share_on_total_runtime': r['share_on_total_runtime'],
            'execution_avg1': r['execution_avg1'],
            'execution_avg2': r['execution_avg2'],
            'calls1': r['calls1'],
            'calls2': r['calls2'],
            'callscount_change_pct': r['callscount_change_pct'],
            'allowed_runtime_growth_pct': r['allowed_runtime_growth_pct'],
            'allowed_share_on_total_runtime_pct': r['allowed_share_on_total_runtime_pct'],
        }
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #29
def getLoad(hostId=1):
    sql = """select xaxis, sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000) AS load_15min,
                           sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) / (4*15*60*1000) AS load_1hour
               from ( select xaxis,sum(d_self_time) d_self_time from (""" + getSQL("('now'::timestamp - '9 days'::interval)" ,hostId) + """) dataTabel group by xaxis ) loadTable """

    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    load = { 'load_15min' : [], 'load_1hour': [] }

    cur.execute(sql)
    lastTime = None
    skip15min=0
    skip1h=0

    for record in cur:
        currentTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)
        if lastTime != None:
            if currentTime - lastTime > ( 15 * 60 * 1000):
                skip15min = 2
                skip1h=5

        if skip15min>0:
            skip15min -= 1
        else:
            load['load_15min'].append((record['xaxis'], round ( record['load_15min'], 2 ) ) )

        if skip1h > 0:
            skip1h -= 1
        else:
            load['load_1hour'].append((record['xaxis'], round ( record['load_1hour'] , 2 )))

        lastTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)

    cur.close()
    conn.close()

    return load
Example #30
def getTablePerformanceIssues(hostname, date_from, date_to):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select * from monitor_data.get_table_threshold_sinners_for_period(%s,%s,%s)""", (hostname, date_from, date_to))
    data = [] # cur.fetchall()
    for r in cur:
        row = {'host_name' : r['host_name'],
              'host_id' : r['host_id'],
              'schema_name' : r['schema_name'],
              'table_name' : r['table_name'],
              'day' : r['day'],
              'scan_change_pct' : r['scan_change_pct'],
              'scans1': r['scans1'],
              'scans2': r['scans2'],
              'size1': r['size1'],
              'size2': r['size2'],
              'size_change_pct': r['size_change_pct'],
              'allowed_seq_scan_pct': r['allowed_seq_scan_pct'],
              }
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #31
def getApiPerformanceIssues(hostname, api_from, api_to):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select * from monitor_data.get_sproc_threshold_sinners_for_release(%s,%s,%s)""", (hostname, api_from, api_to))
    data = [] # cur.fetchall()
    for r in cur:
        row = {'host_name' : r['host_name'],
              'host_id' : r['host_id'],
              'sproc_schema' : r['sproc_schema'],
              'sproc_name' : r['sproc_name'],
              'calltime_change_pct' : r['calltime_change_pct'],
              'share_on_total_runtime' : r['share_on_total_runtime'],
              'execution_avg1': r['execution_avg1'],
              'execution_avg2': r['execution_avg2'],
              'calls1': r['calls1'],
              'calls2': r['calls2'],
              'callscount_change_pct': r['callscount_change_pct'],
              'allowed_runtime_growth_pct': r['allowed_runtime_growth_pct'],
              'allowed_share_on_total_runtime_pct': r['allowed_share_on_total_runtime_pct'],
              }
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #32
def getIndexIssues(hostname):
    q_invalid = """
        SELECT
        *,
        CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
                SELECT
                %s as host_name,
                %s as host_id,
                schemaname||'.'||relname AS table_full_name,
                schemaname||'.'||indexrelname AS index_full_name,
                index_size_bytes,
                indexes_size_bytes,
                pg_size_pretty(index_size_bytes) AS index_size,
                pg_size_pretty(indexes_size_bytes) AS indexes_size,
                pg_size_pretty(table_size_bytes) AS table_size,
                sum(index_size_bytes) over () AS total_marked_index_size_bytes
                FROM
                (
                  SELECT quote_ident(schemaname) as schemaname,
                         quote_ident(relname) as relname,
                         quote_ident(indexrelname) as indexrelname,
                         pg_relation_size(i.indexrelid) AS index_size_bytes,
                         pg_indexes_size(i.relid) AS indexes_size_bytes,                 
                         pg_relation_size(i.relid) AS table_size_bytes
                  FROM pg_stat_user_indexes i
                  JOIN pg_index USING(indexrelid) 
                  WHERE NOT indisvalid
                ) a                
        ) b 
        ORDER BY index_size_bytes DESC, index_full_name
    """
    q_unused = """
        SELECT
        *,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
          SELECT
          *,
          pg_size_pretty(index_size_bytes) AS index_size,
          pg_size_pretty(indexes_size_bytes) AS indexes_size,
          pg_size_pretty(table_size_bytes) AS table_size,
          CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
          sum(index_size_bytes) over () AS total_marked_index_size_bytes
          FROM (
          SELECT   %s as host_name,
                   %s as host_id,
                   quote_ident(schemaname)||'.'||quote_ident(relname) AS table_full_name,
                   quote_ident(schemaname)||'.'||quote_ident(indexrelname) AS index_full_name,
                   pg_relation_size(i.indexrelid) as index_size_bytes,
                   pg_indexes_size(i.relid) AS indexes_size_bytes,
                   pg_relation_size(i.relid) AS table_size_bytes,
                   idx_scan AS scans
              FROM pg_stat_user_indexes i 
              JOIN pg_index USING(indexrelid) 
              WHERE NOT indisunique
              AND NOT schemaname LIKE ANY (ARRAY['tmp%%','temp%%'])
          ) a
          WHERE index_size_bytes > %s
          AND scans <= %s          
        ) b
        ORDER BY scans, index_size_bytes DESC
    """
    q_duplicate = """
        SELECT %s AS host_name,
               %s as host_id,
               n.nspname||'.'||ci.relname AS index_full_name,
               n.nspname||'.'||ct.relname AS table_full_name,
               pg_size_pretty(pg_total_relation_size(ct.oid)) AS table_size,
               pg_total_relation_size(ct.oid) AS table_size_bytes,
               n.nspname AS schema_name,
               index_names,
               def,
               count
        FROM (
          select regexp_replace(replace(pg_get_indexdef(i.indexrelid),c.relname,'X'), '^CREATE UNIQUE','CREATE') as def,
                 max(indexrelid) as indexrelid,
                 max(indrelid) as indrelid,
                 count(1),
                 array_agg(relname::text) as index_names
            from pg_index i
            join pg_class c
              on c.oid = i.indexrelid
           where indisvalid
           group 
              by regexp_replace(replace(pg_get_indexdef(i.indexrelid),c.relname,'X'), '^CREATE UNIQUE','CREATE')
          having count(1) > 1
        ) a
          JOIN pg_class ci
            ON ci.oid=a.indexrelid        
          JOIN pg_class ct
            ON ct.oid=a.indrelid
          JOIN pg_namespace n
            ON n.oid=ct.relnamespace
         ORDER
            BY count DESC, table_size_bytes DESC, schema_name, table_full_name
    """
    q_active_hosts="""
        select
            host_id,
            host_name,
            host_user,
            host_password,
            host_db
        from monitor_data.hosts
        where host_enabled
        and (%s = 'all' or host_name=%s)
        """
    q_indexing_thresholds="""select * from monitor_data.perf_indexes_thresholds"""
    data_invalid = []
    data_unused = []
    data_duplicate = []
    data_noconnect = []
    conn=None

    hosts = DataDB.execute(q_active_hosts, (hostname, hostname))      
    indexing_thresholds = DataDB.execute(q_indexing_thresholds)[0]

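    # Connect to each enabled host directly and collect invalid, rarely used, and duplicate indexes.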
    for h in hosts:
        try:
            #print ('processing: {}', h)
            conn = psycopg2.connect(host=h['host_name'], dbname=h['host_db'], user=h['host_user'], password=h['host_password'],connect_timeout='3')
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(q_invalid, (h['host_name'], h['host_id']))
            data_invalid += cur.fetchall()
            cur.execute(q_unused, (h['host_name'], h['host_id'], indexing_thresholds['pit_min_size_to_report'], indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
            cur.execute(q_duplicate, (h['host_name'], h['host_id']))
            data_duplicate += cur.fetchall()
        except Exception, e:
            print ('ERROR could not connect to {}:{}'.format(h['host_name'], e))
            data_noconnect.append({'host_id':h['host_id'],'host_name': h['host_name']})
        finally:
Example #33
def getLoadReportData(hostId=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
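    # The CTE "q" computes an approximate per-host, per-week database size at that week's latest size sample.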
    query = """
            with q as (
                select 
                  t_host_id
                , week
                , (select round((sum(tsd_table_size) + sum(tsd_table_size))/10^9::numeric,1)::text
                     from monitor_data.table_size_data
                    where tsd_timestamp = max) as db_size 
                from (
                    select
                        t_host_id
                      , extract(week from tsd_timestamp) as week               
                      , max(tsd_timestamp)
                    from  monitor_data.table_size_data
                        , monitor_data.tables
                    where tsd_timestamp > ('now'::timestamp - '9 weeks'::interval)
                    and tsd_table_id = t_id
                    and (%s is null or t_host_id = %s)
                    group by t_host_id, extract(week from tsd_timestamp)
                ) a
            )
            select 
                  load_host_id AS id,
                  extract(week from load_timestamp)::text AS kw,
                  round(avg(load_1min_value)/100,2) AS avg,
                  round(max(load_1min_value)/100,2) AS max,
                  to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                  to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                  min(load_timestamp::date) AS sort_date,
                  max(q.db_size) as db_size,
                  round((max(xlog_location_mb) - min(xlog_location_mb)) / 1000.0, 1)  as wal_written
             from monitor_data.host_load
                , monitor_data.hosts
                , q
            where host_id = load_host_id
              and host_enabled
              and load_timestamp > ('now'::timestamp - '9 weeks'::interval)
              and extract(dow from load_timestamp) IN(1,2,3,4,5)                      
              and q.t_host_id = load_host_id
              and q.week = extract(week from load_timestamp)
              and (%s is null or host_id = %s)
            group by load_host_id, extract(week from load_timestamp)
            order by 1 ASC,7 DESC
            """
    cur.execute(query, (hostId,hostId,hostId,hostId))

    data = defaultdict(list)

    lastRR = None

    for r in cur:

        rr = {'id' : r['id'],
              'avg' : r['avg'],
              'max' : r['max'],
              'min_date' : r['min_date'],
              'max_date' : r['max_date'],
              'db_size' : r['db_size'],
              'wal_written' : r['wal_written'],
              'trendAvg': 0,
              'trendMax': 0,
              'kw' : r['kw']
              }

        if lastRR != None and lastRR['id']==rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

            if lastRR['db_size'] < r['db_size']:
                lastRR['trendSize'] = -1
            elif lastRR['db_size'] > r['db_size']:
                lastRR['trendSize'] = 1

            if lastRR['wal_written'] < r['wal_written']:
                lastRR['trendWal'] = -1
            elif lastRR['wal_written'] > r['wal_written']:
                lastRR['trendWal'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(data.values(), key = lambda x : hosts.hosts[x[0]['id']]['settings']['uiShortName'])
Example #34
        try:
            #print ('processing: {}', h)
            conn = psycopg2.connect(host=h['host_name'], dbname=h['host_db'], user=h['host_user'], password=h['host_password'],connect_timeout='3')
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(q_invalid, (h['host_name'], h['host_id']))
            data_invalid += cur.fetchall()
            cur.execute(q_unused, (h['host_name'], h['host_id'], indexing_thresholds['pit_min_size_to_report'], indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
            cur.execute(q_duplicate, (h['host_name'], h['host_id']))
            data_duplicate += cur.fetchall()
        except Exception, e:
            print ('ERROR could not connect to {}:{}'.format(h['host_name'], e))
            data_noconnect.append({'host_id':h['host_id'],'host_name': h['host_name']})
        finally:
            if conn and not conn.closed:
                conn.close()
    
    data_invalid.sort(key=lambda x:x['index_size_bytes'],reverse=True)
    data_unused.sort(key=lambda x:x['index_size_bytes'],reverse=True)
    data_duplicate.sort(key=lambda x:x['table_size_bytes'],reverse=True)

    return {'invalid':data_invalid, 'duplicate':data_duplicate, 'unused':data_unused, 'noconnect':data_noconnect}

if __name__ == '__main__':
    DataDB.setConnectionString("dbname=dbmonitor host=localost user=postgres password=postgres")
    #print (getTablePerformanceIssues('customer1.db.zalando',datetime.date(2013,8,23),datetime.date(2013,8,26)))
    #print (getApiPerformanceIssues('stock2.db.zalando','r13_00_33','r13_00_34'))
    #print (getIndexIssues('all'))
    print (getIndexIssues('bm-master.db.zalando')['unused'])

Example #35
def getTableData(host, name, interval=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableSql(host, name, interval))

    d = {
        "table_size": [],
        "index_size": [],
        "seq_scans": [],
        "index_scans": [],
        "ins": [],
        "upd": [],
        "del": [],
        "hot": [],
    }

    last_is = None
    last_ss = None

    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d["table_size"].append((r["tsd_timestamp"], r["tsd_table_size"]))
        d["index_size"].append((r["tsd_timestamp"], r["tsd_index_size"]))

        if int(time.mktime(r["tsd_timestamp"].timetuple()) * 1000) - last_timestamp <= (15 * 60 * 1000):
            if last_ss != None:
                d["seq_scans"].append((r["tsd_timestamp"], r["tsd_seq_scans"] - last_ss))

            if last_is != None:
                d["index_scans"].append((r["tsd_timestamp"], r["tsd_index_scans"] - last_is))

            if last_ins != None and last_ins != 0:
                d["ins"].append((r["tsd_timestamp"], r["tsd_tup_ins"] - last_ins))

            if last_del != None and last_del != 0:
                d["del"].append((r["tsd_timestamp"], r["tsd_tup_del"] - last_del))

            if last_upd != None and last_upd != 0:
                d["upd"].append((r["tsd_timestamp"], r["tsd_tup_upd"] - last_upd))

            if last_hot != None and last_hot != 0:
                d["hot"].append((r["tsd_timestamp"], r["tsd_tup_hot_upd"] - last_hot))

        last_is = r["tsd_index_scans"]
        last_ss = r["tsd_seq_scans"]

        last_ins = r["tsd_tup_ins"]
        last_del = r["tsd_tup_del"]
        last_upd = r["tsd_tup_upd"]
        last_hot = r["tsd_tup_hot_upd"]

        last_timestamp = int(time.mktime(r["tsd_timestamp"].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return d
Example #36
def getTopTables(hostId=1, limit=10, order=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    if limit == None:
        limit = ""
    else:
        limit = """ LIMIT """ + str(adapt(limit))

    if order == None:
        order = 2

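    # Map the numeric sort selector onto an ORDER BY clause (by name, current size, absolute growth, or relative growth).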
    order = { 1: "ORDER BY schema ASC,name ASC ",
              2: "ORDER BY table_size DESC" ,
              3: "ORDER BY table_size - min_table_size DESC",
              4: "ORDER BY CASE WHEN min_table_size > 0 THEN table_size::float / min_table_size ELSE 0 END DESC",
              5: "ORDER BY index_size DESC",
              6: "ORDER BY index_size - min_index_size DESC",
              7: "ORDER BY CASE WHEN min_index_size > 0 THEN index_size::float / min_index_size ELSE 0 END DESC" }[int(order)]

    cur.execute("""SELECT MAX(tsd_timestamp) AS max_date
                     FROM monitor_data.table_size_data
                    WHERE tsd_table_id IN ( SELECT t_id FROM monitor_data.tables WHERE t_host_id = """+str(adapt(hostId))+""")
                      AND tsd_timestamp > current_date - 1""")

    maxTime = None
    for record in cur:
        maxTime = record['max_date']

    if maxTime == None:
        return []

    sql = """SELECT * FROM ( SELECT t_schema AS schema,
                          t_name AS name,
                          tsd_table_size AS table_size,
                          tsd_index_size AS index_size,
                          COALESCE ( ( SELECT MIN(tsd_table_size) FROM monitor_data.table_size_data st WHERE td.tsd_table_id = st.tsd_table_id AND st.tsd_timestamp > ('now'::timestamp - '7 days'::interval) ), 0) AS min_table_size,
                          COALESCE ( ( SELECT MIN(tsd_index_size) FROM monitor_data.table_size_data st WHERE td.tsd_table_id = st.tsd_table_id AND st.tsd_timestamp > ('now'::timestamp - '7 days'::interval) ), 0) AS min_index_size

                     FROM monitor_data.table_size_data td
                     JOIN monitor_data.tables ON t_id = td.tsd_table_id
                    WHERE td.tsd_timestamp = """ + str(adapt(maxTime)) + """ AND t_host_id = """ + str(adapt(hostId)) + """ ) _t """ + order + """ """ + limit

    cur.execute( sql )

    list = []
    for r in cur:

        d = {}
        for k in r.keys():
            d[k] = r[k]

        d['table_size_pretty'] = makePrettySize( r['table_size'] )
        d['index_size_pretty'] = makePrettySize( r['index_size'] )
        d['table_size_delta'] = makePrettySize( r['table_size'] - r['min_table_size'] )
        d['index_size_delta'] = makePrettySize( r['index_size'] - r['min_index_size'] )
        if r['min_table_size'] > 0:
            d['growth'] = round( ( ( float(r['table_size']) / r['min_table_size'] ) - 1) * 100 , 1 )
        else:
            d['growth'] = 0

        if r['min_index_size'] > 0:
            d['growth_index'] = round( ( ( float(r['index_size']) / r['min_index_size']) - 1) * 100 , 1 )
        else:
            d['growth_index'] = 0

        list.append(d)

    return list
Example #37
                         indexing_thresholds['pit_min_size_to_report'],
                         indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
        except Exception, e:
            print('ERROR could not connect to {}:{}'.format(h['host_name'], e))
            data_noconnect.append({
                'host_id': h['host_id'],
                'host_name': h['host_name']
            })
        finally:
            if conn and not conn.closed:
                conn.close()

    data_invalid.sort(key=lambda x: x['index_size_bytes'], reverse=True)
    data_unused.sort(key=lambda x: x['index_size_bytes'], reverse=True)

    return {
        'invalid': data_invalid,
        'unused': data_unused,
        'noconnect': data_noconnect
    }


if __name__ == '__main__':
    DataDB.setConnectionString(
        "dbname=dbmonitor host=localost user=postgres password=postgres")
    #print (getTablePerformanceIssues('customer1.db.zalando',datetime.date(2013,8,23),datetime.date(2013,8,26)))
    #print (getApiPerformanceIssues('stock2.db.zalando','r13_00_33','r13_00_34'))
    #print (getIndexIssues('all'))
    print(getIndexIssues('bm-master.db.zalando')['unused'])
Example #38
def getDatabaseSizes(host_id = None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSizeTrendSQL(host_id))
    size_data = {}

    current_host = 0

    last_timestamp = None

    for record in cur:

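        # Rows are grouped by host; reset the delta baselines whenever a new host starts.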
        if record['t_host_id'] != current_host:
            current_host = record['t_host_id']
            set_ins = False
            set_del = False
            set_upd = False

            l_ins = None
            l_upd = None
            l_del = None
            last_timestamp = None

        if last_timestamp == None:
            last_timestamp = int(time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

        if not record['t_host_id'] in size_data:
            size_data[record['t_host_id']] = { 'size' : [] , 'ins': [], 'upd': [], 'del':[] }

        """ exclude 0 values, otherwise there is a big peak at start, with wraparound this should be ok"""

        if not set_ins and record['s_ins']!=0:
            l_ins = record['s_ins']
            set_ins = True

        if not set_upd and record['s_upd']!=0:
            l_upd = record['s_upd']
            set_upd = True

        if not set_del and record['s_del']!=0:
            l_del = record['s_del']
            set_del = True

        if l_ins == None:
            l_ins = record['s_ins']

        if l_upd == None:
            l_upd = record['s_upd']

        if l_del == None:
            l_del = record['s_del']

        size_data[record['t_host_id']]['size'].append( ( record['tsd_timestamp'] , record['size'] ) )
        size_data[record['t_host_id']]['ins'].append( ( record['tsd_timestamp'] , max( record['s_ins'] - l_ins , 0)  ) )
        size_data[record['t_host_id']]['del'].append( ( record['tsd_timestamp'] , max( record['s_del'] - l_del , 0)  ) )
        size_data[record['t_host_id']]['upd'].append( ( record['tsd_timestamp'] , max( record['s_upd'] - l_upd , 0)  ) )

        l_ins = record['s_ins']
        l_upd = record['s_upd']
        l_del = record['s_del']

        last_timestamp = int(time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return size_data
Example #39
def getLoadReportData():
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    query = """
            with q as (
                select 
                  t_host_id
                , week
                , (select round((sum(tsd_table_size) + sum(tsd_table_size))/10^9::numeric,1)::text
                     from monitor_data.table_size_data
                    where tsd_timestamp = max) as db_size 
                from (
                    select
                        t_host_id
                      , extract(week from tsd_timestamp) as week               
                      , max(tsd_timestamp)
                    from  monitor_data.table_size_data
                        , monitor_data.tables
                    where tsd_timestamp > ('now'::timestamp - '9 weeks'::interval)
                    and tsd_table_id = t_id
                    group by t_host_id, extract(week from tsd_timestamp)
                ) a
            )
            select 
                  load_host_id AS id,
                  extract(week from load_timestamp)::text AS kw,
                  round(avg(load_1min_value)/100,2) AS avg,
                  round(max(load_1min_value)/100,2) AS max,
                  to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                  to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                  min(load_timestamp::date) AS sort_date,
                  max(q.db_size) as db_size
             from monitor_data.host_load
                , monitor_data.hosts
                , q
            where host_id = load_host_id
              and host_enabled
              and load_timestamp > ('now'::timestamp - '9 weeks'::interval)
              and extract(dow from load_timestamp) IN(1,2,3,4,5)                      
              and q.t_host_id = load_host_id
              and q.week = extract(week from load_timestamp)
            group by load_host_id, extract(week from load_timestamp)
            order by 1 ASC,7 DESC
            """
    cur.execute(query)

    data = defaultdict(list)

    lastRR = None

    for r in cur:

        rr = {
            'id': r['id'],
            'avg': r['avg'],
            'max': r['max'],
            'min_date': r['min_date'],
            'max_date': r['max_date'],
            'db_size': r['db_size'],
            'trendAvg': 0,
            'trendMax': 0,
            'trendSize': 0,
            'kw': r['kw']
        }

        if lastRR != None and lastRR['id'] == rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

            if lastRR['db_size'] < r['db_size']:
                lastRR['trendSize'] = -1
            elif lastRR['db_size'] > r['db_size']:
                lastRR['trendSize'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(
        data.values(),
        key=lambda x: hosts.hosts[x[0]['id']]['settings']['uiShortName'])
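Because the query orders each host's rows newest week first (column 7, sort_date, descending), lastRR holds the more recent week when the comparison runs, so the -1/1 flags record whether load fell or rose going into that newer week. A small hypothetical illustration of the pairwise rule:

def week_over_week_trend(newer, older):
    # -1 if the newer week's value dropped, 1 if it rose, 0 if unchanged
    if newer < older:
        return -1
    elif newer > older:
        return 1
    return 0

weekly_max = [2.31, 2.75, 2.10]   # hypothetical values, newest week first
print [week_over_week_trend(weekly_max[i], weekly_max[i + 1]) for i in range(len(weekly_max) - 1)]
# -> [-1, 1]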
Example #40
0
def getIndexIssues(hostname):
    q_invalid = """
        SELECT
        *,
        CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
                SELECT
                %s as host_name,
                %s as host_id,
                schemaname||'.'||relname AS table_full_name,
                schemaname||'.'||indexrelname AS index_full_name,
                index_size_bytes,
                indexes_size_bytes,
                pg_size_pretty(index_size_bytes) AS index_size,
                pg_size_pretty(indexes_size_bytes) AS indexes_size,
                pg_size_pretty(table_size_bytes) AS table_size,
                sum(index_size_bytes) over () AS total_marked_index_size_bytes
                FROM
                (
                  SELECT quote_ident(schemaname) as schemaname,
                         quote_ident(relname) as relname,
                         quote_ident(indexrelname) as indexrelname,
                         pg_relation_size(i.indexrelid) AS index_size_bytes,
                         pg_indexes_size(i.relid) AS indexes_size_bytes,                 
                         pg_relation_size(i.relid) AS table_size_bytes
                  FROM pg_stat_user_indexes i
                  JOIN pg_index USING(indexrelid) 
                  WHERE NOT indisvalid
                ) a
                ORDER BY index_size_bytes DESC, relname
        ) b 
    """
    q_unused = """
        SELECT
        *,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
          SELECT
          *,
          pg_size_pretty(index_size_bytes) AS index_size,
          pg_size_pretty(indexes_size_bytes) AS indexes_size,
          pg_size_pretty(table_size_bytes) AS table_size,
          CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
          sum(index_size_bytes) over () AS total_marked_index_size_bytes
          FROM (
          SELECT   %s as host_name,
                   %s as host_id,
                   quote_ident(schemaname)||'.'||quote_ident(relname) AS table_full_name,
                   quote_ident(schemaname)||'.'||quote_ident(indexrelname) AS index_full_name,
                   pg_relation_size(i.indexrelid) as index_size_bytes,
                   pg_indexes_size(i.relid) AS indexes_size_bytes,
                   pg_relation_size(i.relid) AS table_size_bytes,
                   idx_scan AS scans
              FROM pg_stat_user_indexes i 
              JOIN pg_index USING(indexrelid) 
              WHERE NOT indisunique
              AND NOT schemaname LIKE ANY (ARRAY['tmp%%','temp%%'])
          ) a
          WHERE index_size_bytes > %s
          AND scans <= %s
          ORDER BY scans, index_size_bytes DESC
        ) b
    """
    q_active_hosts = """
        select
            host_id,
            host_name,
            host_user,
            host_password,
            host_db
        from monitor_data.hosts
        where host_enabled
        and (%s = 'all' or host_name=%s)
        """
    q_indexing_thresholds = """select * from monitor_data.perf_indexes_thresholds"""
    data_invalid = []
    data_unused = []
    data_noconnect = []
    conn = None

    hosts = DataDB.execute(q_active_hosts, (hostname, hostname))
    indexing_thresholds = DataDB.execute(q_indexing_thresholds)[0]

    for h in hosts:
        try:
            #print ('processing: {}', h)
            conn = psycopg2.connect(host=h['host_name'],
                                    dbname=h['host_db'],
                                    user=h['host_user'],
                                    password=h['host_password'],
                                    connect_timeout='3')
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(q_invalid, (h['host_name'], h['host_id']))
            data_invalid += cur.fetchall()
            cur.execute(q_unused,
                        (h['host_name'], h['host_id'],
                         indexing_thresholds['pit_min_size_to_report'],
                         indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
        except Exception, e:
            print('ERROR could not connect to {}:{}'.format(h['host_name'], e))
            data_noconnect.append({
                'host_id': h['host_id'],
                'host_name': h['host_name']
            })
        finally:
            # assumed completion: the source example is truncated here; closing the
            # per-host connection and returning the collected lists is the minimal fix
            if conn and not conn.closed:
                conn.close()

    return data_invalid, data_unused, data_noconnect
Example #41
0
def getTopTables(hostId=1, limit=10, order=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    if limit == None:
        limit = ""
    else:
        limit = """ LIMIT """ + str(adapt(limit))

    if order == None:
        order = 2

    order = {
        1:
        "ORDER BY schema ASC,name ASC ",
        2:
        "ORDER BY table_size DESC",
        3:
        "ORDER BY table_size - min_table_size DESC",
        4:
        "ORDER BY CASE WHEN min_table_size > 0 THEN table_size::float / min_table_size ELSE 0 END DESC",
        5:
        "ORDER BY index_size DESC",
        6:
        "ORDER BY index_size - min_index_size DESC",
        7:
        "ORDER BY CASE WHEN min_index_size > 0 THEN index_size::float / min_index_size ELSE 0 END DESC"
    }[int(order)]

    cur.execute("""SELECT MAX(tsd_timestamp) AS max_date
                     FROM monitor_data.table_size_data
                    WHERE tsd_table_id IN ( SELECT t_id FROM monitor_data.tables WHERE t_host_id = """
                + str(adapt(hostId)) + """)
                      AND tsd_timestamp > current_date - 1""")

    maxTime = None
    for record in cur:
        maxTime = record['max_date']

    if maxTime == None:
        return []

    sql = """SELECT * FROM ( SELECT t_schema AS schema,
                          t_name AS name,
                          tsd_table_size AS table_size,
                          tsd_index_size AS index_size,
                          COALESCE ( ( SELECT MIN(tsd_table_size) FROM monitor_data.table_size_data st WHERE td.tsd_table_id = st.tsd_table_id AND st.tsd_timestamp > ('now'::timestamp - '7 days'::interval) ), 0) AS min_table_size,
                          COALESCE ( ( SELECT MIN(tsd_index_size) FROM monitor_data.table_size_data st WHERE td.tsd_table_id = st.tsd_table_id AND st.tsd_timestamp > ('now'::timestamp - '7 days'::interval) ), 0) AS min_index_size

                     FROM monitor_data.table_size_data td
                     JOIN monitor_data.tables ON t_id = td.tsd_table_id
                    WHERE td.tsd_timestamp = """ + str(
        adapt(maxTime)) + """ AND t_host_id = """ + str(
            adapt(hostId)) + """ ) _t """ + order + """ """ + limit

    cur.execute(sql)

    result = []  # renamed from 'list' to avoid shadowing the built-in
    for r in cur:

        d = {}
        for k in r.keys():
            d[k] = r[k]

        d['table_size_pretty'] = makePrettySize(r['table_size'])
        d['index_size_pretty'] = makePrettySize(r['index_size'])
        d['table_size_delta'] = makePrettySize(r['table_size'] -
                                               r['min_table_size'])
        d['index_size_delta'] = makePrettySize(r['index_size'] -
                                               r['min_index_size'])
        if r['min_table_size'] > 0:
            d['growth'] = round(
                ((float(r['table_size']) / r['min_table_size']) - 1) * 100, 1)
        else:
            d['growth'] = 0

        if r['min_index_size'] > 0:
            d['growth_index'] = round(
                ((float(r['index_size']) / r['min_index_size']) - 1) * 100, 1)
        else:
            d['growth_index'] = 0

        result.append(d)

    cur.close()
    DataDB.closeDataConnection(conn)

    return result
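The order argument is a small lookup of sort modes: 1 orders by schema and name, 2 by current table size, 3 by absolute table growth over the 7-day window, 4 by relative table growth, and 5-7 do the same for indexes. A hedged usage sketch, assuming DataDB has already been pointed at a monitor database with setConnectionString:

# hypothetical call; host id 1 and a configured connection are assumed
top_growers = getTopTables(hostId=1, limit=5, order=3)   # biggest absolute table growth
for t in top_growers:
    print t['schema'] + '.' + t['name'], t['table_size_pretty'], str(t['growth']) + '%'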
Example #42
0
def getIndexIssues(hostname):
    q_invalid = """
        SELECT
        *,
        CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
                SELECT
                %s as host_name,
                %s as host_id,
                schemaname||'.'||relname AS table_full_name,
                schemaname||'.'||indexrelname AS index_full_name,
                index_size_bytes,
                indexes_size_bytes,
                pg_size_pretty(index_size_bytes) AS index_size,
                pg_size_pretty(indexes_size_bytes) AS indexes_size,
                pg_size_pretty(table_size_bytes) AS table_size,
                sum(index_size_bytes) over () AS total_marked_index_size_bytes
                FROM
                (
                  SELECT quote_ident(schemaname) as schemaname,
                         quote_ident(relname) as relname,
                         quote_ident(indexrelname) as indexrelname,
                         pg_relation_size(i.indexrelid) AS index_size_bytes,
                         pg_indexes_size(i.relid) AS indexes_size_bytes,                 
                         pg_relation_size(i.relid) AS table_size_bytes
                  FROM pg_stat_user_indexes i
                  JOIN pg_index USING(indexrelid) 
                  WHERE NOT indisvalid
                ) a
                ORDER BY index_size_bytes DESC, relname
        ) b 
    """
    q_unused = """
        SELECT
        *,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
          SELECT
          *,
          pg_size_pretty(index_size_bytes) AS index_size,
          pg_size_pretty(indexes_size_bytes) AS indexes_size,
          pg_size_pretty(table_size_bytes) AS table_size,
          CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
          sum(index_size_bytes) over () AS total_marked_index_size_bytes
          FROM (
          SELECT   %s as host_name,
                   %s as host_id,
                   quote_ident(schemaname)||'.'||quote_ident(relname) AS table_full_name,
                   quote_ident(schemaname)||'.'||quote_ident(indexrelname) AS index_full_name,
                   pg_relation_size(i.indexrelid) as index_size_bytes,
                   pg_indexes_size(i.relid) AS indexes_size_bytes,
                   pg_relation_size(i.relid) AS table_size_bytes,
                   idx_scan AS scans
              FROM pg_stat_user_indexes i 
              JOIN pg_index USING(indexrelid) 
              WHERE NOT indisunique
              AND NOT schemaname LIKE ANY (ARRAY['tmp%%','temp%%'])
          ) a
          WHERE index_size_bytes > %s
          AND scans <= %s
          ORDER BY scans, index_size_bytes DESC
        ) b
    """
    q_active_hosts="""
        select
            host_id,
            host_name,
            host_user,
            host_password,
            host_db
        from monitor_data.hosts
        where host_enabled
        and (%s = 'all' or host_name=%s)
        """
    q_indexing_thresholds="""select * from monitor_data.perf_indexes_thresholds"""
    data_invalid = []
    data_unused = []
    data_noconnect = []
    conn=None

    hosts = DataDB.execute(q_active_hosts, (hostname, hostname))      
    indexing_thresholds = DataDB.execute(q_indexing_thresholds)[0]

    for h in hosts:
        try:
            #print ('processing: {}', h)
            conn = psycopg2.connect(host=h['host_name'], dbname=h['host_db'], user=h['host_user'], password=h['host_password'],connect_timeout='3')
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(q_invalid, (h['host_name'], h['host_id']))
            data_invalid += cur.fetchall()
            cur.execute(q_unused, (h['host_name'], h['host_id'], indexing_thresholds['pit_min_size_to_report'], indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
        except Exception, e:
            print ('ERROR could not connect to {}:{}'.format(h['host_name'], e))
            data_noconnect.append({'host_id':h['host_id'],'host_name': h['host_name']})
        finally:
            # assumed completion: the source example is truncated here; closing the
            # per-host connection and returning the collected lists is the minimal fix
            if conn and not conn.closed:
                conn.close()

    return data_invalid, data_unused, data_noconnect
Example #43
0
def getDatabaseSizes(host_id=None):
    conn = DataDB.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSizeTrendSQL(host_id))
    size_data = {}

    current_host = 0

    last_timestamp = None

    for record in cur:

        if record['t_host_id'] != current_host:
            current_host = record['t_host_id']
            set_ins = False
            set_del = False
            set_upd = False

            l_ins = None
            l_upd = None
            l_del = None
            last_timestamp = None

        if last_timestamp == None:
            last_timestamp = int(
                time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

        if not record['t_host_id'] in size_data:
            size_data[record['t_host_id']] = {
                'size': [],
                'ins': [],
                'upd': [],
                'del': []
            }
        """ exclude 0 values, otherwise there is a big peak at start, with wraparound this should be ok"""

        if not set_ins and record['s_ins'] != 0:
            l_ins = record['s_ins']
            set_ins = True

        if not set_upd and record['s_upd'] != 0:
            l_upd = record['s_upd']
            set_upd = True

        if not set_del and record['s_del'] != 0:
            l_del = record['s_del']
            set_del = True

        if l_ins == None:
            l_ins = record['s_ins']

        if l_upd == None:
            l_upd = record['s_upd']

        if l_del == None:
            l_del = record['s_del']

        size_data[record['t_host_id']]['size'].append(
            (record['tsd_timestamp'], record['size']))
        size_data[record['t_host_id']]['ins'].append(
            (record['tsd_timestamp'], max(record['s_ins'] - l_ins, 0)))
        size_data[record['t_host_id']]['del'].append(
            (record['tsd_timestamp'], max(record['s_del'] - l_del, 0)))
        size_data[record['t_host_id']]['upd'].append(
            (record['tsd_timestamp'], max(record['s_upd'] - l_upd, 0)))

        l_ins = record['s_ins']
        l_upd = record['s_upd']
        l_del = record['s_del']

        last_timestamp = int(
            time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    DataDB.closeDataConnection(conn)

    return size_data
Example #44
0
def main():
    parser = ArgumentParser(description='PGObserver Frontend')
    parser.add_argument('-c',
                        '--config',
                        help='Path to config file. (default: %s)' %
                        DEFAULT_CONF_FILE,
                        dest="config",
                        default=DEFAULT_CONF_FILE)

    args = parser.parse_args()

    args.config = os.path.expanduser(args.config)

    if not os.path.exists(args.config):
        print 'Configuration file missing:', args.config
        parser.print_help()
        return

    with open(args.config, 'rb') as fd:
        settings = json.load(fd)

    conn_string = ' '.join(
        ("dbname=" + settings['database']['name'],
         "host=" + settings['database']['host'],
         "user="******"password="******"port=" + str(settings['database']['port'])))

    print "Setting connection string to ... " + conn_string

    DataDB.setConnectionString(conn_string)

    if 'logfiles' in settings:
        logdata.setFilter(settings['logfiles']['liveuserfilter'])

    current_dir = os.path.dirname(os.path.abspath(__file__))

    conf = ({
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': int(settings['frontend']['port'])
        },
        '/': {
            'tools.staticdir.root': current_dir
        },
        '/static': {
            'tools.staticdir.dir': 'static',
            'tools.staticdir.on': True
        }
    })

    tplE.setup(settings)

    root = None

    for h in hosts.getHostData().values():
        mf = MonitorFrontend.MonitorFrontend(h['host_id'])

        if root == None:
            root = mf

        setattr(root, h['settings']['uiShortName'].lower().replace('-', ''),
                mf)

    root.report = report.Report()
    root.sprocs = sprocsfrontend.SprocFrontend()
    root.tables = tablesfrontend.TableFrontend()

    cherrypy.quickstart(root, config=conf)
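This variant of main() reads only a handful of settings: the database connection fields, a mandatory frontend port, and an optional logfiles filter. A minimal illustrative config in the JSON shape the code above loads (all names and values are placeholders, not project defaults):

import json

sample_settings = {
    'database': {
        'host': 'localhost',
        'port': 5432,
        'name': 'pgobserver',
        'frontend_user': 'frontend_readonly',      # placeholder role name
        'frontend_password': 'change_me',          # placeholder password
    },
    'frontend': {'port': 8080},
    'logfiles': {'liveuserfilter': []},            # optional; only used when present
}

print json.dumps(sample_settings, indent=2)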