Example #1
def getTop10Interval(order=avgRuntimeOrder, interval=None, hostId=1, limit=10):

    sql = """select regexp_replace("name", E'(\\\\(.*\\\\))','()') AS "name",
                    round( sum(d_calls) , 0 ) AS "calls",
                    round( sum(d_total_time) , 0 ) AS "totalTime",
                    round( sum(d_total_time) / sum(d_calls)::numeric , 0 ) AS "avgTime" -- cast avoids integer division
               from ( """ + getSQL(interval, hostId) + """) tt
              where d_calls > 0
              group by "name"
              order by """ + order + """ limit """ + str(adapt(limit))

    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    sprocs = []

    for record in cur:
        record['avgTime'] = makeTimeIntervalReadable(record['avgTime'])
        record['totalTime'] = makeTimeIntervalReadable(record['totalTime'])
        sprocs.append(record)

    conn.close()

    return sprocs
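Note that `order` and `limit` above are spliced straight into the SQL string, so both must come from trusted code. A minimal sketch of building the same dynamic tail with psycopg2's `sql` module instead; the whitelist and helper name are assumptions, not part of the module:

from psycopg2 import sql

# Hypothetical whitelist of ORDER BY fragments a caller may pick from.
ALLOWED_ORDERS = {
    'avgTime': sql.SQL('"avgTime" DESC'),
    'totalTime': sql.SQL('"totalTime" DESC'),
}

def build_order_limit(order_key, limit):
    # raises KeyError on an unknown key, ValueError on a non-numeric limit
    return sql.SQL(' order by {} limit {}').format(
        ALLOWED_ORDERS[order_key], sql.Literal(int(limit)))

The resulting Composed object can be concatenated onto a sql.SQL(...) query head and passed to cur.execute() directly.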
Example #2
def getTablePerformanceIssues(hostname, date_from, date_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(
        """select * from monitor_data.get_table_threshold_sinners_for_period(%s,%s,%s)""",
        (hostname, date_from, date_to))
    data = []  # cur.fetchall()
    for r in cur:
        row = {
            'host_name': r['host_name'],
            'host_id': r['host_id'],
            'schema_name': r['schema_name'],
            'table_name': r['table_name'],
            'day': r['day'],
            'scan_change_pct': r['scan_change_pct'],
            'scans1': r['scans1'],
            'scans2': r['scans2'],
            'size1': r['size1'],
            'size2': r['size2'],
            'size_change_pct': r['size_change_pct'],
            'allowed_seq_scan_pct': r['allowed_seq_scan_pct'],
        }
        data.append(row)
    cur.close()
    conn.close()
    return data
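psycopg2.extras.DictRow implements the mapping protocol, so when the stored procedure returns exactly the columns listed above, the field-by-field copy collapses to one dict() call per row. A sketch (untested against the real schema):

def rows_as_dicts(cur):
    # each DictRow supports keys()/__getitem__, so dict() copies it verbatim
    return [dict(r) for r in cur]

# data = rows_as_dicts(cur) would replace the manual loop above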
Example #3
def getTop10Interval(order=avgRuntimeOrder, interval=None, hostId=1, limit=10):

    sql = """select regexp_replace("name", E'(\\\\(.*\\\\))','()') AS "name",
                    round( sum(d_calls) , 0 ) AS "calls",
                    round( sum(d_total_time) , 0 ) AS "totalTime",
                    round( sum(d_total_time) / sum(d_calls)::numeric, 1) AS "avgTime"
               from ( """ + getSQL(interval, hostId) + """) tt
              where d_calls > 0
              group by "name"
              order by """ + order + """  limit """ + str(adapt(limit))

    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(sql)

    sprocs = []

    for record in cur:
        record['avgTime'] = makeTimeIntervalReadable(record['avgTime'])
        record['totalTime'] = makeTimeIntervalReadable(record['totalTime'])
        sprocs.append(record)

    conn.close()

    return sprocs
Example #4
def getTableData(host, name, interval=None):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableSql(host, name, interval))

    d = {'table_size': [], 'index_size': [], 'seq_scans': [], 'index_scans': [],
         'ins': [], 'upd': [], 'del': [], 'hot': []}

    last_is = None
    last_ss = None

    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d['table_size'].append((r['tsd_timestamp'], r['tsd_table_size']))
        d['index_size'].append((r['tsd_timestamp'], r['tsd_index_size']))

        # emit deltas only when the previous sample is at most 15 minutes old,
        # so gaps in the data do not produce one huge bogus delta
        if int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000) - last_timestamp <= (15 * 60 * 1000):
            if last_ss is not None:
                d['seq_scans'].append((r['tsd_timestamp'], r['tsd_seq_scans'] - last_ss))

            if last_is is not None:
                d['index_scans'].append((r['tsd_timestamp'], r['tsd_index_scans'] - last_is))

            if last_ins is not None and last_ins != 0:
                d['ins'].append((r['tsd_timestamp'], r['tsd_tup_ins'] - last_ins))

            if last_del is not None and last_del != 0:
                d['del'].append((r['tsd_timestamp'], r['tsd_tup_del'] - last_del))

            if last_upd is not None and last_upd != 0:
                d['upd'].append((r['tsd_timestamp'], r['tsd_tup_upd'] - last_upd))

            if last_hot is not None and last_hot != 0:
                d['hot'].append((r['tsd_timestamp'], r['tsd_tup_hot_upd'] - last_hot))

        # remember the cumulative counters for the next iteration's deltas
        last_is = r['tsd_index_scans']
        last_ss = r['tsd_seq_scans']

        last_ins = r['tsd_tup_ins']
        last_del = r['tsd_tup_del']
        last_upd = r['tsd_tup_upd']
        last_hot = r['tsd_tup_hot_upd']

        last_timestamp = int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    datadb.closeDataConnection(conn)

    return d
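getTableData repeats one pattern six times: a delta against the previous cumulative counter, emitted only when the previous sample is at most 15 minutes old. A self-contained sketch of that pattern as a generator (the helper name is mine, not from the module):

from datetime import timedelta

def counter_deltas(samples, max_gap=timedelta(minutes=15)):
    """Yield (timestamp, delta) from (timestamp, cumulative_value) pairs,
    suppressing deltas across gaps larger than max_gap."""
    last_ts = last_val = None
    for ts, val in samples:
        if last_val is not None and ts - last_ts <= max_gap:
            yield ts, val - last_val
        last_ts, last_val = ts, val

# e.g.: d['seq_scans'] = list(counter_deltas(
#           (r['tsd_timestamp'], r['tsd_seq_scans']) for r in rows))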
Example #5
def getGroupsData():
    conn = datadb.getDataConnection()
    groups = {}
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute("SELECT * FROM monitor_data.host_groups;")
    for g in cur:
        groups[g['group_id']] = g['group_name']

    cur.close()
    conn.close()
    return groups
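The same id-to-name map can be built in a single expression; a sketch assuming the two columns used above:

def load_groups(cur):
    # one-pass equivalent of the loop in getGroupsData()
    cur.execute("SELECT group_id, group_name FROM monitor_data.host_groups;")
    return {g['group_id']: g['group_name'] for g in cur}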
Example #6
def getTableData(host, name, interval=None):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableSql(host, name, interval))

    d = {'table_size': [], 'index_size': [], 'seq_scans': [], 'index_scans': [],
         'ins': [], 'upd': [], 'del': [], 'hot': []}

    last_is = None
    last_ss = None

    last_ins = None
    last_del = None
    last_upd = None
    last_hot = None
    last_timestamp = 0

    for r in cur:
        d['table_size'].append((r['tsd_timestamp'], r['tsd_table_size']))
        d['index_size'].append((r['tsd_timestamp'], r['tsd_index_size']))

        if last_ss is not None:
            d['seq_scans'].append((r['tsd_timestamp'], r['tsd_seq_scans'] - last_ss))

        if last_is is not None:
            d['index_scans'].append((r['tsd_timestamp'], r['tsd_index_scans'] - last_is))

        if last_ins is not None and last_ins != 0:
            d['ins'].append((r['tsd_timestamp'], r['tsd_tup_ins'] - last_ins))

        if last_del is not None and last_del != 0:
            d['del'].append((r['tsd_timestamp'], r['tsd_tup_del'] - last_del))

        if last_upd is not None and last_upd != 0:
            d['upd'].append((r['tsd_timestamp'], r['tsd_tup_upd'] - last_upd))

        if last_hot is not None and last_hot != 0:
            d['hot'].append((r['tsd_timestamp'], r['tsd_tup_hot_upd'] - last_hot))

        last_is = r['tsd_index_scans']
        last_ss = r['tsd_seq_scans']

        last_ins = r['tsd_tup_ins']
        last_del = r['tsd_tup_del']
        last_upd = r['tsd_tup_upd']
        last_hot = r['tsd_tup_hot_upd']

        last_timestamp = int(time.mktime(r['tsd_timestamp'].timetuple()) * 1000)

    cur.close()
    datadb.closeDataConnection(conn)

    return d
Example #7
def getAllHostsData():
    conn = datadb.getDataConnection()
    hosts = {}

    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)

    cur.execute("SELECT host_id, host_name, host_port, host_db, host_settings, host_group_id, host_enabled,"
                " host_ui_shortname, host_ui_longname"
                " FROM monitor_data.hosts ORDER BY host_id ASC;")
    for r in cur:
        r['uishortname'] = r['host_ui_shortname'].lower().replace('-','')
        r['uilongname'] = r['host_ui_longname']
        hosts[r['host_id']] = r

    cur.close()
    conn.close()
    return hosts
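Like the other examples here, getAllHostsData leaks its connection if anything between connect and close raises. A hedged sketch of the same fetch wrapped in context managers; `dsn` stands in for whatever datadb.getDataConnection() encapsulates:

from contextlib import closing
import psycopg2
import psycopg2.extras

def fetch_hosts(dsn):
    # a psycopg2 connection's own 'with' block only commits/rolls back,
    # so closing() is added to actually close the connection as well
    with closing(psycopg2.connect(dsn)) as conn:
        with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:
            cur.execute("SELECT host_id, host_ui_shortname FROM monitor_data.hosts;")
            return cur.fetchall()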
Example #8
def getIndexesDataForTable(host, full_name, date_from, date_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)

    cur.execute(getSingleTableSql(host, full_name, date_from, date_to))
    data = cur.fetchall()
    cur.close()
    datadb.closeDataConnection(conn)

    all_data = []
    d = { 'size' : [], 'scan' : [], 'tup_read': [] } #, 'tup_fetch' : [] }

    last_scan = None
    last_tup_read = None
#    last_tup_fetch = None
    last_name = None
    last_index_size = None
    last_total_end_size = None
    last_pct_of_total_end_size = None

    for r in data:
        if last_name is not None:
            if last_name != r['name'] and len(d['size']) > 0:
                all_data.append({'index_name': last_name, 'data': d,
                                 'last_index_size': round(last_index_size / 1024**2),
                                 'total_end_size': round(last_total_end_size / 1024**2),
                                 'pct_of_total_end_size': last_pct_of_total_end_size})
                d = { 'size' : [], 'scan' : [], 'tup_read': [] } # , 'tup_fetch' : [] }
            
            d['size'].append( ( r['timestamp'] , r['size'] ) )
            d['scan'].append( ( r['timestamp'] , 0 if last_scan > r['scan'] else r['scan'] - last_scan ) )
            d['tup_read'].append( ( r['timestamp'] , 0 if last_tup_read > r['tup_read'] else r['tup_read'] - last_tup_read ) )
#            d['tup_fetch'].append( ( r['timestamp'] , 0 if last_tup_fetch > r['tup_fetch'] else r['tup_fetch'] - last_tup_fetch ) )


        last_scan = r['scan']
        last_tup_read = r['tup_read']
#        last_tup_fetch = r['tup_fetch']
        last_name = r['name']
        last_index_size = r['size']
        last_total_end_size = r['total_end_size']
        last_pct_of_total_end_size = r['pct_of_total_end_size']

    if len(data) > 0:  # guard on the row set; 'd' is a dict of three lists and is never empty
        all_data.append({'index_name': last_name, 'data': d,
                         'last_index_size': round(last_index_size / 1024**2),
                         'total_end_size': round(last_total_end_size / 1024**2),
                         'pct_of_total_end_size': last_pct_of_total_end_size})

    return all_data
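The flush-on-name-change bookkeeping above (plus the extra flush after the loop) is what itertools.groupby provides for rows already sorted by index name, which is how this query appears to return them. A reduced sketch, leaving out the delta and clamping logic:

from itertools import groupby
from operator import itemgetter

def size_series_per_index(rows):
    # rows must arrive sorted by 'name' so groupby sees one run per index
    out = []
    for name, grp in groupby(rows, key=itemgetter('name')):
        grp = list(grp)
        out.append({'index_name': name,
                    'size': [(r['timestamp'], r['size']) for r in grp]})
    return out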
Example #9
def getIndexesDataForTable(host, full_name, date_from, date_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)

    cur.execute(getSingleTableSql(host, full_name, date_from, date_to))
    data = cur.fetchall()
    cur.close()
    datadb.closeDataConnection(conn)

    all_data = []
    d = { 'size' : [], 'scan' : [], 'tup_read': [] } #, 'tup_fetch' : [] }

    last_scan = None
    last_tup_read = None
#    last_tup_fetch = None
    last_name = None
    last_index_size = 0
    last_total_end_size = 0
    last_pct_of_total_end_size = 0

    for r in data:
        if last_name is not None:
            if last_name != r['name'] and len(d['size']) > 0:
                all_data.append({'index_name': last_name, 'data': d,
                                 'last_index_size': round(last_index_size / 1024**2),
                                 'total_end_size': round(last_total_end_size / 1024**2),
                                 'pct_of_total_end_size': last_pct_of_total_end_size})
                d = { 'size' : [], 'scan' : [], 'tup_read': [] } # , 'tup_fetch' : [] }
            
            d['size'].append( ( r['timestamp'] , r['size'] ) )
            d['scan'].append( ( r['timestamp'] , 0 if last_scan > r['scan'] else r['scan'] - last_scan ) )
            d['tup_read'].append( ( r['timestamp'] , 0 if last_tup_read > r['tup_read'] else r['tup_read'] - last_tup_read ) )
#            d['tup_fetch'].append( ( r['timestamp'] , 0 if last_tup_fetch > r['tup_fetch'] else r['tup_fetch'] - last_tup_fetch ) )


        last_scan = r['scan']
        last_tup_read = r['tup_read']
#        last_tup_fetch = r['tup_fetch']
        last_name = r['name']
        last_index_size = r['size']
        last_total_end_size = r['total_end_size']
        last_pct_of_total_end_size = r['pct_of_total_end_size']

    if len(data) > 0:
        all_data.append({'index_name': last_name, 'data': d,
                         'last_index_size': round(last_index_size / 1024**2),
                         'total_end_size': round(last_total_end_size / 1024**2),
                         'pct_of_total_end_size': last_pct_of_total_end_size})

    return all_data
Example #10
def getActiveSprocsOrderedBy(hostId, order=" ORDER BY SUM(delta_total_time) DESC"):
    sql = """SELECT sproc_name
               FROM ( """ + viewSprocs(hostId) + """ ) t JOIN monitor_data.sprocs ON sp_sproc_id = sproc_id
               WHERE sproc_host_id = """ + str(adapt(hostId)) + """
               GROUP BY sproc_name
             """ + order + """;
          """

    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    names = []  # renamed so the built-in 'list' is not shadowed
    cur.execute(sql)

    for r in cur:
        names.append(r['sproc_name'])

    cur.close()
    datadb.closeDataConnection(conn)
    return names
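Here `order` is a complete ORDER BY fragment interpolated verbatim, so the caller fully controls that part of the statement. A sketch of a guard to run before interpolation; the allowed set is a hypothetical example:

ALLOWED_SPROC_ORDERS = {
    " ORDER BY SUM(delta_total_time) DESC",
    " ORDER BY SUM(delta_calls) DESC",
}

def checked_order(order):
    # reject anything that is not a known, vetted fragment
    if order not in ALLOWED_SPROC_ORDERS:
        raise ValueError('unsupported ORDER BY fragment: %r' % order)
    return order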
Example #11
def getTableIOData(host, name, interval=None):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableIOSql(host, name, interval))

    d = {'heap_read': [], 'heap_hit': [], 'index_read': [], 'index_hit': []}

    last_hr = None
    last_hh = None
    last_ir = None
    last_ih = None
    last_timestamp = 0

    for r in cur:

        # skip deltas across gaps of more than 15 minutes between samples
        if int(time.mktime(r['tio_timestamp'].timetuple()) * 1000) - last_timestamp <= (15 * 60 * 1000):
            if last_hr is not None:
                d['heap_read'].append((r['tio_timestamp'], r['tio_heap_read'] - last_hr))

            if last_hh is not None:
                d['heap_hit'].append((r['tio_timestamp'], r['tio_heap_hit'] - last_hh))

            if last_ir is not None:
                d['index_read'].append((r['tio_timestamp'], r['tio_idx_read'] - last_ir))

            if last_ih is not None:
                d['index_hit'].append((r['tio_timestamp'], r['tio_idx_hit'] - last_ih))

        last_hr = r['tio_heap_read']
        last_hh = r['tio_heap_hit']
        last_ir = r['tio_idx_read']
        last_ih = r['tio_idx_hit']

        last_timestamp = int(time.mktime(r['tio_timestamp'].timetuple()) * 1000)


    cur.close()
    datadb.closeDataConnection(conn)

    return d
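The mktime()-times-1000 arithmetic only measures the spacing between two samples, which datetime values can do directly. A sketch of the same 15-minute guard:

from datetime import timedelta

def within_gap(current_ts, last_ts, max_gap=timedelta(minutes=15)):
    # equivalent to the epoch-millisecond comparison in the loop above
    return last_ts is not None and current_ts - last_ts <= max_gap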
Example #12
def getApiPerformanceIssues(hostname, api_from, api_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(
        """select * from monitor_data.get_sproc_threshold_sinners_for_release(%s,%s,%s)""",
        (hostname, api_from, api_to))
    data = []  # cur.fetchall()
    for r in cur:
        row = {
            'host_name': r['host_name'],
            'host_id': r['host_id'],
            'sproc_schema': r['sproc_schema'],
            'sproc_name': r['sproc_name'],
            'calltime_change_pct': r['calltime_change_pct'],
            'share_on_total_runtime': r['share_on_total_runtime'],
            'execution_avg1': r['execution_avg1'],
            'execution_avg2': r['execution_avg2'],
            'calls1': r['calls1'],
            'calls2': r['calls2'],
            'callscount_change_pct': r['callscount_change_pct'],
            'allowed_runtime_growth_pct': r['allowed_runtime_growth_pct'],
            'allowed_share_on_total_runtime_pct': r['allowed_share_on_total_runtime_pct'],
        }
        data.append(row)
    cur.close()
    conn.close()
    return data
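The commented-out cur.fetchall() hints at a shortcut: with RealDictCursor every fetched row already is a plain dict, so the copy loop disappears. A sketch, assuming the stored procedure returns exactly the columns consumed above and the caller supplies the connection:

import psycopg2.extras

def get_api_performance_issues(conn, hostname, api_from, api_to):
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute(
        "select * from monitor_data.get_sproc_threshold_sinners_for_release(%s,%s,%s)",
        (hostname, api_from, api_to))
    data = cur.fetchall()  # list of dict-like rows, one per record
    cur.close()
    return data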
Example #13
def getTableIOData(host, name, interval=None):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    cur.execute(getSingleTableIOSql(host, name, interval))

    d = {'heap_read': [], 'heap_hit': [], 'index_read': [], 'index_hit': []}

    last_hr = None
    last_hh = None
    last_ir = None
    last_ih = None

    for r in cur:

        if last_hr is not None:
            d['heap_read'].append(
                (r['tio_timestamp'], r['tio_heap_read'] - last_hr))

        if last_hh is not None:
            d['heap_hit'].append(
                (r['tio_timestamp'], r['tio_heap_hit'] - last_hh))

        if last_ir is not None:
            d['index_read'].append(
                (r['tio_timestamp'], r['tio_idx_read'] - last_ir))

        if last_ih is not None:
            d['index_hit'].append(
                (r['tio_timestamp'], r['tio_idx_hit'] - last_ih))

        last_hr = r['tio_heap_read']
        last_hh = r['tio_heap_hit']
        last_ir = r['tio_idx_read']
        last_ih = r['tio_idx_hit']

    cur.close()
    datadb.closeDataConnection(conn)

    return d
Example #14
def getTablePerformanceIssues(hostname, date_from, date_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select * from monitor_data.get_table_threshold_sinners_for_period(%s,%s,%s)""", (hostname, date_from, date_to))
    data = [] # cur.fetchall()
    for r in cur:
        row = {'host_name': r['host_name'],
               'host_id': r['host_id'],
               'schema_name': r['schema_name'],
               'table_name': r['table_name'],
               'day': r['day'],
               'scan_change_pct': r['scan_change_pct'],
               'scans1': r['scans1'],
               'scans2': r['scans2'],
               'size1': r['size1'],
               'size2': r['size2'],
               'size_change_pct': r['size_change_pct'],
               'allowed_seq_scan_pct': r['allowed_seq_scan_pct']}
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #15
def getApiPerformanceIssues(hostname, api_from, api_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute("""select * from monitor_data.get_sproc_threshold_sinners_for_release(%s,%s,%s)""", (hostname, api_from, api_to))
    data = [] # cur.fetchall()
    for r in cur:
        row = {'host_name': r['host_name'],
               'host_id': r['host_id'],
               'sproc_schema': r['sproc_schema'],
               'sproc_name': r['sproc_name'],
               'calltime_change_pct': r['calltime_change_pct'],
               'share_on_total_runtime': r['share_on_total_runtime'],
               'execution_avg1': r['execution_avg1'],
               'execution_avg2': r['execution_avg2'],
               'calls1': r['calls1'],
               'calls2': r['calls2'],
               'callscount_change_pct': r['callscount_change_pct'],
               'allowed_runtime_growth_pct': r['allowed_runtime_growth_pct'],
               'allowed_share_on_total_runtime_pct': r['allowed_share_on_total_runtime_pct']}
        data.append(row)
    cur.close()
    conn.close()
    return data
Example #16
def getLoadReportData(hostId=None, weeks=10):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    query = """
            with q as (
                select 
                  tsd_host_id as host_id
                , week
                , (select round((sum(tsd_table_size) + sum(tsd_index_size))/(1024*1024*1024)::numeric,1)::text --in GB
                     from monitor_data.table_size_data
                    where tsd_timestamp = max) as db_size 
                from (
                    select
                        tsd_host_id
                      , extract(week from tsd_timestamp) as week               
                      , max(tsd_timestamp)
                    from  monitor_data.table_size_data
                    where tsd_timestamp >= date_trunc('week', now()) - '""" + str(weeks) + """ weeks'::interval
                    and (%s is null or tsd_host_id = %s)
                    group by tsd_host_id, extract(week from tsd_timestamp)
                ) a
            )
            select 
                  load_host_id AS id,
                  extract(week from load_timestamp)::text AS kw,
                  round(avg(load_15min_value)/100,2) AS avg,
                  round(max(load_15min_value)/100,2) AS max,
                  to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                  to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                  min(load_timestamp::date) AS sort_date,
                  max(q.db_size) as db_size,
                  round((max(xlog_location_mb) - min(xlog_location_mb)) / 1024.0, 1)  as wal_written
             from monitor_data.host_load hl
                , monitor_data.hosts h
                , q
            where h.host_id = hl.load_host_id
              and host_enabled
              and load_timestamp >= date_trunc('week', now()) - '""" + str(weeks) + """ weeks'::interval
              and extract(dow from load_timestamp) IN(1,2,3,4,5)                      
              and q.host_id = load_host_id
              and q.week = extract(week from load_timestamp)
              and (%s is null or hl.load_host_id = %s)
            group by load_host_id, extract(week from load_timestamp)
            order by 1 ASC,7 DESC
            """
    cur.execute(query, (hostId, hostId, hostId, hostId))

    data = defaultdict(list)

    lastRR = None

    for r in cur:

        rr = {'id': r['id'],
              'avg': r['avg'],
              'max': r['max'],
              'min_date': r['min_date'],
              'max_date': r['max_date'],
              'db_size': r['db_size'],
              'wal_written': r['wal_written'],
              'trendAvg': 0,
              'trendMax': 0,
              'kw': r['kw']}

        if lastRR is not None and lastRR['id'] == rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

            if lastRR['db_size'] < r['db_size']:
                lastRR['trendSize'] = -1
            elif lastRR['db_size'] > r['db_size']:
                lastRR['trendSize'] = 1

            if lastRR['wal_written'] < r['wal_written']:
                lastRR['trendWal'] = -1
            elif lastRR['wal_written'] > r['wal_written']:
                lastRR['trendWal'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(data.values(), key=lambda x: hosts.hosts[x[0]['id']]['uishortname'])
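The four trend blocks are one three-way sign comparison repeated. A sketch of the shared helper (orientation matches the code above, where rows arrive newest week first, so +1 means the newer value is larger):

def trend(newer, older):
    # three-way compare: +1 if newer > older, -1 if newer < older, else 0
    return (newer > older) - (newer < older)

# e.g.: lastRR['trendMax'] = trend(lastRR['max'], r['max'])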
Example #17
def getIndexesDataForTable(host, full_name, date_from, date_to):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)

    cur.execute(getSingleTableSql(host, full_name, date_from, date_to))
    data = cur.fetchall()
    cur.close()
    datadb.closeDataConnection(conn)

    all_data = []
    d = {"size": [], "scan": [], "tup_read": []}  # , 'tup_fetch' : [] }

    last_scan = None
    last_tup_read = None
    #    last_tup_fetch = None
    last_name = None
    last_index_size = 0
    last_total_end_size = 0
    last_pct_of_total_end_size = 0

    for r in data:
        if last_name != None:
            if last_name != r["name"] and len(d["size"]) > 0:
                all_data.append(
                    {
                        "index_name": last_name,
                        "data": d,
                        "last_index_size": round(last_index_size / 1024 ** 2),
                        "total_end_size": round(last_total_end_size / 1024 ** 2),
                        "pct_of_total_end_size": last_pct_of_total_end_size,
                    }
                )
                d = {"size": [], "scan": [], "tup_read": []}  # , 'tup_fetch' : [] }

            d["size"].append((r["timestamp"], r["size"]))
            d["scan"].append((r["timestamp"], 0 if last_scan > r["scan"] else r["scan"] - last_scan))
            d["tup_read"].append(
                (r["timestamp"], 0 if last_tup_read > r["tup_read"] else r["tup_read"] - last_tup_read)
            )
        #            d['tup_fetch'].append( ( r['timestamp'] , 0 if last_tup_fetch > r['tup_fetch'] else r['tup_fetch'] - last_tup_fetch ) )

        last_scan = r["scan"]
        last_tup_read = r["tup_read"]
        #        last_tup_fetch = r['tup_fetch']
        last_name = r["name"]
        last_index_size = r["size"]
        last_total_end_size = r["total_end_size"]
        last_pct_of_total_end_size = r["pct_of_total_end_size"]

    if len(data) > 0:
        all_data.append(
            {
                "index_name": last_name,
                "data": d,
                "last_index_size": round(last_index_size / 1024 ** 2),
                "total_end_size": round(last_total_end_size / 1024 ** 2),
                "pct_of_total_end_size": last_pct_of_total_end_size,
            }
        )

    return all_data
Example #18
def getLoad(hostId, days='8'):
    days += ' days'  # e.g. '8 days'; used below as an interval literal
    sql = """
        SELECT
          xaxis,
          MAX(load_15min) as load_15min -- needed for 15min overlap
        FROM (
            SELECT
              sla_timestamp as xaxis,
              sla_load_15min as load_15min
            FROM
              monitor_data.sproc_load_agg
            WHERE
              sla_host_id = """ + str(adapt(hostId)) + """
              AND sla_timestamp > now() - """ + str(adapt(days)) + """::interval
              AND sla_timestamp < now() - '2 hours'::interval
            UNION ALL
              SELECT
                xaxis,
                (sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000))::numeric(8,2) AS load_15min
              FROM
                ( SELECT
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval AS xaxis,
                    sum(t.delta_self_time) AS d_self_time
                  FROM ( SELECT
                           spd.sp_timestamp,
                           COALESCE(spd.sp_self_time - lag(spd.sp_self_time) OVER w, 0::bigint) AS delta_self_time
                         FROM
                           monitor_data.sproc_performance_data spd
                         WHERE
                           spd.sp_host_id = """ + str(adapt(hostId)) + """
                           AND sp_timestamp >= now() - '2 hours 15 minutes'::interval --15 minutes overlap due to window
                         WINDOW w AS
                           ( PARTITION BY spd.sp_sproc_id ORDER BY spd.sp_timestamp )
                       ) t
                  GROUP BY
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                  ORDER BY
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                ) loadTable
            ) a
            GROUP BY
              xaxis
            ORDER BY
              xaxis
    """
    if not tplE._settings.get('run_aggregations'):
        sql = """
            SELECT
              xaxis,
              load_15min
            FROM (
                  SELECT
                    xaxis,
                    (sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000))::numeric(8,2) AS load_15min
                  FROM
                    ( SELECT
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval AS xaxis,
                        sum(t.delta_self_time) AS d_self_time
                      FROM ( SELECT
                               spd.sp_timestamp,
                               COALESCE(spd.sp_self_time - lag(spd.sp_self_time) OVER w, 0::bigint) AS delta_self_time
                             FROM
                               monitor_data.sproc_performance_data spd
                             WHERE
                               spd.sp_host_id = """ + str(adapt(hostId)) + """
                               AND sp_timestamp > now() - """ + str(adapt(days)) + """::interval
                             WINDOW w AS
                               ( PARTITION BY spd.sp_sproc_id ORDER BY spd.sp_timestamp )
                           ) t
                      GROUP BY
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                      ORDER BY
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                    ) loadTable
                ) a
                ORDER BY
                  xaxis
        """

    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    load = {'load_15min': []}
    cur.execute(sql)
    lastTime = None
    skip15min = 0

    for record in cur:
        currentTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)
        if lastTime is not None:
            if currentTime - lastTime > (15 * 60 * 1000):
                skip15min = 2

        if skip15min > 0:
            skip15min -= 1
        else:
            load['load_15min'].append(
                (record['xaxis'], round(record['load_15min'], 2)))

        lastTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)

    cur.close()
    conn.close()

    return load
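`days += ' days'` builds a literal that adapt() then embeds into the statement. A sketch of binding the interval as a query parameter instead; the function name and plain cursor here are assumptions:

def fetch_load_agg(cur, host_id, days=8):
    cur.execute(
        """SELECT sla_timestamp, sla_load_15min
             FROM monitor_data.sproc_load_agg
            WHERE sla_host_id = %s
              AND sla_timestamp > now() - %s::interval""",
        (host_id, '%d days' % int(days)))  # interval bound as a parameter
    return cur.fetchall()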
Example #19
def getLoad(hostId, days='8'):
    days += ' days'  # e.g. '8 days'; used below as an interval literal
    sql = """
        SELECT
          xaxis,
          MAX(load_15min) as load_15min -- needed for 15min overlap
        FROM (
            SELECT
              sla_timestamp as xaxis,
              sla_load_15min as load_15min
            FROM
              monitor_data.sproc_load_agg
            WHERE
              sla_host_id = """ + str(adapt(hostId)) + """
              AND sla_timestamp > now() - """ + str(adapt(days)) + """::interval
              AND sla_timestamp < now() - '2 hours'::interval
            UNION ALL
              SELECT
                xaxis,
                (sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000))::numeric(8,2) AS load_15min
              FROM
                ( SELECT
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval AS xaxis,
                    sum(t.delta_self_time) AS d_self_time
                  FROM ( SELECT
                           spd.sp_timestamp,
                           COALESCE(spd.sp_self_time - lag(spd.sp_self_time) OVER w, 0::bigint) AS delta_self_time
                         FROM
                           monitor_data.sproc_performance_data spd
                         WHERE
                           spd.sp_host_id = """ + str(adapt(hostId)) + """
                           AND sp_timestamp >= now() - '2 hours 15 minutes'::interval --15 minutes overlap due to window
                         WINDOW w AS
                           ( PARTITION BY spd.sp_sproc_id ORDER BY spd.sp_timestamp )
                       ) t
                  GROUP BY
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                  ORDER BY
                    date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                ) loadTable
            ) a
            GROUP BY
              xaxis
            ORDER BY
              xaxis
    """
    if not tplE._settings['run_aggregations']:
        sql = """
            SELECT
              xaxis,
              load_15min
            FROM (
                  SELECT
                    xaxis,
                    (sum(d_self_time) OVER (ORDER BY xaxis ASC ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) / (1*15*60*1000))::numeric(8,2) AS load_15min
                  FROM
                    ( SELECT
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval AS xaxis,
                        sum(t.delta_self_time) AS d_self_time
                      FROM ( SELECT
                               spd.sp_timestamp,
                               COALESCE(spd.sp_self_time - lag(spd.sp_self_time) OVER w, 0::bigint) AS delta_self_time
                             FROM
                               monitor_data.sproc_performance_data spd
                             WHERE
                               spd.sp_host_id = """ + str(adapt(hostId)) + """
                               AND sp_timestamp > now() - """ + str(adapt(days)) + """::interval
                             WINDOW w AS
                               ( PARTITION BY spd.sp_sproc_id ORDER BY spd.sp_timestamp )
                           ) t
                      GROUP BY
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                      ORDER BY
                        date_trunc('hour'::text, t.sp_timestamp) + floor(date_part('minute'::text, t.sp_timestamp) / 15::double precision) * '00:15:00'::interval
                    ) loadTable
                ) a
                ORDER BY
                  xaxis
        """

    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    load = {'load_15min': []}
    cur.execute(sql)
    lastTime = None
    skip15min = 0

    for record in cur:
        currentTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)
        if lastTime is not None:
            if currentTime - lastTime > (15 * 60 * 1000):
                skip15min = 2

        if skip15min > 0:
            skip15min -= 1
        else:
            load['load_15min'].append((record['xaxis'], round(record['load_15min'], 2)))

        lastTime = int(time.mktime(record['xaxis'].timetuple()) * 1000)

    cur.close()
    conn.close()

    return load
Example #20
def getLoadReportData(hostId=None, weeks=10):
    conn = datadb.getDataConnection()
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    query = """
            with q as (
                select 
                  tsd_host_id as host_id
                , week
                , (select round((sum(tsd_table_size) + sum(tsd_index_size))/(1024*1024*1024)::numeric,1)::text --in GB
                     from monitor_data.table_size_data
                    where tsd_timestamp = max) as db_size 
                from (
                    select
                        tsd_host_id
                      , extract(week from tsd_timestamp) as week               
                      , max(tsd_timestamp)
                    from  monitor_data.table_size_data
                    where tsd_timestamp >= date_trunc('week', now()) - '""" + str(weeks) + """ weeks'::interval
                    and (%s is null or tsd_host_id = %s)
                    group by tsd_host_id, extract(week from tsd_timestamp)
                ) a
            )
            select 
                  load_host_id AS id,
                  extract(week from load_timestamp)::text AS kw,
                  round(avg(load_15min_value)/100,2) AS avg,
                  round(max(load_15min_value)/100,2) AS max,
                  to_char(min(load_timestamp::date),'dd.mm.YYYY') AS min_date,
                  to_char(max(load_timestamp::date),'dd.mm.YYYY') AS max_date,
                  min(load_timestamp::date) AS sort_date,
                  max(q.db_size) as db_size,
                  round((max(xlog_location_mb) - min(xlog_location_mb)) / 1024.0, 1)  as wal_written
             from monitor_data.host_load hl
                , monitor_data.hosts h
                , q
            where h.host_id = hl.load_host_id
              and host_enabled
              and load_timestamp >= date_trunc('week', now()) - '""" + str(weeks) + """ weeks'::interval
              and extract(dow from load_timestamp) IN(1,2,3,4,5)                      
              and q.host_id = load_host_id
              and q.week = extract(week from load_timestamp)
              and (%s is null or hl.load_host_id = %s)
            group by load_host_id, extract(week from load_timestamp)
            order by 1 ASC,7 DESC
            """
    cur.execute(query, (hostId, hostId, hostId, hostId))

    data = defaultdict(list)

    lastRR = None

    for r in cur:

        rr = {
            'id': r['id'],
            'avg': r['avg'],
            'max': r['max'],
            'min_date': r['min_date'],
            'max_date': r['max_date'],
            'db_size': r['db_size'],
            'wal_written': r['wal_written'],
            'trendAvg': 0,
            'trendMax': 0,
            'kw': r['kw']
        }

        if lastRR is not None and lastRR['id'] == rr['id']:
            if lastRR['max'] < r['max']:
                lastRR['trendMax'] = -1
            elif lastRR['max'] > r['max']:
                lastRR['trendMax'] = 1

            if lastRR['avg'] < r['avg']:
                lastRR['trendAvg'] = -1
            elif lastRR['avg'] > r['avg']:
                lastRR['trendAvg'] = 1

            if lastRR['db_size'] < r['db_size']:
                lastRR['trendSize'] = -1
            elif lastRR['db_size'] > r['db_size']:
                lastRR['trendSize'] = 1

            if lastRR['wal_written'] < r['wal_written']:
                lastRR['trendWal'] = -1
            elif lastRR['wal_written'] > r['wal_written']:
                lastRR['trendWal'] = 1

        data[int(r['id'])].append(rr)
        lastRR = rr

    cur.close()
    conn.close()

    return sorted(data.values(),
                  key=lambda x: hosts.hosts[x[0]['id']]['uishortname'])