Example 1
def saveHost(hostDict):
    sql_cols = ""
    sql_vals = []
    if hostDict['host_id'] == '':
        hostDict.pop('host_id')


    if 'host_id' not in hostDict:   # insert new host
        sql_last_user, sql_last_pw = getLastInsertedHostUserAndPassword()
        sql = "INSERT INTO hosts ("
        for k, v in hostDict.iteritems():
            sql_cols += k + ","
            sql_vals.append(v)
        sql_cols += "host_user, host_password"
        sql += sql_cols + ") VALUES (" + ("%s," * len(sql_vals))
        sql += "COALESCE((" + sql_last_user + "),'dummyuser'), COALESCE((" + sql_last_pw + "), 'dummypass')) RETURNING host_id"
        ret = datadb.execute(sql, tuple(sql_vals))
        return ret[0]['host_id']
    else:   # update
        host_id = hostDict['host_id']
        hostDict.pop('host_id')
        hostDict.update({'host_last_modified':'now'})
        sql = "UPDATE hosts SET\n"
        for k, v in hostDict.iteritems():
            sql_cols += k + "=%s,\n"
            sql_vals.append(v)
        sql_cols = sql_cols.strip(",\n")
        sql += sql_cols
        sql += "\nWHERE host_id = " + host_id + " RETURNING host_id"
        ret = datadb.execute(sql, tuple(sql_vals))
        return ret[0]['host_id']
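
A hypothetical usage sketch of the insert/update branching above (all field names other than host_id are assumptions, not taken from the original module):

# Insert: an empty or missing 'host_id' takes the INSERT branch and returns the new id.
new_id = saveHost({'host_id': '', 'host_name': 'db1.example.com', 'host_port': '5432'})
# Update: a present 'host_id' takes the UPDATE branch for that row
# (passed as a string, since it is concatenated into the WHERE clause).
saveHost({'host_id': str(new_id), 'host_name': 'db1.example.com', 'host_port': '5433'})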
Example 2
def topsprocsbyruntime(self):
    q = """
        select
            host_db_export_name as db,
            sproc_name,
            total_runtime
        from (
            select
                *,
                row_number() over(partition by host_db_export_name order by total_runtime desc)
            from (
                select
                    host_db_export_name,
                    substring(sproc_name, 1, position ('(' in sproc_name)-1) as sproc_name,
                    max(sp_total_time)-min(sp_total_time) as total_runtime
                from sprocs
                join sproc_performance_data on sp_sproc_id = sproc_id
                join hosts on host_id = sproc_host_id
                where sp_timestamp > now() - '7days'::interval
                and host_db_export_name is not null
                group by 1, 2
            ) a
        ) b
        where row_number <= 10
        order by host_db_export_name, total_runtime desc
    """
    topbyruntime = datadb.execute(q)
    retdict = defaultdict(list)
    for r in topbyruntime:
        retdict[r['db']].append(r['sproc_name'])
    return json.dumps(retdict)
Example 3
def getStatStatements(host_name, date1=None, date2=None, order_by='1', limit='50'):
    order_by = int(order_by) + 1
    sql = '''
select
*
from (
select
  max(ssd_query) as query,
  max(ssd_calls) - min(ssd_calls) as calls,
  max(ssd_total_time) - min(ssd_total_time) as total_time,
  max(ssd_blks_read) - min(ssd_blks_read) as blks_read,
  max(ssd_blks_written) - min(ssd_blks_written) as blks_written,
  max(ssd_temp_blks_read) - min(ssd_temp_blks_read) as temp_blks_read,
  max(ssd_temp_blks_written) - min(ssd_temp_blks_written) as temp_blks_written,
  ssd_query_id as query_id
from
  monitor_data.stat_statements_data
  join
  monitor_data.hosts on ssd_host_id = host_id
where
  host_name = %s
  and ssd_timestamp >= coalesce(%s, current_date-1) and ssd_timestamp < coalesce(%s, now())
group by
  ssd_query_id
) a
order by ''' + str(order_by) + '''
  desc
limit ''' + limit
    return datadb.execute(sql, (host_name, date1, date2))
Example 4
def pgo_get_data_and_columns_from_view(host_id, view_name, max_days_to_fetch, idb_latest_timestamp=None):
    dt_now = datetime.now()
    from_timestamp = idb_latest_timestamp
    to_timestamp = dt_now

    if from_timestamp is None:
        from_timestamp = dt_now - timedelta(days=max_days_to_fetch)

    if from_timestamp < dt_now - timedelta(days=MAX_DAYS_TO_SELECT_AT_A_TIME):
        to_timestamp = from_timestamp + timedelta(days=MAX_DAYS_TO_SELECT_AT_A_TIME)
    else:
        to_timestamp = to_timestamp - timedelta(seconds=SAFETY_SECONDS_FOR_LATEST_DATA)

    if from_timestamp >= to_timestamp:
        return [], None

    sql = open(os.path.join(TEMPLATES_FOLDER, view_name)).read()
    sql_params = {'host_id': host_id, 'from_timestamp': from_timestamp, 'to_timestamp': to_timestamp}

    logging.debug("Executing:")
    logging.debug("%s", datadb.mogrify(sql, sql_params))

    view_data, columns = datadb.execute(sql, sql_params)

    # removing timestamp, we only want to store the utc epoch "time" column
    timestamp_index = columns.index('timestamp')
    if timestamp_index != 0:
        raise Exception('"timestamp" needs to be the 1st column returned!')
    columns.remove('timestamp')

    ret_data = []
    for d in view_data:
        ret_data.append(list(d[1:]))

    return ret_data, columns
Example 5
def getDatabaseSizes(host_id = None, days='8'):
    size_data = {}
    current_host = 0
    last_timestamp = None

    for record in datadb.execute(getSizeTrendSQL(host_id, days)):

        if record['tsd_host_id'] != current_host:
            current_host = record['tsd_host_id']
            set_ins = False
            set_del = False
            set_upd = False

            l_ins = None
            l_upd = None
            l_del = None
            last_timestamp = None

        if last_timestamp == None:
            last_timestamp = int(time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

        if not record['tsd_host_id'] in size_data:
            size_data[record['tsd_host_id']] = { 'size' : [] , 'ins': [], 'upd': [], 'del':[] }

        """ exclude 0 values, otherwise there is a big peak at start, with wraparound this should be ok"""

        if not set_ins and record['s_ins']!=0:
            l_ins = record['s_ins']
            set_ins = True

        if not set_upd and record['s_upd']!=0:
            l_upd = record['s_upd']
            set_upd = True

        if not set_del and record['s_del']!=0:
            l_del = record['s_del']
            set_del = True

        if l_ins == None:
            l_ins = record['s_ins']

        if l_upd == None:
            l_upd = record['s_upd']

        if l_del == None:
            l_del = record['s_del']

        size_data[record['tsd_host_id']]['size'].append( ( record['tsd_timestamp'] , record['size'] ) )
        size_data[record['tsd_host_id']]['ins'].append( ( record['tsd_timestamp'] , max( record['s_ins'] - l_ins , 0)  ) )
        size_data[record['tsd_host_id']]['del'].append( ( record['tsd_timestamp'] , max( record['s_del'] - l_del , 0)  ) )
        size_data[record['tsd_host_id']]['upd'].append( ( record['tsd_timestamp'] , max( record['s_upd'] - l_upd , 0)  ) )

        l_ins = record['s_ins']
        l_upd = record['s_upd']
        l_del = record['s_del']

        last_timestamp = int(time.mktime(record['tsd_timestamp'].timetuple()) * 1000)

    return size_data
Example 6
def getTopStatementsData(hostId, interval1='3hours', interval2='1hour', limit='10'):
    data = datadb.execute(TOP_STATEMENTS_SQL, {'host_id': hostId,
                                               'interval1': interval1, 'interval2': interval2,
                                               'limit': limit})
    for d in data:
        d['avg_time_pretty'] = topsprocs.makeTimeIntervalReadable(d['avg_ms'])
        d['total_time_pretty'] = topsprocs.makeTimeIntervalReadable(d['total_ms'])

    return {x: list(y) for x, y in itertools.groupby(data, lambda x: x['mode'])}
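
A caveat worth noting on the dict comprehension above: itertools.groupby only merges adjacent items, so the grouping is complete only if TOP_STATEMENTS_SQL returns its rows already ordered by mode. A small self-contained illustration of that behaviour:

import itertools

rows = [{'mode': 'total_time'}, {'mode': 'calls'}, {'mode': 'total_time'}]  # not ordered by 'mode'
grouped = {k: list(v) for k, v in itertools.groupby(rows, lambda r: r['mode'])}
# The second 'total_time' run overwrites the first one in the dict, so the rows
# must be pre-sorted (or the SQL must ORDER BY mode) for the groups to be complete.
print(grouped)  # {'total_time': [{'mode': 'total_time'}], 'calls': [{'mode': 'calls'}]}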
Example 7
def getLocksReport(host_name, date1, date2):
    # IN p_from_date timestamp, IN p_from_hour integer, IN p_to_date timestamp, IN p_is_ignore_advisory boolean DEFAULT true,
    # OUT host_name text, OUT total_time_ss bigint, OUT threads_count bigint, OUT incidents_count bigint, OUT blocked_query text, OUT one_blocking_query text
    q_locks = '''
        select * from monitor_data.blocking_last_day(%s, %s)
        where (host_name = %s or %s = 'all')
        and total_time_ss > 5
        order by host_name, incidents_count desc
    '''
    return datadb.execute(q_locks, (date1, date2, host_name, host_name))
Example 8
def getBlockedProcessesCounts(hostId, days='8'):
    ret = []
    days += 'days'

    sql = """
with
q_wait_startpoints as (
     select
       date_trunc('hour'::text, query_start) + floor(date_part('minute'::text, query_start) / 15::double precision) * '00:15:00'::interval as wait_start_timestamp,
       count(1) as wait_starts
     from
     (
          select
            query_start,
            query,
            min(bp_timestamp) as wait_start,
            max(bp_timestamp) as wait_end,
            count(1)
          from
            monitor_data.blocking_processes
          where
            bp_host_id = """ + str(adapt(hostId)) + """
            and bp_timestamp > now() - """ + str(adapt(days)) + """::interval
            and waiting
          group by
            1, 2
          order by
            1, 2
     ) a
     where
       wait_end - wait_start >= '5 seconds'::interval
     group by
       date_trunc('hour'::text, query_start) + floor(date_part('minute'::text, query_start) / 15::double precision) * '00:15:00'::interval
),
q_timeline as (
     select * from (select generate_series(current_date - """ + str(adapt(days)) + """::interval, now(), '00:15:00'::interval) AS ts) a
     where ts > now() - """ + str(adapt(days)) + """::interval
)
SELECT
  date_trunc('hour'::text, q_timeline.ts) + floor(date_part('minute'::text, q_timeline.ts) / 15::double precision) * '00:15:00'::interval AS ts,
  coalesce(wait_starts, 0) as count
FROM
  q_timeline
  left join
  q_wait_startpoints on q_wait_startpoints.wait_start_timestamp = q_timeline.ts
ORDER BY
  1 ASC
            """

    for record in datadb.execute(sql):
        ret.append( (record['ts'] , record['count'] ) )

    return ret
Example 9
def get_all_dbnames():
    sql = """
    WITH RECURSIVE t(dbname) AS (
       SELECT MIN(dbname) AS dbname FROM metrics
       UNION
       SELECT (SELECT MIN(dbname) FROM metrics WHERE dbname > t.dbname)
       FROM t
    )
    SELECT dbname FROM t WHERE dbname NOTNULL ORDER BY 1;
    """
    ret, err = datadb.execute(sql, on_metric_store=True)
    if err:
        raise Exception('Failed to read distinct "dbname" values from "metrics": ' + err)
    return [x['dbname'] for x in ret]
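
The recursive CTE above is the classic "loose index scan" trick for pulling distinct values out of a large table quickly; it should return the same list as a plain DISTINCT. A hedged sanity check of that equivalence (assuming the same metrics table on the metric store) could look like:

# Sketch of an equivalence check against the straightforward, potentially slower query.
plain_sql = "SELECT DISTINCT dbname FROM metrics WHERE dbname IS NOT NULL ORDER BY 1"
rows, err = datadb.execute(plain_sql, on_metric_store=True)
assert not err
assert [r['dbname'] for r in rows] == get_all_dbnames()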
Example 10
def set_bulk_password(params, cmd_args):
    err = ''
    sql = """update pgwatch2.monitored_db set md_password_type = %(bulk_password_type)s,  md_password = %(bulk_password)s, md_last_modified_on = now() where (md_password, md_password_type) is distinct from (%(bulk_password)s, %(bulk_password_type)s)"""

    if params.get('bulk_password_type') == 'aes-gcm-256':    # NB! when changing this part also review insert/update_monitored_db()
        if not cmd_args.aes_gcm_keyphrase:
            return "Password encryption not possible as keyphrase/keyfile not specified on UI startup - use the PW2_AES_GCM_KEYPHRASE env. variable or --aes-gcm-keyphrase/ aes-gcm-keyphrase-file params", 0

        params['bulk_password'] = crypto.encrypt(cmd_args.aes_gcm_keyphrase, params.get('bulk_password'))

    ret, _ = datadb.execute(sql, params)
    if ret and len(ret) == 1:
        return err, ret[0]['rows_affected']
    return err, '0'
Example 11
def retrieve_bgwriter_stats(hostId, from_date, to_date=datetime.now()):
    """Loads the prepared performance indicators from monitordb and 
    returns a dataset with data points which can be displayed as graph"""
    
    sql="""select date_trunc('hour', sbd_timestamp) as sbd_timestamp,
       sum(elapsed) as elapsed,
       sum(checkpoints_timed) as checkpoints_timed,
       sum(checkpoints_req) as checkpoints_req,
       sum(buffers_checkpoint) as buffers_checkpoint,
       sum(buffers_clean) as buffers_clean,
       sum(buffers_backend) as buffers_backend,
       max(block_size) as block_size
  from (
    SELECT sbd_timestamp, sbd_timestamp - lead(sbd_timestamp, 1, sbd_timestamp) over hostpart AS elapsed,
      sbd_checkpoints_timed - lead(sbd_checkpoints_timed, 1, sbd_checkpoints_timed) OVER hostpart as checkpoints_timed,
      sbd_checkpoints_req - lead(sbd_checkpoints_req, 1, sbd_checkpoints_req) OVER hostpart as checkpoints_req,
      sbd_buffers_checkpoint - lead(sbd_buffers_checkpoint, 1, sbd_buffers_checkpoint) OVER hostpart as buffers_checkpoint,
      sbd_buffers_clean - lead(sbd_buffers_clean, 1, sbd_buffers_clean) OVER hostpart as buffers_clean,
      sbd_buffers_backend - lead(sbd_buffers_backend, 1, sbd_buffers_backend) OVER hostpart as buffers_backend,
      sbd_buffers_alloc - lead(sbd_buffers_alloc, 1, sbd_buffers_alloc) OVER hostpart as buffers_alloc,
      (select cast(current_setting('block_size') as integer)) as block_size
      FROM monitor_data.stat_bgwriter_data
     WHERE sbd_host_id = %(hostId)s
       AND sbd_timestamp > %(from)s
     WINDOW hostpart as (PARTITION BY sbd_host_id ORDER BY sbd_timestamp DESC)
      ) as a
where a.elapsed > '0 sec'::interval
group by date_trunc('hour', sbd_timestamp)
order by 1"""
    rows = datadb.execute(sql, {'hostId':hostId, 'from': from_date})
    result = {'avgWritesPerCheckpoint': [],
              'checkpointRequestPercentage': [],
              'checkpoint_write_percentage': [],
              'backend_write_percentage': [],
              'written_per_second': [],}
    for row in rows:
        checkpoints = (row['checkpoints_timed'] + row['checkpoints_req'])
        timepoint = time.mktime(row['sbd_timestamp'].timetuple()) * 1000
        total_buffer_writes = row['buffers_checkpoint'] + row['buffers_clean'] + row['buffers_backend']
        block_size = row['block_size']
        # there might be too much gathered data resulting in a 0 difference, or somebody reset the statistics
        if checkpoints > 0:
            avg_chp_write = row['buffers_checkpoint'] *  block_size / checkpoints
            result['avgWritesPerCheckpoint'].append({'x': timepoint, 'y': avg_chp_write})
            result['checkpointRequestPercentage'].append({'x': timepoint, 'y': 100*row['checkpoints_req']/checkpoints})
        if total_buffer_writes > 0:
            result['checkpoint_write_percentage'].append({'x': timepoint, 'y': 100*row['buffers_checkpoint']/total_buffer_writes})
            result['backend_write_percentage'].append({'x': timepoint, 'y': 100*row['buffers_backend']/total_buffer_writes})
            result['written_per_second'].append({'x': timepoint, 'y': block_size*total_buffer_writes/row['elapsed'].seconds})
    return result
Example 12
def getStatStatements(uishortname,
                      date1=None,
                      date2=None,
                      order_by='1',
                      limit='50',
                      no_copy_ddl=True,
                      min_calls='3'):
    order_by = int(order_by) + 1
    sql = '''
        select
          ltrim(regexp_replace(query, E'[ \\t\\r]+' , ' ', 'g')) as query,
          calls,
          total_time,
          blks_read,
          blks_written,
          temp_blks_read,
          temp_blks_written,
          case when calls > 0 then round(total_time / calls::numeric) else null end as avg_runtime_ms,
          query_id,
          host_ui_shortname
        from (
        select
          max(ssd_query) as query,
          max(ssd_calls) - min(ssd_calls) as calls,
          max(ssd_total_time) - min(ssd_total_time) as total_time,
          max(ssd_blks_read) - min(ssd_blks_read) as blks_read,
          max(ssd_blks_written) - min(ssd_blks_written) as blks_written,
          max(ssd_temp_blks_read) - min(ssd_temp_blks_read) as temp_blks_read,
          max(ssd_temp_blks_written) - min(ssd_temp_blks_written) as temp_blks_written,
          ssd_query_id as query_id,
          host_ui_shortname
        from
          monitor_data.stat_statements_data
          join
          monitor_data.hosts on ssd_host_id = host_id
        where
          host_ui_shortname = %s
          and ssd_timestamp >= coalesce(%s, current_date-1) and ssd_timestamp < coalesce(%s, now())
          and case when %s then not upper(ssd_query) like any(array['COPY%%', 'CREATE%%']) else true end
        group by
          host_ui_shortname, ssd_query_id
        ) a
        where
           calls >= %s::int
        order by ''' + str(order_by) + '''
          desc nulls last
        limit ''' + limit
    return datadb.execute(
        sql,
        (uishortname, date1, date2, True if no_copy_ddl else False, min_calls))
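
Note the doubled percent signs in the COPY%%/CREATE%% patterns above: because parameters are bound by the driver, psycopg2 treats a single % in the statement text as the start of a placeholder, so literal percent signs must be written as %%. A minimal illustration (the LIKE pattern itself is hypothetical):

# With bound parameters, a literal '%' in the SQL text has to be escaped as '%%'.
rows = datadb.execute(
    "select ssd_query from monitor_data.stat_statements_data where ssd_query like %s || '%%' limit 5",
    ('COPY',))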
Example 13
def update_preset_config(params):
    sql = """
        update
          pgwatch2.preset_config
        set
          pc_description = %(pc_description)s,
          pc_config = %(pc_config)s,
          pc_last_modified_on = now()
        where
          pc_name = %(pc_name)s
    """
    ret, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to update "preset_config": ' + err)
Example 14
def getGetActiveFrontendAnnouncementIfAny():
    announcement = None
    sql = """
      SELECT fa_announcement_text FROM frontpage_announcement WHERE fa_validity_range @> now()::timestamp;
    """

    try:
        announcement = datadb.execute(sql)
        if announcement:
            announcement = announcement[0]['fa_announcement_text']
    except Exception:
        print('Exception reading the frontpage_announcement table. Is it there?')

    return announcement
Example 15
def get_all_metrics():
    sql = """
        select
          m_id, m_name, m_pg_version_from, m_sql, m_sql_su, coalesce(m_comment, '') as m_comment, m_is_active, m_is_helper,
          date_trunc('second', m_last_modified_on::timestamp) as m_last_modified_on, m_master_only, m_standby_only,
          coalesce(m_column_attrs::text, '') as m_column_attrs, coalesce(ma_metric_attrs::text, '') as ma_metric_attrs
        from
          pgwatch2.metric
          left join
          pgwatch2.metric_attribute on (ma_metric_name = m_name)
        order by
          m_is_active desc, m_name, m_pg_version_from
    """
    return datadb.execute(sql)[0]
Example 16
def getSingleSprocData(hostId, name, interval=None):
    data = { 'calls' : [], 'self_time': [], 'total_time' : [] , 'avg_time' : [] , 'avg_self_time': [] , 'name' : None }
    if name:
        dataset = datadb.execute( getSingleSprocSQL(hostId, name, interval ) )
        for r in dataset:
            if not data['name']:
                data['name'] = r['name']    # in case sproc is present in multiple apis the name from 1st api will be used
            data['calls'].append( ( r['xaxis'] , r['d_calls'] ) )
            data['total_time'].append ( ( r['xaxis'] , r['d_total_time'] ) )
            data['self_time'].append ( ( r['xaxis'] , r['d_self_time'] ) )
            data['avg_time'].append ( ( r['xaxis'] , r['d_avg_time'] ) )
            data['avg_self_time'].append ( ( r['xaxis'] , r['d_avg_self_time'] ) )

    return data
Example 17
def getHostsDataForConnecting(hostname='all'):
    q_active_hosts="""
        select
            host_id,
            host_name,
            host_port,
            host_user,
            host_password,
            host_db
        from monitor_data.hosts
        where host_enabled
        and (%s = 'all' or host_name=%s)
        """
    return datadb.execute(q_active_hosts, (hostname, hostname))
Example 19
def getCpuLoad(hostId, days='8'):
    load = { "load_15min_avg" : [] , "load_15min_max" : [] }
    days += 'days'
    sql = """ SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                     AVG(load_1min_value) AS load_15min_avg,
                     MAX(load_1min_value) AS load_15min_max
                FROM monitor_data.host_load WHERE load_host_id = """ + str(adapt(hostId)) + """ AND load_timestamp > now() - """ + str(adapt(days)) + """::interval
                GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
                ORDER BY 1 ASC """

    for record in datadb.execute(sql):
        load['load_15min_avg'].append( (record['load_timestamp'] , round( float(record['load_15min_avg'])/100,2) ) )
        load['load_15min_max'].append( (record['load_timestamp'] , round( float(record['load_15min_max'])/100,2) ) )

    return load
Example 20
def getAllActiveSprocNames(hostId):
    sql = """
    select
      distinct regexp_replace(sproc_name,'(\(.*\))','') as sproc_name
    from
      sprocs
      join sproc_performance_data on sp_sproc_id = sproc_id
    where sproc_host_id = %s
      and sp_host_id = %s
      and sp_timestamp > now() - '1 day'::interval
    """
    ret = datadb.execute(sql, (hostId, hostId))
    ret = [ x['sproc_name'] for x in ret ]
    ret.sort()
    return ret
Example 21
def get_all_monitored_dbs():
    sql = """
        select
          *,
          date_trunc('second', md_last_modified_on) as md_last_modified_on,
          md_config::text,
          md_custom_tags::text,
          coalesce(md_include_pattern, '') as md_include_pattern,
          coalesce(md_exclude_pattern, '') as md_exclude_pattern
        from
          pgwatch2.monitored_db
        order by
          md_is_enabled desc, md_id
    """
    return datadb.execute(sql)[0]
Example 22
def exec_for_time_pairs(sql, dbname, pairs, decimal_digits=2):
    ret = []
    for time_window, agg_interval in pairs:
        data, err = datadb.execute(sql, {
            'dbname': dbname,
            'time_window': time_window,
            'agg_interval': agg_interval,
            'decimal_digits': decimal_digits
        },
                                   on_metric_store=True,
                                   quiet=True)
        # print(data, err)
        if not err and data:
            ret.append(data[0]['metric'])
    return ret
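
A hypothetical call, just to show the shape of the pairs argument (the SQL template name and interval strings below are assumptions, not taken from the original caller):

# Each pair is a (time window, aggregation interval) tuple fed into the SQL as parameters.
metrics = exec_for_time_pairs(SOME_METRIC_SQL, 'mydb',
                              [('1 hour', '5 minutes'), ('24 hours', '1 hour')])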
Example 23
def get_active_db_uniques():
    sql = """
        select
          md_unique_name
        from
          pgwatch2.monitored_db
        where
          md_is_enabled
        order by
          1
    """
    ret, err = datadb.execute(sql)
    if ret:
        return [x['md_unique_name'] for x in ret]
    return []
Example 24
def insert_metric(params):
    sql = """
        insert into
          pgwatch2.metric (m_name, m_pg_version_from, m_sql, m_comment, m_is_active, m_is_helper, m_master_only, m_standby_only, m_column_attrs)
        values
          (%(m_name)s, %(m_pg_version_from)s, %(m_sql)s, %(m_comment)s, %(m_is_active)s, %(m_is_helper)s, %(m_master_only)s, %(m_standby_only)s, %(m_column_attrs)s)
        returning m_id
    """
    cherrypy_checkboxes_to_bool(
        params,
        ['m_is_active', 'm_is_helper', 'm_master_only', 'm_standby_only'])
    ret, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to insert into "metric": ' + err)
    return ret[0]['m_id']
Example 25
def find_top_growth_statements(dbname, sort_column, start_time=(datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z',
                               end_time=datetime.utcnow().isoformat() + 'Z', limit=20):
    """start_time/end_time expect UTC date inputs currently!"""
    if sort_column not in STATEMENT_SORT_COLUMNS:
        raise Exception('unknown sort column: ' + sort_column)
    ret = []        # list of dicts with all columns from "stat_statements"
    sql = r"""
    select
      queryid,
      query,
      round(sum(total_time - total_time_lag)::numeric, 2) as total_time,
      round(avg((total_time - total_time_lag)/(calls - calls_lag))::numeric, 2) as mean_time,
      sum(calls - calls_lag) as calls,
      sum(shared_blks_hit - shared_blks_hit_lag) as shared_blks_hit,
      sum(shared_blks_read - shared_blks_read_lag) as shared_blks_read,
      sum(shared_blks_written - shared_blks_written_lag) as shared_blks_written,
      sum(temp_blks_written - temp_blks_written_lag) as temp_blks_written,
      round(sum(blk_read_time - blk_read_time_lag)::numeric, 2) as blk_read_time,
      round(sum(blk_write_time - blk_write_time_lag)::numeric, 2) as blk_write_time
    from (
      select
        tag_data->>'queryid' as queryid,
        tag_data->>'query' as query,
        (data->>'total_time')::float8 as total_time, lag((data->>'total_time')::float8) over w as total_time_lag,
        (data->>'calls')::float8 as calls, lag((data->>'calls')::float8) over w as calls_lag,
        (data->>'shared_blks_hit')::float8 as shared_blks_hit, lag((data->>'shared_blks_hit')::float8) over w as shared_blks_hit_lag,
        (data->>'shared_blks_read')::float8 as shared_blks_read, lag((data->>'shared_blks_read')::float8) over w as shared_blks_read_lag,
        (data->>'shared_blks_written')::float8 as shared_blks_written, lag((data->>'shared_blks_written')::float8) over w as shared_blks_written_lag,
        (data->>'temp_blks_read')::float8 as temp_blks_read, lag((data->>'temp_blks_read')::float8) over w as temp_blks_read_lag,
        (data->>'temp_blks_written')::float8 as temp_blks_written, lag((data->>'temp_blks_written')::float8) over w as temp_blks_written_lag,
        (data->>'blk_read_time')::float8 as blk_read_time, lag((data->>'blk_read_time')::float8) over w as blk_read_time_lag,
        (data->>'blk_write_time')::float8 as blk_write_time, lag((data->>'blk_write_time')::float8) over w as blk_write_time_lag
      from stat_statements
        where dbname = %(dbname)s
        and time between %(start_time)s and %(end_time)s
        and not tag_data->>'query' ~* E'\\(extract\\(\\$\\d+\\W*from\\W*now\\(\\)\\)\\W?\\*\\W*\\$\\d+\\).*::\\w+\\W+as\\W+epoch_ns\\W*,'
        and not tag_data->>'query' ~* E'/\\*\\W*pgwatch2_generated\\W*\\*/'
      window w as (partition by tag_data->>'queryid' order by time)
    ) x
    where calls > calls_lag
    group by 1, 2
    order by {sort_column} desc
    limit %(limit)s
    """
    data, _ = datadb.execute(sql.format(sort_column=sort_column), {'dbname': dbname, 'start_time': start_time, 'end_time': end_time, 'limit': limit}, on_metric_store=True)
    return data
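
One subtlety in the signature above: the start_time/end_time defaults are evaluated once, at import time, so a long-running process would keep reusing that moment as "now". A minimal sketch of the usual workaround with None sentinels (the *_safe wrapper name is hypothetical):

from datetime import datetime, timedelta

def find_top_growth_statements_safe(dbname, sort_column, start_time=None, end_time=None, limit=20):
    # Resolve the time-range defaults at call time rather than at module import time.
    if start_time is None:
        start_time = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z'
    if end_time is None:
        end_time = datetime.utcnow().isoformat() + 'Z'
    return find_top_growth_statements(dbname, sort_column, start_time, end_time, limit)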
Example 26
def getAllActiveSprocNames(hostId, active_days=1):
    active_days = str(adapt(active_days)) + ' days'
    sql = """
    select
      distinct regexp_replace(sproc_name, E'(\\\\(.*\\\\))', '') as sproc_name
    from
      sprocs
      join sproc_performance_data on sp_sproc_id = sproc_id
    where sproc_host_id = %(host_id)s
      and sp_host_id = %(host_id)s
      and sp_timestamp > now() - %(active_days)s::interval
    order by
      1
    """
    ret = datadb.execute(sql, {'host_id': hostId, 'active_days': active_days})
    ret = [x['sproc_name'] for x in ret]
    return ret
Example 27
def update_metric(params):
    sql = """
        update
          pgwatch2.metric
        set
          m_name = %(m_name)s,
          m_pg_version_from = %(m_pg_version_from)s,
          m_sql = %(m_sql)s,
          m_is_active = %(m_is_active)s,
          m_last_modified_on = now()
        where
          m_id = %(m_id)s
    """
    cherrypy_checkboxes_to_bool(params, ['m_is_active'])
    ret, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to update "metric": ' + err)
Example 28
def getWalVolumes(hostId, days='8'):
    load = { "wal_15min_growth" : []}
    days += 'days'

    sql = """
            SELECT 
                date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                coalesce(max(xlog_location_mb)-min(xlog_location_mb),0)  AS wal_15min_growth
            FROM monitor_data.host_load WHERE load_host_id = """ + str(adapt(hostId)) + """ AND load_timestamp > ('now'::timestamp - """ + str(adapt(days)) + """::interval)
            GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
            ORDER BY 1 ASC
            """

    for record in datadb.execute(sql):
        load['wal_15min_growth'].append( (record['load_timestamp'] , record['wal_15min_growth'] ) )

    return load
Example 29
def get_monitored_db_by_id(id):
    sql = """
        select
          *,
          date_trunc('second', md_last_modified_on) as md_last_modified_on,
          md_config::text,
          md_custom_tags::text,
          coalesce(md_include_pattern, '') as md_include_pattern,
          coalesce(md_exclude_pattern, '') as md_exclude_pattern
        from
          pgwatch2.monitored_db
        where
          md_id = %s
    """
    data, err = datadb.execute(sql, (id, ))
    if not data:
        return None
    return data[0]
Example 30
def insert_monitored_db(params):
    sql = """
        insert into
          pgwatch2.monitored_db (md_unique_name, md_hostname, md_port, md_dbname, md_user, md_password,
          md_sslmode, md_is_enabled, md_preset_config_name, md_config, md_statement_timeout_seconds)
        values
          (%(md_unique_name)s, %(md_hostname)s, %(md_port)s, %(md_dbname)s, %(md_user)s, %(md_password)s,
          %(md_sslmode)s, %(md_is_enabled)s, %(md_preset_config_name)s, %(md_config)s, %(md_statement_timeout_seconds)s)
        returning
          md_id
    """
    cherrypy_checkboxes_to_bool(params, ['md_is_enabled', 'md_sslmode'])
    cherrypy_empty_text_to_nulls(params,
                                 ['md_preset_config_name', 'md_config'])
    ret, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to insert into "monitored_db": ' + err)
    return ret[0]['md_id']
Example 31
def getTopStatementsData(hostId,
                         interval1='3hours',
                         interval2='1hour',
                         limit='10'):
    data = datadb.execute(
        TOP_STATEMENTS_SQL, {
            'host_id': hostId,
            'interval1': interval1,
            'interval2': interval2,
            'limit': limit
        })
    for d in data:
        d['avg_time_pretty'] = topsprocs.makeTimeIntervalReadable(d['avg_ms'])
        d['total_time_pretty'] = topsprocs.makeTimeIntervalReadable(
            d['total_ms'])

    return {
        x: list(y)
        for x, y in itertools.groupby(data, lambda x: x['mode'])
    }
Example 32
def getWalVolumes(hostId, days='8'):
    load = {"wal_15min_growth": []}
    days += 'days'

    sql = """
            SELECT 
                date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                coalesce(max(xlog_location_mb)-min(xlog_location_mb),0)  AS wal_15min_growth
            FROM monitor_data.host_load WHERE load_host_id = """ + str(
        adapt(
            hostId)) + """ AND load_timestamp > ('now'::timestamp - """ + str(
                adapt(days)) + """::interval)
            GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
            ORDER BY 1 ASC
            """

    for record in datadb.execute(sql):
        load['wal_15min_growth'].append(
            (record['load_timestamp'], record['wal_15min_growth']))

    return load
Example 33
def getCpuLoad(hostId, days='8'):
    load = {"load_15min_avg": [], "load_15min_max": []}
    days += 'days'
    sql = """ SELECT date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval AS load_timestamp,
                     AVG(load_1min_value) AS load_15min_avg,
                     MAX(load_1min_value) AS load_15min_max
                FROM monitor_data.host_load WHERE load_host_id = """ + str(
        adapt(hostId)) + """ AND load_timestamp > now() - """ + str(
            adapt(days)) + """::interval
                GROUP BY date_trunc('hour'::text, load_timestamp) + floor(date_part('minute'::text, load_timestamp) / 15::double precision) * '00:15:00'::interval
                ORDER BY 1 ASC """

    for record in datadb.execute(sql):
        load['load_15min_avg'].append(
            (record['load_timestamp'],
             round(float(record['load_15min_avg']) / 100, 2)))
        load['load_15min_max'].append(
            (record['load_timestamp'],
             round(float(record['load_15min_max']) / 100, 2)))

    return load
Example 34
def getStatStatements(host_name, date1=None, date2=None, order_by='1', limit='50', no_copy_ddl=True, min_calls='3'):
    order_by = int(order_by) + 1
    sql = '''
select
  query,
  calls,
  total_time,
  blks_read,
  blks_written,
  temp_blks_read,
  temp_blks_written,
  case when calls > 0 then round(total_time / calls::numeric) else null end as avg_runtime_ms,
  query_id
from (
select
  max(ssd_query) as query,
  max(ssd_calls) - min(ssd_calls) as calls,
  max(ssd_total_time) - min(ssd_total_time) as total_time,
  max(ssd_blks_read) - min(ssd_blks_read) as blks_read,
  max(ssd_blks_written) - min(ssd_blks_written) as blks_written,
  max(ssd_temp_blks_read) - min(ssd_temp_blks_read) as temp_blks_read,
  max(ssd_temp_blks_written) - min(ssd_temp_blks_written) as temp_blks_written,
  ssd_query_id as query_id
from
  monitor_data.stat_statements_data
  join
  monitor_data.hosts on ssd_host_id = host_id
where
  host_name = %s
  and ssd_timestamp >= coalesce(%s, current_date-1) and ssd_timestamp < coalesce(%s, now())
  and case when %s then not upper(ssd_query) like any(array['COPY%%', 'CREATE%%']) else true end
group by
  ssd_query_id
) a
where
   calls >= %s::int
order by ''' + str(order_by) + '''
  desc nulls last
limit ''' + limit
    return datadb.execute(sql, (host_name, date1, date2, True if no_copy_ddl else False, min_calls))
Example 35
def getStatStatementsGraph(hostid, query_id, date1, date2):
    sql = """
select
  ssd_query_id as query_id,
  ssd_timestamp as timestamp,
  ssd_query as query,
  ssd_calls  as calls,
  ssd_total_time as total_time,
  ssd_blks_read as blks_read,
  ssd_blks_written as blks_written,
  ssd_temp_blks_read as temp_blks_read,
  ssd_temp_blks_written as temp_blks_written
from
  monitor_data.stat_statements_data
where
  ssd_host_id = %s
  and ssd_query_id = %s
  and ssd_timestamp between %s and %s
order by
  ssd_timestamp
    """
    return datadb.execute(sql, (hostid, query_id, date1, date2))
Example 37
def update_metric(params):
    sql = """
        update
          pgwatch2.metric
        set
          m_name = %(m_name)s,
          m_pg_version_from = %(m_pg_version_from)s,
          m_sql = %(m_sql)s,
          m_comment = %(m_comment)s,
          m_is_active = %(m_is_active)s,
          m_is_helper = %(m_is_helper)s,
          m_master_only = %(m_master_only)s,
          m_standby_only = %(m_standby_only)s,
          m_column_attrs = %(m_column_attrs)s,
          m_last_modified_on = now()
        where
          m_id = %(m_id)s
    """
    cherrypy_checkboxes_to_bool(params, ['m_is_active', 'm_is_helper', 'm_master_only', 'm_standby_only'])
    ret, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to update "metric": ' + err)
Example 38
def getLoadReportDataDailyAvg(hostId, weeks=10):
    query = """
            select
              *
            from (
            select
              load_timestamp::date::timestamp as date,
              round(avg(load_15min_value)/100,2) AS cpu_15,
              (max(xlog_location_mb) - min(xlog_location_mb)) * 10^6  as wal_b
            from
              monitor_data.host_load
            where
              load_host_id = %(host_id)s
              and load_timestamp > now() - '""" + str(weeks) + """ weeks'::interval
            group by
              load_timestamp::date
            having
              max(xlog_location_mb) >= min(xlog_location_mb)
            ) a
            order by
              1
            """
    return datadb.execute(query, {'host_id': hostId})
Example 39
def getLoadReportDataDailyAvg(hostId, weeks=10):
    query = """
            select
              *
            from (
            select
              load_timestamp::date::timestamp as date,
              round(avg(load_15min_value)/100,2) AS cpu_15,
              (max(xlog_location_mb) - min(xlog_location_mb)) * 10^6  as wal_b
            from
              monitor_data.host_load
            where
              load_host_id = %(host_id)s
              and load_timestamp > now() - '""" + str(
        weeks) + """ weeks'::interval
            group by
              load_timestamp::date
            having
              max(xlog_location_mb) >= min(xlog_location_mb)
            ) a
            order by
              1
            """
    return datadb.execute(query, {'host_id': hostId})
Example 40
def get_schema_usage_for_host(uishortname, date1, date2, filter=''):
    sql = """
      select
        sud_schema_name as schema_name,
        sud_timestamp::date AS date,
        array[max(sud_sproc_calls) - min(sud_sproc_calls), (max(sud_seq_scans) + max(sud_idx_scans)) - (min(sud_seq_scans) + min(sud_idx_scans)),
          max(sud_tup_ins) - min(sud_tup_ins),  max(sud_tup_upd) - min(sud_tup_upd),  max(sud_tup_del) - min(sud_tup_del)] as daily_counts
      from
        monitor_data.schema_usage_data
      where
        sud_host_id = (select host_id from monitor_data.hosts where host_ui_shortname = %s)
        and sud_timestamp between %s and %s
        and sud_schema_name like '%%'||%s||'%%'
      group by
        sud_schema_name, sud_timestamp::date
      order by
        1, 2
        """
    usage = datadb.execute(sql, (uishortname, date1, date2, filter))
    ret = OrderedDict()
    for u in usage:
        if u['schema_name'] not in ret: ret[u['schema_name']] = []
        ret[u['schema_name']].append((u['date'], u['daily_counts']))
    return ret
Example 41
def get_schema_usage_for_host(host_name, date1, date2, filter=''):
    sql = """
      select
        sud_schema_name as schema_name,
        sud_timestamp::date AS date,
        array[max(sud_sproc_calls) - min(sud_sproc_calls), (max(sud_seq_scans) + max(sud_idx_scans)) - (min(sud_seq_scans) + min(sud_idx_scans)),
          max(sud_tup_ins) - min(sud_tup_ins),  max(sud_tup_upd) - min(sud_tup_upd),  max(sud_tup_del) - min(sud_tup_del)] as daily_counts
      from
        monitor_data.schema_usage_data
      where
        sud_host_id = (select host_id from monitor_data.hosts where host_name = %s)
        and sud_timestamp between %s and %s
        and sud_schema_name like '%%'||%s||'%%'
      group by
        sud_schema_name, sud_timestamp::date
      order by
        1, 2
        """
    usage = datadb.execute(sql, (host_name, date1, date2, filter))
    ret = OrderedDict()
    for u in usage:
        if u['schema_name'] not in ret: ret[u['schema_name']] = []
        ret[u['schema_name']].append((u['date'], u['daily_counts']))
    return ret
Example 42
def getTopTables(hostId=1, limit=10, order=2):
    limit_sql = "" if limit is None else """ LIMIT """ + str(adapt(limit))

    order_by_sql = { 1: "ORDER BY schema ASC,name ASC ",
              2: "ORDER BY table_size DESC" ,
              3: "ORDER BY table_size - min_table_size DESC",
              4: "ORDER BY CASE WHEN min_table_size > 0 THEN table_size::float / min_table_size ELSE 0 END DESC",
              5: "ORDER BY index_size DESC",
              6: "ORDER BY index_size - min_index_size DESC",
              7: "ORDER BY CASE WHEN min_index_size > 0 THEN index_size::float / min_index_size ELSE 0 END DESC",
              8: "ORDER BY iud_delta DESC"}[int(order)]

    sql = """
        with
        q_min_max_timestamps AS (
              SELECT
                MIN(tsd_timestamp) AS min_date,
                MAX(tsd_timestamp) AS max_date
              FROM monitor_data.table_size_data
              WHERE tsd_host_id = %s
              AND tsd_timestamp > now() - '7 days'::interval
        ),
        q_min_sizes AS (
              SELECT
                tsd_table_id,
                tsd_table_size as min_table_size,
                tsd_index_size as min_index_size,
                tsd_tup_ins + tsd_tup_upd + tsd_tup_del as min_iud
              FROM
                monitor_data.table_size_data st
                JOIN q_min_max_timestamps on true
              WHERE
                st.tsd_host_id = %s
                AND st.tsd_timestamp = q_min_max_timestamps.min_date
        ),
        q_max_sizes AS (
              SELECT
                tsd_table_id,
                tsd_table_size as max_table_size,
                tsd_index_size as max_index_size,
                tsd_tup_ins + tsd_tup_upd + tsd_tup_del as max_iud
              FROM
                monitor_data.table_size_data st
                JOIN q_min_max_timestamps on true
              WHERE
                st.tsd_host_id = %s
                AND st.tsd_timestamp = q_min_max_timestamps.max_date
        )
        SELECT
        *
        FROM (
        SELECT
          t_schema AS schema,
          t_name AS name,
          q_max_sizes.max_table_size AS table_size,
          COALESCE(q_min_sizes.min_table_size, 0) AS min_table_size,
          q_max_sizes.max_table_size - COALESCE(q_min_sizes.min_table_size, 0) AS table_size_delta,
          q_max_sizes.max_index_size AS index_size,
          COALESCE(q_min_sizes.min_index_size, 0) AS min_index_size,
          q_max_sizes.max_index_size - COALESCE(q_min_sizes.min_index_size, 0) AS index_size_delta,
          q_max_sizes.max_iud - COALESCE(q_min_sizes.min_iud, 0) AS iud_delta
        FROM
          q_max_sizes
          LEFT JOIN
          q_min_sizes ON q_min_sizes.tsd_table_id = q_max_sizes.tsd_table_id
          JOIN
          monitor_data.tables ON t_id = q_max_sizes.tsd_table_id
        WHERE
          t_host_id = %s
        ) t
        """ + order_by_sql + limit_sql

    list = datadb.execute(sql, (hostId, hostId, hostId, hostId))
    for d in list:

        d['table_size_pretty'] = makePrettySize( d['table_size'] )
        d['index_size_pretty'] = makePrettySize( d['index_size'] )
        d['table_size_delta'] = makePrettySize( d['table_size_delta'] )
        d['index_size_delta'] = makePrettySize( d['index_size_delta'] )
        if d['min_table_size'] > 0:
            d['growth'] = round( ( ( float(d['table_size']) / d['min_table_size'] ) - 1) * 100 , 1 )
        else:
            d['growth'] = 0

        if d['min_index_size'] > 0:
            d['growth_index'] = round( ( ( float(d['index_size']) / d['min_index_size']) - 1) * 100 , 1 )
        else:
            d['growth_index'] = 0
        d['iud_delta'] =  makePrettyCounter(d['iud_delta'])

    return list
Example 43
    def get_last_loads_and_sizes(self):
        yyyymm = datetime.datetime.now().strftime('%Y%m')
        sql = """
                with
                q_last_5m_loads as (
                  select
                    distinct on (load_host_id)
                    load_host_id as host_id,
                    round(load_5min_value/100.0,1) as last_5min_load
                  from
                    monitor_data_partitions.host_load_""" + yyyymm + """
                  where
                    load_timestamp > now() - '1 day'::interval
                  order by
                    load_host_id, load_timestamp desc
                ),
                q_size as (
                  select
                    distinct on (tsda_host_id)
                    tsda_host_id as host_id,
                    pg_size_pretty(tsda_db_size) last_agg_size
                  from
                    monitor_data_partitions.table_size_data_agg_""" + yyyymm + """
                  where
                   tsda_timestamp > now() - '1 day'::interval
                  order by
                    tsda_host_id, tsda_timestamp desc
                )
                select
                  h.host_id,
                  h.host_ui_shortname,
                  h.host_ui_longname,
                  hg.group_name,
                  coalesce(q_last_5m_loads.last_5min_load::text, '-') as last_5min_load,
                  coalesce(q_size.last_agg_size::text, '-') as last_agg_size
                from
                  hosts h
                  left join
                    q_last_5m_loads
                      on q_last_5m_loads.host_id = h.host_id
                  left join
                    q_size
                      on q_size.host_id = h.host_id
                  left join
                    host_groups hg
                      on hg.group_id = h.host_group_id
                where
                  host_enabled
                order by
                  case when hg.group_id = 0 then 0 else 1 end, hg.group_name nulls first, h.host_ui_longname
        """
        sql_no_aggr = """
            select
              h.host_id,
              h.host_ui_shortname,
              h.host_ui_longname,
              hg.group_name,
              coalesce((select round(load_5min_value/100.0,1)::text from host_load where load_host_id = h.host_id and load_timestamp > (now() - '1day'::interval) order by load_timestamp desc limit 1), '-') as last_5min_load,
              '-' as last_agg_size
            from
              hosts h
              left join
              host_groups hg on hg.group_id = h.host_group_id
            where
              host_enabled
            order by
              case when hg.group_id = 0 then 0 else 1 end, hg.group_name nulls first, h.host_ui_longname
        """

        if tplE._settings.get('run_aggregations'):
            return datadb.execute(sql)
        else:
            return datadb.execute(sql_no_aggr) # no "last db size"
Example 44
def getTopTables(hostId, date_from, date_to, order=None, limit=10, pattern=''):
    limit_sql = "" if limit is None else """ LIMIT """ + str(adapt(limit))
    if not order:
        order = 2   # size

    order_by_sql = { 1: "ORDER BY schema ASC,name ASC ",
              2: "ORDER BY table_size DESC" ,
              3: "ORDER BY table_size - min_table_size DESC",
              4: "ORDER BY CASE WHEN min_table_size > 0 THEN table_size::float / min_table_size ELSE 0 END DESC",
              5: "ORDER BY index_size DESC",
              6: "ORDER BY index_size - min_index_size DESC",
              7: "ORDER BY CASE WHEN min_index_size > 0 THEN index_size::float / min_index_size ELSE 0 END DESC",
              8: "ORDER BY iud_delta DESC",
              9: "ORDER BY s_delta DESC",
              10: "ORDER BY i_delta DESC",
              11: "ORDER BY u_delta DESC",
              12: "ORDER BY d_delta DESC"
            }[int(order)]

    sql = """
        with
        q_min_max_timestamps AS (
              SELECT
                tsd_host_id as host_id,
                MIN(tsd_timestamp) AS min_date,
                MAX(tsd_timestamp) AS max_date
              FROM monitor_data.table_size_data
              JOIN monitor_data.hosts h ON h.host_id = tsd_host_id
              WHERE (%s is null or tsd_host_id = %s)
              AND tsd_timestamp >= %s::timestamp
              AND tsd_timestamp <= %s::timestamp
              GROUP BY 1
        ),
        q_min_sizes AS (
              SELECT
                tsd_host_id,
                tsd_table_id,
                tsd_table_size as min_table_size,
                tsd_index_size as min_index_size,
                tsd_tup_ins + tsd_tup_upd + tsd_tup_del as min_iud,
                tsd_seq_scans as min_s,
                tsd_tup_ins as min_i,
                tsd_tup_upd as min_u,
                tsd_tup_del as min_d
              FROM
                monitor_data.table_size_data st
                JOIN q_min_max_timestamps on q_min_max_timestamps.host_id = st.tsd_host_id
              WHERE
                st.tsd_timestamp = q_min_max_timestamps.min_date
        ),
        q_max_sizes AS (
              SELECT
                tsd_host_id,
                tsd_table_id,
                tsd_table_size as max_table_size,
                tsd_index_size as max_index_size,
                tsd_tup_ins + tsd_tup_upd + tsd_tup_del as max_iud,
                tsd_seq_scans as max_s,
                tsd_tup_ins as max_i,
                tsd_tup_upd as max_u,
                tsd_tup_del as max_d
              FROM
                monitor_data.table_size_data st
                JOIN q_min_max_timestamps on q_min_max_timestamps.host_id = st.tsd_host_id
              WHERE
                st.tsd_timestamp = q_min_max_timestamps.max_date
        )
        SELECT
        *
        FROM (
        SELECT
          q_max_sizes.tsd_host_id as host_id,
          t_schema AS schema,
          t_name AS name,
          q_max_sizes.max_table_size AS table_size,
          COALESCE(q_min_sizes.min_table_size, 0) AS min_table_size,
          q_max_sizes.max_table_size - COALESCE(q_min_sizes.min_table_size, 0) AS table_size_delta,
          q_max_sizes.max_index_size AS index_size,
          COALESCE(q_min_sizes.min_index_size, 0) AS min_index_size,
          q_max_sizes.max_index_size - COALESCE(q_min_sizes.min_index_size, 0) AS index_size_delta,
          q_max_sizes.max_iud - COALESCE(q_min_sizes.min_iud, 0) AS iud_delta,
          q_max_sizes.max_s - COALESCE(q_min_sizes.min_s, 0) AS s_delta,
          q_max_sizes.max_i - COALESCE(q_min_sizes.min_i, 0) AS i_delta,
          q_max_sizes.max_u - COALESCE(q_min_sizes.min_u, 0) AS u_delta,
          q_max_sizes.max_d - COALESCE(q_min_sizes.min_d, 0) AS d_delta
        FROM
          q_max_sizes
          LEFT JOIN
          q_min_sizes ON q_min_sizes.tsd_table_id = q_max_sizes.tsd_table_id
          JOIN
          monitor_data.tables ON t_id = q_max_sizes.tsd_table_id
        ) t
         WHERE name ilike %s
        """ + order_by_sql + limit_sql

    pattern = '%' + pattern + '%'

    list = datadb.execute(sql, (hostId, hostId, date_from, date_to, pattern))
    for d in list:

        d['table_size_pretty'] = makePrettySize( d['table_size'] )
        d['index_size_pretty'] = makePrettySize( d['index_size'] )
        d['table_size_delta'] = makePrettySize( d['table_size_delta'] )
        d['index_size_delta'] = makePrettySize( d['index_size_delta'] )
        if d['min_table_size'] > 0:
            d['growth'] = round( ( ( float(d['table_size']) / d['min_table_size'] ) - 1) * 100 , 1 )
        else:
            d['growth'] = 0

        if d['min_index_size'] > 0:
            d['growth_index'] = round( ( ( float(d['index_size']) / d['min_index_size']) - 1) * 100 , 1 )
        else:
            d['growth_index'] = 0
        d['iud_delta'] =  makePrettyCounter(d['iud_delta'])
        d['s_delta'] =  makePrettyCounter(d['s_delta'])
        d['i_delta'] =  makePrettyCounter(d['i_delta'])
        d['u_delta'] =  makePrettyCounter(d['u_delta'])
        d['d_delta'] =  makePrettyCounter(d['d_delta'])
        # d['hostuiname'] = hosts.getHostData()[]['host_ui_longname']

    return list
Example 45
def getDatabaseStatistics(hostid, days='8'):
    days += 'days'
    sql = """
        select
          sdd_timestamp,
          sdd_numbackends,
          sdd_xact_commit,
          sdd_xact_rollback,
          sdd_blks_read,
          sdd_blks_hit,
          sdd_temp_files,
          sdd_temp_bytes,
          sdd_deadlocks,
          sdd_blk_read_time,
          sdd_blk_write_time
        from
          monitor_data.stat_database_data
        where
          sdd_host_id = %s
          and sdd_timestamp >= current_date - %s::interval
        order by
          sdd_timestamp
    """
    data = datadb.execute(sql, (hostid, days))
    ret = []
    prev_row = None
    for row in data:
        rr = {}
        if prev_row:
            rr['timestamp'] = row['sdd_timestamp']

            rr['numbackends'] = row['sdd_numbackends']
            # commit_delta = max(row['sdd_xact_commit'] - prev_row['sdd_xact_commit'], 0) # max() is for cases where stats are reset
            rollback_delta = max(row['sdd_xact_rollback'] - prev_row['sdd_xact_rollback'], 0)
            rr['rollbacks'] = rollback_delta
            # blks_read_delta = max(row['sdd_blks_read'] - prev_row['sdd_blks_read'], 0)
            # blks_hit_delta = max(row['sdd_blks_hit'] - prev_row['sdd_blks_hit'], 0)
            # blk_read_time_delta = max(row['sdd_blk_read_time'] - prev_row['sdd_blk_read_time'], 0)
            # blk_write_time_delta = max(row['sdd_blk_write_time'] - prev_row['sdd_blk_write_time'], 0)
            #rr['temp_files'] = max(row['sdd_temp_files'] - prev_row['sdd_temp_files'], 0)
            rr['temp_files_bytes'] = max(row['sdd_temp_bytes'] - prev_row['sdd_temp_bytes'], 0)
            rr['deadlocks'] = max(row['sdd_deadlocks'] - prev_row['sdd_deadlocks'], 0)

            # if commit_delta + rollback_delta > 0:
            #     rr['rollback_ratio'] = round (rollback_delta / float(commit_delta + rollback_delta), 1)
            # else:
            #     rr['rollback_ratio'] = 0
            # if blks_read_delta + blks_hit_delta > 0:
            #     rr['buffers_miss_ratio'] = round (blks_read_delta / float(blks_read_delta + blks_hit_delta), 1)
            # else:
            #     rr['buffers_miss_ratio'] = 0
            # if blk_read_time_delta + blk_write_time_delta > 0:
            #     rr['write_time_ratio'] = round (blk_write_time_delta / float(blk_read_time_delta + blk_write_time_delta), 1)
            # else:
            #     rr['write_time_ratio'] = 0

            # was done in the data fetching query previously but this seems to be faster
            time_delta = row['sdd_timestamp'] - prev_row['sdd_timestamp']
            rr = apply_average(rr, 15, time_delta, keys_to_skip=['timestamp','numbackends', 'rollback_ratio','buffers_miss_ratio','write_time_ratio'])

            ret.append(rr)
        prev_row = row

    return ret
Esempio n. 47
0
def set_bulk_config(params):
    sql = """update pgwatch2.monitored_db set md_preset_config_name = %(bulk_preset_config_name)s, md_config = null, md_last_modified_on = now() where md_preset_config_name != %(bulk_preset_config_name)s"""
    ret, _ = datadb.execute(sql, params)
    if ret and len(ret) == 1:
        return ret[0]['rows_affected']
    return '0'
Esempio n. 48
0
def set_bulk_timeout(params):
    sql = """update pgwatch2.monitored_db set md_statement_timeout_seconds = %(bulk_timeout_seconds)s, md_last_modified_on = now() where md_statement_timeout_seconds != %(bulk_timeout_seconds)s"""
    ret, _ = datadb.execute(sql, params)
    if ret and len(ret) == 1:
        return ret[0]['rows_affected']
    return '0'
Esempio n. 49
0
def get_all_top_level_metric_tables():
    sql = """select table_name from admin.get_top_level_metric_tables()"""
    ret, err = datadb.execute(sql, on_metric_store=True)
    if err:
        raise Exception('Failed to determine storage schema type:' + err)
    return [x["table_name"] for x in ret]
Esempio n. 50
0
def enable_all_dbs():
    sql = """update pgwatch2.monitored_db set md_is_enabled = true, md_last_modified_on = now() where not md_is_enabled"""
    ret, _ = datadb.execute(sql)
    if ret and len(ret) == 1:
        return ret[0]['rows_affected']
    return '0'
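
The three bulk helpers above share one pattern: a single UPDATE guarded by a WHERE clause that skips rows already in the desired state, with the affected row count read back from datadb.execute(). A hedged usage sketch, assuming the caller passes form fields as a plain dict whose keys mirror the %(...)s placeholders:

# Hypothetical usage of the bulk helpers above. Each call returns the affected
# row count reported by datadb.execute(), or '0' when nothing matched.
print(set_bulk_config({'bulk_preset_config_name': 'exhaustive'}))
print(set_bulk_timeout({'bulk_timeout_seconds': 10}))
print(enable_all_dbs())
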
Esempio n. 51
0
def insert_monitored_db(params, cmd_args=None):
    ret = []
    # to enable adding DBs via POST requests where non-mandatory fields are not specified
    expected_monitored_db_params = [('md_port', '5432'), ('md_password', ''),
                                    ('md_root_ca_path', ''),
                                    ('md_client_cert_path', ''),
                                    ('md_client_key_path', ''),
                                    ('md_config', ''),
                                    ('md_statement_timeout_seconds', '5'),
                                    ('md_dbtype', 'postgres'),
                                    ('md_only_if_master', False),
                                    ('md_custom_tags', ''),
                                    ('md_host_config', ''),
                                    ('md_include_pattern', ''),
                                    ('md_exclude_pattern', ''),
                                    ('md_group', 'default'),
                                    ('md_password_type', 'plain-text'),
                                    ('md_sslmode', 'disable')]
    for p, default in expected_monitored_db_params:
        if p not in params:
            params[p] = default
    sql_insert_new_db = """
        insert into
          pgwatch2.monitored_db (md_unique_name, md_hostname, md_port, md_dbname, md_user, md_password, md_password_type, md_is_superuser,
          md_sslmode, md_root_ca_path,md_client_cert_path, md_client_key_path, md_is_enabled, md_preset_config_name, md_config, md_statement_timeout_seconds, md_dbtype,
          md_include_pattern, md_exclude_pattern, md_custom_tags, md_group, md_host_config, md_only_if_master)
        values
          (%(md_unique_name)s, %(md_hostname)s, %(md_port)s, %(md_dbname)s, %(md_user)s, %(md_password)s, %(md_password_type)s, %(md_is_superuser)s,
          %(md_sslmode)s, %(md_root_ca_path)s, %(md_client_cert_path)s, %(md_client_key_path)s, %(md_is_enabled)s, %(md_preset_config_name)s, %(md_config)s, %(md_statement_timeout_seconds)s, %(md_dbtype)s,
          %(md_include_pattern)s, %(md_exclude_pattern)s, %(md_custom_tags)s, %(md_group)s, %(md_host_config)s, %(md_only_if_master)s)
        returning
          md_id
    """
    sql_active_dbs = "select datname from pg_database where not datistemplate and datallowconn"
    cherrypy_checkboxes_to_bool(params, [
        'md_is_enabled', 'md_sslmode', 'md_is_superuser', 'md_only_if_master'
    ])
    cherrypy_empty_text_to_nulls(params, [
        'md_preset_config_name', 'md_config', 'md_custom_tags',
        'md_host_config'
    ])
    password_plain = params['md_password']
    if password_plain == '***':
        raise Exception(
            "'***' cannot be used as password, denotes unchanged password")

    if params.get('md_password_type') == 'aes-gcm-256':
        if not cmd_args.aes_gcm_keyphrase:
            ret.append(
                "FYI - skipping password encryption as keyphrase/keyfile not specified on UI startup (hint: use the PW2_AES_GCM_KEYPHRASE env. variable or --aes-gcm-keyphrase param)"
            )
            params['md_password_type'] = 'plain-text'
        else:
            params['md_password'] = crypto.encrypt(cmd_args.aes_gcm_keyphrase,
                                                   password_plain)

    if not params['md_dbname'] and params['md_dbtype'] not in [
            'postgres-continuous-discovery', 'patroni',
            'patroni-continuous-discovery'
    ]:  # add all DBs found
        if params['md_dbtype'] == 'postgres':
            # get all active non-template DBs from the entered host
            active_dbs_on_host, err = datadb.executeOnRemoteHost(
                sql_active_dbs,
                host=params['md_hostname'],
                port=params['md_port'],
                dbname='template1',
                user=params['md_user'],
                password=password_plain,
                sslmode=params['md_sslmode'])
            if err:
                raise Exception(
                    "Could not read active DBs from specified host!")
            active_dbs_on_host = [x['datname'] for x in active_dbs_on_host]

            # "subtract" DBs that are already monitored
            currently_monitored_dbs, err = datadb.execute(
                "select md_dbname from pgwatch2.monitored_db where "
                " (md_hostname, md_port) = (%(md_hostname)s, %(md_port)s)",
                params)
            if err:
                raise Exception(
                    "Could not read currently active DBs from config DB!")
            currently_monitored_dbs = [
                x['md_dbname'] for x in currently_monitored_dbs
            ]

            params_copy = params.copy()
            dbs_to_add = set(active_dbs_on_host) - set(currently_monitored_dbs)
            for db_to_add in dbs_to_add:
                params_copy['md_unique_name'] = '{}_{}'.format(
                    params['md_unique_name'], db_to_add)
                params_copy['md_dbname'] = db_to_add
                retdata, err = datadb.execute(sql_insert_new_db, params_copy)
                if err:
                    raise Exception('Failed to insert into "monitored_db": ' +
                                    err)
            if currently_monitored_dbs:
                ret.append(
                    'Warning! Some DBs not added as already under monitoring: '
                    + ', '.join(currently_monitored_dbs))
            else:
                ret.append('{} DBs added: {}'.format(len(dbs_to_add),
                                                     ', '.join(dbs_to_add)))
        elif params['md_dbtype'] == 'pgbouncer':
            # get all configured pgbouncer DBs
            params['md_dbname'] = 'pgbouncer'
            active_dbs_on_host, err = datadb.executeOnRemoteHost(
                "show databases",
                host=params['md_hostname'],
                port=params['md_port'],
                dbname='pgbouncer',
                user=params['md_user'],
                password=password_plain,
                sslmode=params['md_sslmode'])
            if err:
                raise Exception(
                    "Could not read active DBs from specified host!")
            active_dbs_on_host = [x['name'] for x in active_dbs_on_host]

            # "subtract" DBs that are already monitored
            currently_monitored_dbs, err = datadb.execute(
                "select md_dbname from pgwatch2.monitored_db where "
                " (md_hostname, md_port) = (%(md_hostname)s, %(md_port)s)",
                params)
            if err:
                raise Exception(
                    "Could not read currently active DBs from config DB!")
            currently_monitored_dbs = [
                x['md_dbname'] for x in currently_monitored_dbs
            ]

            params_copy = params.copy()
            dbs_to_add = set(active_dbs_on_host) - set(currently_monitored_dbs)
            for db_to_add in dbs_to_add:
                params_copy['md_unique_name'] = '{}_{}'.format(
                    params['md_unique_name'], db_to_add)
                params_copy['md_dbname'] = db_to_add
                retdata, err = datadb.execute(sql_insert_new_db, params_copy)
                if err:
                    raise Exception('Failed to insert into "monitored_db": ' +
                                    err)
            if currently_monitored_dbs:
                ret.append(
                    'Warning! Some DBs not added as already under monitoring: '
                    + ', '.join(currently_monitored_dbs))
            else:
                ret.append('{} DBs added: {}'.format(len(dbs_to_add),
                                                     ', '.join(dbs_to_add)))
    else:  # only 1 DB
        if params['md_dbtype'] in [
                'postgres-continuous-discovery', 'patroni-continuous-discovery'
        ]:
            params['md_dbname'] = ''
        data, err = datadb.execute(sql_insert_new_db, params)
        if err:
            raise Exception('Failed to insert into "monitored_db": ' + err)
        ret.append('Host with ID {} added!'.format(data[0]['md_id']))

        if params['md_dbtype'] in ['patroni', 'patroni-continuous-discovery']:
            ret.append(
                'Actual DB hosts will be discovered by the metrics daemon via DCS'
            )  # check if DCS is accessible? would cause more deps...
            return ret

        if params['md_dbtype'] == 'postgres-continuous-discovery':
            params['md_dbname'] = 'template1'
        data, err = datadb.executeOnRemoteHost('select 1',
                                               params['md_hostname'],
                                               params['md_port'],
                                               params['md_dbname'],
                                               params['md_user'],
                                               password_plain,
                                               sslmode=params['md_sslmode'],
                                               quiet=True)
        if err:
            ret.append('Could not connect to specified host: ' + str(err))
    return ret
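
A hedged usage sketch for insert_monitored_db(): the field names come from the INSERT statement above, the concrete values are made up, and the way checkbox-style fields are represented before cherrypy_checkboxes_to_bool() runs is an assumption.

# Hypothetical invocation -- only the fields without defaults in
# expected_monitored_db_params are supplied; everything else is filled in
# automatically. Checkbox handling is an assumption here.
new_db = {
    'md_unique_name': 'prod_orders',
    'md_hostname': 'db01.example.com',
    'md_dbname': 'orders',
    'md_user': 'pgwatch2',
    'md_password': 'secret',
    'md_is_superuser': 'false',
    'md_is_enabled': 'true',
    'md_preset_config_name': 'exhaustive',
}
for message in insert_monitored_db(new_db):
    print(message)
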
Esempio n. 52
0
def update_monitored_db(params, cmd_args=None):
    ret = []
    password_plain = params['md_password']
    old_row_data = get_monitored_db_by_id(params['md_id'])

    if params.get('md_password_type') == 'aes-gcm-256' and old_row_data.get(
            'md_password_type'
    ) == 'plain-text':  # NB! when changing this part also review set_bulk_password()
        if not cmd_args.aes_gcm_keyphrase:
            ret.append(
                "FYI - not enabling password encryption as keyphrase/keyfile not specified on UI startup (hint: use the PW2_AES_GCM_KEYPHRASE env. variable or --aes-gcm-keyphrase param)"
            )
            params['md_password_type'] = old_row_data['md_password_type']
            params['md_password'] = '******'
        else:
            if params['md_password'] != '***':
                params['md_password'] = crypto.encrypt(
                    cmd_args.aes_gcm_keyphrase, password_plain)
            else:
                params['md_password'] = crypto.encrypt(
                    cmd_args.aes_gcm_keyphrase,
                    old_row_data.get('md_password'))
    elif params.get('md_password_type') == 'plain-text' and old_row_data.get(
            'md_password_type') == 'aes-gcm-256':
        if not cmd_args.aes_gcm_keyphrase:
            ret.append(
                "FYI - skipping password decryption as keyphrase/keyfile not specified on UI startup (hint: use the PW2_AES_GCM_KEYPHRASE env. variable or --aes-gcm-keyphrase param)"
            )
            params['md_password_type'] = old_row_data['md_password_type']
            params['md_password'] = '******'
        else:
            if params['md_password'] == '***':
                params['md_password'] = crypto.decrypt(
                    cmd_args.aes_gcm_keyphrase,
                    old_row_data.get('md_password'))

    sql = """
        with q_old as (
          /* using a CTE to be able to detect if connection info is being changed */
          select * from pgwatch2.monitored_db
          where md_id = %(md_id)s
        )
        update
          pgwatch2.monitored_db new
        set
          md_group = %(md_group)s,
          md_hostname = %(md_hostname)s,
          md_port = %(md_port)s,
          md_dbname = %(md_dbname)s,
          md_include_pattern = %(md_include_pattern)s,
          md_exclude_pattern = %(md_exclude_pattern)s,
          md_user = %(md_user)s,
          md_password = case when %(md_password)s = '***' and %(md_password_type)s = new.md_password_type then new.md_password else %(md_password)s end,
          md_password_type = %(md_password_type)s,
          md_is_superuser = %(md_is_superuser)s,
          md_sslmode = %(md_sslmode)s,
          md_root_ca_path = %(md_root_ca_path)s,
          md_client_cert_path = %(md_client_cert_path)s,
          md_client_key_path = %(md_client_key_path)s,
          md_dbtype = %(md_dbtype)s,
          md_is_enabled = %(md_is_enabled)s,
          md_preset_config_name = %(md_preset_config_name)s,
          md_config = %(md_config)s,
          md_host_config = %(md_host_config)s,
          md_only_if_master = %(md_only_if_master)s,
          md_custom_tags = %(md_custom_tags)s,
          md_statement_timeout_seconds = %(md_statement_timeout_seconds)s,
          md_last_modified_on = now()
        from
          q_old
        where
          new.md_id = %(md_id)s
        returning
          (q_old.md_hostname, q_old.md_port, q_old.md_dbname, q_old.md_user, q_old.md_password,
          q_old.md_sslmode, q_old.md_root_ca_path, q_old.md_client_cert_path, q_old.md_client_key_path) is distinct from
            (%(md_hostname)s, %(md_port)s, %(md_dbname)s, %(md_user)s,
            case when %(md_password)s = '***' then q_old.md_password else %(md_password)s end, %(md_sslmode)s,
            %(md_root_ca_path)s, %(md_client_cert_path)s, %(md_client_key_path)s
            ) as connection_data_changed,
            case when %(md_password)s = '***' and %(md_password_type)s = q_old.md_password_type then q_old.md_password else %(md_password)s end as md_password
    """
    cherrypy_checkboxes_to_bool(params, [
        'md_is_enabled', 'md_sslmode', 'md_is_superuser', 'md_only_if_master'
    ])
    cherrypy_empty_text_to_nulls(params, [
        'md_preset_config_name', 'md_config', 'md_custom_tags',
        'md_host_config'
    ])
    if params['md_dbtype'] == 'postgres-continuous-discovery':
        params['md_dbname'] = ''

    data, err = datadb.execute(sql, params)
    if err:
        raise Exception('Failed to update "monitored_db": ' + err)
    ret.append('Updated!')

    if params['md_dbtype'] in ['patroni', 'patroni-continuous-discovery']:
        return ret  # check if DCS is accessible?

    # check connection if connect string changed or inactive host activated
    if data[0]['connection_data_changed'] or (
            old_row_data and
        (not old_row_data['md_is_enabled'] and params['md_is_enabled'])
    ):  # show warning when changing connect data but cannot connect
        if params.get(
                'md_password_type'
        ) == 'aes-gcm-256' and cmd_args.aes_gcm_keyphrase and data[0][
                'md_password'] and data[0]['md_password'].find('-') > 0:
            password_plain = crypto.decrypt(cmd_args.aes_gcm_keyphrase,
                                            data[0]['md_password'])
        else:
            password_plain = data[0]['md_password']
        data, err = datadb.executeOnRemoteHost(
            'select 1',
            params['md_hostname'],
            params['md_port'],
            'template1' if params['md_dbtype']
            == 'postgres-continuous-discovery' else params['md_dbname'],
            params['md_user'],
            password_plain,
            sslmode=params['md_sslmode'],
            sslrootcert=params['md_root_ca_path'],
            sslcert=params['md_client_cert_path'],
            sslkey=params['md_client_key_path'],
            quiet=True)
        if err:
            ret.append(
                'Could not connect to specified host (ignore if gatherer daemon runs on another host): '
                + str(err))

    return ret
Esempio n. 53
0
def get_unused_schemas(host_name, from_date, to_date, filter):
    sql = """
with
q_max_daily_timestamps as (
  select
    sud_host_id as host_id,
    sud_schema_name as schema_name,
    --sud_timestamp::date as day,
    max(sud_timestamp) as timestamp
  from
    monitor_data.schema_usage_data s
  where
    sud_timestamp between %s and %s
    and sud_host_id in (select host_id from monitor_data.hosts where host_name = %s or %s = 'all')
    and (not sud_schema_name like any (array['pg\_%%', '%%\_data'])
         and sud_schema_name not in ('public', '_v'))
    and sud_schema_name like '%%'||%s||'%%'
  group by
    sud_host_id, sud_schema_name, sud_timestamp::date
  order by
    sud_host_id, sud_schema_name, sud_timestamp::date
),
q_min_max as (
  select
    sud_host_id as host_id,
    sud_schema_name as schema_name,
    min(sud_timestamp),
    max(sud_timestamp)
  from
    monitor_data.schema_usage_data
  where
    sud_timestamp between %s and %s
    and sud_schema_name like '%%'||%s||'%%'
  group by
    sud_host_id, sud_schema_name
),
q_endofday_total_counts as (
  select
    host_id,
    schema_name,
    timestamp,
    sud_sproc_calls /* + max(sud_seq_scans)  + max(sud_idx_scans) */ + sud_tup_ins + sud_tup_upd + sud_tup_del as daily_total
  from
    monitor_data.schema_usage_data
    join
    q_max_daily_timestamps on sud_host_id = host_id and sud_schema_name = schema_name and sud_timestamp = timestamp
  order by
    1, 2, 3
)
select
  h.host_name,
  h.host_db,
  h.host_id,
  b.schema_name,
  mm.min,
  mm.max
from
  (
    select
      host_id,
      schema_name,
      bool_and(is_same_as_prev) is_unchanged
    from
      (
        select
          *,
          case
            when lag(daily_total) over w is null then true --1st day
            when lag(daily_total) over w > daily_total then true --stats reset/overflow
            else lag(daily_total) over w = daily_total
          end as is_same_as_prev
        from
          q_endofday_total_counts
        window w as
          (partition by host_id, schema_name  order by host_id, schema_name, timestamp)
      ) a
    group by
      host_id, schema_name
    order by
      host_id, schema_name
  ) b
  join
  monitor_data.hosts h on h.host_id = b.host_id
  join
  q_min_max mm on mm.host_id = h.host_id and mm.schema_name = b.schema_name
where
  is_unchanged
order by
  host_name, schema_name
        """

    unused = datadb.execute(sql, (from_date, to_date, host_name, host_name, filter, from_date, to_date, filter))
    return unused
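
A hedged example call; the host name, date range, and filter string are illustrative. Passing 'all' as the host name scans every monitored host, and the filter is applied as a substring match on the schema name:

# Hypothetical usage -- lists schemas whose end-of-day usage counters never
# changed inside the given window, i.e. likely-unused schemas.
for row in get_unused_schemas('db01', '2021-06-01', '2021-07-01', ''):
    print(row['host_name'], row['schema_name'], row['min'], row['max'])
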
Esempio n. 54
0
def getIndexIssues(hostname):
    q_invalid = """
        SELECT
        *,
        CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
                SELECT
                %s as host_name,
                %s as host_id,
                schemaname||'.'||relname AS table_full_name,
                schemaname||'.'||indexrelname AS index_full_name,
                index_size_bytes,
                indexes_size_bytes,
                pg_size_pretty(index_size_bytes) AS index_size,
                pg_size_pretty(indexes_size_bytes) AS indexes_size,
                pg_size_pretty(table_size_bytes) AS table_size,
                sum(index_size_bytes) over () AS total_marked_index_size_bytes
                FROM
                (
                  SELECT quote_ident(schemaname) as schemaname,
                         quote_ident(relname) as relname,
                         quote_ident(indexrelname) as indexrelname,
                         pg_relation_size(i.indexrelid) AS index_size_bytes,
                         pg_indexes_size(i.relid) AS indexes_size_bytes,                 
                         pg_relation_size(i.relid) AS table_size_bytes
                  FROM pg_stat_user_indexes i
                  JOIN pg_index USING(indexrelid) 
                  WHERE NOT indisvalid
                ) a                
        ) b 
        ORDER BY index_size_bytes DESC, index_full_name
    """
    q_unused = """
        SELECT
        *,
        pg_size_pretty(total_marked_index_size_bytes::bigint) AS total_marked_index_size
        FROM (
          SELECT
          *,
          pg_size_pretty(index_size_bytes) AS index_size,
          pg_size_pretty(indexes_size_bytes) AS indexes_size,
          pg_size_pretty(table_size_bytes) AS table_size,
          CASE WHEN indexes_size_bytes = 0 THEN 0 ELSE round((index_size_bytes::numeric / indexes_size_bytes::numeric)*100,1) END AS pct_of_tables_index_space,
          sum(index_size_bytes) over () AS total_marked_index_size_bytes
          FROM (
          SELECT   %s as host_name,
                   %s as host_id,
                   quote_ident(schemaname)||'.'||quote_ident(relname) AS table_full_name,
                   quote_ident(schemaname)||'.'||quote_ident(indexrelname) AS index_full_name,
                   pg_relation_size(i.indexrelid) as index_size_bytes,
                   pg_indexes_size(i.relid) AS indexes_size_bytes,
                   pg_relation_size(i.relid) AS table_size_bytes,
                   idx_scan AS scans
              FROM pg_stat_user_indexes i 
              JOIN pg_index USING(indexrelid) 
              WHERE NOT indisunique
              AND NOT schemaname LIKE ANY (ARRAY['tmp%%','temp%%'])
          ) a
          WHERE index_size_bytes > %s
          AND scans <= %s          
        ) b
        ORDER BY scans, index_size_bytes DESC
    """
    q_duplicate = """
        SELECT %s AS host_name,
               %s as host_id,
               n.nspname||'.'||ci.relname AS index_full_name,
               n.nspname||'.'||ct.relname AS table_full_name,
               pg_size_pretty(pg_total_relation_size(ct.oid)) AS table_size,
               pg_total_relation_size(ct.oid) AS table_size_bytes,
               n.nspname AS schema_name,
               index_names,
               def,
               count
        FROM (
          select regexp_replace(replace(pg_get_indexdef(i.indexrelid),c.relname,'X'), '^CREATE UNIQUE','CREATE') as def,
                 max(indexrelid) as indexrelid,
                 max(indrelid) as indrelid,
                 count(1),
                 array_agg(relname::text) as index_names
            from pg_index i
            join pg_class c
              on c.oid = i.indexrelid
           where indisvalid
           group 
              by regexp_replace(replace(pg_get_indexdef(i.indexrelid),c.relname,'X'), '^CREATE UNIQUE','CREATE')
          having count(1) > 1
        ) a
          JOIN pg_class ci
            ON ci.oid=a.indexrelid        
          JOIN pg_class ct
            ON ct.oid=a.indrelid
          JOIN pg_namespace n
            ON n.oid=ct.relnamespace
         ORDER
            BY count DESC, table_size_bytes DESC, schema_name, table_full_name
    """
    q_active_hosts="""
        select
            host_id,
            host_name,
            host_user,
            host_password,
            host_db
        from monitor_data.hosts
        where host_enabled
        and (%s = 'all' or host_name=%s)
        """
    q_indexing_thresholds="""select * from monitor_data.perf_indexes_thresholds"""
    data_invalid = []
    data_unused = []
    data_duplicate = []
    data_noconnect = []
    conn=None

    hosts = datadb.execute(q_active_hosts, (hostname, hostname))
    indexing_thresholds = datadb.execute(q_indexing_thresholds)[0]

    for h in hosts:
        try:
            conn = psycopg2.connect(host=h['host_name'], dbname=h['host_db'], user=h['host_user'], password=h['host_password'],connect_timeout='3')
            cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute(q_invalid, (h['host_name'], h['host_id']))
            data_invalid += cur.fetchall()
            cur.execute(q_unused, (h['host_name'], h['host_id'], indexing_thresholds['pit_min_size_to_report'], indexing_thresholds['pit_max_scans_to_report']))
            data_unused += cur.fetchall()
            cur.execute(q_duplicate, (h['host_name'], h['host_id']))
            data_duplicate += cur.fetchall()
        except Exception as e:
            print('ERROR could not connect to {}: {}'.format(h['host_name'], e))
            data_noconnect.append({'host_id':h['host_id'],'host_name': h['host_name']})
        finally:
            if conn:
                conn.close()
Esempio n. 55
0
def is_sproc_installed(sproc_name):
    sql = """select * from pg_proc where proname = %s"""
    return datadb.execute(sql, (sproc_name,))
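
A brief, hedged usage note: assuming datadb.execute() returns the matching pg_proc rows as a list here, an empty result is falsy, so the function can be used directly as an existence check. The sproc name below is purely illustrative.

# Hypothetical check before relying on a server-side helper function.
if not is_sproc_installed('get_table_io_stats'):
    print('helper sproc missing, skipping the dependent report')
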