Example #1
def main():
    parser = OptionParser('%prog [options] (username|email) [...]')
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Must specify username (Person.name)")

    log = logger(options)

    con = None
    try:
        log.debug("Connecting to database")
        con = connect()
        for username in args:
            if not close_account(con, log, username):
                log.debug("Rolling back")
                con.rollback()
                return 1
        log.debug("Committing changes")
        con.commit()
        return 0
    except:
        log.exception("Unhandled exception")
        log.debug("Rolling back")
        if con is not None:
            con.rollback()
        return 1
Example #2
def main():
    parser = OptionParser(
            '%prog [options] (username|email) [...]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Must specify username (Person.name)")

    log = logger(options)

    con = None
    try:
        log.debug("Connecting to database")
        con = connect()
        for username in args:
            if not close_account(con, log, username):
                log.debug("Rolling back")
                con.rollback()
                return 1
        log.debug("Committing changes")
        con.commit()
        return 0
    except:
        log.exception("Unhandled exception")
        log.debug("Rolling back")
        if con is not None:
            con.rollback()
        return 1
Example #3
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    cur.execute('show server_version')
    pg_version = LooseVersion(cur.fetchone()[0])

    log.debug("Disabling autovacuum on all tables in the database.")
    if pg_version < LooseVersion('8.4.0'):
        cur.execute("""
            INSERT INTO pg_autovacuum
            SELECT pg_class.oid, FALSE, -1,-1,-1,-1,-1,-1,-1,-1
            FROM pg_class
            WHERE relkind in ('r','t')
                AND pg_class.oid NOT IN (SELECT vacrelid FROM pg_autovacuum)
            """)
    else:
        cur.execute("""
            SELECT nspname,relname
            FROM pg_namespace, pg_class
            WHERE relnamespace = pg_namespace.oid
                AND relkind = 'r' AND nspname <> 'pg_catalog'
            """)
        for namespace, table in list(cur.fetchall()):
            cur.execute("""
                ALTER TABLE ONLY "%s"."%s" SET (
                    autovacuum_enabled=false,
                    toast.autovacuum_enabled=false)
                """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT procpid FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND current_query LIKE 'autovacuum: %'
            """)
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for procpid in autovacuums:
            log.debug("Cancelling %d" % procpid)
            cur.execute("SELECT pg_cancel_backend(%d)" % procpid)
Example #4
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    cur.execute('show server_version')
    pg_version = LooseVersion(cur.fetchone()[0])

    log.debug("Disabling autovacuum on all tables in the database.")
    if pg_version < LooseVersion('8.4.0'):
        cur.execute("""
            INSERT INTO pg_autovacuum
            SELECT pg_class.oid, FALSE, -1,-1,-1,-1,-1,-1,-1,-1
            FROM pg_class
            WHERE relkind in ('r','t')
                AND pg_class.oid NOT IN (SELECT vacrelid FROM pg_autovacuum)
            """)
    else:
        cur.execute("""
            SELECT nspname,relname
            FROM pg_namespace, pg_class
            WHERE relnamespace = pg_namespace.oid
                AND relkind = 'r' AND nspname <> 'pg_catalog'
            """)
        for namespace, table in list(cur.fetchall()):
            cur.execute("""
                ALTER TABLE ONLY "%s"."%s" SET (
                    autovacuum_enabled=false,
                    toast.autovacuum_enabled=false)
                """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT procpid FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND current_query LIKE 'autovacuum: %'
            """)
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for procpid in autovacuums:
            log.debug("Cancelling %d" % procpid)
            cur.execute("SELECT pg_cancel_backend(%d)" % procpid)
Example #5
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    log.debug("Disabling autovacuum on all tables in the database.")
    cur.execute("""
        SELECT nspname,relname
        FROM pg_namespace, pg_class
        WHERE relnamespace = pg_namespace.oid
            AND relkind = 'r' AND nspname <> 'pg_catalog'
        """)
    for namespace, table in list(cur.fetchall()):
        cur.execute("""
            ALTER TABLE ONLY "%s"."%s" SET (
                autovacuum_enabled=false,
                toast.autovacuum_enabled=false)
            """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT %(pid)s FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND %(query)s LIKE 'autovacuum: %%'
            """ % activity_cols(cur))
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for pid in autovacuums:
            log.debug("Cancelling %d" % pid)
            cur.execute("SELECT pg_cancel_backend(%d)" % pid)
Example #6
def main():
    parser = OptionParser()
    parser.add_option(
        "-0",
        "--null",
        dest="null",
        action="store_true",
        default=False,
        help="Set all full text index column values to NULL.",
    )
    parser.add_option(
        "-l",
        "--live-rebuild",
        dest="liverebuild",
        action="store_true",
        default=False,
        help="Rebuild all the indexes against a live database.",
    )
    db_options(parser)
    logger_options(parser)

    global options, args
    (options, args) = parser.parse_args()

    if options.null + options.liverebuild > 1:
        parser.error("Incompatible options")

    global log
    log = logger(options)

    con = connect()

    if options.liverebuild:
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        liverebuild(con)
    elif options.null:
        con.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        nullify(con)
    else:
        parser.error("Required argument not specified")

    con.commit()
    return 0
Example #7
def main():
    parser = OptionParser()
    parser.add_option(
            "-0", "--null", dest="null",
            action="store_true", default=False,
            help="Set all full text index column values to NULL.",
            )
    parser.add_option(
            "-l", "--live-rebuild", dest="liverebuild",
            action="store_true", default=False,
            help="Rebuild all the indexes against a live database.",
            )
    db_options(parser)
    logger_options(parser)

    global options, args
    (options, args) = parser.parse_args()

    if options.null + options.liverebuild > 1:
        parser.error("Incompatible options")

    global log
    log = logger(options)

    con = connect()

    if options.liverebuild:
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        liverebuild(con)
    elif options.null:
        con.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        nullify(con)
    else:
        parser.error("Required argument not specified")

    con.commit()
    return 0
Example #8
"""
The sampledata does not update the current values of all the sequences
used to populate the primary keys (this was removed to aid in merging changes
to the sampledata).

This script resets all of these sequences to the correct value based on the
maximum value currently found in the corresponding table.
"""

__metaclass__ = type

import _pythonpath

from optparse import OptionParser

from lp.services.database.postgresql import resetSequences
from lp.services.database.sqlbase import connect
from lp.services.scripts import db_options


if __name__ == '__main__':
    parser = OptionParser()
    db_options(parser)
    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many options given")
    if not options.dbname:
        parser.error("Required option --dbname not given")
    con = connect()
    resetSequences(con.cursor())
    con.commit()
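Per table, resetting a sequence boils down to one setval() call driven by the current maximum of the key column. A minimal sketch of the idea, assuming a plain serial primary key (reset_sequence and its signature are illustrative; the real resetSequences in lp.services.database.postgresql discovers the sequences itself):

def reset_sequence(cur, table, column='id'):
    # pg_get_serial_sequence() finds the sequence behind a serial
    # column; three-argument setval() makes the next nextval() return
    # max+1, or 1 for an empty table.
    cur.execute("""
        SELECT setval(
            pg_get_serial_sequence('%(table)s', '%(column)s'),
            coalesce(max(%(column)s), 1),
            max(%(column)s) IS NOT NULL)
        FROM %(table)s
        """ % {'table': table, 'column': column})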
Example #9
    global _bzr_details_cache
    if _bzr_details_cache is None:
        try:
            branch = Branch.open_containing(SCHEMA_DIR)[0]
            revno, revision_id = branch.last_revision_info()
            branch_nick = branch.get_config().get_nickname()
        except NotBranchError:
            log.warning("Not a Bazaar branch - branch details unavailable")
            revision_id, revno, branch_nick = None, None, None
        _bzr_details_cache = (branch_nick, revno, revision_id)
    return _bzr_details_cache


if __name__ == '__main__':
    parser = OptionParser()
    db_options(parser)
    logger_options(parser)
    parser.add_option("-n",
                      "--dry-run",
                      dest="commit",
                      default=True,
                      action="store_false",
                      help="Don't actually commit changes")
    parser.add_option("--partial",
                      dest="partial",
                      default=False,
                      action="store_true",
                      help="Commit after applying each patch")
    parser.add_option("--skip-comments",
                      dest="comments",
                      default=True,
Example #10
def add_my_options(self):
    """Add standard database command line options."""
    db_options(self.parser)
Example #11
def add_my_options(self):
    """Add standard database command line options."""
    db_options(self.parser)
Example #12
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options -
    # this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor.py
    parser = OptionParser(
            '%prog [options] (minutes) (host:scriptname) [host:scriptname]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        print "Must specify time in minutes and " \
            "at least one host and script"
        return 3

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime(
            "%Y-%m-%d %H:%M:%S", datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            # A malformed arg surfaces as a ValueError from the tuple
            # unpack, not a TypeError.
            except ValueError:
                print "%r is not in the format 'host:scriptname'" % arg
                return 3
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        print "Must specify time in minutes and " \
            "at least one host and script"
        return 3

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg = []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname,
                scriptname, completed_from, completed_to)
            if failure_msg is not None:
                msg.append("%s:%s" % (hostname, scriptname))
                error_found = True
        if error_found:
            # Construct our return message
            print "Scripts failed to run: %s" % ', '.join(msg)
            return 2
        else:
            # Construct our return message
            print "All scripts ran as expected"
            return 0
    except Exception as e:
        # Squeeze the exception type and stringification of the exception
        # value on to one line.
        print "Unhandled exception: %s %r" % (e.__class__.__name__, str(e))
        return 3
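None of these examples includes check_script itself. Judging from the call sites, it checks whether the named script completed on the given host inside the time window and returns a failure message or None. A hedged sketch along those lines (the ScriptActivity table and its columns are assumptions for illustration):

def check_script(con, log, hostname, scriptname,
                 completed_from, completed_to):
    # Illustrative only; the real helper lives in the Launchpad tree.
    cur = con.cursor()
    cur.execute("""
        SELECT MAX(date_completed)
        FROM ScriptActivity
        WHERE hostname = %s AND name = %s
            AND date_completed BETWEEN %s AND %s
        """, (hostname, scriptname, completed_from, completed_to))
    if cur.fetchone()[0] is not None:
        return None
    return "The script '%s' didn't run on '%s' between %s and %s" % (
        scriptname, hostname, completed_from, completed_to)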
Example #13
"""Generate a preamble for slonik(1) scripts based on the current LPCONFIG.
"""

__metaclass__ = type
__all__ = []

import _pythonpath

from optparse import OptionParser
import time

from lp.services import scripts
from lp.services.config import config
from lp.services.database.sqlbase import connect
import replication.helpers


if __name__ == '__main__':
    parser = OptionParser()
    scripts.db_options(parser)
    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")
    scripts.execute_zcml_for_scripts(use_web_security=False)

    con = connect()
    print '# slonik(1) preamble generated %s' % time.ctime()
    print '# LPCONFIG=%s' % config.instance_name
    print
    print replication.helpers.preamble(con)
Example #14
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options and sending
    # emails - this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor-nagios.py
    parser = OptionParser(
            '%prog [options] (minutes) (host:scriptname) [host:scriptname]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        parser.error("Must specify at time in minutes and "
            "at least one host and script")

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime(
            "%Y-%m-%d %H:%M:%S", datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            # A malformed arg surfaces as a ValueError from the tuple
            # unpack, not a TypeError.
            except ValueError:
                parser.error(
                    "%r is not in the format 'host:scriptname'" % (arg,))
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        parser.error("Must specify time in minutes and "
            "at least one host and script")

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg, subj = [], []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname,
                scriptname, completed_from, completed_to)
            if failure_msg is not None:
                msg.append(failure_msg)
                subj.append("%s:%s" % (hostname, scriptname))
                error_found = True
        if error_found:
            # Construct our email.
            msg = MIMEText('\n'.join(msg))
            msg['Subject'] = "Scripts failed to run: %s" % ", ".join(subj)
            msg['From'] = '*****@*****.**'
            msg['Reply-To'] = '*****@*****.**'
            msg['To'] = '*****@*****.**'

            # Send out the email.
            smtp = smtplib.SMTP()
            smtp.connect()
            smtp.sendmail(
                '*****@*****.**',
                ['*****@*****.**'], msg.as_string())
            smtp.close()
            return 2
    except:
        log.exception("Unhandled exception")
        return 1
Example #15
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Generate a preamble for slonik(1) scripts based on the current LPCONFIG.
"""

__metaclass__ = type
__all__ = []

import _pythonpath

from optparse import OptionParser
import time

from lp.services import scripts
from lp.services.config import config
from lp.services.database.sqlbase import connect
import replication.helpers

if __name__ == '__main__':
    parser = OptionParser()
    scripts.db_options(parser)
    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")
    scripts.execute_zcml_for_scripts(use_web_security=False)

    con = connect()
    print '# slonik(1) preamble generated %s' % time.ctime()
    print '# LPCONFIG=%s' % config.instance_name
    print
    print replication.helpers.preamble(con)
Example #16
def main():
    parser = OptionParser()

    db_options(parser)
    parser.add_option(
        "-f", "--from", dest="from_date", default=None,
        metavar="DATE", help="Only count new files since DATE (yyyy/mm/dd)")
    parser.add_option(
        "-u", "--until", dest="until_date", default=None,
        metavar="DATE", help="Only count new files until DATE (yyyy/mm/dd)")

    options, args = parser.parse_args()
    if len(args) > 0:
        parser.error("Too many command line arguments.")

    # Handle date filters. We use LibraryFileContent.datecreated rather
    # than LibraryFileAlias.datecreated as this report is about actual
    # disk space usage. A new row in the database linking to a
    # previously existing file in the Librarian takes up no new space.
    if options.from_date is not None:
        from_date = 'AND LFC.datecreated >= %s' % sqlvalues(
            options.from_date)
    else:
        from_date = ''
    if options.until_date is not None:
        until_date = 'AND LFC.datecreated <= %s' % sqlvalues(
            options.until_date)
    else:
        until_date = ''

    con = connect()
    cur = con.cursor()

    # Collect direct references to the LibraryFileAlias table.
    references = set(
        (from_table, from_column)
        # Note that listReferences is recursive, which we don't
        # care about in this simple report. We also ignore the
        # irrelevant constraint type update and delete flags.
        for from_table, from_column, to_table, to_column, update, delete
            in listReferences(cur, 'libraryfilealias', 'id')
        if to_table == 'libraryfilealias'
        )

    totals = set()
    for referring_table, referring_column in sorted(references):
        if referring_table == 'libraryfiledownloadcount':
            continue
        quoted_referring_table = quoteIdentifier(referring_table)
        quoted_referring_column = quoteIdentifier(referring_column)
        cur.execute("""
            SELECT
                COALESCE(SUM(filesize), 0),
                pg_size_pretty(CAST(COALESCE(SUM(filesize), 0) AS bigint)),
                COUNT(*)
            FROM (
                SELECT DISTINCT ON (LFC.id) LFC.id, LFC.filesize
                FROM LibraryFileContent AS LFC, LibraryFileAlias AS LFA, %s
                WHERE LFC.id = LFA.content
                    AND LFA.id = %s.%s
                    AND (
                        LFA.expires IS NULL
                        OR LFA.expires > CURRENT_TIMESTAMP AT TIME ZONE 'UTC')
                    %s %s
                ORDER BY LFC.id
                ) AS Whatever
            """ % (
                quoted_referring_table, quoted_referring_table,
                quoted_referring_column, from_date, until_date))
        total_bytes, formatted_size, num_files = cur.fetchone()
        totals.add((total_bytes, referring_table, formatted_size, num_files))

    for total_bytes, tab_name, formatted_size, num_files in sorted(
        totals, reverse=True):
        print '%-10s %s in %d files' % (formatted_size, tab_name, num_files)

    return 0
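Identifiers cannot be bound as ordinary query parameters, which is why the report passes table and column names through quoteIdentifier() before interpolating them into the SQL. A minimal sketch of that kind of helper (the Launchpad implementation may differ):

def quoteIdentifier(identifier):
    # Standard SQL identifier quoting: wrap in double quotes and
    # double any embedded double-quote characters.
    return '"%s"' % identifier.replace('"', '""')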
Example #17
def main():
    parser = OptionParser()

    db_options(parser)
    parser.add_option("-f",
                      "--from",
                      dest="from_date",
                      default=None,
                      metavar="DATE",
                      help="Only count new files since DATE (yyyy/mm/dd)")
    parser.add_option("-u",
                      "--until",
                      dest="until_date",
                      default=None,
                      metavar="DATE",
                      help="Only count new files until DATE (yyyy/mm/dd)")

    options, args = parser.parse_args()
    if len(args) > 0:
        parser.error("Too many command line arguments.")

    # Handle date filters. We use LibraryFileContent.datecreated rather
    # than LibraryFileAlias.datecreated as this report is about actual
    # disk space usage. A new row in the database linking to a
    # previously existing file in the Librarian takes up no new space.
    if options.from_date is not None:
        from_date = 'AND LFC.datecreated >= %s' % sqlvalues(options.from_date)
    else:
        from_date = ''
    if options.until_date is not None:
        until_date = 'AND LFC.datecreated <= %s' % sqlvalues(
            options.until_date)
    else:
        until_date = ''

    con = connect()
    cur = con.cursor()

    # Collect direct references to the LibraryFileAlias table.
    references = set(
        (from_table, from_column)
        # Note that listReferences is recursive, which we don't
        # care about in this simple report. We also ignore the
        # irrelevant constraint type update and delete flags.
        for from_table, from_column, to_table, to_column, update, delete in
        listReferences(cur, 'libraryfilealias', 'id')
        if to_table == 'libraryfilealias')

    totals = set()
    for referring_table, referring_column in sorted(references):
        if referring_table == 'libraryfiledownloadcount':
            continue
        quoted_referring_table = quoteIdentifier(referring_table)
        quoted_referring_column = quoteIdentifier(referring_column)
        cur.execute("""
            SELECT
                COALESCE(SUM(filesize), 0),
                pg_size_pretty(CAST(COALESCE(SUM(filesize), 0) AS bigint)),
                COUNT(*)
            FROM (
                SELECT DISTINCT ON (LFC.id) LFC.id, LFC.filesize
                FROM LibraryFileContent AS LFC, LibraryFileAlias AS LFA, %s
                WHERE LFC.id = LFA.content
                    AND LFA.id = %s.%s
                    AND (
                        LFA.expires IS NULL
                        OR LFA.expires > CURRENT_TIMESTAMP AT TIME ZONE 'UTC')
                    %s %s
                ORDER BY LFC.id
                ) AS Whatever
            """ % (quoted_referring_table, quoted_referring_table,
                   quoted_referring_column, from_date, until_date))
        total_bytes, formatted_size, num_files = cur.fetchone()
        totals.add((total_bytes, referring_table, formatted_size, num_files))

    for total_bytes, tab_name, formatted_size, num_files in sorted(
            totals, reverse=True):
        print '%-10s %s in %d files' % (formatted_size, tab_name, num_files)

    return 0
Example #18
def main():
    parser = LPOptionParser()
    db_options(parser)
    parser.add_option("-f",
                      "--from",
                      dest="from_ts",
                      type=datetime,
                      default=None,
                      metavar="TIMESTAMP",
                      help="Use statistics collected since TIMESTAMP.")
    parser.add_option("-u",
                      "--until",
                      dest="until_ts",
                      type=datetime,
                      default=None,
                      metavar="TIMESTAMP",
                      help="Use statistics collected up until TIMESTAMP.")
    parser.add_option(
        "-i",
        "--interval",
        dest="interval",
        type=str,
        default=None,
        metavar="INTERVAL",
        help=("Use statistics collected over the last INTERVAL period. "
              "INTERVAL is a string parsable by PostgreSQL "
              "such as '5 minutes'."))
    parser.add_option("-n",
                      "--limit",
                      dest="limit",
                      type=int,
                      default=15,
                      metavar="NUM",
                      help="Display the top NUM items in each category.")
    parser.add_option(
        "-b",
        "--bloat",
        dest="bloat",
        type=float,
        default=40,
        metavar="BLOAT",
        help="Display tables and indexes bloated by more than BLOAT%.")
    parser.add_option(
        "--min-bloat",
        dest="min_bloat",
        type=int,
        default=10000000,
        metavar="BLOAT",
        help="Don't report tables bloated less than BLOAT bytes.")
    parser.set_defaults(dbuser="******")
    options, args = parser.parse_args()

    if options.from_ts and options.until_ts and options.interval:
        parser.error(
            "Only two of --from, --until and --interval may be specified.")

    con = connect()
    cur = con.cursor()

    tables = list(get_table_stats(cur, options))
    if len(tables) == 0:
        parser.error("No statistics available in that time range.")
    arbitrary_table = tables[0]
    interval = arbitrary_table.date_end - arbitrary_table.date_start
    per_second = float(interval.days * 24 * 60 * 60 + interval.seconds)
    if per_second == 0:
        parser.error("Only one sample in that time range.")

    user_cpu = get_cpu_stats(cur, options)
    print "== Most Active Users =="
    print
    for cpu, username in sorted(user_cpu, reverse=True)[:options.limit]:
        print "%40s || %10.2f%% CPU" % (username, float(cpu) / 10)

    print
    print "== Most Written Tables =="
    print
    tables_sort = [
        'total_tup_written', 'n_tup_upd', 'n_tup_ins', 'n_tup_del', 'relname'
    ]
    most_written_tables = sorted(tables,
                                 key=attrgetter(*tables_sort),
                                 reverse=True)
    for table in most_written_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (
            table.relname, table.total_tup_written / per_second)

    print
    print "== Most Read Tables =="
    print
    # These match the pg_user_table_stats view. schemaname is the
    # namespace (normally 'public'), relname is the table (relation)
    # name. total_tup_read is the total number of rows read.
    # idx_tup_fetch is the number of rows looked up using an index.
    tables_sort = ['total_tup_read', 'idx_tup_fetch', 'schemaname', 'relname']
    most_read_tables = sorted(tables,
                              key=attrgetter(*tables_sort),
                              reverse=True)
    for table in most_read_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (table.relname,
                                             table.total_tup_read / per_second)

    table_bloat_stats = get_bloat_stats(cur, options, 'r')

    if not table_bloat_stats:
        print
        print "(There is no bloat information available in this time range.)"

    else:
        print
        print "== Most Bloated Tables =="
        print
        for bloated_table in table_bloat_stats[:options.limit]:
            print "%40s || %2d%% || %s of %s" % (
                bloated_table.name, bloated_table.end_bloat_percent,
                bloated_table.bloat_size, bloated_table.table_size)

        index_bloat_stats = get_bloat_stats(cur, options, 'i')

        print
        print "== Most Bloated Indexes =="
        print
        for bloated_index in index_bloat_stats[:options.limit]:
            print "%65s || %2d%% || %s of %s" % (
                bloated_index.sub_name, bloated_index.end_bloat_percent,
                bloated_index.bloat_size, bloated_index.table_size)

        # Order bloat delta report by size of bloat increase.
        # We might want to change this to percentage bloat increase.
        bloating_sort_key = lambda x: x.delta_bloat_len

        table_bloating_stats = sorted(table_bloat_stats,
                                      key=bloating_sort_key,
                                      reverse=True)

        if table_bloating_stats[0].num_samples <= 1:
            print
            print fill(
                dedent("""\
                (There are not enough samples in this time range to display
                bloat change statistics)
                """))
        else:
            print
            print "== Most Bloating Tables =="
            print

            for bloated_table in table_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_table.delta_bloat_percent > 0:
                    print "%40s || +%4.2f%% || +%s" % (
                        bloated_table.name, bloated_table.delta_bloat_percent,
                        bloated_table.delta_bloat_size)

            index_bloating_stats = sorted(index_bloat_stats,
                                          key=bloating_sort_key,
                                          reverse=True)

            print
            print "== Most Bloating Indexes =="
            print
            for bloated_index in index_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_index.delta_bloat_percent > 0:
                    print "%65s || +%4.2f%% || +%s" % (
                        bloated_index.sub_name,
                        bloated_index.delta_bloat_percent,
                        bloated_index.delta_bloat_size)
Example #19
def main():
    parser = LPOptionParser()
    db_options(parser)
    parser.add_option(
        "-f", "--from", dest="from_ts", type=datetime,
        default=None, metavar="TIMESTAMP",
        help="Use statistics collected since TIMESTAMP.")
    parser.add_option(
        "-u", "--until", dest="until_ts", type=datetime,
        default=None, metavar="TIMESTAMP",
        help="Use statistics collected up until TIMESTAMP.")
    parser.add_option(
        "-i", "--interval", dest="interval", type=str,
        default=None, metavar="INTERVAL",
        help=(
            "Use statistics collected over the last INTERVAL period. "
            "INTERVAL is a string parsable by PostgreSQL "
            "such as '5 minutes'."))
    parser.add_option(
        "-n", "--limit", dest="limit", type=int,
        default=15, metavar="NUM",
        help="Display the top NUM items in each category.")
    parser.add_option(
        "-b", "--bloat", dest="bloat", type=float,
        default=40, metavar="BLOAT",
        help="Display tables and indexes bloated by more than BLOAT%.")
    parser.add_option(
        "--min-bloat", dest="min_bloat", type=int,
        default=10000000, metavar="BLOAT",
        help="Don't report tables bloated less than BLOAT bytes.")
    parser.set_defaults(dbuser="******")
    options, args = parser.parse_args()

    if options.from_ts and options.until_ts and options.interval:
        parser.error(
            "Only two of --from, --until and --interval may be specified.")

    con = connect()
    cur = con.cursor()

    tables = list(get_table_stats(cur, options))
    if len(tables) == 0:
        parser.error("No statistics available in that time range.")
    arbitrary_table = tables[0]
    interval = arbitrary_table.date_end - arbitrary_table.date_start
    per_second = float(interval.days * 24 * 60 * 60 + interval.seconds)
    if per_second == 0:
        parser.error("Only one sample in that time range.")

    user_cpu = get_cpu_stats(cur, options)
    print "== Most Active Users =="
    print
    for cpu, username in sorted(user_cpu, reverse=True)[:options.limit]:
        print "%40s || %10.2f%% CPU" % (username, float(cpu) / 10)

    print
    print "== Most Written Tables =="
    print
    tables_sort = [
        'total_tup_written', 'n_tup_upd', 'n_tup_ins', 'n_tup_del', 'relname']
    most_written_tables = sorted(
        tables, key=attrgetter(*tables_sort), reverse=True)
    for table in most_written_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (
            table.relname, table.total_tup_written / per_second)

    print
    print "== Most Read Tables =="
    print
    # These match the pg_user_table_stats view. schemaname is the
    # namespace (normally 'public'), relname is the table (relation)
    # name. total_tup_read is the total number of rows read.
    # idx_tup_fetch is the number of rows looked up using an index.
    tables_sort = ['total_tup_read', 'idx_tup_fetch', 'schemaname', 'relname']
    most_read_tables = sorted(
        tables, key=attrgetter(*tables_sort), reverse=True)
    for table in most_read_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (
            table.relname, table.total_tup_read / per_second)

    table_bloat_stats = get_bloat_stats(cur, options, 'r')

    if not table_bloat_stats:
        print
        print "(There is no bloat information available in this time range.)"

    else:
        print
        print "== Most Bloated Tables =="
        print
        for bloated_table in table_bloat_stats[:options.limit]:
            print "%40s || %2d%% || %s of %s" % (
                bloated_table.name,
                bloated_table.end_bloat_percent,
                bloated_table.bloat_size,
                bloated_table.table_size)

        index_bloat_stats = get_bloat_stats(cur, options, 'i')

        print
        print "== Most Bloated Indexes =="
        print
        for bloated_index in index_bloat_stats[:options.limit]:
            print "%65s || %2d%% || %s of %s" % (
                bloated_index.sub_name,
                bloated_index.end_bloat_percent,
                bloated_index.bloat_size,
                bloated_index.table_size)

        # Order bloat delta report by size of bloat increase.
        # We might want to change this to percentage bloat increase.
        bloating_sort_key = lambda x: x.delta_bloat_len

        table_bloating_stats = sorted(
            table_bloat_stats, key=bloating_sort_key, reverse=True)

        if table_bloating_stats[0].num_samples <= 1:
            print
            print fill(dedent("""\
                (There are not enough samples in this time range to display
                bloat change statistics)
                """))
        else:
            print
            print "== Most Bloating Tables =="
            print

            for bloated_table in table_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_table.delta_bloat_percent > 0:
                    print "%40s || +%4.2f%% || +%s" % (
                        bloated_table.name,
                        bloated_table.delta_bloat_percent,
                        bloated_table.delta_bloat_size)

            index_bloating_stats = sorted(
                index_bloat_stats, key=bloating_sort_key, reverse=True)

            print
            print "== Most Bloating Indexes =="
            print
            for bloated_index in index_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_index.delta_bloat_percent > 0:
                    print "%65s || +%4.2f%% || +%s" % (
                        bloated_index.sub_name,
                        bloated_index.delta_bloat_percent,
                        bloated_index.delta_bloat_size)
Example #20
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options and sending
    # emails - this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor-nagios.py
    parser = OptionParser(
        '%prog [options] (minutes) (host:scriptname) [host:scriptname]')
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        parser.error("Must specify at time in minutes and "
                     "at least one host and script")

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime("%Y-%m-%d %H:%M:%S",
                                datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            # A malformed arg surfaces as a ValueError from the tuple
            # unpack, not a TypeError.
            except ValueError:
                parser.error("%r is not in the format 'host:scriptname'" %
                             (arg, ))
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        parser.error("Must specify time in minutes and "
                     "at least one host and script")

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg, subj = [], []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname, scriptname,
                                       completed_from, completed_to)
            if failure_msg is not None:
                msg.append(failure_msg)
                subj.append("%s:%s" % (hostname, scriptname))
                error_found = True
        if error_found:
            # Construct our email.
            msg = MIMEText('\n'.join(msg))
            msg['Subject'] = "Scripts failed to run: %s" % ", ".join(subj)
            msg['From'] = '*****@*****.**'
            msg['Reply-To'] = '*****@*****.**'
            msg['To'] = '*****@*****.**'

            # Send out the email.
            smtp = smtplib.SMTP()
            smtp.connect()
            smtp.sendmail('*****@*****.**',
                          ['*****@*****.**'], msg.as_string())
            smtp.close()
            return 2
    except:
        log.exception("Unhandled exception")
        return 1