Example 1
def main():
    parser = OptionParser()
    parser.add_option(
        '--pgbouncer', dest='pgbouncer',
        default='host=localhost port=6432 user=pgbouncer',
        metavar='CONN_STR',
        help="libpq connection string to administer pgbouncer")
    parser.add_option(
        '--dbname', dest='dbname', default='launchpad_prod', metavar='DBNAME',
        help='Database name we are updating.')
    parser.add_option(
        '--dbuser', dest='dbuser', default='postgres', metavar='USERNAME',
        help='Connect as USERNAME to databases')

    logger_options(parser, milliseconds=True)
    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")

    # In case we are connected as a non-standard superuser, ensure we
    # don't kill our own connections.
    SYSTEM_USERS.add(options.dbuser)

    log = logger(options)

    controller = DBController(
        log, options.pgbouncer, options.dbname, options.dbuser)

    try:
        # Master connection, not running in autocommit to allow us to
        # rollback changes on failure.
        master_con = psycopg2.connect(str(controller.master))
    except Exception, x:
        log.fatal("Unable to open connection to master db (%s)", str(x))
        return 94
Example 2
def main():
    parser = OptionParser(
            '%prog [options] (username|email) [...]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Must specify username (Person.name)")

    log = logger(options)

    con = None
    try:
        log.debug("Connecting to database")
        con = connect()
        for username in args:
            if not close_account(con, log, username):
                log.debug("Rolling back")
                con.rollback()
                return 1
        log.debug("Committing changes")
        con.commit()
        return 0
    except:
        log.exception("Unhandled exception")
        log.debug("Rolling back")
        if con is not None:
            con.rollback()
        return 1
Example 3
def main():
    parser = OptionParser('%prog [options] (username|email) [...]')
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.error("Must specify username (Person.name)")

    log = logger(options)

    con = None
    try:
        log.debug("Connecting to database")
        con = connect()
        for username in args:
            if not close_account(con, log, username):
                log.debug("Rolling back")
                con.rollback()
                return 1
        log.debug("Committing changes")
        con.commit()
        return 0
    except:
        log.exception("Unhandled exception")
        log.debug("Rolling back")
        if con is not None:
            con.rollback()
        return 1
Example 4
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    cur.execute('show server_version')
    pg_version = LooseVersion(cur.fetchone()[0])

    log.debug("Disabling autovacuum on all tables in the database.")
    if pg_version < LooseVersion('8.4.0'):
        cur.execute("""
            INSERT INTO pg_autovacuum
            SELECT pg_class.oid, FALSE, -1,-1,-1,-1,-1,-1,-1,-1
            FROM pg_class
            WHERE relkind in ('r','t')
                AND pg_class.oid NOT IN (SELECT vacrelid FROM pg_autovacuum)
            """)
    else:
        cur.execute("""
            SELECT nspname,relname
            FROM pg_namespace, pg_class
            WHERE relnamespace = pg_namespace.oid
                AND relkind = 'r' AND nspname <> 'pg_catalog'
            """)
        for namespace, table in list(cur.fetchall()):
            cur.execute("""
                ALTER TABLE ONLY "%s"."%s" SET (
                    autovacuum_enabled=false,
                    toast.autovacuum_enabled=false)
                """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT procpid FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND current_query LIKE 'autovacuum: %'
            """)
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for procpid in autovacuums:
            log.debug("Cancelling %d" % procpid)
            cur.execute("SELECT pg_cancel_backend(%d)" % procpid)
Example 5
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    cur.execute('show server_version')
    pg_version = LooseVersion(cur.fetchone()[0])

    log.debug("Disabling autovacuum on all tables in the database.")
    if pg_version < LooseVersion('8.4.0'):
        cur.execute("""
            INSERT INTO pg_autovacuum
            SELECT pg_class.oid, FALSE, -1,-1,-1,-1,-1,-1,-1,-1
            FROM pg_class
            WHERE relkind in ('r','t')
                AND pg_class.oid NOT IN (SELECT vacrelid FROM pg_autovacuum)
            """)
    else:
        cur.execute("""
            SELECT nspname,relname
            FROM pg_namespace, pg_class
            WHERE relnamespace = pg_namespace.oid
                AND relkind = 'r' AND nspname <> 'pg_catalog'
            """)
        for namespace, table in list(cur.fetchall()):
            cur.execute("""
                ALTER TABLE ONLY "%s"."%s" SET (
                    autovacuum_enabled=false,
                    toast.autovacuum_enabled=false)
                """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT procpid FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND current_query LIKE 'autovacuum: %'
            """)
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for procpid in autovacuums:
            log.debug("Cancelling %d" % procpid)
            cur.execute("SELECT pg_cancel_backend(%d)" % procpid)
Example 6
    def __init__(self):
        parser = OptionParser()
        scripts.logger_options(parser)
        parser.add_option(
            "--access-policy", type="choice", metavar="ACCESS_POLICY",
            choices=["anything", "default"], default="default",
            help="Access policy to use when accessing branches to import.")
        self.options, self.args = parser.parse_args()
        self.logger = scripts.logger(self.options, 'code-import-worker')
Example 7
def main():
    parser = OptionParser()
    logger_options(parser)
    parser.add_option("--skip-connection-check",
                      dest='skip_connection_check',
                      default=False,
                      action="store_true",
                      help="Don't check open connections.")
    parser.add_option(
        "--kill-connections",
        dest='kill_connections',
        default=False,
        action="store_true",
        help="Kill non-system connections instead of reporting an error.")
    parser.add_option('--pgbouncer',
                      dest='pgbouncer',
                      default='host=localhost port=6432 user=pgbouncer',
                      metavar='CONN_STR',
                      help="libpq connection string to administer pgbouncer")
    parser.add_option('--dbname',
                      dest='dbname',
                      default='launchpad_prod',
                      metavar='DBNAME',
                      help='Database name we are updating.')
    parser.add_option('--dbuser',
                      dest='dbuser',
                      default='postgres',
                      metavar='USERNAME',
                      help='Connect as USERNAME to databases')

    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")

    if options.kill_connections and options.skip_connection_check:
        parser.error(
            "--skip-connection-check conflicts with --kill-connections")

    log = logger(options)

    controller = DBController(log, options.pgbouncer, options.dbname,
                              options.dbuser)

    if options.kill_connections:
        preflight_check = KillConnectionsPreflight(log, controller)
    elif options.skip_connection_check:
        preflight_check = NoConnectionCheckPreflight(log, controller)
    else:
        preflight_check = DatabasePreflight(log, controller)

    if preflight_check.check_all():
        log.info('Preflight check succeeded. Good to go.')
        return 0
    else:
        log.error('Preflight check failed.')
        return 1
Example 8
    def __init__(self):
        parser = OptionParser()
        scripts.logger_options(parser)
        parser.add_option(
            "--access-policy",
            type="choice",
            metavar="ACCESS_POLICY",
            choices=["anything", "default"],
            default="default",
            help="Access policy to use when accessing branches to import.")
        self.options, self.args = parser.parse_args()
        self.logger = scripts.logger(self.options, 'code-import-worker')
Example 9
    def __init__(self, name=None, dbuser=None, test_args=None, logger=None):
        """Construct new LaunchpadScript.

        Name is a short name for this script; it will be used to
        assemble a lock filename and to identify the logger object.

        Use dbuser to specify the user to connect to the database; if
        not supplied a default will be used.

        Specify test_args when you want to override sys.argv.  This is
        useful in test scripts.

        :param logger: Use this logger, instead of initializing global
            logging.
        """
        if name is None:
            self._name = self.__class__.__name__.lower()
        else:
            self._name = name

        self._dbuser = dbuser
        self.logger = logger

        # The construction of the option parser is a bit roundabout, but
        # at least it's isolated here. First we build the parser, then
        # we add options that our logger object uses, then call our
        # option-parsing hook, and finally pull out and store the
        # supplied options and args.
        if self.description is None:
            description = self.__doc__
        else:
            description = self.description
        self.parser = OptionParser(usage=self.usage, description=description)

        if logger is None:
            scripts.logger_options(self.parser, default=self.loglevel)
            self.parser.add_option(
                '--profile',
                dest='profile',
                metavar='FILE',
                help=("Run the script under the profiler and save the "
                      "profiling stats in FILE."))
        else:
            scripts.dummy_logger_options(self.parser)

        self.add_my_options()
        self.options, self.args = self.parser.parse_args(args=test_args)

        # Enable subclasses to easily override these __init__()
        # arguments using command-line arguments.
        self.handle_options()
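
The comment block above describes the sequence: build the parser, register the logging (and --profile) options, call the option-parsing hook, parse, then let handle_options() post-process the result. As a hypothetical illustration (the class name, option and validation below are invented, not taken from the codebase), a subclass might use the two hooks like this:

class UpdateFooScript(LaunchpadScript):
    """Hypothetical subclass showing the add_my_options/handle_options hooks."""

    usage = "%prog [options]"
    description = "Example script."

    def add_my_options(self):
        # Called before parse_args(), so script-specific options land on
        # the same parser as the logging options.
        self.parser.add_option(
            '--batch-size', dest='batch_size', type='int', default=100,
            metavar='N', help='Process N rows per transaction.')

    def handle_options(self):
        # Called after parse_args(); a convenient place for validation.
        if self.options.batch_size <= 0:
            self.parser.error('--batch-size must be positive')
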
Example 10
    def __init__(self, name=None, dbuser=None, test_args=None, logger=None):
        """Construct new LaunchpadScript.

        Name is a short name for this script; it will be used to
        assemble a lock filename and to identify the logger object.

        Use dbuser to specify the user to connect to the database; if
        not supplied a default will be used.

        Specify test_args when you want to override sys.argv.  This is
        useful in test scripts.

        :param logger: Use this logger, instead of initializing global
            logging.
        """
        if name is None:
            self._name = self.__class__.__name__.lower()
        else:
            self._name = name

        self._dbuser = dbuser
        self.logger = logger

        # The construction of the option parser is a bit roundabout, but
        # at least it's isolated here. First we build the parser, then
        # we add options that our logger object uses, then call our
        # option-parsing hook, and finally pull out and store the
        # supplied options and args.
        if self.description is None:
            description = self.__doc__
        else:
            description = self.description
        self.parser = OptionParser(usage=self.usage,
                                   description=description)

        if logger is None:
            scripts.logger_options(self.parser, default=self.loglevel)
            self.parser.add_option(
                '--profile', dest='profile', metavar='FILE', help=(
                        "Run the script under the profiler and save the "
                        "profiling stats in FILE."))
        else:
            scripts.dummy_logger_options(self.parser)

        self.add_my_options()
        self.options, self.args = self.parser.parse_args(args=test_args)

        # Enable subclasses to easily override these __init__()
        # arguments using command-line arguments.
        self.handle_options()
Example 11
def main():
    parser = OptionParser()
    logger_options(parser)
    parser.add_option(
        "--skip-connection-check", dest='skip_connection_check',
        default=False, action="store_true",
        help="Don't check open connections.")
    parser.add_option(
        "--kill-connections", dest='kill_connections',
        default=False, action="store_true",
        help="Kill non-system connections instead of reporting an error.")
    parser.add_option(
        '--pgbouncer', dest='pgbouncer',
        default='host=localhost port=6432 user=pgbouncer',
        metavar='CONN_STR',
        help="libpq connection string to administer pgbouncer")
    parser.add_option(
        '--dbname', dest='dbname', default='launchpad_prod', metavar='DBNAME',
        help='Database name we are updating.')
    parser.add_option(
        '--dbuser', dest='dbuser', default='postgres', metavar='USERNAME',
        help='Connect as USERNAME to databases')

    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")

    if options.kill_connections and options.skip_connection_check:
        parser.error(
            "--skip-connection-check conflicts with --kill-connections")

    log = logger(options)

    controller = DBController(
        log, options.pgbouncer, options.dbname, options.dbuser)

    if options.kill_connections:
        preflight_check = KillConnectionsPreflight(log, controller)
    elif options.skip_connection_check:
        preflight_check = NoConnectionCheckPreflight(log, controller)
    else:
        preflight_check = DatabasePreflight(log, controller)

    if preflight_check.check_all():
        log.info('Preflight check succeeded. Good to go.')
        return 0
    else:
        log.error('Preflight check failed.')
        return 1
Example 12
    def filterOutLoggingOptions(self, arglist):
        """Remove the standard logging options from a list of arguments."""

        # Calling parser.parse_args as we do below is dangerous,
        # as if a callback invokes parser.error the test suite
        # terminates. This hack removes the dangerous argument manually.
        arglist = [arg for arg in arglist if not arg.startswith('--log-file=')]
        while '--log-file' in arglist:
            index = arglist.index('--log-file')
            del arglist[index]  # Delete the argument
            del arglist[index]  # And its parameter

        parser = OptionParser()
        scripts.logger_options(parser)
        options, args = parser.parse_args(arglist)
        return args
Example 13
def main():
    parser = OptionParser()
    logger_options(parser)
    db_options(parser)

    options, args = parser.parse_args()

    if len(args) > 0:
        parser.error("Too many arguments.")

    log = logger(options)

    log.debug("Connecting")
    con = connect()
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()

    log.debug("Disabling autovacuum on all tables in the database.")
    cur.execute("""
        SELECT nspname,relname
        FROM pg_namespace, pg_class
        WHERE relnamespace = pg_namespace.oid
            AND relkind = 'r' AND nspname <> 'pg_catalog'
        """)
    for namespace, table in list(cur.fetchall()):
        cur.execute("""
            ALTER TABLE ONLY "%s"."%s" SET (
                autovacuum_enabled=false,
                toast.autovacuum_enabled=false)
            """ % (namespace, table))

    log.debug("Killing existing autovacuum processes")
    num_autovacuums = -1
    while num_autovacuums != 0:
        # Sleep long enough for pg_stat_activity to be updated.
        time.sleep(0.6)
        cur.execute("""
            SELECT %(pid)s FROM pg_stat_activity
            WHERE
                datname=current_database()
                AND %(query)s LIKE 'autovacuum: %%'
            """ % activity_cols(cur))
        autovacuums = [row[0] for row in cur.fetchall()]
        num_autovacuums = len(autovacuums)
        for pid in autovacuums:
            log.debug("Cancelling %d" % pid)
            cur.execute("SELECT pg_cancel_backend(%d)" % pid)
Example 14
    def setup(self, cfg):
        super(LoggerheadLogger, self).setup(cfg)
        formatter = LaunchpadFormatter(datefmt=None)
        for handler in self.error_log.handlers:
            handler.setFormatter(formatter)

        # Force Launchpad's logging machinery to set up the root logger the
        # way we want it.
        parser = OptionParser()
        logger_options(parser)
        log_options, _ = parser.parse_args(
            ['-q', '--ms', '--log-file=DEBUG:%s' % cfg.errorlog])
        logger(log_options)

        # Make the OpenID library use proper logging rather than writing to
        # stderr.
        oidutil.log = lambda message, level=0: log.debug(message)
Example 15
    def filterOutLoggingOptions(self, arglist):
        """Remove the standard logging options from a list of arguments."""

        # Calling parser.parse_args as we do below is dangerous,
        # as if a callback invokes parser.error the test suite
        # terminates. This hack removes the dangerous argument manually.
        arglist = [
            arg for arg in arglist if not arg.startswith('--log-file=')]
        while '--log-file' in arglist:
            index = arglist.index('--log-file')
            del arglist[index] # Delete the argument
            del arglist[index] # And its parameter

        parser = OptionParser()
        scripts.logger_options(parser)
        options, args = parser.parse_args(arglist)
        return args
Example 16
def main():
    parser = OptionParser()
    parser.add_option(
        "-0",
        "--null",
        dest="null",
        action="store_true",
        default=False,
        help="Set all full text index column values to NULL.",
    )
    parser.add_option(
        "-l",
        "--live-rebuild",
        dest="liverebuild",
        action="store_true",
        default=False,
        help="Rebuild all the indexes against a live database.",
    )
    db_options(parser)
    logger_options(parser)

    global options, args
    (options, args) = parser.parse_args()

    if options.null + options.liverebuild > 1:
        parser.error("Incompatible options")

    global log
    log = logger(options)

    con = connect()

    if options.liverebuild:
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        liverebuild(con)
    elif options.null:
        con.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        nullify(con)
    else:
        parser.error("Required argument not specified")

    con.commit()
    return 0
Example 17
def main():
    parser = OptionParser()
    parser.add_option('--pgbouncer',
                      dest='pgbouncer',
                      default='host=localhost port=6432 user=pgbouncer',
                      metavar='CONN_STR',
                      help="libpq connection string to administer pgbouncer")
    parser.add_option('--dbname',
                      dest='dbname',
                      default='launchpad_prod',
                      metavar='DBNAME',
                      help='Database name we are updating.')
    parser.add_option('--dbuser',
                      dest='dbuser',
                      default='postgres',
                      metavar='USERNAME',
                      help='Connect as USERNAME to databases')

    logger_options(parser, milliseconds=True)
    (options, args) = parser.parse_args()
    if args:
        parser.error("Too many arguments")

    # In case we are connected as a non-standard superuser, ensure we
    # don't kill our own connections.
    SYSTEM_USERS.add(options.dbuser)

    log = logger(options)

    controller = DBController(log, options.pgbouncer, options.dbname,
                              options.dbuser)

    try:
        # Master connection, not running in autocommit to allow us to
        # rollback changes on failure.
        master_con = psycopg2.connect(str(controller.master))
    except Exception, x:
        log.fatal("Unable to open connection to master db (%s)", str(x))
        return 94
Example 18
def main():
    parser = OptionParser()
    parser.add_option(
            "-0", "--null", dest="null",
            action="store_true", default=False,
            help="Set all full text index column values to NULL.",
            )
    parser.add_option(
            "-l", "--live-rebuild", dest="liverebuild",
            action="store_true", default=False,
            help="Rebuild all the indexes against a live database.",
            )
    db_options(parser)
    logger_options(parser)

    global options, args
    (options, args) = parser.parse_args()

    if options.null + options.liverebuild > 1:
        parser.error("Incompatible options")

    global log
    log = logger(options)

    con = connect()

    if options.liverebuild:
        con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        liverebuild(con)
    elif options.null:
        con.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        nullify(con)
    else:
        parser.error("Required argument not specified")

    con.commit()
    return 0
Example 19
def shutdown_with_errors(failure):
    tplog.err(failure)
    failure.printTraceback()
    reactor.stop()


def run_mirror(log, manager):
    # It's conceivable that mirror() might raise an exception before it
    # returns a Deferred -- maybeDeferred means we don't have to worry.
    deferred = defer.maybeDeferred(mirror, log, manager)
    deferred.addCallback(clean_shutdown)
    deferred.addErrback(shutdown_with_errors)


if __name__ == '__main__':
    parser = OptionParser()
    logger_options(parser)
    parser.add_option('--branch-type', action='append', default=[])
    (options, arguments) = parser.parse_args()
    if arguments:
        parser.error("Unhandled arguments %s" % repr(arguments))
    log = set_up_logging_for_script(options, 'supermirror_puller',
                                    options.log_file)
    manager = scheduler.JobScheduler(
        LoggingProxy(config.codehosting.codehosting_endpoint, log), log,
        options.branch_type)

    reactor.callWhenRunning(run_mirror, log, manager)
    reactor.run()
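
The comment in run_mirror() explains the reason for defer.maybeDeferred: if mirror() raises before it can return a Deferred, the exception is still routed into the errback chain. A small self-contained illustration of that behaviour (the function name below is invented):

from twisted.internet import defer

def broken_mirror():
    # Raises synchronously, before any Deferred exists.
    raise RuntimeError("failed early")

# maybeDeferred converts the synchronous exception into a failed Deferred,
# so an errback such as shutdown_with_errors above still gets called.
d = defer.maybeDeferred(broken_mirror)
d.addErrback(lambda failure: failure.trap(RuntimeError))
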
Example 20
def main():
    parser = OptionParser('Usage: %prog [options] [SERVICE ...]')
    parser.add_option("-w",
                      "--wait",
                      metavar="SECS",
                      default=20,
                      type="int",
                      help="Wait up to SECS seconds for processes "
                      "to die before retrying with SIGKILL")
    logger_options(parser, logging.INFO)
    (options, args) = parser.parse_args()
    log = logger(options)
    if len(args) < 1:
        parser.error('No service name provided')

    pids = []  # List of pids we tried to kill.
    services = args[:]

    # Mailman is special, but only stop it if it was launched.
    if 'mailman' in services:
        if config.mailman.launch:
            stop_mailman()
        services.remove('mailman')

    for service in services:
        log.debug("PID file is %s", pidfile_path(service))
        try:
            pid = get_pid(service)
        except ValueError as error:
            log.error(error)
            continue
        if pid is not None:
            log.info("Killing %s (%d)", service, pid)
            try:
                os.kill(pid, SIGTERM)
                pids.append((service, pid))
            except OSError as x:
                log.error("Unable to SIGTERM %s (%d) - %s", service, pid,
                          x.strerror)
        else:
            log.debug("No PID file for %s", service)

    wait_for_pids(pids, options.wait, log)

    # Anything that didn't die, kill harder with SIGKILL.
    for service, pid in pids:
        if not process_exists(pid):
            continue
        log.warn("SIGTERM failed to kill %s (%d). Trying SIGKILL", service,
                 pid)
        try:
            os.kill(pid, SIGKILL)
        except OSError as x:
            log.error("Unable to SIGKILL %s (%d) - %s", service, pid,
                      x.strerror)

    wait_for_pids(pids, options.wait, log)

    # Report anything still left running after a SIGKILL.
    for service, pid in pids:
        if process_exists(pid):
            log.error("SIGKILL didn't terminate %s (%d)", service, pid)

    # Remove any pidfiles that didn't get cleaned up if there is no
    # corresponding process (from an unkillable process, or maybe some
    # other job has relaunched it while we were not looking).
    for service in services:
        pid = get_pid(service)
        if pid is not None and not process_exists(pid):
            try:
                remove_pidfile(service)
            except OSError:
                pass
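
This script relies on two small helpers that are not shown in these examples, process_exists() and wait_for_pids(). A hedged sketch of how they could be implemented, consistent with the way they are called above:

import errno
import os
import time

def process_exists(pid):
    # Signal 0 performs the existence/permission check without actually
    # delivering a signal to the process.
    try:
        os.kill(pid, 0)
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False
        raise
    return True

def wait_for_pids(pids, wait_secs, log):
    # Poll until every (service, pid) pair has exited or the timeout expires.
    deadline = time.time() + wait_secs
    while time.time() < deadline:
        if not any(process_exists(pid) for service, pid in pids):
            return
        time.sleep(0.1)
    for service, pid in pids:
        if process_exists(pid):
            log.debug("%s (%d) is still running", service, pid)
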
Example 21
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options and sending
    # emails - this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor-nagios.py
    parser = OptionParser(
            '%prog [options] (minutes) (host:scriptname) [host:scriptname]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        parser.error("Must specify at time in minutes and "
            "at least one host and script")

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime(
            "%Y-%m-%d %H:%M:%S", datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            except TypeError:
                parser.error(
                    "%r is not in the format 'host:scriptname'" % (arg,))
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        parser.error("Must specify time in minutes and "
            "at least one host and script")

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg, subj = [], []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname,
                scriptname, completed_from, completed_to)
            if failure_msg is not None:
                msg.append(failure_msg)
                subj.append("%s:%s" % (hostname, scriptname))
                error_found = 2
        if error_found:
            # Construct our email.
            msg = MIMEText('\n'.join(msg))
            msg['Subject'] = "Scripts failed to run: %s" % ", ".join(subj)
            msg['From'] = '*****@*****.**'
            msg['Reply-To'] = '*****@*****.**'
            msg['To'] = '*****@*****.**'

            # Send out the email.
            smtp = smtplib.SMTP()
            smtp.connect()
            smtp.sendmail(
                '*****@*****.**',
                ['*****@*****.**'], msg.as_string())
            smtp.close()
            return 2
    except:
        log.exception("Unhandled exception")
        return 1
Example 22
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options and sending
    # emails - this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor-nagios.py
    parser = OptionParser(
        '%prog [options] (minutes) (host:scriptname) [host:scriptname]')
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        parser.error("Must specify at time in minutes and "
                     "at least one host and script")

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime("%Y-%m-%d %H:%M:%S",
                                datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            except TypeError:
                parser.error("%r is not in the format 'host:scriptname'" %
                             (arg, ))
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        parser.error("Must specify time in minutes and "
                     "at least one host and script")

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg, subj = [], []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname, scriptname,
                                       completed_from, completed_to)
            if failure_msg is not None:
                msg.append(failure_msg)
                subj.append("%s:%s" % (hostname, scriptname))
                error_found = 2
        if error_found:
            # Construct our email.
            msg = MIMEText('\n'.join(msg))
            msg['Subject'] = "Scripts failed to run: %s" % ", ".join(subj)
            msg['From'] = '*****@*****.**'
            msg['Reply-To'] = '*****@*****.**'
            msg['To'] = '*****@*****.**'

            # Send out the email.
            smtp = smtplib.SMTP()
            smtp.connect()
            smtp.sendmail('*****@*****.**',
                          ['*****@*****.**'], msg.as_string())
            smtp.close()
            return 2
    except:
        log.exception("Unhandled exception")
        return 1
Example 23
def main():
    # XXX: Tom Haddon 2007-07-12
    # There's a lot of untested stuff here: parsing options -
    # this should be moved into a testable location.
    # Also duplicated code in scripts/script-monitor.py
    parser = OptionParser(
            '%prog [options] (minutes) (host:scriptname) [host:scriptname]'
            )
    db_options(parser)
    logger_options(parser)

    (options, args) = parser.parse_args()

    if len(args) < 2:
        print "Must specify time in minutes and " \
            "at least one host and script"
        return 3

    # First argument is the number of minutes into the past
    # we want to look for the scripts on the specified hosts
    try:
        minutes_ago, args = int(args[0]), args[1:]
        start_date = datetime.now() - timedelta(minutes=minutes_ago)

        completed_from = strftime("%Y-%m-%d %H:%M:%S", start_date.timetuple())
        completed_to = strftime(
            "%Y-%m-%d %H:%M:%S", datetime.now().timetuple())

        hosts_scripts = []
        for arg in args:
            try:
                hostname, scriptname = arg.split(':')
            except TypeError:
                print "%r is not in the format 'host:scriptname'" % arg
                return 3
            hosts_scripts.append((hostname, scriptname))
    except ValueError:
        print "Must specify time in minutes and " \
            "at least one host and script"
        return 3

    log = logger(options)

    try:
        log.debug("Connecting to database")
        con = connect()
        error_found = False
        msg = []
        for hostname, scriptname in hosts_scripts:
            failure_msg = check_script(con, log, hostname,
                scriptname, completed_from, completed_to)
            if failure_msg is not None:
                msg.append("%s:%s" % (hostname, scriptname))
                error_found = True
        if error_found:
            # Construct our return message
            print "Scripts failed to run: %s" % ', '.join(msg)
            return 2
        else:
            # Construct our return message
            print "All scripts ran as expected"
            return 0
    except Exception as e:
        # Squeeze the exception type and stringification of the exception
        # value on to one line.
        print "Unhandled exception: %s %r" % (e.__class__.__name__, str(e))
        return 3
Example 24
    def __init__(self, test_args=None):
        """Set up basic facilities, similar to `LaunchpadScript`."""
        self.parser = OptionParser()
        scripts.logger_options(self.parser, default=logging.INFO)
        self.options, self.args = self.parser.parse_args(args=test_args)
        self.logger = scripts.logger(self.options, self.name)
Example 25
def shutdown_with_errors(failure):
    tplog.err(failure)
    failure.printTraceback()
    reactor.stop()


def run_mirror(log, manager):
    # It's conceivable that mirror() might raise an exception before it
    # returns a Deferred -- maybeDeferred means we don't have to worry.
    deferred = defer.maybeDeferred(mirror, log, manager)
    deferred.addCallback(clean_shutdown)
    deferred.addErrback(shutdown_with_errors)


if __name__ == '__main__':
    parser = OptionParser()
    logger_options(parser)
    parser.add_option('--branch-type', action='append', default=[])
    (options, arguments) = parser.parse_args()
    if arguments:
        parser.error("Unhandled arguments %s" % repr(arguments))
    log = set_up_logging_for_script(
        options, 'supermirror_puller', options.log_file)
    manager = scheduler.JobScheduler(
        LoggingProxy(config.codehosting.codehosting_endpoint, log), log,
        options.branch_type)

    reactor.callWhenRunning(run_mirror, log, manager)
    reactor.run()
Example 26
def main():
    parser = OptionParser('Usage: %prog [options] [SERVICE ...]')
    parser.add_option("-w", "--wait", metavar="SECS",
        default=20, type="int",
        help="Wait up to SECS seconds for processes "
            "to die before retrying with SIGKILL")
    logger_options(parser, logging.INFO)
    (options, args) = parser.parse_args()
    log = logger(options)
    if len(args) < 1:
        parser.error('No service name provided')

    pids = [] # List of pids we tried to kill.
    services = args[:]

    # Mailman is special, but only stop it if it was launched.
    if 'mailman' in services:
        if config.mailman.launch:
            stop_mailman()
        services.remove('mailman')

    for service in services:
        log.debug("PID file is %s", pidfile_path(service))
        try:
            pid = get_pid(service)
        except ValueError as error:
            log.error(error)
            continue
        if pid is not None:
            log.info("Killing %s (%d)", service, pid)
            try:
                os.kill(pid, SIGTERM)
                pids.append((service, pid))
            except OSError as x:
                log.error(
                    "Unable to SIGTERM %s (%d) - %s",
                    service, pid, x.strerror)
        else:
            log.debug("No PID file for %s", service)

    wait_for_pids(pids, options.wait, log)

    # Anything that didn't die, kill harder with SIGKILL.
    for service, pid in pids:
        if not process_exists(pid):
            continue
        log.warn(
            "SIGTERM failed to kill %s (%d). Trying SIGKILL", service, pid)
        try:
            os.kill(pid, SIGKILL)
        except OSError as x:
            log.error(
                "Unable to SIGKILL %s (%d) - %s", service, pid, x.strerror)

    wait_for_pids(pids, options.wait, log)

    # Report anything still left running after a SIGKILL.
    for service, pid in pids:
        if process_exists(pid):
            log.error("SIGKILL didn't terminate %s (%d)", service, pid)

    # Remove any pidfiles that didn't get cleaned up if there is no
    # corresponding process (from an unkillable process, or maybe some
    # other job has relaunched it while we were not looking).
    for service in services:
        pid = get_pid(service)
        if pid is not None and not process_exists(pid):
            try:
                remove_pidfile(service)
            except OSError:
                pass
Example 27
    def __init__(self, test_args=None):
        """Set up basic facilities, similar to `LaunchpadScript`."""
        self.parser = OptionParser()
        scripts.logger_options(self.parser, default=logging.INFO)
        self.options, self.args = self.parser.parse_args(args=test_args)
        self.logger = scripts.logger(self.options, self.name)