Пример #1
0
    def setUp(self):
        """Prepare fixture attributes, a mock god, and stubbed helpers."""
        # Fixture values consumed by the server_job under test.
        self.control = "control"
        self.args = ""
        self.resultdir = "results"
        self.uncollected = "results/uncollected_logs"
        self.label = "default"
        self.user = "******"
        self.machines = ('abcd1', 'abcd2', 'abcd3')

        # make god
        self.god = mock.mock_god()

        # Stub the commonly-touched functions in one sweep so tests never
        # hit the real filesystem or pickle machinery; order matches the
        # original call sequence.
        for holder, attr in [(os.path, 'exists'),
                             (os, 'mkdir'),
                             (os, 'access'),
                             (os, 'makedirs'),
                             (os.path, 'isdir'),
                             (os, 'chmod'),
                             (os, 'chdir'),
                             (os, 'remove'),
                             (pickle, 'dump'),
                             (server_job, 'get_site_job_data'),
                             (server_job, 'open'),
                             (utils, 'write_keyval')]:
            self.god.stub_function(holder, attr)

        logging_manager.configure_logging(logging_config.TestingConfig())

        self.construct_server_job()
Пример #2
0
def main():
    """Runs the program."""
    options = parse_options()
    logging_manager.configure_logging(
        test_importer.TestImporterLoggingConfig(), verbose=options.verbose)
    succeeded = False

    # One combined 'with' replaces the original nested pair; semantics are
    # identical (ts-mon state entered first, timer second).
    with ts_mon_config.SetupTsMonGlobalState(service_name='mysql_db_backup',
                                             indirect=True), \
         metrics.SecondsTimer('chromeos/autotest/afe_db/backup/durations',
                              fields={'type': options.type}):
        try:
            logging.debug('Start db backup: %s', options.type)
            archiver = MySqlArchiver(options.type, options.keep,
                                     options.gs_bucket)
            dump_file = archiver.dump()
            logging.debug('Uploading backup: %s', options.type)
            archiver.upload_to_google_storage(dump_file)
            archiver.cleanup()
            logging.debug('Db backup completed: %s', options.type)
            succeeded = True
        finally:
            # Always report completion status, even when the backup failed.
            metrics.Counter('chromeos/autotest/db/db_backup/completed'
                            ).increment(fields={
                                'success': succeeded,
                                'type': options.type
                            })
Пример #3
0
def main():
    """Entry point for the autotest scheduler watcher.

    Parses options, refuses to run as root or when another watcher is
    already alive, records a pid file, and optionally daemonizes via a
    double fork when --background is given.
    """
    parser = OptionParser()
    parser.add_option("-r",
                      action="store_true",
                      dest="recover",
                      help=("run recovery mode (implicit after any crash)"))
    parser.add_option("--background",
                      dest="background",
                      action="store_true",
                      default=False,
                      help=("runs the scheduler monitor on "
                            "background"))
    (options, args) = parser.parse_args()

    # NOTE(review): 'recover' is never read below in this function; kept
    # for parity, normalized from the non-idiomatic '== True' comparison.
    recover = bool(options.recover)

    if len(args) != 0:
        parser.print_help()
        sys.exit(1)

    # Running the watcher as root is unsafe; bail out early.
    if os.getuid() == 0:
        logging.critical("Running as root, aborting!")
        sys.exit(1)

    if utils.program_is_alive(monitor_db.WATCHER_PID_FILE_PREFIX):
        logging.critical("autotest-monitor-watcher already running, aborting!")
        sys.exit(1)

    utils.write_pid(monitor_db.WATCHER_PID_FILE_PREFIX)

    if options.background:
        logging_manager.configure_logging(
            watcher_logging_config.WatcherLoggingConfig(use_console=False))

        # Double fork - see http://code.activestate.com/recipes/66012/
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # exit from first parent
        except OSError as e:  # 'as' form works on Python 2.6+ and 3.x
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir("/")
        os.umask(0)
        os.setsid()

        # Second fork
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # exit from second parent
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
def main():
    """Parse calls from stdin and CLI args, run the calls, emit results."""
    logging_manager.configure_logging(
        drone_logging_config.DroneLoggingConfig())
    calls = parse_input()
    # _parse_args is still invoked for its argument-validation side effect
    # even though the parsed result is not used below.
    args = _parse_args(sys.argv[1:])

    utility = DroneUtility()
    results = utility.execute_calls(calls)
    return_data(results)
Пример #5
0
def perform_bootstrap_into_autotest_root(arguments, autotest_path, argv):
    """
    Perfoms a bootstrap to run test_that from the |autotest_path|.

    This function is to be called from test_that's main() script, when
    test_that is executed from the source tree location. It runs
    autotest_quickmerge to update the sysroot unless arguments.no_quickmerge
    is set. It then executes and waits on the version of test_that.py
    in |autotest_path|.

    @param arguments: A parsed arguments object, as returned from
                      test_that.parse_arguments(...).
    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)

    @returns: The return code of the test_that script that was executed in
              |autotest_path|.
    """
    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            use_console=True,
            verbose=arguments.debug)
    if arguments.no_quickmerge:
        logging.info('Skipping quickmerge step.')
    else:
        logging.info('Running autotest_quickmerge step.')
        command = [_QUICKMERGE_SCRIPTNAME, '--board='+arguments.board]
        # Stream quickmerge output line by line into our own log.
        s = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        for message in iter(s.stdout.readline, b''):
            logging.info('quickmerge| %s', message.strip())
        return_code = s.wait()
        if return_code:
            raise test_runner_utils.TestThatRunError(
                    'autotest_quickmerge failed with error code %s.' %
                    return_code)

    logging.info('Re-running test_that script in %s copy of autotest.',
                 autotest_path)
    script_command = os.path.join(autotest_path, 'site_utils',
                                  'test_that.py')
    if not os.path.exists(script_command):
        raise test_runner_utils.TestThatRunError(
            'Unable to bootstrap to autotest root, %s not found.' %
            script_command)
    proc = None
    # Forward SIGINT/SIGTERM to the child so Ctrl-C reaches it. If a
    # signal arrives before Popen below completes, proc is still None and
    # the signal is simply dropped.
    def resend_sig(signum, stack_frame):
        #pylint: disable-msg=C0111
        if proc:
            proc.send_signal(signum)
    signal.signal(signal.SIGINT, resend_sig)
    signal.signal(signal.SIGTERM, resend_sig)

    proc = subprocess.Popen([script_command] + argv)

    return proc.wait()
Пример #6
0
    def setUp(self):
        """Silence output, build a mock god, and stub client-job helpers."""
        # make god
        self.god = mock.mock_god(ut=self)

        # need to set some environ variables
        self.autodir = "autodir"
        os.environ['AUTODIR'] = self.autodir

        # set up some variables
        self.control = "control"
        self.jobtag = "jobtag"

        # get rid of stdout and logging
        sys.stdout = StringIO.StringIO()
        logging_manager.configure_logging(logging_config.TestingConfig())
        logging.disable(logging.CRITICAL)

        def _noop_configure_logging(*args, **kwargs):
            pass

        self.god.stub_with(logging_manager, 'configure_logging',
                           _noop_configure_logging)
        real_get_logging_manager = logging_manager.get_logging_manager

        def _get_logging_manager_no_fds(manage_stdout_and_stderr=False,
                                        redirect_fds=False):
            # Never redirect fds inside unit tests.
            return real_get_logging_manager(manage_stdout_and_stderr, False)

        self.god.stub_with(logging_manager, 'get_logging_manager',
                           _get_logging_manager_no_fds)

        # Stub out filesystem/process helpers in one sweep (same order as
        # the individual calls they replace).
        for holder, attr in [(os.path, 'exists'),
                             (os.path, 'isdir'),
                             (os, 'makedirs'),
                             (os, 'mkdir'),
                             (os, 'remove'),
                             (shutil, 'rmtree'),
                             (shutil, 'copyfile'),
                             (job, 'open'),
                             (utils, 'system'),
                             (utils, 'drop_caches'),
                             (harness, 'select'),
                             (sysinfo, 'log_per_reboot_data')]:
            self.god.stub_function(holder, attr)

        for holder, attr in [(config, 'config'),
                             (job.local_host, 'LocalHost'),
                             (boottool, 'boottool'),
                             (sysinfo, 'sysinfo')]:
            self.god.stub_class(holder, attr)

        for method in ['_cleanup_debugdir_files', '_cleanup_results_dir']:
            self.god.stub_class_method(job.base_client_job, method)

        self.god.stub_with(job.base_job.job_directory, '_ensure_valid',
                           lambda *_: None)
Пример #7
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should peform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        @param control: Path to the control file driving this job.
        @param options: Parsed options object; .cont, .verbose and
                .harness are read here.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # harness is chosen by following rules:
        # 1. explicitly specified via command line
        # 2. harness stored in state file (if continuing job '-c')
        # 3. default harness
        selected_harness = None
        if options.harness:
            selected_harness = options.harness
            self._state.set('client', 'harness', selected_harness)
        else:
            stored_harness = self._state.get('client', 'harness', None)
            if stored_harness:
                selected_harness = stored_harness

        self.harness = harness.select(selected_harness, self)

        # set up the status logger
        def client_job_record_hook(entry):
            # Runs per record() entry, after self._logger is assigned
            # below, so reading self._logger here is safe at call time.
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
def setup_logging(log_dir, log_name, timestamped_logfile_prefix='scheduler'):
    """Setup logging to a given log directory and log file.

    @param log_dir: The directory to log into.
    @param log_name: Name of the log file.
    @param timestamped_logfile_prefix: The prefix to apply to the logfile.
    """
    # Collect the keyword configuration first, then hand it all to the
    # logging manager in a single call.
    config_kwargs = {
        'log_dir': log_dir,
        'logfile_name': log_name,
        'timestamped_logfile_prefix': timestamped_logfile_prefix,
    }
    logging_manager.configure_logging(SchedulerLoggingConfig(),
                                      **config_kwargs)
def main():
    """Runs the program."""
    options = parse_options()
    logging_manager.configure_logging(
        test_importer.TestImporterLoggingConfig(), verbose=options.verbose)

    # Dump the database, upload the dump, then prune old backups.
    logging.debug('Start db backup: %s', options.type)
    archiver = MySqlArchiver(options.type, options.keep, options.gs_bucket)
    dump_path = archiver.dump()
    logging.debug('Uploading backup: %s', options.type)
    archiver.upload_to_google_storage(dump_path)
    archiver.cleanup()
    logging.debug('Db backup completed: %s', options.type)
Пример #10
0
def main():
    """Entry point for the autotest scheduler watcher.

    Parses options, refuses to run as root or when another watcher is
    already alive, records a pid file, and optionally daemonizes via a
    double fork when --background is given.
    """
    parser = OptionParser()
    parser.add_option("-r", action="store_true", dest="recover",
                      help=("run recovery mode (implicit after any crash)"))
    parser.add_option("--background", dest="background", action="store_true",
                      default=False, help=("runs the scheduler monitor on "
                                           "background"))
    (options, args) = parser.parse_args()

    # NOTE(review): 'recover' is never read below in this function; kept
    # for parity, normalized from the non-idiomatic '== True' comparison.
    recover = bool(options.recover)

    if len(args) != 0:
        parser.print_help()
        sys.exit(1)

    # Running the watcher as root is unsafe; bail out early.
    if os.getuid() == 0:
        logging.critical("Running as root, aborting!")
        sys.exit(1)

    if utils.program_is_alive(monitor_db.WATCHER_PID_FILE_PREFIX):
        logging.critical("autotest-monitor-watcher already running, aborting!")
        sys.exit(1)

    utils.write_pid(monitor_db.WATCHER_PID_FILE_PREFIX)

    if options.background:
        logging_manager.configure_logging(
             watcher_logging_config.WatcherLoggingConfig(use_console=False))

        # Double fork - see http://code.activestate.com/recipes/66012/
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0) # exit from first parent
        except OSError as e:  # 'as' form works on Python 2.6+ and 3.x
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir("/")
        os.umask(0)
        os.setsid()

        # Second fork
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0) # exit from second parent
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
Пример #11
0
    def setUp(self):
        """Silence output, build a mock god, and stub client-job helpers."""
        # make god
        self.god = mock.mock_god(ut=self)

        # need to set some environ variables
        self.autodir = "autodir"
        os.environ['AUTODIR'] = self.autodir

        # set up some variables
        self.control = "control"
        self.jobtag = "jobtag"

        # get rid of stdout and logging
        sys.stdout = StringIO.StringIO()
        logging_manager.configure_logging(logging_config.TestingConfig())
        logging.disable(logging.CRITICAL)

        def _noop_configure_logging(*args, **kwargs):
            pass

        self.god.stub_with(logging_manager, 'configure_logging',
                           _noop_configure_logging)
        real_get_logging_manager = logging_manager.get_logging_manager

        def _get_logging_manager_no_fds(manage_stdout_and_stderr=False,
                                        redirect_fds=False):
            # Never redirect fds inside unit tests.
            return real_get_logging_manager(manage_stdout_and_stderr, False)

        self.god.stub_with(logging_manager, 'get_logging_manager',
                           _get_logging_manager_no_fds)

        # Stub out filesystem/process helpers in one sweep (same order as
        # the individual calls they replace).
        for holder, attr in [(os.path, 'exists'),
                             (os.path, 'isdir'),
                             (os, 'makedirs'),
                             (os, 'mkdir'),
                             (os, 'remove'),
                             (shutil, 'rmtree'),
                             (shutil, 'copyfile'),
                             (job, 'open'),
                             (utils, 'system'),
                             (utils, 'drop_caches'),
                             (harness, 'select'),
                             (sysinfo, 'log_per_reboot_data')]:
            self.god.stub_function(holder, attr)

        for holder, attr in [(config, 'config'),
                             (job.local_host, 'LocalHost'),
                             (boottool, 'boottool'),
                             (sysinfo, 'sysinfo')]:
            self.god.stub_class(holder, attr)

        for method in ['_cleanup_debugdir_files', '_cleanup_results_dir']:
            self.god.stub_class_method(job.base_client_job, method)

        self.god.stub_with(job.base_job.job_directory, '_ensure_valid',
                           lambda *_: None)
Пример #12
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should peform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        @param control: Path to the control file driving this job.
        @param options: Parsed options object; .cont, .verbose, .harness
                and .harness_args are read here.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # Harness choice (and its args) persist across continuations via
        # the job's persistent-option handling.
        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        if self.control:
            # The 'fast' flag is declared by the control file itself.
            parsed_control = control_data.parse_control(self.control,
                                                        raise_warnings=False)
            self.fast = parsed_control.fast

        # set up the status logger
        def client_job_record_hook(entry):
            # Runs per record() entry, after self._logger is assigned
            # below, so reading self._logger here is safe at call time.
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)

        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook)
Пример #13
0
def main():
    """Compile (and optionally install) the clients selected on the CLI."""
    logging_manager.configure_logging(CompileClientsLoggingConfig(),
                                      verbose=True)
    parser = optparse.OptionParser()
    parser.add_option('-l', '--list-projects', action='store_true',
                      dest='list_projects', default=False,
                      help='List all projects and clients that can be compiled')
    parser.add_option('-a', '--compile-all', action='store_true',
                      dest='compile_all', default=False,
                      help='Compile all available projects and clients')
    parser.add_option('-c', '--compile', dest='compile_list', action='store',
                      help='List of clients to compiled (e.g. -c "x.X c.C")')
    parser.add_option('-e', '--extra-args', dest='extra_args', action='store',
                      default='',
                      help='Extra arguments to pass to java')
    parser.add_option('-d', '--no-install', dest='install_client',
                      action='store_false', default=True,
                      help='Do not install the clients just compile them')
    options, args = parser.parse_args()

    # Guard clauses: each branch exits, so plain ifs match the original
    # elif chain exactly.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    if options.list_projects:
        print_projects()
        sys.exit(0)
    if options.compile_all and options.compile_list:
        logging.error('Options -c and -a are mutually exclusive')
        parser.print_help()
        sys.exit(1)

    failures = []
    if options.compile_all:
        failures = compile_all_projects(options.extra_args)
    elif options.compile_list:
        for client in options.compile_list.split():
            ok = compile_and_install_client(client, options.extra_args,
                                            options.install_client)
            if not ok:
                failures.append(client)

    # Scrub the scratch build directory if one was created.
    if os.path.exists(_TMP_COMPILE_DIR):
        shutil.rmtree(_TMP_COMPILE_DIR)

    if failures:
        logging.error('The following clients failed: %s', '\n'.join(failures))
        sys.exit(1)
Пример #14
0
    def setUp(self):
        """Silence output, build a mock god, and stub client-job helpers."""
        # make god
        self.god = mock.mock_god(ut=self)

        # need to set some environ variables
        self.autodir = "autodir"
        os.environ["AUTODIR"] = self.autodir

        # set up some variables
        self.control = "control"
        self.jobtag = "jobtag"

        # get rid of stdout and logging
        sys.stdout = StringIO.StringIO()
        logging_manager.configure_logging(logging_config.TestingConfig())
        logging.disable(logging.CRITICAL)

        def _noop_configure_logging(*args, **kwargs):
            pass

        self.god.stub_with(logging_manager, "configure_logging",
                           _noop_configure_logging)
        real_get_logging_manager = logging_manager.get_logging_manager

        def _get_logging_manager_no_fds(manage_stdout_and_stderr=False,
                                        redirect_fds=False):
            # Never redirect fds inside unit tests.
            return real_get_logging_manager(manage_stdout_and_stderr, False)

        self.god.stub_with(logging_manager, "get_logging_manager",
                           _get_logging_manager_no_fds)

        # Stub out filesystem/process helpers in one sweep (same order as
        # the individual calls they replace).
        for holder, attr in [(os.path, "exists"),
                             (os.path, "isdir"),
                             (os, "makedirs"),
                             (os, "mkdir"),
                             (os, "remove"),
                             (shutil, "rmtree"),
                             (shutil, "copyfile"),
                             (job, "open"),
                             (utils, "system"),
                             (utils, "drop_caches"),
                             (harness, "select"),
                             (sysinfo, "log_per_reboot_data")]:
            self.god.stub_function(holder, attr)

        for holder, attr in [(config, "config"),
                             (job.local_host, "LocalHost"),
                             (boottool, "boottool"),
                             (sysinfo, "sysinfo")]:
            self.god.stub_class(holder, attr)

        for method in ["_cleanup_debugdir_files", "_cleanup_results_dir"]:
            self.god.stub_class_method(job.base_client_job, method)

        self.god.stub_with(job.base_job.job_directory, "_ensure_valid",
                           lambda *_: None)
def main():
    """Compile (and optionally install) the clients selected on the CLI."""
    logging_manager.configure_logging(CompileClientsLoggingConfig(),
                                      verbose=True)
    parser = optparse.OptionParser()
    parser.add_option('-l', '--list-projects', action='store_true',
                      dest='list_projects', default=False,
                      help='List all projects and clients that can be compiled')
    parser.add_option('-a', '--compile-all', action='store_true',
                      dest='compile_all', default=False,
                      help='Compile all available projects and clients')
    parser.add_option('-c', '--compile', dest='compile_list', action='store',
                      help='List of clients to compiled (e.g. -c "x.X c.C")')
    parser.add_option('-e', '--extra-args', dest='extra_args', action='store',
                      default='',
                      help='Extra arguments to pass to java')
    parser.add_option('-d', '--no-install', dest='install_client',
                      action='store_false', default=True,
                      help='Do not install the clients just compile them')
    options, args = parser.parse_args()

    # Guard clauses: each branch exits, so plain ifs match the original
    # elif chain exactly.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    if options.list_projects:
        print_projects()
        sys.exit(0)
    if options.compile_all and options.compile_list:
        logging.error('Options -c and -a are mutually exclusive')
        parser.print_help()
        sys.exit(1)

    failures = []
    if options.compile_all:
        failures = compile_all_projects(options.extra_args)
    elif options.compile_list:
        for client in options.compile_list.split():
            ok = compile_and_install_client(client, options.extra_args,
                                            options.install_client)
            if not ok:
                failures.append(client)

    # Scrub the scratch build directory if one was created.
    if os.path.exists(_TMP_COMPILE_DIR):
        shutil.rmtree(_TMP_COMPILE_DIR)

    if failures:
        logging.error('The following clients failed: %s', '\n'.join(failures))
        sys.exit(1)
def main():
    """
    Find all ExternalPackage classes defined in this file and ask them to
    fetch, build and install themselves.

    @returns The number of errors encountered, usable as an exit status.
    """
    options = parse_arguments(sys.argv[1:])
    logging_manager.configure_logging(BuildExternalsLoggingConfig(),
                                      verbose=True)
    # 0o prefix (valid on Python 2.6+, required on Python 3) replaces the
    # Python-2-only octal literal 022; same value, 0o22 == 18.
    os.umask(0o22)

    top_of_tree = external_packages.find_top_of_autotest_tree()
    package_dir = os.path.join(top_of_tree, PACKAGE_DIR)
    install_dir = os.path.join(top_of_tree, INSTALL_DIR)

    # Make sure the install_dir is in our python module search path
    # as well as the PYTHONPATH being used by all our setup.py
    # install subprocesses.
    if install_dir not in sys.path:
        sys.path.insert(0, install_dir)
    env_python_path_varname = 'PYTHONPATH'
    env_python_path = os.environ.get(env_python_path_varname, '')
    if install_dir + ':' not in env_python_path:
        os.environ[env_python_path_varname] = ':'.join(
            [install_dir, env_python_path])

    fetched_packages, fetch_errors = fetch_necessary_packages(
        package_dir, install_dir, set(options.names_to_check))
    install_errors = build_and_install_packages(fetched_packages, install_dir,
                                                options.use_chromite_master)

    # Byte compile the code after it has been installed in its final
    # location as .pyc files contain the path passed to compile_dir().
    # When printing exception tracebacks, python uses that path first to look
    # for the source code before checking the directory of the .pyc file.
    # Don't leave references to our temporary build dir in the files.
    logging.info('compiling .py files in %s to .pyc', install_dir)
    compileall.compile_dir(install_dir, quiet=True)

    # Some things install with whacky permissions, fix that.
    external_packages.system("chmod -R a+rX '%s'" % install_dir)

    errors = fetch_errors + install_errors
    for error_msg in errors:
        logging.error(error_msg)

    if not errors:
        logging.info("Syntax errors from pylint above are expected, not "
                     "problematic. SUCCESS.")
    else:
        logging.info("Problematic errors encountered. FAILURE.")
    return len(errors)
Пример #17
0
def main():
    """Decode calls from stdin, execute them, and encode the results."""
    logging_manager.configure_logging(
        drone_logging_config.DroneLoggingConfig())
    with timer.get_client('decode'):
        calls = parse_input()
    args = _parse_args(sys.argv[1:])
    if args.call_time is not None:
        # Report how long the invocation itself took to reach us.
        overhead = time.time() - args.call_time
        autotest_stats.Gauge(_STATS_KEY).send('invocation_overhead',
                                              overhead)

    utility = DroneUtility()
    results = utility.execute_calls(calls)
    with timer.get_client('encode'):
        return_data(results)
Пример #18
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should peform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self._group_level,
        self.harness and of course self._logger.

        @param control: Path to the control file driving this job.
        @param options: Parsed options object; .cont, .verbose, .harness
                and .harness_args are read here.
        """
        if not options.cont:
            self._cleanup_debugdir_files()
            self._cleanup_results_dir()

        logging_manager.configure_logging(
            client_logging_config.ClientLoggingConfig(),
            results_dir=self.resultdir,
            verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self._current_step_ancestry = []
        self._next_step_index = 0
        self._load_state()

        # Harness choice (and its args) persist across continuations via
        # the job's persistent-option handling.
        _harness = self.handle_persistent_option(options, 'harness')
        _harness_args = self.handle_persistent_option(options, 'harness_args')

        self.harness = harness.select(_harness, self, _harness_args)

        # set up the status logger
        def client_job_record_hook(entry):
            # Runs per record() entry, after self._logger is assigned
            # below, so reading self._logger here is safe at call time.
            msg_tag = ''
            if '.' in self._logger.global_filename:
                msg_tag = self._logger.global_filename.split('.', 1)[1]
            # send the entry to the job harness
            message = '\n'.join([entry.message] + entry.extra_message_lines)
            rendered_entry = self._logger.render_entry(entry)
            self.harness.test_status_detail(entry.status_code, entry.subdir,
                                            entry.operation, message, msg_tag,
                                            entry.fields)
            self.harness.test_status(rendered_entry, msg_tag)
            # send the entry to stdout, if it's enabled
            logging.info(rendered_entry)
        self._logger = base_job.status_logger(
            self, status_indenter(self), record_hook=client_job_record_hook,
            tap_writer=self._tap)
Пример #19
0
    def _pre_record_init(self, control, options):
        """
        Initialization function that should peform ONLY the required
        setup so that the self.record() method works.

        As of now self.record() needs self.resultdir, self.group_level,
        self.log_filename, self.harness.

        @param control: Path to the control file driving this job.
        @param options: Parsed options object; .tag, .cont, .verbose and
                .harness are read here.
        """
        self.autodir = os.environ['AUTODIR']
        self.resultdir = os.path.join(self.autodir, 'results', options.tag)
        self.tmpdir = os.path.join(self.autodir, 'tmp')

        if not os.path.exists(self.resultdir):
            os.makedirs(self.resultdir)

        if not options.cont:
            self._cleanup_results_dir()
            # Don't cleanup the tmp dir (which contains the lockfile)
            # in the constructor, this would be a problem for multiple
            # jobs starting at the same time on the same client. Instead
            # do the delete at the server side. We simply create the tmp
            # directory here if it does not already exist.
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

        logging_manager.configure_logging(
                client_logging_config.ClientLoggingConfig(),
                results_dir=self.resultdir,
                verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        self.log_filename = self.DEFAULT_LOG_FILENAME

        # init_group_level needs the state
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self.state_file = self.control + '.state'
        self.current_step_ancestry = []
        self.next_step_index = 0
        self.testtag = ''
        self._test_tag_prefix = ''
        self._load_state()

        self._init_group_level()

        self.harness = harness.select(options.harness, self)
Пример #20
0
def main():
    """
    Find all ExternalPackage classes defined in this file and ask them to
    fetch, build and install themselves.
    """
    logging_manager.configure_logging(BuildExternalsLoggingConfig(),
                                      verbose=True)
    os.umask(022)

    top_of_tree = external_packages.find_top_of_autotest_tree()
    package_dir = os.path.join(top_of_tree, PACKAGE_DIR)
    install_dir = os.path.join(top_of_tree, INSTALL_DIR)

    # Make sure the install_dir is in our python module search path
    # as well as the PYTHONPATH being used by all our setup.py
    # install subprocesses.
    if install_dir not in sys.path:
        sys.path.insert(0, install_dir)
    env_python_path_varname = 'PYTHONPATH'
    env_python_path = os.environ.get(env_python_path_varname, '')
    if install_dir+':' not in env_python_path:
        os.environ[env_python_path_varname] = ':'.join([
            install_dir, env_python_path])

    fetched_packages, fetch_errors = fetch_necessary_packages(package_dir,
                                                              install_dir)
    install_errors = build_and_install_packages(fetched_packages, install_dir)

    # Byte compile the code after it has been installed in its final
    # location as .pyc files contain the path passed to compile_dir().
    # When printing exception tracebacks, python uses that path first to look
    # for the source code before checking the directory of the .pyc file.
    # Don't leave references to our temporary build dir in the files.
    logging.info('compiling .py files in %s to .pyc', install_dir)
    compileall.compile_dir(install_dir, quiet=True)

    # Some things install with whacky permissions, fix that.
    external_packages.system("chmod -R a+rX '%s'" % install_dir)

    errors = fetch_errors + install_errors
    for error_msg in errors:
        logging.error(error_msg)

    return len(errors)
Пример #21
0
def perform_run_from_autotest_root(autotest_path,
                                   argv,
                                   tests,
                                   remote,
                                   build=NO_BUILD,
                                   board=NO_BOARD,
                                   args=None,
                                   pretend=False,
                                   no_experimental=False,
                                   ignore_deps=True,
                                   results_directory=None,
                                   ssh_verbosity=0,
                                   ssh_options=None,
                                   iterations=1,
                                   fast_mode=False,
                                   debug=False,
                                   whitelist_chrome_crashes=False,
                                   host_attributes=None):
    """
    Perform a test_that run, from the |autotest_path|.

    This function is to be called from test_that/test_droid's main() script,
    when tests are executed from the |autotest_path|. It handles all stages
    of a test run that come after the bootstrap into |autotest_path|.

    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param iterations: int number of times to schedule tests.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param debug: Logging and autoserv verbosity.
    @param whitelist_chrome_crashes: If True, whitelist chrome crashes.
    @param host_attributes: Dict of host attributes to pass into autoserv.
                            Defaults to None, which is treated as an empty
                            dict.

    @returns: A return code that test_that should exit with.
    """
    # Use a None sentinel instead of a mutable {} default: a literal dict
    # default is a single object shared across all calls to this function.
    if host_attributes is None:
        host_attributes = {}

    if results_directory is None or not os.path.exists(results_directory):
        raise ValueError('Expected valid results directory, got %s' %
                         results_directory)

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=results_directory,
        use_console=True,
        verbose=debug,
        debug_log_name='test_that')
    logging.info('Began logging to %s', results_directory)

    logging.debug('test_that command line was: %s', argv)

    # Let Ctrl-C / SIGTERM abort cleanly through the shared handler.
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    afe = setup_local_afe()
    codes = perform_local_run(afe,
                              autotest_path,
                              tests,
                              remote,
                              fast_mode,
                              build,
                              board,
                              args=args,
                              pretend=pretend,
                              no_experimental=no_experimental,
                              ignore_deps=ignore_deps,
                              results_directory=results_directory,
                              ssh_verbosity=ssh_verbosity,
                              ssh_options=ssh_options,
                              autoserv_verbose=debug,
                              iterations=iterations,
                              host_attributes=host_attributes)
    if pretend:
        logging.info('Finished pretend run. Exiting.')
        return 0

    final_result = generate_report(
        results_directory,
        whitelist_chrome_crashes=whitelist_chrome_crashes,
        html_report=True)
    try:
        os.unlink(_LATEST_RESULTS_DIRECTORY)
    except OSError:
        # No previous 'latest' symlink to remove.
        pass
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(_LATEST_RESULTS_DIRECTORY))
    if any(codes):
        logging.error('Autoserv encountered unexpected errors '
                      'when executing jobs.')
        final_result = final_result or 1
    os.symlink(link_target, _LATEST_RESULTS_DIRECTORY)
    logging.info('Finished running tests. Results can be found in %s or %s',
                 results_directory, _LATEST_RESULTS_DIRECTORY)
    return final_result
Пример #22
0
def main():
    """
    Autoserv entry point: parse arguments, set up the results directory and
    logging, optionally stage a server-side package, then run the job (or
    mocked results in testing mode) and exit with the job's status code.
    """
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job requires to run with server-side package, try to stage server-
    # side package first. If that fails with error that autotest server package
    # does not exist, fall back to run the job without using server-side
    # packaging. If option warn_no_ssp is specified, that means autoserv is
    # running in a drone does not support SSP, thus no need to stage server-side
    # package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have autotest server package. Fall back to not
        # to use server-side package. Logging is postponed until logging being
        # set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, it means that autoserv is
    # running in a drone does not have SSP supported and a warning will be logs.
    # Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
            'Autoserv is required to run with server-side packaging. '
            'However, no server-side package can be found based on '
            '`--image`, host attribute job_repo_url or host OS version '
            'label. It could be that the build to test is older than the '
            'minimum version that supports server-side packaging. The test '
            'will be executed without using server-side packaging. '
            'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Initialize up front so the test_mode branch below cannot hit a
    # NameError when the control-file conditions are not met (e.g. no
    # machines were passed, or no control file argument was given).
    test_name = None
    try:
        # Take the first argument as control file name, get the test name from
        # the control file.
        if (len(parser.args) > 0 and parser.args[0] != ''
                and parser.options.machines):
            try:
                test_name = control_data.parse_control(
                    parser.args[0], raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug(
                    'Failed to retrieve test name from control file.')
                test_name = None
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                        test_name if test_name else 'unknown-test',
                        parser.options.results, machine).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
Пример #23
0
def main(argv):
    """
    Command line entry point: parse options and run the requested test
    importer actions (clean/clear the DB, add tests, samples, profilers).

    @param argv: sys.argv-style argument list (argv[0] is the program name).
    @returns: 0 on success, 1 on usage errors; None when only the trailing
              optional actions execute.
    """
    global DRY_RUN

    parser = optparse.OptionParser()
    # (flags, keyword arguments) pairs, registered in a single loop below.
    flag_specs = [
        (('-c', '--db-clean-tests'),
         dict(dest='clean_tests', action='store_true', default=False,
              help='Clean client and server tests with invalid control files')),
        (('-C', '--db-clear-all-tests'),
         dict(dest='clear_all_tests', action='store_true', default=False,
              help='Clear ALL client and server tests')),
        (('-d', '--dry-run'),
         dict(dest='dry_run', action='store_true', default=False,
              help='Dry run for operation')),
        (('-A', '--add-all'),
         dict(dest='add_all', action='store_true', default=False,
              help='Add site_tests, tests, and test_suites')),
        (('-S', '--add-samples'),
         dict(dest='add_samples', action='store_true', default=False,
              help='Add samples.')),
        (('-E', '--add-experimental'),
         dict(dest='add_experimental', action='store_true', default=True,
              help='Add experimental tests to frontend')),
        (('-N', '--add-noncompliant'),
         dict(dest='add_noncompliant', action='store_true', default=False,
              help='Add non-compliant tests (i.e. tests that do not '
                   'define all required control variables)')),
        (('-p', '--profile-dir'),
         dict(dest='profile_dir',
              help='Directory to recursively check for profiles')),
        (('-t', '--tests-dir'),
         dict(dest='tests_dir',
              help='Directory to recursively check for control.*')),
        (('-r', '--control-pattern'),
         dict(dest='control_pattern', default='^control.*',
              help='The pattern to look for in directories for control files')),
        (('-v', '--verbose'),
         dict(dest='verbose', action='store_true', default=False,
              help='Run in verbose mode')),
        (('-w', '--whitelist-file'),
         dict(dest='whitelist_file',
              help='Filename for list of test names that must match')),
        (('-z', '--autotest-dir'),
         dict(dest='autotest_dir',
              default=os.path.join(os.path.dirname(__file__), '..'),
              help='Autotest directory root')),
    ]
    for flags, kwargs in flag_specs:
        parser.add_option(*flags, **kwargs)
    options, leftover_args = parser.parse_args()

    logging_manager.configure_logging(TestImporterLoggingConfig(),
                                      verbose=options.verbose)

    DRY_RUN = options.dry_run
    if DRY_RUN:
        logging.getLogger().setLevel(logging.WARN)

    # Work with the absolute autotest root from here on.
    options.autotest_dir = os.path.abspath(options.autotest_dir)

    if leftover_args:
        logging.error("Invalid option(s) provided: %s", leftover_args)
        parser.print_help()
        return 1

    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Bare invocation (optionally with just --verbose): do a full update
    # plus a cleanup pass and stop.
    if len(argv) == 1 or (len(argv) == 2 and options.verbose):
        update_all(options.autotest_dir, options.add_noncompliant,
                   options.add_experimental)
        db_clean_broken(options.autotest_dir)
        return 0

    if options.clear_all_tests:
        incompatible = (options.clean_tests or options.add_all or
                        options.add_samples or options.add_noncompliant)
        if incompatible:
            logging.error(
                "Can only pass --autotest-dir, --dry-run and --verbose with "
                "--db-clear-all-tests")
            return 1
        db_clean_all(options.autotest_dir)

    whitelist_set = None
    if options.whitelist_file:
        if options.add_all:
            logging.error("Cannot pass both --add-all and --whitelist-file")
            return 1
        whitelist_path = os.path.abspath(options.whitelist_file)
        if not os.path.isfile(whitelist_path):
            logging.error("--whitelist-file (%s) not found", whitelist_path)
            return 1
        logging.info("Using whitelist file %s", whitelist_path)
        whitelist_set = _create_whitelist_set(whitelist_path)
        update_from_whitelist(whitelist_set,
                              add_experimental=options.add_experimental,
                              add_noncompliant=options.add_noncompliant,
                              autotest_dir=options.autotest_dir)
    if options.add_all:
        update_all(options.autotest_dir, options.add_noncompliant,
                   options.add_experimental)
    if options.add_samples:
        update_samples(options.autotest_dir, options.add_noncompliant,
                       options.add_experimental)
    if options.tests_dir:
        options.tests_dir = os.path.abspath(options.tests_dir)
        tests = get_tests_from_fs(options.tests_dir, options.control_pattern,
                                  add_noncompliant=options.add_noncompliant)
        update_tests_in_db(tests, add_experimental=options.add_experimental,
                           add_noncompliant=options.add_noncompliant,
                           autotest_dir=options.autotest_dir)
    if options.profile_dir:
        profilers = get_tests_from_fs(options.profile_dir, '.*py$')
        update_profilers_in_db(profilers,
                               add_noncompliant=options.add_noncompliant,
                               description='NA')
    if options.clean_tests:
        db_clean_broken(options.autotest_dir)
Пример #24
0
@copyright: Red Hat 2008-2009
"""

import os, sys, optparse, logging
import common
import kvm_utils
from autotest_lib.client.common_lib import logging_config, logging_manager
from autotest_lib.client.bin import utils


if __name__ == "__main__":
    # Simple file-checking helper script for the KVM tests.
    parser = optparse.OptionParser("usage: %prog [options] [filenames]")
    options, args = parser.parse_args()

    # Route all output through the autotest logging stack.
    logging_manager.configure_logging(kvm_utils.KvmLoggingConfig())

    if args:
        filenames = args
    else:
        # No files given: show usage and bail out.
        parser.print_help()
        sys.exit(1)

    for filename in filenames:
        filename = os.path.abspath(filename)

        file_exists = os.path.isfile(filename)
        can_read_file = os.access(filename, os.R_OK)
        if not file_exists:
            logging.critical("File %s does not exist!", filename)
            continue
        # NOTE(review): can_read_file is unused in this excerpt; the loop
        # body appears truncated here — presumably more checks follow.
Пример #25
0
@copyright: Red Hat 2008-2009
"""

import os, sys, optparse, logging
import common
import kvm_utils
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.bin import utils


if __name__ == "__main__":
    # File-checking helper script (same shape as the kvm_utils variant).
    parser = optparse.OptionParser("usage: %prog [options] [filenames]")
    options, args = parser.parse_args()

    # Configure the shared autotest logging before any output.
    logging_manager.configure_logging(kvm_utils.KvmLoggingConfig())

    if args:
        filenames = args
    else:
        # Without positional file arguments there is nothing to do.
        parser.print_help()
        sys.exit(1)

    for filename in filenames:
        filename = os.path.abspath(filename)

        file_exists = os.path.isfile(filename)
        can_read_file = os.access(filename, os.R_OK)
        if not file_exists:
            logging.critical("File %s does not exist!", filename)
            continue
        # NOTE(review): can_read_file is not used in the visible part of
        # this loop; the excerpt appears truncated here.
Пример #26
0
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir("/")
        os.umask(0)
        os.setsid()

        # Second fork
        try:
            pid = os.fork()
            if (pid > 0):
                sys.exit(0) # exit from second parent
        except OSError, e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
    else:
        logging_manager.configure_logging(
                                watcher_logging_config.WatcherLoggingConfig())

    while True:
        proc = MonitorProc(do_recovery=recover)
        proc.start()
        time.sleep(PAUSE_LENGTH)
        while proc.is_running():
            logging.info("Tick")
            time.sleep(PAUSE_LENGTH)
        recover = False
Пример #27
0
def main():
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=results,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)
    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if parser.options.use_existing_results and not resultdir_exists:
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest_remote.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    exit_code = 0
    try:
        try:
            run_autoserv(pid_file_manager, results, parser)
        except SystemExit, e:
            exit_code = e.code
        except:
            traceback.print_exc()
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            exit_code = 1
Пример #28
0
        except:
            return None


if __name__ == '__main__':
    # Command-line front end for SoftwareManager: first positional argument
    # selects the action, the rest become its argument string.
    parser = optparse.OptionParser(
        "usage: %prog [install|remove|list-all|list-files|add-repo|remove-repo|"
        "upgrade|what-provides|install-what-provides] arguments")
    parser.add_option('--verbose',
                      dest="debug",
                      action='store_true',
                      help='include debug messages in console output')

    options, args = parser.parse_args()
    debug = options.debug
    logging_manager.configure_logging(SoftwareManagerLoggingConfig(),
                                      verbose=debug)
    software_manager = SoftwareManager()
    if args:
        action = args[0]
        args = " ".join(args[1:])
    else:
        action = 'show-help'

    if action == 'install':
        software_manager.install(args)
    elif action == 'remove':
        software_manager.remove(args)
    # NOTE(review): this starts a new if chain instead of continuing the
    # elif chain above — harmless since action holds a single value, but
    # presumably 'elif' was intended. The chain also appears truncated in
    # this excerpt ('show-help' is never handled here).
    if action == 'list-all':
        software_manager.list_all()
    elif action == 'list-files':
        software_manager.list_files(args)
Пример #29
0
                      help='include debug messages in console output')
    parser.add_option('-f', '--full-check', dest="full_check",
                      action='store_true',
                      help='check the full tree for corrective actions')
    parser.add_option('-y', '--yes', dest="confirm",
                      action='store_true',
                      help='Answer yes to all questions')

    options, args = parser.parse_args()
    local_patch = options.local_patch
    id = options.id
    debug = options.debug
    full_check = options.full_check
    confirm = options.confirm

    logging_manager.configure_logging(CheckPatchLoggingConfig(), verbose=debug)

    ignore_file_list = ['common.py']
    if full_check:
        for root, dirs, files in os.walk('.'):
            if not '.svn' in root:
                for file in files:
                    if file not in ignore_file_list:
                        path = os.path.join(root, file)
                        file_checker = FileChecker(path, confirm=confirm)
                        file_checker.report()
    else:
        if local_patch:
            patch_checker = PatchChecker(patch=local_patch, confirm=confirm)
        elif id:
            patch_checker = PatchChecker(patchwork_id=id, confirm=confirm)
Пример #30
0
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir("/")
        os.umask(0)
        os.setsid()

        # Second fork
        try:
            pid = os.fork()
            if (pid > 0):
                sys.exit(0)  # exit from second parent
        except OSError, e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
    else:
        logging_manager.configure_logging(
            watcher_logging_config.WatcherLoggingConfig())

    while True:
        proc = MonitorProc(do_recovery=recover)
        proc.start()
        time.sleep(PAUSE_LENGTH)
        while proc.is_running():
            logging.info("Tick")
            time.sleep(PAUSE_LENGTH)
        recover = False
Пример #31
0
def main(argv):
    """
    Main function: parse command line options and run the requested test
    importer actions (clean/clear the DB, add tests, samples, profilers).

    @param argv: sys.argv-style argument list (argv[0] is the program name).
    @returns: 0 on success, 1 on usage errors; None when only the trailing
              optional actions execute.
    """

    global DRY_RUN
    parser = optparse.OptionParser()
    parser.add_option(
        "-c",
        "--db-clean-tests",
        dest="clean_tests",
        action="store_true",
        default=False,
        help="Clean client and server tests with invalid control files",
    )
    parser.add_option(
        "-C",
        "--db-clear-all-tests",
        dest="clear_all_tests",
        action="store_true",
        default=False,
        help="Clear ALL client and server tests",
    )
    parser.add_option(
        "-d", "--dry-run", dest="dry_run", action="store_true", default=False, help="Dry run for operation"
    )
    parser.add_option(
        "-A",
        "--add-all",
        dest="add_all",
        action="store_true",
        default=False,
        help="Add site_tests, tests, and test_suites",
    )
    parser.add_option(
        "-S", "--add-samples", dest="add_samples", action="store_true", default=False, help="Add samples."
    )
    # NOTE(review): with action="store_true" and default=True this flag can
    # never change anything — presumably default=False was intended.
    parser.add_option(
        "-E",
        "--add-experimental",
        dest="add_experimental",
        action="store_true",
        default=True,
        help="Add experimental tests to frontend",
    )
    parser.add_option(
        "-N",
        "--add-noncompliant",
        dest="add_noncompliant",
        action="store_true",
        default=False,
        help="Add non-compliant tests (i.e. tests that do not " "define all required control variables)",
    )
    parser.add_option("-p", "--profile-dir", dest="profile_dir", help="Directory to recursively check for profiles")
    parser.add_option("-t", "--tests-dir", dest="tests_dir", help="Directory to recursively check for control.*")
    parser.add_option(
        "-r",
        "--control-pattern",
        dest="control_pattern",
        default="^control.*",
        help="The pattern to look for in directories for control files",
    )
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Run in verbose mode")
    parser.add_option(
        "-w", "--whitelist-file", dest="whitelist_file", help="Filename for list of test names that must match"
    )
    parser.add_option(
        "-z",
        "--autotest-dir",
        dest="autotest_dir",
        default=os.path.join(os.path.dirname(__file__), ".."),
        help="Autotest directory root",
    )
    options, args = parser.parse_args()

    logging_manager.configure_logging(TestImporterLoggingConfig(), verbose=options.verbose)

    DRY_RUN = options.dry_run
    if DRY_RUN:
        logging.getLogger().setLevel(logging.WARN)

    # Make sure autotest_dir is the absolute path
    options.autotest_dir = os.path.abspath(options.autotest_dir)

    if len(args) > 0:
        logging.error("Invalid option(s) provided: %s", args)
        parser.print_help()
        return 1

    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Bare invocation (optionally with just --verbose): run a full update
    # plus a cleanup pass and stop.
    if len(argv) == 1 or (len(argv) == 2 and options.verbose):
        update_all(options.autotest_dir, options.add_noncompliant, options.add_experimental)
        db_clean_broken(options.autotest_dir)
        return 0

    if options.clear_all_tests:
        # --db-clear-all-tests is exclusive with the add/clean options.
        if options.clean_tests or options.add_all or options.add_samples or options.add_noncompliant:
            logging.error("Can only pass --autotest-dir, --dry-run and --verbose with " "--db-clear-all-tests")
            return 1
        db_clean_all(options.autotest_dir)

    whitelist_set = None
    if options.whitelist_file:
        if options.add_all:
            logging.error("Cannot pass both --add-all and --whitelist-file")
            return 1
        whitelist_path = os.path.abspath(options.whitelist_file)
        if not os.path.isfile(whitelist_path):
            logging.error("--whitelist-file (%s) not found", whitelist_path)
            return 1
        logging.info("Using whitelist file %s", whitelist_path)
        whitelist_set = _create_whitelist_set(whitelist_path)
        update_from_whitelist(
            whitelist_set,
            add_experimental=options.add_experimental,
            add_noncompliant=options.add_noncompliant,
            autotest_dir=options.autotest_dir,
        )
    if options.add_all:
        update_all(options.autotest_dir, options.add_noncompliant, options.add_experimental)
    if options.add_samples:
        update_samples(options.autotest_dir, options.add_noncompliant, options.add_experimental)
    if options.tests_dir:
        options.tests_dir = os.path.abspath(options.tests_dir)
        tests = get_tests_from_fs(options.tests_dir, options.control_pattern, add_noncompliant=options.add_noncompliant)
        update_tests_in_db(
            tests,
            add_experimental=options.add_experimental,
            add_noncompliant=options.add_noncompliant,
            autotest_dir=options.autotest_dir,
        )
    if options.profile_dir:
        profilers = get_tests_from_fs(options.profile_dir, ".*py$")
        update_profilers_in_db(profilers, add_noncompliant=options.add_noncompliant, description="NA")
    if options.clean_tests:
        db_clean_broken(options.autotest_dir)
            try:
                utils.unmap_url_cache(destination, url, hash, method="sha1")
                file_ok = True
            except EnvironmentError, e:
                logging.error(e)
        else:
            logging.info("File %s present, but chose to not verify it",
                         iso_path)
            return

    if file_ok:
        logging.info("%s present, with proper checksum", iso_path)


if __name__ == "__main__":
    # Helper that checks/creates the directory layout expected by the
    # default KVM test configuration.
    logging_manager.configure_logging(virt_utils.VirtLoggingConfig(),
                                      verbose=True)
    logging.info("KVM test config helper")

    logging.info("")
    logging.info("1 - Verifying directories (check if the directory structure "
                 "expected by the default test config is there)")
    base_dir = "/tmp/kvm_autotest_root"
    sub_dir_list = ["images", "isos", "steps_data"]
    for sub_dir in sub_dir_list:
        sub_dir_path = os.path.join(base_dir, sub_dir)
        if not os.path.isdir(sub_dir_path):
            logging.debug("Creating %s", sub_dir_path)
            os.makedirs(sub_dir_path)
        else:
            logging.debug("Dir %s exists, not creating" % sub_dir_path)
    logging.info("")
    # NOTE(review): later numbered steps of this helper are not visible in
    # this excerpt.
Пример #33
0
import common
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.virt import virt_utils


def destroy_tap(tapfd_list):
    """Close every tap file descriptor in *tapfd_list*.

    Descriptors that are already closed make os.close() raise OSError;
    those are silently ignored so the cleanup is idempotent.
    """
    for tap_fd in tapfd_list:
        try:
            os.close(tap_fd)
        except OSError:
            # Already closed -- nothing left to release.
            pass


if __name__ == "__main__":
    logging_manager.configure_logging(virt_utils.VirtLoggingConfig(), verbose=True)
    if len(sys.argv) <= 2:
        logging.info("Usage: %s bridge_name qemu_command_line", sys.argv[0])
        sys.exit(255)

    brname = sys.argv[1]
    cmd_line = " ".join(sys.argv[2:])

    if re.findall("-netdev\s", cmd_line):
        # so we get the new qemu cli with netdev parameter.
        tap_list_re = r"tap,id=(.*?),"
        tap_replace_re = r"(tap,id=%s.*?,fd=)\d+"
    else:
        # the old cli contain "-net" parameter.
        tap_list_re = r"tap,vlan=(\d+),"
        tap_replace_re = r"(tap,vlan=%s,fd=)\d+"
Пример #34
0
@copyright: Red Hat 2008-2009
"""

import os, sys, optparse, logging
import common
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.bin import utils
from autotest_lib.client.virt import virt_utils


if __name__ == "__main__":
    parser = optparse.OptionParser("usage: %prog [options] [filenames]")
    options, args = parser.parse_args()

    logging_manager.configure_logging(virt_utils.VirtLoggingConfig())

    if args:
        filenames = args
    else:
        parser.print_help()
        sys.exit(1)

    for filename in filenames:
        filename = os.path.abspath(filename)

        file_exists = os.path.isfile(filename)
        can_read_file = os.access(filename, os.R_OK)
        if not file_exists:
            logging.critical("File %s does not exist!", filename)
            continue
Пример #35
0

if __name__ == "__main__":
    parser = optparse.OptionParser()
    parser.add_option('-f', '--file', dest="filename", action='store_true',
                      help='path to a config file that will be parsed. '
                           'If not specified, will parse kvm_tests.cfg '
                           'located inside the kvm test dir.')
    parser.add_option('--verbose', dest="debug", action='store_true',
                      help='include debug messages in console output')

    options, args = parser.parse_args()
    filename = options.filename
    debug = options.debug

    if not filename:
        filename = os.path.join(os.path.dirname(sys.argv[0]), "kvm_tests.cfg")

    # Here we configure the stand alone program to use the autotest
    # logging system.
    logging_manager.configure_logging(KvmLoggingConfig(), verbose=debug)
    list = config(filename, debug=debug).get_list()
    i = 0
    for dict in list:
        logging.info("Dictionary #%d:", i)
        keys = dict.keys()
        keys.sort()
        for key in keys:
            logging.info("    %s = %s", key, dict[key])
        i += 1
Пример #36
0
def main():
    """Autoserv entry point: parse options, prepare the results directory,
    configure logging and the pidfile, then run the server job.

    NOTE(review): this excerpt is truncated -- the outer try at the bottom has
    no visible finally/except clause here.
    """
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    # No arguments at all: print usage and bail out.
    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        # Default the results dir to a timestamped name under the CWD.
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results  = os.path.abspath(results)
        resultdir_exists = False
        # Any of these marker files means a previous run left results here.
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(), results_dir=results,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)
    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if parser.options.use_existing_results and not resultdir_exists:
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)


    # Optionally track this process with a pidfile in the results dir.
    if parser.options.write_pidfile:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    exit_code = 0
    try:
        try:
            run_autoserv(pid_file_manager, results, parser)
        except SystemExit, e:
            # Propagate the requested exit code instead of exiting here.
            exit_code = e.code
        except:
            traceback.print_exc()
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            exit_code = 1
Пример #37
0
        if self.active:
            self.plot_2d_graphs()
            self.plot_3d_graphs()


class AnalyzerLoggingConfig(logging_config.LoggingConfig):
    """Logging configuration for the standalone analyzer: console output only."""
    def configure_logging(self, results_dir=None, verbose=False):
        # results_dir is accepted for interface compatibility with the base
        # class but is not forwarded -- output goes to the console only.
        super(AnalyzerLoggingConfig, self).configure_logging(use_console=True,
                                                        verbose=verbose)


if __name__ == "__main__":
    parser = optparse.OptionParser("usage: %prog [options] [filenames]")
    options, args = parser.parse_args()

    logging_manager.configure_logging(AnalyzerLoggingConfig())

    if args:
        filenames = args
    else:
        parser.print_help()
        sys.exit(1)

    if len(args) > 2:
        parser.print_help()
        sys.exit(1)

    o = os.path.join(os.getcwd(),
                     "iozone-graphs-%s" % time.strftime('%Y-%m-%d-%H.%M.%S'))
    if not os.path.isdir(o):
        os.makedirs(o)
Пример #38
0
def main():
    """Autoserv entry point.

    Parses command line options, prepares the results directory, configures
    logging (optionally into an ssp_logs subdirectory when server-side
    packaging is used), then runs the server job and exits with its code.
    """
    # NOTE(review): start_time appears unused in this excerpt; presumably
    # consumed further down in the full script.
    start_time = datetime.datetime.now()
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    # No arguments at all: print usage and bail out.
    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        # Default the results dir to a timestamped name under the CWD.
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        # Any of these marker files means a previous run left results here.
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    if parser.options.require_ssp:
        # This is currently only used for skylab (i.e., when --control-name is
        # used).
        use_ssp = _require_ssp_from_control(parser.options.control_name)
    else:
        use_ssp = False

    # With server-side packaging, logs go to an ssp_logs subdirectory.
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))
    logging.debug('autoserv parsed options: %s', parser.options)

    if use_ssp:
        ssp_url = _stage_ssp(parser, results)
    else:
        ssp_url = None

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    # Optionally track this process with a pidfile in the results dir.
    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.Autotest.set_install_in_tmpdir(parser.options.install_in_tmpdir)

    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)

    # Attach cloud-trace spans around the actual job run for profiling.
    trace_labels = {
        'job_id': job_directories.get_job_id_or_task_id(parser.options.results)
    }
    trace = cloud_trace.SpanStack(
        labels=trace_labels, global_context=parser.options.cloud_trace_context)
    trace.enabled = parser.options.cloud_trace_context_enabled == 'True'
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker('unknown-test',
                                                 parser.options.results,
                                                 machine).mock_results()
                return
            else:
                with trace.Span(get_job_status(parser.options)):
                    run_autoserv(pid_file_manager, results, parser, ssp_url,
                                 use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        # Always record the final exit code in the pidfile before exiting.
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
Пример #39
0
                return list_provides[0]
            return None
        except Exception:
            return None


if __name__ == '__main__':
    parser = optparse.OptionParser(
        "usage: %prog [install|remove|list-all|list-files|add-repo|remove-repo|"
        "upgrade|what-provides|install-what-provides] arguments")
    parser.add_option('--verbose', dest="debug", action='store_true',
                      help='include debug messages in console output')

    options, args = parser.parse_args()
    debug = options.debug
    logging_manager.configure_logging(SoftwareManagerLoggingConfig(),
                                      verbose=debug)
    software_manager = SoftwareManager()
    # First positional argument selects the action; the rest become a single
    # space-joined argument string for the chosen action.
    if args:
        action = args[0]
        args = " ".join(args[1:])
    else:
        action = 'show-help'

    # Dispatch on the requested action. The actions are mutually exclusive,
    # so keep one contiguous if/elif chain (the original broke the chain with
    # a bare 'if' before 'list-all').
    if action == 'install':
        software_manager.install(args)
    elif action == 'remove':
        software_manager.remove(args)
    elif action == 'list-all':
        software_manager.list_all()
    elif action == 'list-files':
        software_manager.list_files(args)
Пример #40
0
    iso_path = os.path.join(destination, os.path.basename(url))
    # Verify the ISO exists and matches the expected sha1; offer to
    # (re)download it interactively when it is missing or corrupted.
    # NOTE(review): this excerpt starts mid-function -- destination/url/hash
    # are parameters of the enclosing (not visible) function.
    if not os.path.isfile(iso_path) or (
                            utils.hash_file(iso_path, method="sha1") != hash):
        logging.warning("%s not found or corrupted", iso_path)
        logging.warning("Would you like to download it? (y/n)")
        iso_download = raw_input()
        if iso_download == 'y':
            utils.unmap_url_cache(destination, url, hash, method="sha1")
        else:
            logging.warning("Missing file %s. Please download it", iso_path)
    else:
        logging.debug("%s present, with proper checksum", iso_path)


if __name__ == "__main__":
    logging_manager.configure_logging(kvm_utils.KvmLoggingConfig(),
                                      verbose=True)
    logging.info("KVM test config helper")

    logging.info("1 - Verifying directories (check if the directory structure "
                 "expected by the default test config is there)")
    base_dir = "/tmp/kvm_autotest_root"
    sub_dir_list = ["images", "isos", "steps_data"]
    for sub_dir in sub_dir_list:
        sub_dir_path = os.path.join(base_dir, sub_dir)
        if not os.path.isdir(sub_dir_path):
            logging.debug("Creating %s", sub_dir_path)
            os.makedirs(sub_dir_path)
        else:
            logging.debug("Dir %s exists, not creating" %
                          sub_dir_path)
    logging.info("Do you want to setup NFS mounts for some of those "
Пример #41
0
Program that calculates several hashes for a given CD image.

@copyright: Red Hat 2008-2009
"""

import os, sys, optparse, logging
import common
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.bin import utils
from autotest_lib.client.virt import virt_utils

if __name__ == "__main__":
    parser = optparse.OptionParser("usage: %prog [options] [filenames]")
    options, args = parser.parse_args()

    logging_manager.configure_logging(virt_utils.VirtLoggingConfig())

    if args:
        filenames = args
    else:
        parser.print_help()
        sys.exit(1)

    for filename in filenames:
        filename = os.path.abspath(filename)

        file_exists = os.path.isfile(filename)
        can_read_file = os.access(filename, os.R_OK)
        if not file_exists:
            logging.critical("File %s does not exist!", filename)
            continue
Пример #42
0
                      dest="confirm",
                      action='store_true',
                      help='Answer yes to all questions')

    options, args = parser.parse_args()
    local_patch = options.local_patch
    id = options.id
    gh_id = options.gh_id
    debug = options.debug
    full_check = options.full_check
    confirm = options.confirm
    # Use the version control backend only if one exists in this checkout.
    vcs = VCS()
    if vcs.backend is None:
        vcs = None

    logging_manager.configure_logging(CheckPatchLoggingConfig(), verbose=debug)

    # Paths matching any of these patterns are excluded from checking.
    ignore_list = [
        'common.py', ".svn", ".git", '.pyc', ".orig", ".rej", ".bak"
    ]
    if full_check:
        logging.info("Autotest full tree check")
        logging.info("")
        # Walk the whole tree and check every file not on the ignore list.
        # NOTE(review): this excerpt is truncated after the 'if check:' line.
        for root, dirs, files in os.walk('.'):
            for file in files:
                check = True
                path = os.path.join(root, file)
                for pattern in ignore_list:
                    if re.search(pattern, path):
                        check = False
                if check:
Пример #43
0

if __name__ == "__main__":
    parser = optparse.OptionParser("usage: %prog [options] [filename]")
    parser.add_option('--verbose',
                      dest="debug",
                      action='store_true',
                      help='include debug messages in console output')

    options, args = parser.parse_args()
    debug = options.debug
    if args:
        filenames = args
    else:
        filenames = [os.path.join(os.path.dirname(sys.argv[0]), "tests.cfg")]

    # Here we configure the stand alone program to use the autotest
    # logging system.
    logging_manager.configure_logging(kvm_utils.KvmLoggingConfig(),
                                      verbose=debug)
    cfg = config(debug=debug)
    for fn in filenames:
        cfg.parse_file(fn)
    dicts = cfg.get_generator()
    for i, dict in enumerate(dicts):
        print "Dictionary #%d:" % (i)
        keys = dict.keys()
        keys.sort()
        for key in keys:
            print "    %s = %s" % (key, dict[key])
Пример #44
0
def main():
    """Entry point for suite_scheduler.py"""
    # Install handlers so the daemon shuts down cleanly on INT/HUP/TERM.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    parser, options, args = parse_options()
    # Stray positional args, or --events given without --build, is an error.
    if args or options.events and not options.build:
        parser.print_help()
        return 1

    if options.config_file and not os.path.exists(options.config_file):
        logging.error('Specified config file %s does not exist.',
                      options.config_file)
        return 1

    config = forgiving_config_parser.ForgivingConfigParser()
    config.read(options.config_file)

    # --list: just enumerate the supported event keywords and exit.
    if options.list:
        print 'Supported events:'
        for event_class in driver.Driver.EVENT_CLASSES:
            print '  ', event_class.KEYWORD
        return 0

    # If we're just sanity checking, we can stop after we've parsed the
    # config file.
    if options.sanity:
        # config_file_getter generates a high amount of noise at DEBUG level
        logging.getLogger().setLevel(logging.WARNING)
        d = driver.Driver(None, None, True)
        d.SetUpEventsAndTasks(config, None)
        tasks_per_event = d.TasksFromConfig(config)
        # flatten [[a]] -> [a]
        tasks = [x for y in tasks_per_event.values() for x in y]
        control_files_exist = sanity.CheckControlFileExistence(tasks)
        return control_files_exist

    logging_manager.configure_logging(SchedulerLoggingConfig(),
                                      log_dir=options.log_dir)
    if not options.log_dir:
        logging.info('Not logging to a file, as --log_dir was not passed.')

    # If server database is enabled, check if the server has role
    # `suite_scheduler`. If the server does not have suite_scheduler role,
    # exception will be raised and suite scheduler will not continue to run.
    if not server_manager_utils:
        raise ImportError(
            'Could not import autotest_lib.site_utils.server_manager_utils')
    if server_manager_utils.use_server_db():
        server_manager_utils.confirm_server_has_role(hostname='localhost',
                                                     role='suite_scheduler')

    afe_server = global_config.global_config.get_config_value(
        CONFIG_SECTION_SERVER, "suite_scheduler_afe", default=None)

    # Wire up the AFE client and the scheduling machinery.
    afe = frontend_wrappers.RetryingAFE(server=afe_server,
                                        timeout_min=10,
                                        delay_sec=5,
                                        debug=False)
    logging.info('Connecting to: %s', afe.server)
    enumerator = board_enumerator.BoardEnumerator(afe)
    scheduler = deduping_scheduler.DedupingScheduler(afe, options.file_bug)
    mv = manifest_versions.ManifestVersions(options.tmp_repo_dir)
    d = driver.Driver(scheduler, enumerator)
    d.SetUpEventsAndTasks(config, mv)

    # Set up metrics upload for Monarch.
    ts_mon_config.SetupTsMonGlobalState('autotest_suite_scheduler')

    try:
        if options.events:
            # Act as though listed events have just happened.
            keywords = re.split('\s*,\s*', options.events)
            if not options.tmp_repo_dir:
                logging.warn('To run a list of events, you may need to use '
                             '--repo_dir to specify a folder that already has '
                             'manifest repo set up. This is needed for suites '
                             'requiring firmware update.')
            logging.info('Forcing events: %r', keywords)
            d.ForceEventsOnceForBuild(keywords, options.build, options.os_type)
        else:
            # Normal daemon mode: loop forever processing events.
            if not options.tmp_repo_dir:
                mv.Initialize()
            d.RunForever(config, mv)
    except Exception as e:
        logging.error('Fatal exception in suite_scheduler: %r\n%s', e,
                      traceback.format_exc())
        return 1
Пример #45
0
def main(argv):
    """Test importer entry point.

    Parses options, then performs the requested database operations in order:
    clear-all, whitelist update, add-all, add-samples, tests-dir scan,
    profilers scan, and finally clean-broken.

    @param argv: full argv list (sys.argv), used to detect a bare invocation.
    @return: 0 on success, 1 on usage/validation errors.
    """

    global DRY_RUN
    parser = optparse.OptionParser()
    parser.add_option(
        '-c',
        '--db-clean-tests',
        dest='clean_tests',
        action='store_true',
        default=False,
        help='Clean client and server tests with invalid control files')
    parser.add_option('-C',
                      '--db-clear-all-tests',
                      dest='clear_all_tests',
                      action='store_true',
                      default=False,
                      help='Clear ALL client and server tests')
    parser.add_option('-d',
                      '--dry-run',
                      dest='dry_run',
                      action='store_true',
                      default=False,
                      help='Dry run for operation')
    parser.add_option('-A',
                      '--add-all',
                      dest='add_all',
                      action='store_true',
                      default=False,
                      help='Add site_tests, tests, and test_suites')
    parser.add_option('-S',
                      '--add-samples',
                      dest='add_samples',
                      action='store_true',
                      default=False,
                      help='Add samples.')
    parser.add_option('-E',
                      '--add-experimental',
                      dest='add_experimental',
                      action='store_true',
                      default=True,
                      help='Add experimental tests to frontend')
    parser.add_option('-N',
                      '--add-noncompliant',
                      dest='add_noncompliant',
                      action='store_true',
                      default=False,
                      help='Add non-compliant tests (i.e. tests that do not '
                      'define all required control variables)')
    parser.add_option('-p',
                      '--profile-dir',
                      dest='profile_dir',
                      help='Directory to recursively check for profiles')
    parser.add_option('-t',
                      '--tests-dir',
                      dest='tests_dir',
                      help='Directory to recursively check for control.*')
    parser.add_option(
        '-r',
        '--control-pattern',
        dest='control_pattern',
        default='^control.*',
        help='The pattern to look for in directories for control files')
    parser.add_option('-v',
                      '--verbose',
                      dest='verbose',
                      action='store_true',
                      default=False,
                      help='Run in verbose mode')
    parser.add_option('-w',
                      '--whitelist-file',
                      dest='whitelist_file',
                      help='Filename for list of test names that must match')
    parser.add_option('-z',
                      '--autotest-dir',
                      dest='autotest_dir',
                      default=os.path.join(os.path.dirname(__file__), '..'),
                      help='Autotest directory root')
    options, args = parser.parse_args()

    logging_manager.configure_logging(TestImporterLoggingConfig(),
                                      verbose=options.verbose)

    # In dry-run mode, quiet the logger so only warnings surface.
    DRY_RUN = options.dry_run
    if DRY_RUN:
        logging.getLogger().setLevel(logging.WARN)

    # Make sure autotest_dir is the absolute path
    options.autotest_dir = os.path.abspath(options.autotest_dir)

    if len(args) > 0:
        logging.error("Invalid option(s) provided: %s", args)
        parser.print_help()
        return 1

    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Bare invocation (no options, or only --verbose): do the default
    # update-everything-and-clean pass.
    if len(argv) == 1 or (len(argv) == 2 and options.verbose):
        update_all(options.autotest_dir, options.add_noncompliant,
                   options.add_experimental)
        db_clean_broken(options.autotest_dir)
        return 0

    # --db-clear-all-tests is exclusive with the add/clean options.
    if options.clear_all_tests:
        if (options.clean_tests or options.add_all or options.add_samples
                or options.add_noncompliant):
            logging.error(
                "Can only pass --autotest-dir, --dry-run and --verbose with "
                "--db-clear-all-tests")
            return 1
        db_clean_all(options.autotest_dir)

    whitelist_set = None
    if options.whitelist_file:
        if options.add_all:
            logging.error("Cannot pass both --add-all and --whitelist-file")
            return 1
        whitelist_path = os.path.abspath(options.whitelist_file)
        if not os.path.isfile(whitelist_path):
            logging.error("--whitelist-file (%s) not found", whitelist_path)
            return 1
        logging.info("Using whitelist file %s", whitelist_path)
        whitelist_set = _create_whitelist_set(whitelist_path)
        update_from_whitelist(whitelist_set,
                              add_experimental=options.add_experimental,
                              add_noncompliant=options.add_noncompliant,
                              autotest_dir=options.autotest_dir)
    if options.add_all:
        update_all(options.autotest_dir, options.add_noncompliant,
                   options.add_experimental)
    if options.add_samples:
        update_samples(options.autotest_dir, options.add_noncompliant,
                       options.add_experimental)
    if options.tests_dir:
        options.tests_dir = os.path.abspath(options.tests_dir)
        tests = get_tests_from_fs(options.tests_dir,
                                  options.control_pattern,
                                  add_noncompliant=options.add_noncompliant)
        update_tests_in_db(tests,
                           add_experimental=options.add_experimental,
                           add_noncompliant=options.add_noncompliant,
                           autotest_dir=options.autotest_dir)
    if options.profile_dir:
        profilers = get_tests_from_fs(options.profile_dir, '.*py$')
        update_profilers_in_db(profilers,
                               add_noncompliant=options.add_noncompliant,
                               description='NA')
    if options.clean_tests:
        db_clean_broken(options.autotest_dir)
Пример #46
0
    def __init__(self, control, options, drop_caches=True,
                 extra_copy_cmdline=None):
        """
        Prepare a client side job object.

        @param control: The control file (pathname of).
        @param options: an object which includes:
                jobtag: The job tag string (eg "default").
                cont: If this is the continuation of this job.
                harness_type: An alternative server harness.  [None]
                use_external_logging: If true, the enable_external_logging
                          method will be called during construction.  [False]
        @param drop_caches: If true, utils.drop_caches() is called before and
                between all tests.  [True]
        @param extra_copy_cmdline: list of additional /proc/cmdline arguments to
                copy from the running kernel to all the installed kernels with
                this job
        """
        # Resolve the standard autotest directory layout relative to $AUTODIR.
        self.autodir = os.environ['AUTODIR']
        self.bindir = os.path.join(self.autodir, 'bin')
        self.libdir = os.path.join(self.autodir, 'lib')
        self.testdir = os.path.join(self.autodir, 'tests')
        self.configdir = os.path.join(self.autodir, 'config')
        self.site_testdir = os.path.join(self.autodir, 'site_tests')
        self.profdir = os.path.join(self.autodir, 'profilers')
        self.tmpdir = os.path.join(self.autodir, 'tmp')
        self.toolsdir = os.path.join(self.autodir, 'tools')
        self.resultdir = os.path.join(self.autodir, 'results', options.tag)

        if not os.path.exists(self.resultdir):
            os.makedirs(self.resultdir)

        # A fresh (non-continuation) job starts from a clean results dir.
        if not options.cont:
            self._cleanup_results_dir()

        logging_manager.configure_logging(
                client_logging_config.ClientLoggingConfig(),
                results_dir=self.resultdir,
                verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        self.drop_caches_between_iterations = False
        self.drop_caches = drop_caches
        if self.drop_caches:
            logging.debug("Dropping caches")
            utils.drop_caches()

        # Persisted job state lives next to the control file.
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self.state_file = self.control + '.state'
        self.current_step_ancestry = []
        self.next_step_index = 0
        self.testtag = ''
        self._test_tag_prefix = ''

        self._load_state()
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':3600})
        self.pkgdir = os.path.join(self.autodir, 'packages')
        self.run_test_cleanup = self.get_state("__run_test_cleanup",
                                                default=True)

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self._load_sysinfo_state()

        self.last_boot_tag = self.get_state("__last_boot_tag", default=None)
        self.tag = self.get_state("__job_tag", default=None)

        if not options.cont:
            """
            Don't cleanup the tmp dir (which contains the lockfile)
            in the constructor, this would be a problem for multiple
            jobs starting at the same time on the same client. Instead
            do the delete at the server side. We simply create the tmp
            directory here if it does not already exist.
            """
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

            if not os.path.exists(self.pkgdir):
                os.mkdir(self.pkgdir)

            results = os.path.join(self.autodir, 'results')
            if not os.path.exists(results):
                os.mkdir(results)

            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            os.makedirs(os.path.join(self.resultdir, 'analysis'))

            # Keep a copy of the control file alongside the results.
            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))


        self.control = control
        self.jobtag = options.tag
        self.log_filename = self.DEFAULT_LOG_FILENAME

        # Redirect stdout/stderr through the logging manager from here on.
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        self.logging.start_logging()

        self._init_group_level()

        self.config = config.config(self)
        self.harness = harness.select(options.harness, self)
        self.profilers = profilers.profilers(self)

        # boottool is optional; missing configuration is silently tolerated.
        try:
            tool = self.config_get('boottool.executable')
            self.bootloader = boottool.boottool(tool)
        except:
            pass

        self.sysinfo.log_per_reboot_data()

        if not options.cont:
            self.record('START', None, None)
            self._increment_group_level()

        self.harness.run_start()

        if options.log:
            self.enable_external_logging()

        # load the max disk usage rate - default to no monitoring
        self.max_disk_usage_rate = self.get_state('__monitor_disk', default=0.0)

        copy_cmdline = set(['console'])
        if extra_copy_cmdline is not None:
            copy_cmdline.update(extra_copy_cmdline)

        # extract console= and other args from cmdline and add them into the
        # base args that we use for all kernels we install
        cmdline = utils.read_one_line('/proc/cmdline')
        kernel_args = []
        for karg in cmdline.split():
            for param in copy_cmdline:
                if karg.startswith(param) and \
                    (len(param) == len(karg) or karg[len(param)] == '='):
                    kernel_args.append(karg)
        self.config_set('boot.default_args', ' '.join(kernel_args))