Example #1
def perform_bootstrap_into_autotest_root(arguments, autotest_path, argv):
    """
    Performs a bootstrap to run test_that from the |autotest_path|.

    This function is to be called from test_that's main() script, when
    test_that is executed from the source tree location. It runs
    autotest_quickmerge to update the sysroot unless arguments.no_quickmerge
    is set. It then executes and waits on the version of test_that.py
    in |autotest_path|.

    @param arguments: A parsed arguments object, as returned from
                      test_that.parse_arguments(...).
    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)

    @returns: The return code of the test_that script that was executed in
              |autotest_path|.
    """
    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            use_console=True,
            verbose=arguments.debug)
    if arguments.no_quickmerge:
        logging.info('Skipping quickmerge step.')
    else:
        logging.info('Running autotest_quickmerge step.')
        command = [_QUICKMERGE_SCRIPTNAME, '--board='+arguments.board]
        s = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        for message in iter(s.stdout.readline, b''):
            logging.info('quickmerge| %s', message.strip())
        return_code = s.wait()
        if return_code:
            raise test_runner_utils.TestThatRunError(
                    'autotest_quickmerge failed with error code %s.' %
                    return_code)

    logging.info('Re-running test_that script in %s copy of autotest.',
                 autotest_path)
    script_command = os.path.join(autotest_path, 'site_utils',
                                  'test_that.py')
    if not os.path.exists(script_command):
        raise test_runner_utils.TestThatRunError(
            'Unable to bootstrap to autotest root, %s not found.' %
            script_command)
    proc = None
    def resend_sig(signum, stack_frame):
        #pylint: disable-msg=C0111
        if proc:
            proc.send_signal(signum)
    signal.signal(signal.SIGINT, resend_sig)
    signal.signal(signal.SIGTERM, resend_sig)

    proc = subprocess.Popen([script_command] + argv)

    return proc.wait()
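The signal-forwarding idiom above (install handlers that re-send SIGINT/SIGTERM to the child, then wait on it) is the part worth isolating. A minimal standalone sketch of the same idiom follows; run_forwarding_signals is an illustrative name, not an autotest helper, and nothing here depends on test_that.

import signal
import subprocess


def run_forwarding_signals(command):
    # Illustrative sketch: run |command| as a child process, forwarding
    # SIGINT and SIGTERM to it so the child can clean up, then return the
    # child's exit code.
    proc = None

    def resend_sig(signum, _stack_frame):
        # Forward the signal to the child once it has been started. The
        # closure sees the rebinding of |proc| below.
        if proc:
            proc.send_signal(signum)

    signal.signal(signal.SIGINT, resend_sig)
    signal.signal(signal.SIGTERM, resend_sig)
    proc = subprocess.Popen(command)
    return proc.wait()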
Example #2
def _start(args):
    """Starts up the container pool service.

    This function instantiates and starts up the pool service on the current
    thread (i.e. the function will block, and not return until the service is
    shut down).
    """
    # TODO(dshi): crbug.com/459344 Remove this enforcement once the test
    # container can run as an unprivileged container.
    if utils.sudo_require_password():
        logging.warning('SSP requires root privilege to run commands, please '
                        'grant root access to this process.')
        utils.run('sudo true')

    # Configure logging.
    config = server_logging_config.ServerLoggingConfig()
    config.configure_logging(verbose=args.verbose)
    config.add_debug_file_handlers(log_dir=_LOG_LOCATION, log_name=_LOG_NAME)
    # Pool code is heavily multi-threaded.  This will help debugging.
    logging_config.add_threadname_in_log()

    host_dir = lxc.SharedHostDir()
    service = container_pool.Service(host_dir)
    # Catch signals, and send the appropriate stop request to the service
    # instead of killing the main thread.
    # - SIGINT is generated by Ctrl-C
    # - SIGTERM is generated by an upstart stopping event.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, lambda s, f: service.stop())

    with ts_mon_config.SetupTsMonGlobalState(service_name='lxc_pool_service',
                                             indirect=True,
                                             short_lived=False):
        # Start the service.  This blocks and does not return till the service
        # shuts down.
        service.start(pool_size=args.size)
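The comments above describe the shutdown contract: signals request a stop instead of killing the main thread, and start() blocks until stop() is called. A minimal sketch of that contract using threading.Event; _SketchService is a stand-in under stated assumptions, not the real container_pool.Service API.

import signal
import threading


class _SketchService(object):
    # Stand-in for a blocking service like container_pool.Service.

    def __init__(self):
        self._stop_event = threading.Event()

    def start(self):
        # Block until stop() is called. Wait with a timeout so signal
        # handlers get a chance to run promptly on all platforms.
        while not self._stop_event.wait(timeout=1.0):
            pass

    def stop(self):
        self._stop_event.set()


def _serve():
    service = _SketchService()
    # Route SIGINT (Ctrl-C) and SIGTERM (init-system stop) to a clean stop
    # request instead of letting them kill the main thread.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, lambda s, f: service.stop())
    service.start()  # Blocks until a signal arrives.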
Example #3
def perform_run_from_autotest_root(autotest_path,
                                   argv,
                                   tests,
                                   remote,
                                   build=NO_BUILD,
                                   board=NO_BOARD,
                                   args=None,
                                   pretend=False,
                                   no_experimental=False,
                                   ignore_deps=True,
                                   results_directory=None,
                                   ssh_verbosity=0,
                                   ssh_options=None,
                                   iterations=1,
                                   fast_mode=False,
                                   debug=False,
                                   whitelist_chrome_crashes=False,
                                   host_attributes={}):
    """
    Perform a test_that run from the |autotest_path|.

    This function is to be called from test_that/test_droid's main() script,
    when tests are executed from the |autotest_path|. It handles all stages
    of a test run that come after the bootstrap into |autotest_path|.

    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param no_experimental: Skip experimental tests when scheduling a suite.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Must be an
                              existing, valid directory; the caller is
                              responsible for creating it.
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param iterations: int number of times to schedule tests.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param debug: Logging and autoserv verbosity.
    @param whitelist_chrome_crashes: If True, whitelist chrome crashes.
    @param host_attributes: Dict of host attributes to pass into autoserv.

    @returns: A return code that test_that should exit with.
    """
    if results_directory is None or not os.path.exists(results_directory):
        raise ValueError('Expected valid results directory, got %s' %
                         results_directory)

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=results_directory,
        use_console=True,
        verbose=debug,
        debug_log_name='test_that')
    logging.info('Began logging to %s', results_directory)

    logging.debug('test_that command line was: %s', argv)

    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    afe = setup_local_afe()
    codes = perform_local_run(afe,
                              autotest_path,
                              tests,
                              remote,
                              fast_mode,
                              build,
                              board,
                              args=args,
                              pretend=pretend,
                              no_experimental=no_experimental,
                              ignore_deps=ignore_deps,
                              results_directory=results_directory,
                              ssh_verbosity=ssh_verbosity,
                              ssh_options=ssh_options,
                              autoserv_verbose=debug,
                              iterations=iterations,
                              host_attributes=host_attributes)
    if pretend:
        logging.info('Finished pretend run. Exiting.')
        return 0

    final_result = generate_report(
        results_directory,
        whitelist_chrome_crashes=whitelist_chrome_crashes,
        html_report=True)
    try:
        os.unlink(_LATEST_RESULTS_DIRECTORY)
    except OSError:
        pass
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(_LATEST_RESULTS_DIRECTORY))
    if any(codes):
        logging.error('Autoserv encountered unexpected errors '
                      'when executing jobs.')
        final_result = final_result or 1
    os.symlink(link_target, _LATEST_RESULTS_DIRECTORY)
    logging.info('Finished running tests. Results can be found in %s or %s',
                 results_directory, _LATEST_RESULTS_DIRECTORY)
    return final_result
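The final symlink update above (remove the stale "latest" link, compute a relative target, re-link) is a small reusable pattern. A hedged sketch; update_latest_symlink is an illustrative name, not an existing autotest helper.

import os


def update_latest_symlink(results_directory, latest_link):
    # Point |latest_link| at |results_directory|, replacing any stale link.
    try:
        os.unlink(latest_link)
    except OSError:
        # The link did not exist yet; nothing to remove.
        pass
    # A relative target keeps the link valid if the parent directory moves.
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(latest_link))
    os.symlink(link_target, latest_link)

For example, update_latest_symlink(results_directory, '/tmp/test_that_latest') mirrors what the code above does with _LATEST_RESULTS_DIRECTORY (the link path here is illustrative).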
Example #4
def main():
    start_time = datetime.datetime.now()
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified there is no leftover results dir from
        # previous jobs, create the results dir, since the logging system
        # needs to create its log file there.
        if not os.path.isdir(results):
            os.makedirs(results)

    if parser.options.require_ssp:
        # This is currently only used for skylab (i.e., when --control-name is
        # used).
        use_ssp = _require_ssp_from_control(parser.options.control_name)
    else:
        use_ssp = False

    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))
    logging.debug('autoserv parsed options: %s', parser.options)

    if use_ssp:
        ssp_url = _stage_ssp(parser, results)
    else:
        ssp_url = None

    if results:
        logging.info("Results placed in %s" % results)

        # Wait until now to perform this check, so it gets properly logged.
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.Autotest.set_install_in_tmpdir(parser.options.install_in_tmpdir)

    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)

    trace_labels = {
        'job_id': job_directories.get_job_id_or_task_id(parser.options.results)
    }
    trace = cloud_trace.SpanStack(
        labels=trace_labels, global_context=parser.options.cloud_trace_context)
    trace.enabled = parser.options.cloud_trace_context_enabled == 'True'
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker('unknown-test',
                                                 parser.options.results,
                                                 machine).mock_results()
                return
            else:
                with trace.Span(get_job_status(parser.options)):
                    run_autoserv(pid_file_manager, results, parser, ssp_url,
                                 use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
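The exit-code plumbing at the end of main() has a fixed shape: preserve an explicit code from SystemExit, map unknown exceptions to 1, and always record the code in the pidfile before exiting. A minimal sketch of that shape; job_fn and pid_file_manager are stand-ins for run_autoserv and pidfile.PidFileManager, not the actual call sites.

import logging
import sys


def _exit_with_recorded_code(job_fn, pid_file_manager=None):
    exit_code = 0
    try:
        try:
            job_fn()
        except SystemExit as e:
            # Preserve an explicit exit code raised by the job.
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # Unknown failure: classify it as an abort and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            # Record the final exit code before the process goes away.
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)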
Example #5
def main():
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job is required to run with a server-side package, try to stage
    # the server-side package first. If that fails because the autotest server
    # package does not exist, fall back to running the job without server-side
    # packaging. If option warn_no_ssp is specified, autoserv is running in a
    # drone that does not support SSP, so there is no need to stage the
    # server-side package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have an autotest server package. Fall back to not
        # using the server-side package. The warning is postponed until
        # logging has been set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified there is no leftover results dir from
        # previous jobs, create the results dir, since the logging system
        # needs to create its log file there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it is required and the
    # package is available. If warn_no_ssp is specified, autoserv is running
    # in a drone that does not support SSP, and a warning will be logged;
    # therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warning(
            'Autoserv is required to run with server-side packaging. '
            'However, no server-side package can be found based on '
            '`--image`, host attribute job_repo_url or host OS version '
            'label. It could be that the build to test is older than the '
            'minimum version that supports server-side packaging. The test '
            'will be executed without using server-side packaging. '
            'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s" % results)

        # Wait until now to perform this check, so it gets properly logged.
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Take the first argument as the control file name and get the test name
    # from the control file; leave test_name as None if it cannot be parsed.
    test_name = None
    try:
        if (len(parser.args) > 0 and parser.args[0] != ''
                and parser.options.machines):
            try:
                test_name = control_data.parse_control(
                    parser.args[0], raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug(
                    'Failed to retrieve test name from control file.')
                test_name = None
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                        test_name if test_name else 'unknown-test',
                        parser.options.results, machine).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
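The duration bookkeeping in the finally block (snapshot the start time on entry, compute total_seconds() just before the process exits) generalizes beyond autoserv. A small sketch, with record_duration standing in for record_autoserv:

import datetime


def _timed(body_fn, record_duration):
    start_time = datetime.datetime.now()
    try:
        body_fn()
    finally:
        # Measure just before returning so the recorded figure is accurate
        # even when body_fn raises or calls sys.exit().
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_duration(duration_secs)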