def run_once(self, expect_ssp=None):
        """There is no body for this test.

        @param expect_ssp: If True, ensure test is running inside a container.
                If False, ensure test is not running inside a container.
                If None (default), do nothing.
        """
        if expect_ssp is not None:
            if expect_ssp and not utils.is_in_container():
                raise error.TestFail(
                    'The test is not running inside a container.')
            if not expect_ssp and utils.is_in_container():
                raise error.TestFail(
                    'The test is running inside a container.')
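A minimal sketch of how this check might be driven from a control file; the test name dummy_PassServer is an assumed example, not taken from the snippet:

# Hypothetical control-file line: run the test and require that it executes
# inside an SSP container.
job.run_test('dummy_PassServer', expect_ssp=True)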
Example #2
    def __init__(self, path, user, server, print_log, debug, reply_debug):
        """
        Create a cached instance of a connection to the frontend

            user: username to connect as
            server: frontend server to connect to
            print_log: pring a logging message to stdout on every operation
            debug: print out all RPC traffic
        """
        if not user and utils.is_in_container():
            user = GLOBAL_CONFIG.get_config_value('SSP', 'user', default=None)
        if not user:
            user = getpass.getuser()
        if not server:
            if 'AUTOTEST_WEB' in os.environ:
                server = os.environ['AUTOTEST_WEB']
            else:
                server = GLOBAL_CONFIG.get_config_value('SERVER',
                                                        'hostname',
                                                        default=DEFAULT_SERVER)
        self.server = server
        self.user = user
        self.print_log = print_log
        self.debug = debug
        self.reply_debug = reply_debug
        headers = {'AUTHORIZATION': self.user}
        rpc_server = 'http://' + server + path
        if debug:
            print 'SERVER: %s' % rpc_server
            print 'HEADERS: %s' % headers
        self.proxy = rpc_client_lib.get_proxy(rpc_server, headers=headers)
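A sketch of how this constructor might be used; the class name RpcClient and the RPC path are assumptions for illustration:

# Hypothetical usage: fall back to the configured SSP user (or the current
# user) and the default server, and echo all RPC traffic for debugging.
afe = RpcClient('/afe/server/noauth/rpc/', user=None, server=None,
                print_log=False, debug=True, reply_debug=False)
# RPC calls are then issued through afe.proxy.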
Example #3
def get_job_id_or_task_id(result_dir):
    """Extract job id or special task id from result_dir

    @param result_dir: path to the result dir.
            For test job:
            /usr/local/autotest/results/2032-chromeos-test/chromeos1-rack5-host6
            The hostname at the end is optional.
            For special task:
            /usr/local/autotest/results/hosts/chromeos1-rack5-host6/1343-cleanup

    @returns: str representing the job id or task id. Returns None if it
        fails to parse a job or task id from the result_dir.
    """
    if not result_dir:
        return
    result_dir = os.path.abspath(result_dir)
    # The result folder for a job running inside a container has only the
    # job id.
    ssp_job_pattern = r'.*/(\d+)$'
    # Try to get the job ID from the last pattern of number-text. This avoids
    # issues with paths like 123-results/456-debug_user, in which 456 is the
    # real job ID.
    m_job = re.findall(r'.*/(\d+)-[^/]+', result_dir)
    if m_job:
        return m_job[-1]
    m_special_task = re.match(SPECIAL_TASK_PATTERN, result_dir)
    if m_special_task:
        return m_special_task.group(1)
    m_ssp_job_pattern = re.match(ssp_job_pattern, result_dir)
    if m_ssp_job_pattern and utils.is_in_container():
        return m_ssp_job_pattern.group(1)
    return _get_swarming_run_id(result_dir)
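The docstring's example paths make the parsing behavior concrete; a few illustrative checks, derived only from the docstring above:

# The last <number>-<text> component carries the job id.
assert get_job_id_or_task_id(
        '/usr/local/autotest/results/2032-chromeos-test/'
        'chromeos1-rack5-host6') == '2032'
# Special tasks are parsed the same way from the <id>-<taskname> component.
assert get_job_id_or_task_id(
        '/usr/local/autotest/results/hosts/chromeos1-rack5-host6/'
        '1343-cleanup') == '1343'
# A missing result_dir yields None.
assert get_job_id_or_task_id(None) is None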
Example #4
    def run_once(self, force_ssp=False):
        """There is no body for this test.

        @param force_ssp: True to force the test to run using server-side
                packaging. Test shall fail if not running inside a container.
                Default is set to False.
        """
        if force_ssp and not utils.is_in_container():
            raise error.TestFail('The test is not running inside container.')
        return
Example #5
def get_chart_address(host_address, args):
    """Get address of chart tablet from commandline args or mapping logic in
    test lab.

    @param host_address: a list of hostname strings.
    @param args: a dict parse from commandline args.
    @return:
        A list of strings for chart tablet addresses.
    """
    address = utils.args_to_dict(args).get('chart')
    if address is not None:
        return address.split(',')
    elif utils.is_in_container():
        return [utils.get_lab_chart_address(host) for host in host_address]
    else:
        return None
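A short sketch of the commandline-override path; the argument list and addresses are made up for illustration, and assume utils.args_to_dict accepts 'key=value' strings:

# With an explicit 'chart' argument, the lab mapping logic is bypassed.
args = ['chart=chart-addr1,chart-addr2']
assert get_chart_address(['host1', 'host2'], args) == [
        'chart-addr1', 'chart-addr2']
# Without the argument, the lab mapping is used only inside a container;
# otherwise the function returns None.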
Example #6
def main():
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job is required to run with the server-side package, try to stage
    # the server-side package first. If that fails because the autotest server
    # package does not exist, fall back to running the job without server-side
    # packaging. If option warn_no_ssp is specified, autoserv is running in a
    # drone that does not support SSP, so there is no need to stage the
    # server-side package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have an autotest server package. Fall back to not
        # using the server-side package. Logging is postponed until logging is
        # set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified that there is no leftover results dir from
        # previous jobs, let's create the results dir, since the logging
        # system needs to create its log file there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, autoserv is running in a
    # drone that does not support SSP, and a warning will be logged;
    # therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
            'Autoserv is required to run with server-side packaging. '
            'However, no server-side package can be found based on '
            '`--image`, host attribute job_repo_url or host OS version '
            'label. It could be that the build to test is older than the '
            'minimum version that supports server-side packaging. The test '
            'will be executed without using server-side packaging. '
            'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Take the first argument as the control file name and get the test name
    # from the control file. Initialize test_name first so it is defined even
    # when parsing is skipped or fails.
    test_name = None
    if (len(parser.args) > 0 and parser.args[0] != ''
            and parser.options.machines):
        try:
            test_name = control_data.parse_control(
                parser.args[0], raise_warnings=True).name
        except control_data.ControlVariableException as e:
            logging.debug(
                'Failed to retrieve test name from control file: %s', e)
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
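    # For illustration, a hypothetical shadow_config entry that mocks results
    # for everything except suite jobs and dummy_Pass could look like:
    #
    #   [AUTOSERV]
    #   testing_mode: True
    #   testing_exceptions: test_suite,dummy_Pass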
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                        test_name if test_name else 'unknown-test',
                        parser.options.results, machine).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
Example #7
def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    @param pid_file_manager: PidFileManager used to monitor the autoserv process
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: URL of the server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging. '
                     'However, no drone that supports server-side packaging '
                     'was found. The test will be executed in a drone without '
                     'server-side packaging support.')

    # Redirect stdin to read from /dev/null.
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create a separate process group if the process is not a process group
    # leader. This allows the autoserv process to keep running after the
    # caller process (the drone manager call) exits.
    if os.getpid() != os.getpgid(0):
        os.setsid()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
        parser.options.results)
    container_name = (lxc.TEST_CONTAINER_NAME_FMT %
                      (job_or_task_id, time.time(), os.getpid()))
    job_folder = job_directories.get_job_folder_name(parser.options.results)
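    # For illustration: assuming lxc.TEST_CONTAINER_NAME_FMT is a format
    # string like 'test_%s_%d_%d', container_name would render to something
    # like 'test_2032_1511211111_8765' (job/task id, timestamp, autoserv pid).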

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug(
                'Destroy container %s before aborting the autoserv '
                'process.', container_name)
            metadata = {
                'drone': socket.gethostname(),
                'job_id': job_or_task_id,
                'container_name': container_name,
                'action': 'abort',
                'success': True
            }
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get(container_name)
                if container:
                    container.destroy()
                else:
                    metadata['success'] = False
                    metadata['error'] = 'container not found'
                    logging.debug('Container %s is not found.', container_name)
            except:
                metadata['success'] = False
                metadata['error'] = 'Exception: %s' % str(sys.exc_info())
                # Handle any exception so the autoserv process can be aborted.
                logging.exception('Failed to destroy container %s.',
                                  container_name)
            autotest_es.post(use_http=True,
                             type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                             metadata=metadata)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not available to
    # be imported in the chroot as part of VMTest, so wrap it in a try/except.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOU's generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we receive a SIGALRM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server side tests that call shell scripts often depend on $USER being set
    # but depending on how you launch your autotest scheduler it may not be set.
    os.environ['USER'] = getpass.getuser()

    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging
    host_attributes = parser.options.host_attributes
    in_lab = bool(parser.options.lab)

    # can't be both a client and a server side test
    if client and server:
        parser.parser.error(
            "Cannot specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    is_special_task = (verify or repair or cleanup or collect_crashinfo
                       or provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''
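    # For example, ssh_verbosity == 3 yields '-vvv'; the flag is handed to
    # server_job below and is ultimately passed through to the ssh command
    # line.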

    # We have a control file unless it's just a verify/repair/cleanup job
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    kwargs = {
        'group_name': group_name,
        'tag': execution_tag,
        'disable_sysinfo': parser.options.disable_sysinfo
    }
    if parser.options.parent_job_id:
        kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        kwargs['control_filename'] = control_filename
    if host_attributes:
        kwargs['host_attributes'] = host_attributes
    kwargs['in_lab'] = in_lab
    job = server_job.server_job(control, parser.args[1:], results, label, user,
                                machines, client, parse_job, ssh_user,
                                ssh_port, ssh_pass, ssh_verbosity_flag,
                                ssh_options, test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
    auto_start_servod = _CONFIG.get_config_value('AUTOSERV',
                                                 'auto_start_servod',
                                                 type=bool,
                                                 default=False)

    site_utils.SetupTsMonGlobalState('autoserv',
                                     indirect=False,
                                     short_lived=True)
    try:
        try:
            if repair:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                job.repair(job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(job, container_name, job_or_task_id,
                                      results, parser, ssp_url, job_folder,
                                      machines)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    if collect_crashinfo:
                        # Update the ownership of files in result folder. If the
                        # job to collect crashinfo was running inside container
                        # (SSP) and crashed before correcting folder permission,
                        # the result folder might have wrong permission setting.
                        try:
                            correct_results_folder_permission(results)
                        except:
                            # Ignore any error as the user may not have root
                            # permission to run sudo command.
                            pass
                    metric_name = ('chromeos/autotest/experimental/'
                                   'autoserv_job_run_duration')
                    f = {
                        'in_container': utils.is_in_container(),
                        'success': False
                    }
                    with metrics.SecondsTimer(metric_name, fields=f) as c:
                        job.run(install_before,
                                install_after,
                                verify_job_repo_url=verify_job_repo_url,
                                only_collect_crashinfo=collect_crashinfo,
                                skip_crash_collection=skip_crash_collection,
                                job_labels=job_labels,
                                use_packaging=(not no_use_packaging))
                        c['success'] = True

        finally:
            while job.hosts:
                host = job.hosts.pop()
                host.close()
    except:
        exit_code = 1
        traceback.print_exc()
    finally:
        metrics.Flush()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)