Example #1
def main():
    options, args = parse_args()
    results_dir = os.path.abspath(args[0])
    assert os.path.exists(results_dir)

    pid_file_manager = pidfile.PidFileManager("parser", results_dir)

    if options.write_pidfile:
        pid_file_manager.open_file()

    try:
        # build up the list of job dirs to parse
        if options.singledir:
            jobs_list = [results_dir]
        else:
            jobs_list = [
                os.path.join(results_dir, subdir)
                for subdir in os.listdir(results_dir)
            ]

        # build up the database
        db = tko_db.db(autocommit=False,
                       host=options.db_host,
                       user=options.db_user,
                       password=options.db_pass,
                       database=options.db_name)

        # parse all the jobs
        for path in jobs_list:
            lockfile = open(os.path.join(path, ".parse.lock"), "w")
            flags = fcntl.LOCK_EX
            if options.noblock:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(lockfile, flags)
            except IOError as e:
                # lock is not available and nonblock has been requested
                if e.errno == errno.EWOULDBLOCK:
                    lockfile.close()
                    continue
                else:
                    raise  # something unexpected happened
            try:
                parse_path(db, path, options.level, options.reparse,
                           options.mailit)

            finally:
                fcntl.flock(lockfile, fcntl.LOCK_UN)
                lockfile.close()

    except:
        pid_file_manager.close_file(1)
        raise
    else:
        pid_file_manager.close_file(0)
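
The per-directory locking in Example #1 relies on fcntl.flock with an optional non-blocking flag. The standalone sketch below isolates that pattern; the helper names (try_lock, release_lock) are illustrative and not part of Autotest.

import errno
import fcntl
import os


def try_lock(job_dir, nonblocking=True):
    """Take an exclusive lock on job_dir/.parse.lock, or return None if busy."""
    lockfile = open(os.path.join(job_dir, ".parse.lock"), "w")
    flags = fcntl.LOCK_EX
    if nonblocking:
        flags |= fcntl.LOCK_NB
    try:
        fcntl.flock(lockfile, flags)
    except IOError as e:
        if e.errno == errno.EWOULDBLOCK:
            # Another parser already holds the lock; skip this job dir.
            lockfile.close()
            return None
        raise
    return lockfile


def release_lock(lockfile):
    """Release and close a lock previously returned by try_lock()."""
    fcntl.flock(lockfile, fcntl.LOCK_UN)
    lockfile.close()
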
Example #2
def main():
    start_time = datetime.datetime.now()
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified there is no leftover results dir from
        # previous jobs, let's create the results dir, since the logging
        # system needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    if parser.options.require_ssp:
        # This is currently only used for skylab (i.e., when --control-name is
        # used).
        use_ssp = _require_ssp_from_control(parser.options.control_name)
    else:
        use_ssp = False

    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))
    logging.debug('autoserv parsed options: %s', parser.options)

    if use_ssp:
        ssp_url = _stage_ssp(parser, results)
    else:
        ssp_url = None

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.Autotest.set_install_in_tmpdir(parser.options.install_in_tmpdir)

    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)

    trace_labels = {
        'job_id': job_directories.get_job_id_or_task_id(parser.options.results)
    }
    trace = cloud_trace.SpanStack(
        labels=trace_labels, global_context=parser.options.cloud_trace_context)
    trace.enabled = parser.options.cloud_trace_context_enabled == 'True'
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker('unknown-test',
                                                 parser.options.results,
                                                 machine).mock_results()
                return
            else:
                with trace.Span(get_job_status(parser.options)):
                    run_autoserv(pid_file_manager, results, parser, ssp_url,
                                 use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s',
                                  exit_code)
        except Exception:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
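
Example #2 funnels every outcome into a single exit code: a SystemExit keeps its code, any other exception becomes 1, and the pidfile is closed with that code before the process exits. Below is a minimal sketch of that pattern, with run_job and close_pidfile as hypothetical stand-ins for run_autoserv and PidFileManager.close_file.

import logging
import sys


def run_with_exit_code(run_job, close_pidfile=None):
    exit_code = 0
    try:
        try:
            run_job()
        except SystemExit as e:
            # Preserve an explicit exit code; sys.exit() with no argument
            # leaves e.code as None, which we treat as success.
            exit_code = e.code or 0
            if exit_code:
                logging.exception('Uncaught SystemExit with code %s', exit_code)
        except Exception:
            # Unknown failure: classify it as an abort and return 1.
            logging.exception('Uncaught Exception, exit_code = 1.')
            exit_code = 1
    finally:
        if close_pidfile:
            close_pidfile(exit_code)
    sys.exit(exit_code)
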
Example #3
def main():
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified there is no leftover results dir from
        # previous jobs, let's create the results dir, since the logging
        # system needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=results,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)
    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it gets properly logged
        if parser.options.use_existing_results and not resultdir_exists:
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    if parser.options.write_pidfile:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest_remote.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    exit_code = 0
    try:
        try:
            run_autoserv(pid_file_manager, results, parser)
        except SystemExit as e:
            exit_code = e.code
        except:
            traceback.print_exc()
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
    sys.exit(exit_code)
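
Examples #2 and #3 delegate log setup to Autotest's logging_manager and ServerLoggingConfig, whose internals are not shown here. As a rough stdlib analogue only, the sketch below logs to a file under the results directory and optionally echoes to the console; the function and the file name are illustrative assumptions, not Autotest behavior.

import logging
import os


def setup_basic_logging(results_dir=None, use_console=True, verbose=False):
    handlers = []
    if results_dir:
        # Illustrative file name; Autotest's real log layout may differ.
        handlers.append(logging.FileHandler(os.path.join(results_dir, 'debug.log')))
    if use_console:
        handlers.append(logging.StreamHandler())
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO,
                        handlers=handlers,
                        format='%(asctime)s %(levelname)s %(message)s')
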
Example #4
def main():
    start_time = datetime.datetime.now()
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job is required to run with a server-side package, try to stage
    # the server-side package first. If that fails because the autotest server
    # package does not exist, fall back to running the job without server-side
    # packaging. If the warn_no_ssp option is specified, autoserv is running in
    # a drone that does not support SSP, so there is no need to stage the
    # server-side package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # The build does not have an autotest server package. Fall back to not
        # using the server-side package. Logging of this is postponed until
        # logging has been set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we have verified there is no leftover results dir from
        # previous jobs, let's create the results dir, since the logging
        # system needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it is required and the package
    # is available. If warn_no_ssp is specified, autoserv is running in a drone
    # that does not support SSP, and a warning will be logged; therefore, it
    # should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
        server_logging_config.ServerLoggingConfig(),
        results_dir=log_dir,
        use_console=not parser.options.no_tee,
        verbose=parser.options.verbose,
        no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
            'Autoserv is required to run with server-side packaging. '
            'However, no server-side package can be found based on '
            '`--image`, host attribute job_repo_url or host OS version '
            'label. It could be that the build to test is older than the '
            'minimum version that supports server-side packaging. The test '
            'will be executed without using server-side packaging. '
            'Following is the detailed error:\n%s', ssp_error_msg)

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it gets properly logged
        if (parser.options.use_existing_results and not resultdir_exists
                and not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Default the test name so it is defined even when it cannot be read from
    # the control file below.
    test_name = None
    try:
        # Take the first argument as control file name, get the test name from
        # the control file.
        if (len(parser.args) > 0 and parser.args[0] != ''
                and parser.options.machines):
            try:
                test_name = control_data.parse_control(
                    parser.args[0], raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug(
                    'Failed to retrieve test name from control file.')
                test_name = None
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value('AUTOSERV',
                                                  'testing_exceptions',
                                                  type=list,
                                                  default=[])
    test_mode = _CONFIG.get_config_value('AUTOSERV',
                                         'testing_mode',
                                         type=bool,
                                         default=False)
    test_mode = (
        results_mocker and test_mode
        and not any([ex in parser.options.label for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair
               or parser.options.provision or parser.options.reset
               or parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                        test_name if test_name else 'unknown-test',
                        parser.options.results, machine).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration. This must be done just before the
        # process exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
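
Several of the examples repeat the same results-directory guard: refuse to reuse a directory that already contains Autoserv's marker files unless use_existing_results is set, then create it if needed. A small sketch of that check; the function name and parameters are illustrative.

import os
import sys
import time


def prepare_results_dir(results=None, use_existing=False):
    if not results:
        results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
    results = os.path.abspath(results)
    # The presence of any of these marker files means the directory was
    # already used by a previous job.
    markers = ('control.srv', 'status.log', '.autoserv_execute')
    already_used = any(os.path.exists(os.path.join(results, name))
                       for name in markers)
    if already_used and not use_existing:
        sys.stderr.write("Error: results directory already exists: %s\n"
                         % results)
        sys.exit(1)
    if not os.path.isdir(results):
        os.makedirs(results)
    return results
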
Example #5
def main():
    """Main entrance."""
    start_time = datetime.datetime.now()
    # Record the processed jobs so that
    # we can send the duration of parsing to metadata db.
    processed_jobs = set()

    options, args = parse_args()
    parse_options = _ParseOptions(options.reparse, options.mailit,
                                  options.dry_run, options.suite_report,
                                  options.datastore_creds,
                                  options.export_to_gcloud_path)
    results_dir = os.path.abspath(args[0])
    assert os.path.exists(results_dir)

    pid_file_manager = pidfile.PidFileManager("parser", results_dir)

    if options.write_pidfile:
        pid_file_manager.open_file()

    try:
        # build up the list of job dirs to parse
        if options.singledir:
            jobs_list = [results_dir]
        else:
            jobs_list = [
                os.path.join(results_dir, subdir)
                for subdir in os.listdir(results_dir)
            ]

        # build up the database
        db = tko_db.db(autocommit=False,
                       host=options.db_host,
                       user=options.db_user,
                       password=options.db_pass,
                       database=options.db_name)

        # parse all the jobs
        for path in jobs_list:
            lockfile = open(os.path.join(path, ".parse.lock"), "w")
            flags = fcntl.LOCK_EX
            if options.noblock:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(lockfile, flags)
            except IOError as e:
                # lock is not available and nonblock has been requested
                if e.errno == errno.EWOULDBLOCK:
                    lockfile.close()
                    continue
                else:
                    raise  # something unexpected happened
            try:
                new_jobs = parse_path(db, path, options.level, parse_options)
                processed_jobs.update(new_jobs)

            finally:
                fcntl.flock(lockfile, fcntl.LOCK_UN)
                lockfile.close()

    except Exception as e:
        pid_file_manager.close_file(1)

        metadata = {
            'results_dir': results_dir,
            'error': str(e),
            'details': traceback.format_exc()
        }
        autotest_es.post(use_http=True,
                         type_str='parse_failure_final',
                         metadata=metadata)

        raise
    else:
        pid_file_manager.close_file(0)
    duration_secs = (datetime.datetime.now() - start_time).total_seconds()
    if options.record_duration:
        record_parsing(processed_jobs, duration_secs)
Example #6
def _main_with_options(options, args):
    """Entry point with options parsed and metrics already set up."""
    # Record the processed jobs so that
    # we can send the duration of parsing to metadata db.
    processed_jobs = set()

    if options.detach:
        _detach_from_parent_process()

    parse_options = _ParseOptions(options.reparse, options.mailit,
                                  options.dry_run, options.suite_report,
                                  options.datastore_creds,
                                  options.export_to_gcloud_path)
    results_dir = os.path.abspath(args[0])
    assert os.path.exists(results_dir)

    pid_file_manager = pidfile.PidFileManager("parser", results_dir)

    if options.write_pidfile:
        pid_file_manager.open_file()

    try:
        # build up the list of job dirs to parse
        if options.singledir:
            jobs_list = [results_dir]
        else:
            jobs_list = [os.path.join(results_dir, subdir)
                         for subdir in os.listdir(results_dir)]

        # build up the database
        db = tko_db.db(autocommit=False, host=options.db_host,
                       user=options.db_user, password=options.db_pass,
                       database=options.db_name)

        # parse all the jobs
        for path in jobs_list:
            lockfile = open(os.path.join(path, ".parse.lock"), "w")
            flags = fcntl.LOCK_EX
            if options.noblock:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(lockfile, flags)
            except IOError as e:
                # lock is not available and nonblock has been requested
                if e.errno == errno.EWOULDBLOCK:
                    lockfile.close()
                    continue
                else:
                    raise # something unexpected happened
            try:
                new_jobs = parse_path(db, pid_file_manager, path, options.level,
                                      parse_options)
                processed_jobs.update(new_jobs)

            finally:
                fcntl.flock(lockfile, fcntl.LOCK_UN)
                lockfile.close()

    except Exception:
        pid_file_manager.close_file(1)
        raise
    else:
        pid_file_manager.close_file(0)
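
The parser examples all drive the pidfile the same way: open it up front when write_pidfile is set, close it with exit code 1 if parsing raises, and with 0 on success. Below is a hedged sketch of that lifecycle as a context manager; only open_file() and close_file() are taken from the code above, and the wrapper itself is illustrative.

import contextlib


@contextlib.contextmanager
def managed_pidfile(pid_file_manager, write_pidfile=True):
    if write_pidfile:
        pid_file_manager.open_file()
    try:
        yield pid_file_manager
    except Exception:
        # Mirror the examples: report failure (exit code 1) and re-raise.
        pid_file_manager.close_file(1)
        raise
    else:
        pid_file_manager.close_file(0)
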