Example #1
    def refresh(self, pidfile_paths):
        """
        pidfile_paths should be a list of paths to check for pidfiles.

        Returns a dict containing:
        * pidfiles: dict mapping pidfile paths to file contents, for pidfiles
        that exist.
        * autoserv_processes: list of dicts corresponding to running autoserv
        processes.  Each dict contains pid, pgid, ppid, comm, and args (see
        "man ps" for details).
        * parse_processes: likewise, for parse processes.
        * pidfiles_second_read: same info as pidfiles, but gathered after the
        processes are scanned.
        """
        site_check_parse = utils.import_site_function(
            __file__, 'autotest_lib.scheduler.site_drone_utility',
            'check_parse', lambda x: False)
        results = {
            'pidfiles':
            self._read_pidfiles(pidfile_paths),
            # element 0 of _get_process_info() is the headers from `ps`
            'all_processes':
            list(self._get_process_info())[1:],
            'autoserv_processes':
            self._refresh_processes('autoserv'),
            'parse_processes':
            self._refresh_processes('parse',
                                    site_check_parse=site_check_parse),
            'pidfiles_second_read':
            self._read_pidfiles(pidfile_paths),
        }
        return results
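
Note: every example on this page hinges on the same hook. utils.import_site_function(__file__, module, name, default) returns the named function from a site-specific module when that module exists alongside the caller, and the supplied default otherwise. A minimal sketch of that fallback behavior, assuming a plainly importable module (the real helper also uses the caller's __file__ to locate the site module on disk, which this sketch omits):

import importlib


def import_site_function_sketch(site_module_name, funcname, default):
    # Return site_module.funcname when the site module is importable;
    # otherwise fall back to the supplied default callable.
    try:
        site_module = importlib.import_module(site_module_name)
    except ImportError:
        return default
    return getattr(site_module, funcname, default)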
Example #2
def wrap_control_file(control_file, is_server, skip_verify,
                      verify_params=None, **kwargs):
    """
    Wraps a control file for use with Test Planner
    """
    wrapped = ''

    if not skip_verify:
        prepared_args = prepare_args(verify_params)
        wrapped += apply_string_arguments(VERIFY_TEST_SEGMENT,
                                          verify_args=prepared_args)

    site_generate_additional_segments = utils.import_site_function(
            __file__, 'autotest_lib.frontend.planner.site_control_file',
            'generate_additional_segments', _generate_additional_segments_dummy)
    wrapped += site_generate_additional_segments(**kwargs)

    if is_server:
        wrapped += apply_string_arguments(SERVER_SEGMENT,
                                          control_raw=control_file)
    else:
        control_base64 = base64.encodestring(control_file)
        control_comment = '\n'.join('# ' + l for l in control_file.split('\n'))
        wrapped += apply_string_arguments(CLIENT_SEGMENT,
                                          control_base64=control_base64,
                                          control_comment=control_comment)

    return wrapped
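
The fallback _generate_additional_segments_dummy has to accept the same **kwargs as the site hook and contribute nothing to the wrapped file. A plausible sketch (hypothetical, not the verbatim autotest definition):

def _generate_additional_segments_dummy(**kwargs):
    # Default: no site-specific segments are appended.
    return ''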
Example #3
def wrap_control_file(control_file,
                      is_server,
                      skip_verify,
                      verify_params=None,
                      **kwargs):
    """
    Wraps a control file for use with Test Planner
    """
    wrapped = ''

    if not skip_verify:
        prepared_args = prepare_args(verify_params)
        wrapped += apply_string_arguments(VERIFY_TEST_SEGMENT,
                                          verify_args=prepared_args)

    site_generate_additional_segments = utils.import_site_function(
        __file__, 'autotest_lib.frontend.planner.site_control_file',
        'generate_additional_segments', _generate_additional_segments_dummy)
    wrapped += site_generate_additional_segments(**kwargs)

    if is_server:
        wrapped += apply_string_arguments(SERVER_SEGMENT,
                                          control_raw=control_file)
    else:
        control_base64 = base64.encodestring(control_file)
        control_comment = '\n'.join('# ' + l for l in control_file.split('\n'))
        wrapped += apply_string_arguments(CLIENT_SEGMENT,
                                          control_base64=control_base64,
                                          control_comment=control_comment)

    return wrapped
Example #4
    def refresh(self, pidfile_paths):
        """
        pidfile_paths should be a list of paths to check for pidfiles.

        Returns a dict containing:
        * pidfiles: dict mapping pidfile paths to file contents, for pidfiles
        that exist.
        * autoserv_processes: list of dicts corresponding to running autoserv
        processes.  Each dict contains pid, pgid, ppid, comm, and args (see
        "man ps" for details).
        * parse_processes: likewise, for parse processes.
        * pidfiles_second_read: same info as pidfiles, but gathered after the
        processes are scanned.
        """
        site_check_parse = utils.import_site_function(
                __file__, 'autotest_lib.scheduler.site_drone_utility',
                'check_parse', lambda x: False)
        results = {
            'pidfiles' : self._read_pidfiles(pidfile_paths),
            'autoserv_processes' : self._refresh_processes('autoserv'),
            'parse_processes' : self._refresh_processes(
                    'parse', site_check_parse=site_check_parse),
            'pidfiles_second_read' : self._read_pidfiles(pidfile_paths),
        }
        return results
Example #5
def main():
    options, args = parse_args()
    results_dir = os.path.abspath(args[0])
    assert os.path.exists(results_dir)

    pid_file_manager = pidfile.PidFileManager("parser", results_dir)

    if options.write_pidfile:
        pid_file_manager.open_file()

    site_post_parse_job = utils.import_site_function(__file__,
        "autotest_lib.tko.site_parse", "site_post_parse_job",
        _site_post_parse_job_dummy)

    try:
        # build up the list of job dirs to parse
        if options.singledir:
            jobs_list = [results_dir]
        else:
            jobs_list = [os.path.join(results_dir, subdir)
                         for subdir in os.listdir(results_dir)]

        # build up the database
        db = tko_db.db(autocommit=False, host=options.db_host,
                       user=options.db_user, password=options.db_pass,
                       database=options.db_name)

        # parse all the jobs
        for path in jobs_list:
            lockfile = open(os.path.join(path, ".parse.lock"), "w")
            flags = fcntl.LOCK_EX
            if options.noblock:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(lockfile, flags)
            except IOError as e:
                # lock is not available and nonblock has been requested
                if e.errno == errno.EWOULDBLOCK:
                    lockfile.close()
                    continue
                else:
                    raise # something unexpected happened
            try:
                parse_path(db, path, options.level, options.reparse,
                           options.mailit)

            finally:
                fcntl.flock(lockfile, fcntl.LOCK_UN)
                lockfile.close()

        if options.site_do_post is True:
            site_post_parse_job(results_dir)
    except:
        # close out the pidfile with a failure exit code before re-raising
        pid_file_manager.close_file(1)
        raise
    else:
        pid_file_manager.close_file(0)
Example #6
def wrap_control_file(plan, hostname, run_verify, test_config):
    """
    Wraps a control file using the ControlParameters for the plan
    """
    site_additional_wrap_arguments = utils.import_site_function(
            __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
            'additional_wrap_arguments', _additional_wrap_arguments_dummy)
    additional_wrap_arguments = site_additional_wrap_arguments(plan, hostname)

    verify_params = get_wrap_arguments(
            plan, hostname, model_attributes.AdditionalParameterType.VERIFY)

    return control_file.wrap_control_file(
            control_file=test_config.control_file.contents,
            is_server=test_config.is_server,
            skip_verify=(not run_verify),
            verify_params=verify_params,
            **additional_wrap_arguments)
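
Because the result of site_additional_wrap_arguments is expanded with ** into wrap_control_file, the dummy fallback must return a dict. A plausible sketch (hypothetical):

def _additional_wrap_arguments_dummy(plan, hostname):
    # Default: no site-specific wrap arguments.
    return {}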
Example #7
def wrap_control_file(plan, hostname, run_verify, test_config):
    """
    Wraps a control file using the ControlParameters for the plan
    """
    site_additional_wrap_arguments = utils.import_site_function(
        __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
        'additional_wrap_arguments', _additional_wrap_arguments_dummy)
    additional_wrap_arguments = site_additional_wrap_arguments(plan, hostname)

    verify_params = get_wrap_arguments(
        plan, hostname, model_attributes.AdditionalParameterType.VERIFY)

    return control_file.wrap_control_file(
        control_file=test_config.control_file.contents,
        is_server=test_config.is_server,
        skip_verify=(not run_verify),
        verify_params=verify_params,
        **additional_wrap_arguments)
Example #8
    def _do_request(self, method, uri, query_parameters, encoded_body):
        uri_parts = [uri]
        if query_parameters:
            if '?' in uri:
                uri_parts += '&'
            else:
                uri_parts += '?'
            uri_parts += urllib.urlencode(query_parameters, doseq=True)
        full_uri = ''.join(uri_parts)

        if encoded_body:
            entity_body = simplejson.dumps(encoded_body)
        else:
            entity_body = None

        logging.debug('%s %s', method, full_uri)
        if entity_body:
            logging.debug(entity_body)

        site_verify = utils.import_site_function(
            __file__, 'autotest_lib.frontend.shared.site_rest_client',
            'site_verify_response', _site_verify_response_default)
        headers, response_body = self._http.request(
            full_uri,
            method,
            body=entity_body,
            headers=_get_request_headers(uri))
        if not site_verify(headers, response_body):
            logging.debug(
                'Response verification failed, clearing headers and '
                'trying again:\n%s', response_body)
            _clear_request_headers(uri)
            headers, response_body = self._http.request(
                full_uri,
                method,
                body=entity_body,
                headers=_get_request_headers(uri))

        logging.debug('Response: %s', headers['status'])

        return Response(headers, response_body)
Example #9
def _process_host_action(host, action):
    """
    Takes the specified action on the host
    """
    HostAction = failure_actions.HostAction
    if action not in HostAction.values:
        raise ValueError('Unexpected host action %s' % action)

    site_process = utils.import_site_function(
        __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
        'site_process_host_action', _site_process_host_action_dummy)

    if not site_process(host, action):
        # site_process_host_action returns True if and only if it matched a
        # site-specific processing option
        if action == HostAction.BLOCK:
            host.blocked = True
        elif action == HostAction.UNBLOCK:
            host.blocked = False
        else:
            assert action == HostAction.REINSTALL
            raise NotImplementedError('TODO: implement reinstall')

        host.save()
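
Per the comment above, the site hook reports whether it consumed the action. A plausible default implementation (hypothetical sketch):

def _site_process_host_action_dummy(host, action):
    # No site-specific handling; let the generic branches above run.
    return False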
Example #10
def _process_host_action(host, action):
    """
    Takes the specified action on the host
    """
    HostAction = failure_actions.HostAction
    if action not in HostAction.values:
        raise ValueError('Unexpected host action %s' % action)

    site_process = utils.import_site_function(
            __file__, 'autotest_lib.frontend.planner.site_rpc_utils',
            'site_process_host_action', _site_process_host_action_dummy)

    if not site_process(host, action):
        # site_process_host_action returns True if and only if it matched a
        # site-specific processing option
        if action == HostAction.BLOCK:
            host.blocked = True
        elif action == HostAction.UNBLOCK:
            host.blocked = False
        else:
            assert action == HostAction.REINSTALL
            raise NotImplementedError('TODO: implement reinstall')

        host.save()
Example #11
    def _do_request(self, method, uri, query_parameters, encoded_body):
        uri_parts = [uri]
        if query_parameters:
            if '?' in uri:
                uri_parts += '&'
            else:
                uri_parts += '?'
            uri_parts += urllib.urlencode(query_parameters, doseq=True)
        full_uri = ''.join(uri_parts)

        if encoded_body:
            entity_body = simplejson.dumps(encoded_body)
        else:
            entity_body = None

        logging.debug('%s %s', method, full_uri)
        if entity_body:
            logging.debug(entity_body)

        site_verify = utils.import_site_function(
                __file__, 'autotest_lib.frontend.shared.site_rest_client',
                'site_verify_response', _site_verify_response_default)
        headers, response_body = self._http.request(
                full_uri, method, body=entity_body,
                headers=_get_request_headers(uri))
        if not site_verify(headers, response_body):
            logging.debug('Response verification failed, clearing headers and '
                          'trying again:\n%s', response_body)
            _clear_request_headers(uri)
            headers, response_body = self._http.request(
                full_uri, method, body=entity_body,
                headers=_get_request_headers(uri))

        logging.debug('Response: %s', headers['status'])

        return Response(headers, response_body)
Example #12
import common
from autotest_lib.client.common_lib import enum, utils


# common enums for Host attributes
HostStatus = enum.Enum('Finished', 'Running', 'Blocked', string_values=True)


# common enums for TestRun attributes
TestRunStatus = enum.Enum('Active', 'Passed', 'Failed', string_values=True)


# common enums for SavedObject attributes
SavedObjectType = enum.Enum('support', 'triage', 'autoprocess', 'custom_query',
                            string_values=True)


# common enums for AdditionalParameter attributes
def _site_additional_parameter_types_dummy():
    return []
_site_additional_parameter_types = utils.import_site_function(
        __file__, 'autotest_lib.frontend.planner.site_model_attributes',
        'site_additional_parameter_types',
        _site_additional_parameter_types_dummy)
AdditionalParameterType = enum.Enum(
        string_values=True,
        *(_site_additional_parameter_types() + ['Verify']))
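
A hypothetical site_model_attributes module only needs to return extra value names; they are prepended to the enum built above:

def site_additional_parameter_types():
    return ['SiteSetup']    # hypothetical site-specific value

# AdditionalParameterType would then contain 'SiteSetup' followed by 'Verify'.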
Example #13
        for start, end in disabled_intervals:
            if timestamp >= start and (end is None or timestamp < end):
                return False
        return True

    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))

    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(
    __file__, "autotest_lib.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)

site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job",
    base_server_job)


class server_job(site_server_job):
    pass
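
A short usage sketch of the interval bookkeeping in disable_warnings/enable_warnings, assuming job is an instance of the class above; times are injected via current_time_func and the values here are made up:

job.disable_warnings('NETWORK', current_time_func=lambda: 100)
# job.disabled_warnings == {'NETWORK': [(100, None)]}  open interval: disabled
job.enable_warnings('NETWORK', current_time_func=lambda: 160)
# job.disabled_warnings == {'NETWORK': [(100, 160)]}   interval now closed
# A warning stamped 130 falls inside (100, 160), so the check above
# returns False and the warning is suppressed.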
Example #14
"""
Autotest scheduling utility.
"""

import logging

from autotest_lib.client.common_lib import global_config, utils
from autotest_lib.frontend.afe import models
from autotest_lib.scheduler import metahost_scheduler, scheduler_config
from autotest_lib.scheduler import scheduler_models

get_site_metahost_schedulers = utils.import_site_function(
    __file__, 'autotest_lib.scheduler.site_metahost_scheduler',
    'get_metahost_schedulers', lambda: ())


class SchedulerError(Exception):
    """Raised by HostScheduler when an inconsistent state occurs."""


class BaseHostScheduler(metahost_scheduler.HostSchedulingUtility):
    """Handles the logic for choosing when to run jobs and on which hosts.

    This class makes several queries to the database on each tick, building up
    some auxiliary data structures and using them to determine which hosts are
    eligible to run which jobs, taking into account all the various factors that
    affect that.

    In the past this was done with one or two very large, complex database
    queries.  It has proven much simpler and faster to build these auxiliary
    data structures and perform the logic in Python.
Example #15
import getpass
import os

from autotest_lib.client.common_lib import utils
# ServiceProxy comes from the AFE json_rpc client
from autotest_lib.frontend.afe.json_rpc import proxy


class AuthError(Exception):
    pass


def get_proxy(*args, **kwargs):
    """Use this to access the AFE or TKO RPC interfaces."""
    return proxy.ServiceProxy(*args, **kwargs)


def _base_authorization_headers(username, server):
    """
    Don't call this directly, call authorization_headers().
    This implementation may be overridden by site code.

    @returns A dictionary of authorization headers to pass in to get_proxy().
    """
    if not username:
        if 'AUTOTEST_USER' in os.environ:
            username = os.environ['AUTOTEST_USER']
        else:
            username = getpass.getuser()
    return {'AUTHORIZATION': username}


authorization_headers = utils.import_site_function(
    __file__, 'autotest_lib.frontend.afe.site_rpc_client_lib',
    'authorization_headers', _base_authorization_headers)
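
A hedged usage sketch: resolve the headers through the (possibly site-overridden) hook, then hand them to the proxy. The URL and the ServiceProxy keyword here are assumptions, not confirmed API:

headers = authorization_headers(username=None, server='http://autotest')
afe = get_proxy('http://autotest/afe/server/rpc/',  # hypothetical endpoint
                headers=headers)                    # keyword name assumed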
Example #16
import common
from autotest_lib.client.common_lib import enum, utils


def _site_host_actions_dummy():
    return []

_site_host_actions = utils.import_site_function(
        __file__, 'autotest_lib.frontend.planner.site_failure_actions',
        'site_host_actions', _site_host_actions_dummy)

HostAction = enum.Enum(
        string_values=True,
        *(_site_host_actions() + ['Block', 'Unblock', 'Reinstall']))


TestAction = enum.Enum('Skip', 'Rerun', string_values=True)
Example #17
def parse_one(db, jobname, path, parse_options):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param parse_options: _ParseOptions instance.
    """
    reparse = parse_options.reparse
    mail_on_failure = parse_options.mail_on_failure
    dry_run = parse_options.dry_run
    suite_report = parse_options.suite_report
    datastore_creds = parse_options.datastore_creds
    export_to_gcloud_path = parse_options.export_to_gcloud_path

    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests is a dict mapping (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = parser_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        if not dry_run:
            for test_idx in old_tests.itervalues():
                where = {'test_idx': test_idx}
                db.delete('tko_iteration_result', where)
                db.delete('tko_iteration_perf_value', where)
                db.delete('tko_iteration_attributes', where)
                db.delete('tko_test_attributes', where)
                db.delete('tko_test_labels_tests', {'test_id': test_idx})
                db.delete('tko_tests', where)

    job.build = None
    job.board = None
    job.build_version = None
    job.suite = None
    if job.label:
        label_info = site_utils.parse_job_name(job.label)
        if label_info:
            job.build = label_info.get('build', None)
            job.build_version = label_info.get('build_version', None)
            job.board = label_info.get('board', None)
            job.suite = label_info.get('suite', None)

    # Upload job details to Sponge.
    if not dry_run:
        sponge_url = sponge_utils.upload_results(job, log=tko_utils.dprint)
        if sponge_url:
            job.keyval_dict['sponge_url'] = sponge_url

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status != 'GOOD':
            job_successful = False
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))
    try:
        message = "\n".join(message_lines)

        if not dry_run:
            # send out an email report of the failure
            if len(message) > 2 and mail_on_failure:
                tko_utils.dprint(
                    "Sending email report of failure on %s to %s" %
                    (jobname, job.user))
                mailfailure(jobname, job, message)

            # write the job into the database.
            job_data = db.insert_job(jobname,
                                     job,
                                     parent_job_id=job_keyval.get(
                                         constants.PARENT_JOB_ID, None))

            # Upload perf values to the perf dashboard, if applicable.
            for test in job.tests:
                perf_uploader.upload_test(job, test, jobname)

            # Although the cursor has autocommit, we still need to force it to
            # commit existing changes before we can use django models, otherwise
            # it will go into deadlock when django models try to start a new
            # transaction while the current one has not finished yet.
            db.commit()

            # Handle retry job.
            orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID,
                                             None)
            if orig_afe_job_id:
                orig_job_idx = tko_models.Job.objects.get(
                    afe_job_id=orig_afe_job_id).job_idx
                _invalidate_original_tests(orig_job_idx, job.index)
    except Exception as e:
        metadata = {
            'path': path,
            'error': str(e),
            'details': traceback.format_exc()
        }
        tko_utils.dprint("Hit exception while uploading to tko db:\n%s" %
                         traceback.format_exc())
        autotest_es.post(use_http=True,
                         type_str='parse_failure',
                         metadata=metadata)
        raise e

    # Serializing job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    if not dry_run:
        db.commit()

    # Generate a suite report.
    # Check whether this is a suite job: a suite job is hostless, its jobname
    # has the form <JOB_ID>-<USERNAME>/hostless, and its suite field is not
    # NULL.  Only generate the timeline report when datastore_parent_key is
    # given.
    try:
        datastore_parent_key = job_keyval.get('datastore_parent_key', None)
        if (suite_report and jobname.endswith('/hostless')
                and job_data['suite'] and datastore_parent_key):
            tko_utils.dprint('Start dumping suite timing report...')
            timing_log = os.path.join(path, 'suite_timing.log')
            dump_cmd = (
                "%s/site_utils/dump_suite_report.py %s "
                "--output='%s' --debug" %
                (common.autotest_dir, job_data['afe_job_id'], timing_log))
            subprocess.check_output(dump_cmd, shell=True)
            tko_utils.dprint('Successfully finished dumping suite timing '
                             'report')

            if (datastore_creds and export_to_gcloud_path
                    and os.path.exists(export_to_gcloud_path)):
                upload_cmd = [
                    export_to_gcloud_path, datastore_creds, timing_log,
                    '--parent_key',
                    repr(tuple(datastore_parent_key))
                ]
                tko_utils.dprint('Start exporting timeline report to gcloud')
                subprocess.check_output(upload_cmd)
                tko_utils.dprint('Successfully exported timeline report to '
                                 'gcloud')
            else:
                tko_utils.dprint('DEBUG: skip exporting suite timeline to '
                                 'gcloud, because either gcloud creds or '
                                 'export_to_gcloud script is not found.')
    except Exception as e:
        tko_utils.dprint("WARNING: fail to dump/export suite report. "
                         "Error:\n%s" % e)

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so any failure, e.g., db connection error, will stop
    # gs_offloader_instructions being updated, and logs can be uploaded for
    # troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
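
parse_one reads six fields off parse_options; a namedtuple with exactly those fields (inferred from the accesses above, not necessarily the verbatim autotest definition) is enough to drive it:

import collections

_ParseOptions = collections.namedtuple(
        'ParseOptions', ['reparse', 'mail_on_failure', 'dry_run',
                         'suite_report', 'datastore_creds',
                         'export_to_gcloud_path'])

options = _ParseOptions(reparse=False, mail_on_failure=True, dry_run=False,
                        suite_report=False, datastore_creds=None,
                        export_to_gcloud_path=None)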
Example #18
import common
from autotest_lib.client.common_lib import enum, utils


def _site_host_actions_dummy():
    return []


_site_host_actions = utils.import_site_function(
    __file__, 'autotest_lib.frontend.planner.site_failure_actions',
    'site_host_actions', _site_host_actions_dummy)

HostAction = enum.Enum(string_values=True,
                       *(_site_host_actions() +
                         ['Block', 'Unblock', 'Reinstall']))

TestAction = enum.Enum('Skip', 'Rerun', string_values=True)
Example #19
            if timestamp >= start and (end is None or timestamp < end):
                return False
        return True

    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))

    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(
    __file__, "autotest_lib.server.site_server_job", "get_site_job_data", _get_site_job_data_dummy
)


site_server_job = utils.import_site_class(
    __file__, "autotest_lib.server.site_server_job", "site_server_job", base_server_job
)


class server_job(site_server_job):
    pass
Example #20
import common
from autotest_lib.client.common_lib import enum, utils

# common enums for Host attributes
HostStatus = enum.Enum('Finished', 'Running', 'Blocked', string_values=True)

# common enums for TestRun attributes
TestRunStatus = enum.Enum('Active', 'Passed', 'Failed', string_values=True)

# common enums for SavedObject attributes
SavedObjectType = enum.Enum('support',
                            'triage',
                            'autoprocess',
                            'custom_query',
                            string_values=True)


# common enums for AdditionalParameter attributes
def _site_additional_parameter_types_dummy():
    return []


_site_additional_parameter_types = utils.import_site_function(
    __file__, 'autotest_lib.frontend.planner.site_model_attributes',
    'site_additional_parameter_types', _site_additional_parameter_types_dummy)
AdditionalParameterType = enum.Enum(string_values=True,
                                    *(_site_additional_parameter_types() +
                                      ['Verify']))
Example #21
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """Parse a single job. Optionally send email on failure.

    @param db: database object.
    @param jobname: the tag used to search for existing job in db,
                    e.g. '1234-chromeos-test/host1'
    @param path: The path to the results to be parsed.
    @param reparse: True/False, whether this is reparsing of the job.
    @param mail_on_failure: whether to send email on FAILED test.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests is a dict mapping (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_perf_value', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    job_successful = True
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status != 'GOOD':
            job_successful = False
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))

    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s" %
                         (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database.
    db.insert_job(jobname,
                  job,
                  parent_job_id=job_keyval.get(constants.PARENT_JOB_ID, None))

    # Upload perf values to the perf dashboard, if applicable.
    for test in job.tests:
        perf_uploader.upload_test(job, test)

    # Although the cursor has autocommit, we still need to force it to commit
    # existing changes before we can use django models, otherwise it
    # will go into deadlock when django models try to start a new transaction
    # while the current one has not finished yet.
    db.commit()

    # Handle retry job.
    orig_afe_job_id = job_keyval.get(constants.RETRY_ORIGINAL_JOB_ID, None)
    if orig_afe_job_id:
        orig_job_idx = tko_models.Job.objects.get(
            afe_job_id=orig_afe_job_id).job_idx
        _invalidate_original_tests(orig_job_idx, job.index)

    # Serializing job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()

    # Mark GS_OFFLOADER_NO_OFFLOAD in gs_offloader_instructions at the end of
    # the function, so any failure, e.g., db connection error, will stop
    # gs_offloader_instructions being updated, and logs can be uploaded for
    # troubleshooting.
    if job_successful:
        # Check if we should not offload this test's results.
        if job_keyval.get(constants.JOB_OFFLOAD_FAILURES_KEY, False):
            # Update the gs_offloader_instructions json file.
            gs_instructions_file = os.path.join(
                path, constants.GS_OFFLOADER_INSTRUCTIONS)
            gs_offloader_instructions = {}
            if os.path.exists(gs_instructions_file):
                with open(gs_instructions_file, 'r') as f:
                    gs_offloader_instructions = json.load(f)

            gs_offloader_instructions[constants.GS_OFFLOADER_NO_OFFLOAD] = True
            with open(gs_instructions_file, 'w') as f:
                json.dump(gs_offloader_instructions, f)
Example #22
import getpass
import os

from autotest_lib.client.common_lib import utils
# ServiceProxy comes from the AFE json_rpc client
from autotest_lib.frontend.afe.json_rpc import proxy


class AuthError(Exception):
    pass


def get_proxy(*args, **kwargs):
    """Use this to access the AFE or TKO RPC interfaces."""
    return proxy.ServiceProxy(*args, **kwargs)


def _base_authorization_headers(username, server):
    """
    Don't call this directly, call authorization_headers().
    This implementation may be overridden by site code.

    @returns A dictionary of authorization headers to pass in to get_proxy().
    """
    if not username:
        if 'AUTOTEST_USER' in os.environ:
            username = os.environ['AUTOTEST_USER']
        else:
            username = getpass.getuser()
    return {'AUTHORIZATION' : username}


authorization_headers = utils.import_site_function(
        __file__, 'autotest_lib.frontend.afe.site_rpc_client_lib',
        'authorization_headers', _base_authorization_headers)
Example #23
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests is a dict mapping (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx' : test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serializing job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()
Example #24
"""
Autotest scheduling utility.
"""


import logging

from autotest_lib.client.common_lib import global_config, utils
from autotest_lib.frontend.afe import models
from autotest_lib.scheduler import metahost_scheduler, scheduler_config
from autotest_lib.scheduler import scheduler_models


get_site_metahost_schedulers = utils.import_site_function(
        __file__, 'autotest_lib.scheduler.site_metahost_scheduler',
        'get_metahost_schedulers', lambda: ())


class SchedulerError(Exception):
    """Raised by HostScheduler when an inconsistent state occurs."""


class BaseHostScheduler(metahost_scheduler.HostSchedulingUtility):
    """Handles the logic for choosing when to run jobs and on which hosts.

    This class makes several queries to the database on each tick, building up
    some auxiliary data structures and using them to determine which hosts are
    eligible to run which jobs, taking into account all the various factors that
    affect that.

    In the past this was done with one or two very large, complex database
    queries.  It has proven much simpler and faster to build these auxiliary
    data structures and perform the logic in Python.
Example #25
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests is a dict mapping (test_name, subdir) tuples to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try and port test_idx over from the old tests, but if old tests stop
    # matching up with new ones just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx' : test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serializing job into a binary file
    try:
        from autotest_lib.tko import tko_pb2
        from autotest_lib.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest_lib.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()