Example #1
    def refresh(self, pidfile_paths):
        """
        pidfile_paths should be a list of paths to check for pidfiles.

        Returns a dict containing:
        * pidfiles: dict mapping pidfile paths to file contents, for pidfiles
        that exist.
        * autoserv_processes: list of dicts corresponding to running autoserv
        processes.  Each dict contains pid, pgid, ppid, comm, and args (see
        "man ps" for details).
        * parse_processes: likewise, for parse processes.
        * pidfiles_second_read: same info as pidfiles, but gathered after the
        processes are scanned.
        """
        site_check_parse = utils.import_site_function(
            __file__, 'autotest.scheduler.site_drone_utility', 'check_parse',
            lambda x: False)
        results = {
            'pidfiles':
            self._read_pidfiles(pidfile_paths),
            'autoserv_processes':
            self._refresh_processes('autoserv'),
            'parse_processes':
            self._refresh_processes('parse',
                                    site_check_parse=site_check_parse),
            'pidfiles_second_read':
            self._read_pidfiles(pidfile_paths),
        }
        return results
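
All of these examples share one pattern: the caller names a site-specific module and a function inside it, and supplies a default callable to fall back on when no site module is installed. A minimal sketch of the idea (hypothetical and simplified; the real utils.import_site_function also takes the calling __file__ and does extra path handling):

import importlib


def import_site_function(site_module_name, function_name, default):
    # Try the site-specific module first; if the site has not installed
    # one, quietly fall back to the supplied default callable.
    try:
        module = importlib.import_module(site_module_name)
    except ImportError:
        return default
    return getattr(module, function_name, default)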
Example #2
    def refresh(self, pidfile_paths):
        """
        pidfile_paths should be a list of paths to check for pidfiles.

        Returns a dict containing:
        * pidfiles: dict mapping pidfile paths to file contents, for pidfiles
        that exist.
        * autoserv_processes: list of dicts corresponding to running autoserv
        processes.  Each dict contains pid, pgid, ppid, comm, and args (see
        "man ps" for details).
        * parse_processes: likewise, for parse processes.
        * pidfiles_second_read: same info as pidfiles, but gathered after the
        processes are scanned.
        """
        site_check_parse = utils.import_site_function(
            __file__, 'autotest.scheduler.site_drone_utility',
            'check_parse', lambda x: False)
        results = {
            'pidfiles': self._read_pidfiles(pidfile_paths),
            'autoserv_processes': self._refresh_processes(['autoserv',
                                                           'autotest-remote']),
            'parse_processes': self._refresh_processes(
                'parse', site_check_parse=site_check_parse),
            'pidfiles_second_read': self._read_pidfiles(pidfile_paths),
        }
        return results
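
Here the default, lambda x: False, means that with no site module installed no extra processes are classified as parse processes. A site that runs its parser under a different process name could install a hook; a hypothetical sketch, assuming the argument is the per-process dict described in the docstring:

# Hypothetical autotest/scheduler/site_drone_utility.py
def check_parse(process_info):
    # process_info is assumed to carry pid, pgid, ppid, comm, and args,
    # matching the process dicts described in refresh()'s docstring.
    return process_info.get('comm') == 'site_parse_wrapper'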
Example #3
    def _do_request(self, method, uri, query_parameters, encoded_body):
        uri_parts = [uri]
        if query_parameters:
            if '?' in uri:
                uri_parts += '&'
            else:
                uri_parts += '?'
            uri_parts += urllib.urlencode(query_parameters, doseq=True)
        full_uri = ''.join(uri_parts)

        if encoded_body:
            entity_body = simplejson.dumps(encoded_body)
        else:
            entity_body = None

        logging.debug('%s %s', method, full_uri)
        if entity_body:
            logging.debug(entity_body)

        site_verify = utils.import_site_function(
            __file__, 'autotest.frontend.shared.site_rest_client',
            'site_verify_response', _site_verify_response_default)
        headers, response_body = self._http.request(
            full_uri,
            method,
            body=entity_body,
            headers=_get_request_headers(uri))
        if not site_verify(headers, response_body):
            logging.debug(
                'Response verification failed, clearing headers and '
                'trying again:\n%s', response_body)
            _clear_request_headers(uri)
            headers, response_body = self._http.request(
                full_uri,
                method,
                body=entity_body,
                headers=_get_request_headers(uri))

        logging.debug('Response: %s', headers['status'])

        return Response(headers, response_body)
Example #4
    def _do_request(self, method, uri, query_parameters, encoded_body):
        uri_parts = [uri]
        if query_parameters:
            if '?' in uri:
                uri_parts += '&'
            else:
                uri_parts += '?'
            uri_parts += urllib.urlencode(query_parameters, doseq=True)
        full_uri = ''.join(uri_parts)

        if encoded_body:
            entity_body = simplejson.dumps(encoded_body)
        else:
            entity_body = None

        logging.debug('%s %s', method, full_uri)
        if entity_body:
            logging.debug(entity_body)

        site_verify = utils.import_site_function(
            __file__, 'autotest.frontend.shared.site_rest_client',
            'site_verify_response', _site_verify_response_default)
        headers, response_body = self._http.request(
            full_uri, method, body=entity_body,
            headers=_get_request_headers(uri))
        if not site_verify(headers, response_body):
            logging.debug('Response verification failed, clearing headers and '
                          'trying again:\n%s', response_body)
            _clear_request_headers(uri)
            headers, response_body = self._http.request(
                full_uri, method, body=entity_body,
                headers=_get_request_headers(uri))

        logging.debug('Response: %s', headers['status'])

        return Response(headers, response_body)
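
_site_verify_response_default is not shown in these excerpts, but given the control flow it must accept every response, so the retry branch only fires when a site module installs a stricter check. Judging by headers['status'] in the logging call, the response object behaves like a dict carrying the HTTP status as a string (httplib2-style). A hypothetical override that treats auth redirects as stale sessions:

# Hypothetical autotest/frontend/shared/site_rest_client.py
def site_verify_response(headers, response_body):
    # Reject redirects and auth failures so that _do_request() clears
    # the cached request headers and retries once.
    return headers['status'] not in ('302', '401')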
Example #5
        return True


    def disable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, disables all further warnings of this type."""
        intervals = self.disabled_warnings.setdefault(warning_type, [])
        if not intervals or intervals[-1][1] is not None:
            intervals.append((int(current_time_func()), None))


    def enable_warnings(self, warning_type, current_time_func=time.time):
        """As of now, enables all further warnings of this type."""
        intervals = self.disabled_warnings.get(warning_type, [])
        if intervals and intervals[-1][1] is None:
            intervals[-1] = (intervals[-1][0], int(current_time_func()))


# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(__file__,
    "autotest.server.site_server_job", "get_site_job_data",
    _get_site_job_data_dummy)


site_server_job = utils.import_site_class(
    __file__, "autotest.server.site_server_job", "site_server_job",
    base_server_job)


class server_job(site_server_job):
    pass
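
import_site_class is the class-shaped counterpart of import_site_function: server_job inherits from the site class when autotest.server.site_server_job is importable, and from base_server_job otherwise. A hypothetical site module (assuming base_server_job is importable there, and borrowing the disable_warnings method shown above) would subclass the base and layer on local behavior:

# Hypothetical autotest/server/site_server_job.py
import time


class site_server_job(base_server_job):

    def disable_warnings(self, warning_type, current_time_func=time.time):
        # Purely illustrative override: count how often warnings get
        # disabled site-side, then delegate to the stock implementation.
        self.site_disable_count = getattr(self, 'site_disable_count', 0) + 1
        return super(site_server_job, self).disable_warnings(
            warning_type, current_time_func)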
Example #6
import getpass
import os

from autotest.client.shared import utils
from .json_rpc import proxy


class AuthError(Exception):
    pass


def get_proxy(*args, **kwargs):
    """Use this to access the AFE or TKO RPC interfaces."""
    return proxy.ServiceProxy(*args, **kwargs)


def _base_authorization_headers(username, server):
    """
    Don't call this directly, call authorization_headers().
    This implementation may be overridden by site code.

    @returns A dictionary of authorization headers to pass in to get_proxy().
    """
    if not username:
        if 'AUTOTEST_USER' in os.environ:
            username = os.environ['AUTOTEST_USER']
        else:
            username = getpass.getuser()
    return {'AUTHORIZATION' : username}


authorization_headers = utils.import_site_function(
        __file__, 'autotest.frontend.afe.site_rpc_client_lib',
        'authorization_headers', _base_authorization_headers)
Example #7
import getpass
import os

from autotest.client.shared import utils
from .json_rpc import proxy


class AuthError(Exception):
    pass


def get_proxy(*args, **kwargs):
    """Use this to access the AFE or TKO RPC interfaces."""
    return proxy.ServiceProxy(*args, **kwargs)


def _base_authorization_headers(username, server):
    """
    Don't call this directly, call authorization_headers().
    This implementation may be overridden by site code.

    :return: A dictionary of authorization headers to pass in to get_proxy().
    """
    if not username:
        if 'AUTOTEST_USER' in os.environ:
            username = os.environ['AUTOTEST_USER']
        else:
            username = getpass.getuser()
    return {'AUTHORIZATION': username}


authorization_headers = utils.import_site_function(
    __file__, 'autotest.frontend.afe.site_rpc_client_lib',
    'authorization_headers', _base_authorization_headers)
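
A site that authenticates with something other than a bare login name can replace the header wholesale; the override keeps the (username, server) signature of the default. A hypothetical sketch:

# Hypothetical autotest/frontend/afe/site_rpc_client_lib.py
import os


def authorization_headers(username, server):
    # Same signature as _base_authorization_headers(); this variant
    # sends an environment-supplied token instead of a login name.
    token = os.environ.get('AUTOTEST_TOKEN', '')  # hypothetical variable
    return {'AUTHORIZATION': 'Token %s' % token}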
Example #8
"""
Autotest scheduling utility.
"""
import logging

from autotest.client.shared import utils
from autotest.client.shared.settings import settings
from autotest.frontend.afe import models
from autotest.scheduler import metahost_scheduler, scheduler_config
from autotest.scheduler import scheduler_models


get_site_metahost_schedulers = utils.import_site_function(
    __file__, "autotest.scheduler.site_metahost_scheduler", "get_metahost_schedulers", lambda: ()
)


class SchedulerError(Exception):
    """Raised by HostScheduler when an inconsistent state occurs."""


class BaseHostScheduler(metahost_scheduler.HostSchedulingUtility):
    """Handles the logic for choosing when to run jobs and on which hosts.

    This class makes several queries to the database on each tick, building up
    some auxiliary data structures and using them to determine which hosts are
    eligible to run which jobs, taking into account all the various factors that
    affect that.

    In the past this was done with one or two very large, complex database
    queries.  It has proven much simpler and faster to build these auxiliary
Example #9
"""
Autotest scheduling utility.
"""
import logging

from autotest.client.shared import utils
from autotest.client.shared.settings import settings
from autotest.frontend.afe import models
from autotest.scheduler import metahost_scheduler, scheduler_config
from autotest.scheduler import scheduler_models


get_site_metahost_schedulers = utils.import_site_function(
    __file__, 'autotest.scheduler.site_metahost_scheduler',
    'get_metahost_schedulers', lambda: ())


class SchedulerError(Exception):

    """Raised by HostScheduler when an inconsistent state occurs."""


class BaseHostScheduler(metahost_scheduler.HostSchedulingUtility):

    """Handles the logic for choosing when to run jobs and on which hosts.

    This class makes several queries to the database on each tick, building up
    some auxiliary data structures and using them to determine which hosts are
    eligible to run which jobs, taking into account all the various factors that
    affect that.
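
With the lambda: () default, sites contribute no extra metahost schedulers. A site module returns an iterable of scheduler objects implementing the interface defined in the metahost_scheduler module; a hypothetical skeleton:

# Hypothetical autotest/scheduler/site_metahost_scheduler.py
def get_metahost_schedulers():
    # Return an iterable of scheduler objects; SiteLabelScheduler is a
    # stand-in name for whatever the site actually implements.
    return (SiteLabelScheduler(),)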
Example #10
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps the tuple (test_name, subdir) to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try to port test_idx over from the old tests, but if the old tests
    # stop matching up with the new ones, just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx' : test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s"
                         % (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(format_failure_message(
                jobname, test.kernel.base, test.subdir,
                test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s"
                         % (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serialize the job into a binary file
    try:
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()
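
_site_export_dummy (defined elsewhere in this module) is presumably a no-op, so without a site hook the serialized job simply stays on disk. The hook receives the path of the job.serialize file; a hypothetical implementation that archives it:

# Hypothetical autotest/tko/site_export.py
import shutil


def site_export(binary_file_name):
    # Copy the serialized job protobuf into a site-wide archive.
    archive_dir = '/srv/results-archive'  # hypothetical location
    shutil.copy(binary_file_name, archive_dir)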
Example #11
def parse_one(db, jobname, path, reparse, mail_on_failure):
    """
    Parse a single job. Optionally send email on failure.
    """
    tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
    old_job_idx = db.find_job(jobname)
    # old_tests maps the tuple (test_name, subdir) to test_idx
    old_tests = {}
    if old_job_idx is not None:
        if not reparse:
            tko_utils.dprint("! Job is already parsed, done")
            return

        raw_old_tests = db.select("test_idx,subdir,test", "tko_tests",
                                  {"job_idx": old_job_idx})
        if raw_old_tests:
            old_tests = dict(((test, subdir), test_idx)
                             for test_idx, subdir, test in raw_old_tests)

    # look up the status version
    job_keyval = models.job.read_keyval(path)
    status_version = job_keyval.get("status_version", 0)

    # parse out the job
    parser = status_lib.parser(status_version)
    job = parser.make_job(path)
    status_log = os.path.join(path, "status.log")
    if not os.path.exists(status_log):
        status_log = os.path.join(path, "status")
    if not os.path.exists(status_log):
        tko_utils.dprint("! Unable to parse job, no status file")
        return

    # parse the status logs
    tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
    status_lines = open(status_log).readlines()
    parser.start(job)
    tests = parser.end(status_lines)

    # parser.end can return the same object multiple times, so filter out dups
    job.tests = []
    already_added = set()
    for test in tests:
        if test not in already_added:
            already_added.add(test)
            job.tests.append(test)

    # try to port test_idx over from the old tests, but if the old tests
    # stop matching up with the new ones, just give up
    if reparse and old_job_idx is not None:
        job.index = old_job_idx
        for test in job.tests:
            test_idx = old_tests.pop((test.testname, test.subdir), None)
            if test_idx is not None:
                test.test_idx = test_idx
            else:
                tko_utils.dprint("! Reparse returned new test "
                                 "testname=%r subdir=%r" %
                                 (test.testname, test.subdir))
        for test_idx in old_tests.itervalues():
            where = {'test_idx': test_idx}
            db.delete('tko_iteration_result', where)
            db.delete('tko_iteration_attributes', where)
            db.delete('tko_test_attributes', where)
            db.delete('tko_test_labels_tests', {'test_id': test_idx})
            db.delete('tko_tests', where)

    # check for failures
    message_lines = [""]
    for test in job.tests:
        if not test.subdir:
            continue
        tko_utils.dprint("* testname, status, reason: %s %s %s" %
                         (test.subdir, test.status, test.reason))
        if test.status in ("FAIL", "WARN"):
            message_lines.append(
                format_failure_message(jobname, test.kernel.base, test.subdir,
                                       test.status, test.reason))
    message = "\n".join(message_lines)

    # send out an email report of the failure
    if len(message) > 2 and mail_on_failure:
        tko_utils.dprint("Sending email report of failure on %s to %s" %
                         (jobname, job.user))
        mailfailure(jobname, job, message)

    # write the job into the database
    db.insert_job(jobname, job)

    # Serialize the job into a binary file
    try:
        from autotest.tko import tko_pb2
        from autotest.tko import job_serializer

        serializer = job_serializer.JobSerializer()
        binary_file_name = os.path.join(path, "job.serialize")
        serializer.serialize_to_binary(job, jobname, binary_file_name)

        if reparse:
            site_export_file = "autotest.tko.site_export"
            site_export = utils.import_site_function(__file__,
                                                     site_export_file,
                                                     "site_export",
                                                     _site_export_dummy)
            site_export(binary_file_name)

    except ImportError:
        tko_utils.dprint("DEBUG: tko_pb2.py doesn't exist. Create by "
                         "compiling tko/tko.proto.")

    db.commit()