Example #1
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                which indicate a job failure.
        """
        self._setup_job_results()

        test_suite = self._make_test_suite(urls)
        self._validate_test_suite(test_suite)
        test_suite = self._filter_test_suite(test_suite)
        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, "
                     "filters, typos)")
            raise exceptions.OptionValidationError(e_msg)

        mux = multiplexer.Mux(self.args)
        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile,
                                     self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux,
                                              timeout=self.timeout)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value('runner.behavior', 'keep_tmp_files',
                                  key_type=bool, default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
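
The keep_tmp_files lookup above shows the settings.get_value() pattern that
recurs throughout these examples: a config section, a key, an optional
key_type for coercion, and a default returned when the key is absent. A
minimal standalone sketch of that behaviour, assuming an INI-style
configuration file; ConfigParser here only stands in for avocado's settings
module and is not the project's actual implementation:

    import configparser

    # Hypothetical avocado.conf-style content, used only for this sketch.
    _SAMPLE = "[runner.behavior]\nkeep_tmp_files = off\n"

    def get_value(config, section, key, key_type=str, default=None):
        # Roughly mirrors the settings.get_value() calls above: a missing
        # key yields the default, a present key is coerced via key_type.
        try:
            raw = config.get(section, key)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return default
        if key_type is bool:
            return raw.strip().lower() in ('1', 'yes', 'true', 'on')
        return key_type(raw)

    config = configparser.ConfigParser()
    config.read_string(_SAMPLE)
    print(get_value(config, 'runner.behavior', 'keep_tmp_files',
                    key_type=bool, default=False))    # False
    print(get_value(config, 'runner.behavior', 'no_such_key',
                    default='fallback'))              # fallback
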
Example #2
    def __init__(self, hostname=None, port=None, username=None, password=None):
        """
        Initializes a connection to an avocado-server instance

        :param hostname: the hostname or IP address to connect to
        :type hostname: str
        :param port: the port number where avocado-server is running
        :type port: int
        :param username: the name of the user to be authenticated as
        :type username: str
        :param password: the password to use for authentication
        :type password: str
        """
        if hostname is None:
            hostname = settings.get_value('restclient.connection',
                                          'hostname',
                                          default='localhost')
        self.hostname = hostname

        if port is None:
            port = settings.get_value('restclient.connection',
                                      'port',
                                      key_type='int',
                                      default=9405)
        self.port = port

        if username is None:
            username = settings.get_value('restclient.connection',
                                          'username',
                                          default='')
        self.username = username

        if password is None:
            password = settings.get_value('restclient.connection',
                                          'password',
                                          default='')
        self.password = password

        try:
            version = self.request('version')
        except (requests.exceptions.ConnectionError, UnexpectedHttpStatusCode):
            raise InvalidConnectionError

        if not self.check_min_version(version):
            raise InvalidServerVersionError
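
A hedged usage sketch for the constructor above: explicit arguments skip the
config lookups, omitted ones fall back to the [restclient.connection] settings
section, and connection or version problems surface as the two exceptions
raised at the end. The class name, import path and credential values below are
assumptions for illustration:

    # Assumed import path; Connection and both exception classes are taken
    # from the example's module.
    from avocado.core.restclient.connection import (Connection,
                                                    InvalidConnectionError,
                                                    InvalidServerVersionError)

    try:
        conn = Connection(hostname='avocado-server.example.com',  # hypothetical host
                          username='admin', password='secret')    # port falls back to config / 9405
    except InvalidConnectionError:
        print('could not reach the server (connection error or bad HTTP status)')
    except InvalidServerVersionError:
        print('server is older than the minimum version this client supports')
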
Example #3
    def __init__(self, hostname=None, port=None, username=None, password=None):
        """
        Initializes a connection to an avocado-server instance

        :param hostname: the hostname or IP address to connect to
        :type hostname: str
        :param port: the port number where avocado-server is running
        :type port: int
        :param username: the name of the user to be authenticated as
        :type username: str
        :param password: the password to use for authentication
        :type password: str
        """
        if hostname is None:
            hostname = settings.get_value('restclient.connection',
                                          'hostname', default='localhost')
        self.hostname = hostname

        if port is None:
            port = settings.get_value('restclient.connection',
                                      'port', key_type='int',
                                      default=9405)
        self.port = port

        if username is None:
            username = settings.get_value('restclient.connection',
                                          'username', default='')
        self.username = username

        if password is None:
            password = settings.get_value('restclient.connection',
                                          'password', default='')
        self.password = password

        try:
            version = self.request('version')
        except (requests.exceptions.ConnectionError, UnexpectedHttpStatusCode):
            raise InvalidConnectionError

        if not self.check_min_version(version):
            raise InvalidServerVersionError
Example #4
File: gdb.py Project: ypu/avocado
 def activate(self, app_args):
     try:
         for binary in app_args.gdb_run_bin:
             runtime.GDB_RUN_BINARY_NAMES_EXPR.append(binary)
         for commands in app_args.gdb_prerun_commands:
             if ':' in commands:
                 binary, commands_path = commands.split(':', 1)
                 runtime.GDB_PRERUN_COMMANDS[binary] = commands_path
             else:
                 runtime.GDB_PRERUN_COMMANDS[''] = commands
         runtime.GDB_ENABLE_CORE = (app_args.gdb_coredump == 'on')
         system_gdb_path = utils_path.find_command('gdb', '/usr/bin/gdb')
         runtime.GDB_PATH = settings.get_value('gdb.paths',
                                               'gdb',
                                               default=system_gdb_path)
         system_gdbserver_path = utils_path.find_command(
             'gdbserver', '/usr/bin/gdbserver')
         runtime.GDBSERVER_PATH = settings.get_value(
             'gdb.paths', 'gdbserver', default=system_gdbserver_path)
     except AttributeError:
         pass
Example #5
 def activate(self, app_args):
     try:
         for binary in app_args.gdb_run_bin:
             runtime.GDB_RUN_BINARY_NAMES_EXPR.append(binary)
         for commands in app_args.gdb_prerun_commands:
             if ':' in commands:
                 binary, commands_path = commands.split(':', 1)
                 runtime.GDB_PRERUN_COMMANDS[binary] = commands_path
             else:
                 runtime.GDB_PRERUN_COMMANDS[''] = commands
         runtime.GDB_ENABLE_CORE = (app_args.gdb_coredump == 'on')
         system_gdb_path = utils_path.find_command('gdb', '/usr/bin/gdb')
         runtime.GDB_PATH = settings.get_value('gdb.paths', 'gdb',
                                               default=system_gdb_path)
         system_gdbserver_path = utils_path.find_command('gdbserver',
                                                         '/usr/bin/gdbserver')
         runtime.GDBSERVER_PATH = settings.get_value('gdb.paths',
                                                     'gdbserver',
                                                     default=system_gdbserver_path)
     except AttributeError:
         pass
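
The --gdb-prerun-commands handling above accepts either 'binary:path' (a
commands file for one specific binary) or a bare value (commands stored under
the empty key and applied when no binary is named). A small standalone
illustration of that parsing; the values are made up:

    def parse_prerun(value):
        # Mirrors the split(':', 1) logic in activate() above.
        if ':' in value:
            binary, commands_path = value.split(':', 1)
        else:
            binary, commands_path = '', value
        return binary, commands_path

    print(parse_prerun('ls:/tmp/gdb-prerun.cmds'))   # ('ls', '/tmp/gdb-prerun.cmds')
    print(parse_prerun('/tmp/gdb-prerun.cmds'))      # ('', '/tmp/gdb-prerun.cmds')
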
Example #6
File: job.py Project: ypu/avocado
    def __init__(self, args=None):
        """
        Creates an instance of the Job class.

        :param args: an instance of :class:`argparse.Namespace`.
        """
        if args is None:
            args = argparse.Namespace()
        self.args = args
        self.standalone = getattr(self.args, 'standalone', False)
        unique_id = getattr(self.args, 'unique_job_id', None)
        if unique_id is None:
            unique_id = job_id.create_unique_job_id()
        self.unique_id = unique_id
        self.view = output.View(app_args=self.args)
        self.logdir = None
        raw_log_level = settings.get_value('job.output',
                                           'loglevel',
                                           default='debug')
        mapping = {
            'info': logging.INFO,
            'debug': logging.DEBUG,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL
        }
        if raw_log_level in mapping:
            self.loglevel = mapping[raw_log_level]
        else:
            self.loglevel = logging.DEBUG
        self.show_job_log = getattr(self.args, 'show_job_log', False)
        self.silent = getattr(self.args, 'silent', False)

        if self.standalone:
            self.show_job_log = True
            if self.args is not None:
                setattr(self.args, 'show_job_log', True)

        if self.show_job_log:
            if not self.silent:
                output.add_console_handler(_TEST_LOGGER)
                _TEST_LOGGER.setLevel(self.loglevel)

        self.test_dir = data_dir.get_test_dir()
        self.test_index = 1
        self.status = "RUNNING"
        self.result_proxy = result.TestResultProxy()
        self.sysinfo = None
Example #7
    def __init__(self, args=None):
        """
        Creates an instance of the Job class.

        :param args: an instance of :class:`argparse.Namespace`.
        """
        if args is None:
            args = argparse.Namespace()
        self.args = args
        self.standalone = getattr(self.args, 'standalone', False)
        unique_id = getattr(self.args, 'unique_job_id', None)
        if unique_id is None:
            unique_id = job_id.create_unique_job_id()
        self.unique_id = unique_id
        self.view = output.View(app_args=self.args)
        self.logdir = None
        raw_log_level = settings.get_value('job.output', 'loglevel',
                                           default='debug')
        mapping = {'info': logging.INFO,
                   'debug': logging.DEBUG,
                   'warning': logging.WARNING,
                   'error': logging.ERROR,
                   'critical': logging.CRITICAL}
        if raw_log_level in mapping:
            self.loglevel = mapping[raw_log_level]
        else:
            self.loglevel = logging.DEBUG
        self.show_job_log = getattr(self.args, 'show_job_log', False)
        self.silent = getattr(self.args, 'silent', False)

        if self.standalone:
            self.show_job_log = True
            if self.args is not None:
                setattr(self.args, 'show_job_log', True)

        if self.show_job_log:
            if not self.silent:
                output.add_console_handler(_TEST_LOGGER)
                _TEST_LOGGER.setLevel(self.loglevel)

        self.test_dir = data_dir.get_test_dir()
        self.test_index = 1
        self.status = "RUNNING"
        self.result_proxy = result.TestResultProxy()
        self.sysinfo = None
        self.timeout = getattr(self.args, 'job_timeout', 0)
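
The log level handling above is a plain name-to-constant mapping with a DEBUG
fallback for unrecognized values; the raw name comes from the job.output
loglevel settings key. A standalone sketch of just that mapping:

    import logging

    _LEVELS = {'info': logging.INFO,
               'debug': logging.DEBUG,
               'warning': logging.WARNING,
               'error': logging.ERROR,
               'critical': logging.CRITICAL}

    def resolve_loglevel(raw_log_level='debug'):
        # Unknown names silently fall back to DEBUG, as in Job.__init__() above.
        return _LEVELS.get(raw_log_level, logging.DEBUG)

    print(resolve_loglevel('warning') == logging.WARNING)   # True
    print(resolve_loglevel('bogus') == logging.DEBUG)       # True
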
Example #8
 def __init__(self):
     self.HEADER = self.COLOR_BLUE
     self.PASS = self.COLOR_GREEN
     self.SKIP = self.COLOR_YELLOW
     self.FAIL = self.COLOR_RED
     self.ERROR = self.COLOR_RED
     self.WARN = self.COLOR_YELLOW
     self.PARTIAL = self.COLOR_YELLOW
     self.ENDC = self.CONTROL_END
     self.LOWLIGHT = self.COLOR_DARKGREY
     self.enabled = True
     term = os.environ.get("TERM")
     colored = settings.get_value('runner.output',
                                  'colored',
                                  key_type='bool')
     if ((not colored) or (not os.isatty(1))
             or (term not in self.allowed_terms)):
         self.disable()
Example #9
 def __init__(self):
     self.HEADER = self.COLOR_BLUE
     self.PASS = self.COLOR_GREEN
     self.SKIP = self.COLOR_YELLOW
     self.FAIL = self.COLOR_RED
     self.INTERRUPT = self.COLOR_RED
     self.ERROR = self.COLOR_RED
     self.WARN = self.COLOR_YELLOW
     self.PARTIAL = self.COLOR_YELLOW
     self.ENDC = self.CONTROL_END
     self.LOWLIGHT = self.COLOR_DARKGREY
     self.enabled = True
     term = os.environ.get("TERM")
     colored = settings.get_value('runner.output', 'colored',
                                  key_type='bool')
     if ((not colored) or (not os.isatty(1)) or
             (term not in self.allowed_terms)):
         self.disable()
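
Colored output above stays enabled only when three conditions hold: the
runner.output colored setting is true, file descriptor 1 is a TTY, and $TERM is
in the allowed list. A standalone sketch of that decision; the allowed_terms
tuple is an assumption for illustration, not the class's actual attribute:

    import os

    ALLOWED_TERMS = ('linux', 'screen', 'xterm', 'xterm-256color')  # illustrative

    def color_enabled(colored, allowed_terms=ALLOWED_TERMS):
        # 'colored' would come from settings.get_value('runner.output',
        # 'colored', key_type='bool'); the other checks mirror __init__() above.
        term = os.environ.get('TERM')
        return bool(colored and os.isatty(1) and term in allowed_terms)

    print(color_enabled(True))
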
Example #10
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Lucas Meneghel Rodrigues <*****@*****.**>
"""
Default values used in tests and plugin code.
"""

from avocado.core import data_dir
from avocado.settings import settings
from avocado.settings import SettingsError
from avocado.virt.qemu import path

try:
    qemu_bin = settings.get_value('virt.qemu.paths', 'qemu_bin')
except SettingsError:
    try:
        qemu_bin = path.get_qemu_binary()
    except path.QEMUCmdNotFoundError:
        qemu_bin = 'qemu'

try:
    qemu_dst = settings.get_value('virt.qemu.paths', 'qemu_dst_bin')
except SettingsError:
    try:
        qemu_dst = path.get_qemu_dst_binary()
    except path.QEMUCmdNotFoundError:
        qemu_dst = 'qemu'

try:
Example #11
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        self.parser = parser.subcommands.add_parser(
            'run',
            help='Run one or more tests (native test, test alias, '
                 'binary or script)')

        self.parser.add_argument('url',
                                 type=str,
                                 default=[],
                                 nargs='+',
                                 help='List of test IDs (aliases or paths)')

        self.parser.add_argument('-z',
                                 '--archive',
                                 action='store_true',
                                 default=False,
                                 help='Archive (ZIP) files generated by tests')

        self.parser.add_argument(
            '--force-job-id',
            dest='unique_job_id',
            type=str,
            default=None,
            help=('Forces the use of a particular job ID. Used '
                  'internally when interacting with an avocado '
                  'server. You should not use this option '
                  'unless you know exactly what you\'re doing'))

        self.parser.add_argument('--job-results-dir',
                                 action='store',
                                 dest='logdir',
                                 default=None,
                                 metavar='DIRECTORY',
                                 help=('Forces the use of an alternate job '
                                       'results directory.'))

        self.parser.add_argument(
            '--job-timeout',
            action='store',
            default=None,
            metavar='SECONDS',
            help=('Set the maximum amount of time (in SECONDS) that '
                  'tests are allowed to execute. '
                  'Note that zero means "no timeout". '
                  'You can also use suffixes, like: '
                  ' s (seconds), m (minutes), h (hours). '))

        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        self.parser.add_argument('--sysinfo',
                                 choices=('on', 'off'),
                                 default=sysinfo_default,
                                 help=('Enable or disable system information '
                                       '(hardware details, profilers, etc.). '
                                       'Current:  %(default)s'))

        self.parser.output = self.parser.add_argument_group(
            'output related arguments')

        self.parser.output.add_argument('-s',
                                        '--silent',
                                        action='store_true',
                                        default=False,
                                        help='Silence stdout')

        self.parser.output.add_argument(
            '--show-job-log',
            action='store_true',
            default=False,
            help=('Display only the job log on stdout. Useful '
                  'for test debugging purposes. No output will '
                  'be displayed if you also specify --silent'))

        out_check = self.parser.add_argument_group('output check arguments')

        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help=('Record output streams of your tests '
                                     'to reference files (valid options: '
                                     'none (do not record output streams), '
                                     'all (record both stdout and stderr), '
                                     'stdout (record only stdout), '
                                     'stderr (record only stderr). '
                                     'Current: %(default)s'))

        out_check.add_argument(
            '--output-check',
            choices=('on', 'off'),
            default='on',
            help=('Enable or disable test output (stdout/stderr) check. '
                  'If this option is off, no output will '
                  'be checked, even if there are reference files '
                  'present for the test. '
                  'Current: on (output check enabled)'))

        if multiplexer.MULTIPLEX_CAPABLE:
            mux = self.parser.add_argument_group('multiplex arguments')
            mux.add_argument('-m',
                             '--multiplex-files',
                             nargs='*',
                             default=None,
                             help='Path(s) to avocado multiplex (.yaml) '
                             'file(s) (order dependent)')
            mux.add_argument('--filter-only',
                             nargs='*',
                             default=[],
                             help='Filter only path(s) from multiplexing')
            mux.add_argument('--filter-out',
                             nargs='*',
                             default=[],
                             help='Filter out path(s) from multiplexing')
            mux.add_argument('--mux-entry',
                             nargs='*',
                             default=None,
                             help="Multiplex entry point(s)")

        super(TestRunner, self).configure(self.parser)
        # Export the test runner parser back to the main parser
        parser.runner = self.parser
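
A minimal, self-contained stand-in for the 'run' subparser configured above,
reproducing only a few of its options to show how parsed values land on the
resulting Namespace (the sample command line is made up):

    import argparse

    parser = argparse.ArgumentParser(prog='avocado')
    subcommands = parser.add_subparsers(dest='subcommand')
    run = subcommands.add_parser('run')
    run.add_argument('url', type=str, default=[], nargs='+',
                     help='List of test IDs (aliases or paths)')
    run.add_argument('-z', '--archive', action='store_true', default=False)
    run.add_argument('--sysinfo', choices=('on', 'off'), default='on')

    args = parser.parse_args(['run', 'sleeptest', 'failtest', '--sysinfo', 'off'])
    print(args.url, args.archive, args.sysinfo)   # ['sleeptest', 'failtest'] False off
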
Example #12
* The next best location is the default system wide one.
* The next best location is the default user specific one.
"""
import os
import sys
import shutil
import time
import tempfile

from avocado.settings import settings

_BASE_DIR = os.path.join(sys.modules[__name__].__file__, "..", "..", "..")
_BASE_DIR = os.path.abspath(_BASE_DIR)
_IN_TREE_TESTS_DIR = os.path.join(_BASE_DIR, 'tests')

SETTINGS_BASE_DIR = os.path.expanduser(settings.get_value('runner', 'base_dir'))
SETTINGS_TEST_DIR = os.path.expanduser(settings.get_value('runner', 'test_dir'))
SETTINGS_DATA_DIR = os.path.expanduser(settings.get_value('runner', 'data_dir'))
SETTINGS_LOG_DIR = os.path.expanduser(settings.get_value('runner', 'logs_dir'))
SETTINGS_TMP_DIR = os.path.expanduser(settings.get_value('runner', 'tmp_dir'))

SYSTEM_BASE_DIR = '/var/lib/avocado'
SYSTEM_TEST_DIR = os.path.join(SYSTEM_BASE_DIR, 'tests')
SYSTEM_DATA_DIR = os.path.join(SYSTEM_BASE_DIR, 'data')
SYSTEM_LOG_DIR = os.path.join(SYSTEM_BASE_DIR, 'logs')
SYSTEM_TMP_DIR = '/var/tmp/avocado'

USER_BASE_DIR = os.path.expanduser('~/avocado')
USER_TEST_DIR = os.path.join(USER_BASE_DIR, 'tests')
USER_DATA_DIR = os.path.join(USER_BASE_DIR, 'data')
USER_LOG_DIR = os.path.join(USER_BASE_DIR, 'logs')
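
The module docstring fragment above describes a fallback order for data
directories: the configured location first, then the system-wide defaults, then
the per-user ones. A hedged sketch of that idea, assuming the selection is
simply "first existing, writable candidate wins"; the real module's logic may
differ:

    import os

    def first_usable_dir(candidates):
        # Illustrative only: take the first existing, writable candidate,
        # otherwise fall back to the last entry.
        for path in candidates:
            if os.path.isdir(path) and os.access(path, os.W_OK):
                return path
        return candidates[-1]

    print(first_usable_dir(['/var/lib/avocado/logs',
                            os.path.expanduser('~/avocado/logs')]))
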
Example #13
    def __init__(self, basedir=None, log_packages=None, profiler=None):
        """
        Set sysinfo collectibles.

        :param basedir: Base log dir where sysinfo files will be located.
        :param log_packages: Whether to log system packages (optional because
                             logging packages is a costly operation). If not
                             given explicitly, tries to look in the config
                             files, and if not found, defaults to False.
        :param profiler: Whether to use the profiler. If not given explicitly,
                         tries to look in the config files.
        """
        if basedir is None:
            basedir = utils.path.init_dir('sysinfo')
        self.basedir = basedir

        self._installed_pkgs = None
        if log_packages is None:
            self.log_packages = settings.get_value('sysinfo.collect',
                                                   'installed_packages',
                                                   key_type='bool',
                                                   default=False)
        else:
            self.log_packages = log_packages

        commands_file = settings.get_value('sysinfo.collectibles',
                                           'commands',
                                           key_type='str',
                                           default='')
        log.info('Commands configured by file: %s', commands_file)
        self.commands = utils.genio.read_all_lines(commands_file)

        files_file = settings.get_value('sysinfo.collectibles',
                                        'files',
                                        key_type='str',
                                        default='')
        log.info('Files configured by file: %s', files_file)
        self.files = utils.genio.read_all_lines(files_file)

        if profiler is None:
            self.profiler = settings.get_value('sysinfo.collect',
                                               'profiler',
                                               key_type='bool',
                                               default=False)
        else:
            self.profiler = profiler

        profiler_file = settings.get_value('sysinfo.collectibles',
                                           'profilers',
                                           key_type='str',
                                           default='')
        self.profilers = utils.genio.read_all_lines(profiler_file)

        log.info('Profilers configured by file: %s', profiler_file)
        log.info('Profilers declared: %s', self.profilers)
        if not self.profilers:
            self.profiler = False

        if self.profiler is False:
            if not self.profilers:
                log.info('Profiler disabled: no profiler commands configured')
            else:
                log.info('Profiler disabled')

        self.start_job_collectibles = set()
        self.end_job_collectibles = set()

        self.start_test_collectibles = set()
        self.end_test_collectibles = set()

        self.hook_mapping = {'start_job': self.start_job_collectibles,
                             'end_job': self.end_job_collectibles,
                             'start_test': self.start_test_collectibles,
                             'end_test': self.end_test_collectibles}

        self.pre_dir = utils.path.init_dir(self.basedir, 'pre')
        self.post_dir = utils.path.init_dir(self.basedir, 'post')
        self.profile_dir = utils.path.init_dir(self.basedir, 'profile')

        self._set_collectibles()
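
Each sysinfo.collectibles key above ('commands', 'files', 'profilers') names a
plain-text file that is read line by line via utils.genio.read_all_lines(). A
self-contained sketch of that consumption, with a stand-in reader and made-up
file contents:

    import os
    import tempfile

    def read_all_lines(path):
        # Stand-in for utils.genio.read_all_lines(): missing files yield [].
        try:
            with open(path) as handle:
                return [line.rstrip('\n') for line in handle if line.strip()]
        except IOError:
            return []

    fd, commands_file = tempfile.mkstemp(suffix='.list')
    with os.fdopen(fd, 'w') as handle:
        handle.write('df -h\nuname -a\n')   # example sysinfo commands

    print(read_all_lines(commands_file))         # ['df -h', 'uname -a']
    print(read_all_lines('/nonexistent/file'))   # []
    os.unlink(commands_file)
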
Example #14
File: job.py Project: ypu/avocado
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                which indicate a job failure.
        """
        if urls is None:
            urls = getattr(self.args, 'url', None)

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        mux = multiplexer.Mux(self.args)
        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile,
                                     self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value('runner.behavior', 'keep_tmp_files',
                                  key_type=bool, default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
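
The start of _run() above accepts the test list either as a whitespace-separated
string or as a list, and falls back to args.url when nothing is passed. A tiny
standalone illustration of that normalization:

    def normalize_urls(urls):
        # Mirrors the handling at the top of _run(): strings are split on
        # whitespace, lists pass through unchanged, empty input stays falsy.
        if isinstance(urls, str):
            urls = urls.split()
        return urls

    print(normalize_urls('sleeptest failtest'))   # ['sleeptest', 'failtest']
    print(normalize_urls(['sleeptest']))          # ['sleeptest']
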
Example #15
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        self.parser = parser.subcommands.add_parser(
            'run',
            help='Run one or more tests (native test, test alias, binary or script)')

        self.parser.add_argument('url', type=str, default=[], nargs='+',
                                 help='List of test IDs (aliases or paths)')

        self.parser.add_argument('-z', '--archive', action='store_true', default=False,
                                 help='Archive (ZIP) files generated by tests')

        self.parser.add_argument('--force-job-id', dest='unique_job_id',
                                 type=str, default=None,
                                 help=('Forces the use of a particular job ID. Used '
                                       'internally when interacting with an avocado '
                                       'server. You should not use this option '
                                       'unless you know exactly what you\'re doing'))

        self.parser.add_argument('--job-results-dir', action='store',
                                 dest='logdir', default=None, metavar='DIRECTORY',
                                 help=('Forces the use of an alternate job '
                                       'results directory.'))

        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        self.parser.add_argument('--sysinfo', choices=('on', 'off'), default=sysinfo_default,
                                 help=('Enable or disable system information '
                                       '(hardware details, profilers, etc.). '
                                       'Current:  %(default)s'))

        self.parser.output = self.parser.add_argument_group('output related arguments')

        self.parser.output.add_argument(
            '-s', '--silent', action='store_true', default=False,
            help='Silence stdout')

        self.parser.output.add_argument(
            '--show-job-log', action='store_true', default=False,
            help=('Display only the job log on stdout. Useful '
                  'for test debugging purposes. No output will '
                  'be displayed if you also specify --silent'))

        out_check = self.parser.add_argument_group('output check arguments')

        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help=('Record output streams of your tests '
                                     'to reference files (valid options: '
                                     'none (do not record output streams), '
                                     'all (record both stdout and stderr), '
                                     'stdout (record only stdout), '
                                     'stderr (record only stderr). '
                                     'Current: %(default)s'))

        out_check.add_argument('--output-check', choices=('on', 'off'),
                               default='on',
                               help=('Enable or disable test output (stdout/stderr) check. '
                                     'If this option is off, no output will '
                                     'be checked, even if there are reference files '
                                     'present for the test. '
                                     'Current: on (output check enabled)'))

        if multiplexer.MULTIPLEX_CAPABLE:
            mux = self.parser.add_argument_group('multiplex arguments')
            mux.add_argument('-m', '--multiplex-files', nargs='*', default=None,
                             help='Path(s) to avocado multiplex (.yaml) '
                             'file(s) (order dependent)')
            mux.add_argument('--filter-only', nargs='*', default=[],
                             help='Filter only path(s) from multiplexing')
            mux.add_argument('--filter-out', nargs='*', default=[],
                             help='Filter out path(s) from multiplexing')

        super(TestRunner, self).configure(self.parser)
        # Export the test runner parser back to the main parser
        parser.runner = self.parser
Example #16
    def __init__(self, basedir=None, log_packages=None, profilers=None):
        """
        Set sysinfo loggables.

        :param basedir: Base log dir where sysinfo files will be located.
        :param log_packages: Whether to log system packages (optional because
                             logging packages is a costly operation). If not
                             given explicitly, tries to look in the config
                             files, and if not found, defaults to False.
        :param profilers: Whether to use the profiler. If not given explicitly,
                          tries to look in the config files.
        """
        if basedir is None:
            basedir = utils.path.init_dir(os.getcwd(), 'sysinfo')

        self.basedir = basedir

        self._installed_pkgs = None
        if log_packages is None:
            self.log_packages = settings.get_value('sysinfo.collect',
                                                   'installed_packages',
                                                   key_type='bool',
                                                   default=False)
        else:
            self.log_packages = log_packages

        if profilers is None:
            self.profiler = settings.get_value('sysinfo.collect',
                                               'profiler',
                                               key_type='bool',
                                               default=False)
            profiler_commands = settings.get_value('sysinfo.collect',
                                                   'profiler_commands',
                                                   key_type='str',
                                                   default='')
        else:
            self.profiler = True
            profiler_commands = profilers

        self.profiler_commands = [
            x for x in profiler_commands.split(':') if x.strip()
        ]
        log.info('Profilers declared: %s', self.profiler_commands)
        if not self.profiler_commands:
            self.profiler = False

        if self.profiler is False:
            if not self.profiler_commands:
                log.info('Profiler disabled: no profiler commands configured')
            else:
                log.info('Profiler disabled')

        self.start_job_loggables = set()
        self.end_job_loggables = set()

        self.start_test_loggables = set()
        self.end_test_loggables = set()

        self.start_iteration_loggables = set()
        self.end_iteration_loggables = set()

        self.hook_mapping = {
            'start_job': self.start_job_loggables,
            'end_job': self.end_job_loggables,
            'start_test': self.start_test_loggables,
            'end_test': self.end_test_loggables,
            'start_iteration': self.start_iteration_loggables,
            'end_iteration': self.end_iteration_loggables
        }

        self.pre_dir = utils.path.init_dir(self.basedir, 'pre')
        self.post_dir = utils.path.init_dir(self.basedir, 'post')
        self.profile_dir = utils.path.init_dir(self.basedir, 'profile')

        self._set_loggables()
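
When the profilers come from configuration above, the sysinfo.collect
profiler_commands value is a colon-separated list of commands, with empty
entries discarded. A short illustration with made-up commands:

    profiler_commands = 'perf stat -a sleep 1::vmstat 1'   # hypothetical config value
    commands = [x for x in profiler_commands.split(':') if x.strip()]
    print(commands)   # ['perf stat -a sleep 1', 'vmstat 1']
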
Example #17
File: job.py Project: ypu/avocado
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                which indicate a job failure.
        """
        if urls is None:
            urls = getattr(self.args, 'url', None)

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        mux = multiplexer.Mux(self.args)
        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile, self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value(
                'runner.behavior', 'keep_tmp_files', key_type=bool,
                default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
Example #18
import sys
import shutil
import time
import tempfile

from avocado.core import job_id
from avocado.utils import path as utils_path
from avocado.utils.data_structures import Borg
from avocado.settings import settings

_BASE_DIR = os.path.join(sys.modules[__name__].__file__, "..", "..", "..")
_BASE_DIR = os.path.abspath(_BASE_DIR)
_IN_TREE_TESTS_DIR = os.path.join(_BASE_DIR, 'examples', 'tests')

SETTINGS_BASE_DIR = os.path.expanduser(
    settings.get_value('datadir.paths', 'base_dir'))
SETTINGS_TEST_DIR = os.path.expanduser(
    settings.get_value('datadir.paths', 'test_dir'))
SETTINGS_DATA_DIR = os.path.expanduser(
    settings.get_value('datadir.paths', 'data_dir'))
SETTINGS_LOG_DIR = os.path.expanduser(
    settings.get_value('datadir.paths', 'logs_dir'))

SYSTEM_BASE_DIR = '/var/lib/avocado'
if 'VIRTUAL_ENV' in os.environ:
    SYSTEM_BASE_DIR = os.environ['VIRTUAL_ENV']
SYSTEM_TEST_DIR = os.path.join(SYSTEM_BASE_DIR, 'tests')
SYSTEM_DATA_DIR = os.path.join(SYSTEM_BASE_DIR, 'data')
SYSTEM_LOG_DIR = os.path.join(SYSTEM_BASE_DIR, 'job-results')

USER_BASE_DIR = os.path.expanduser('~/avocado')
Example #19
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        self.parser = parser.subcommands.add_parser(
            'run',
            help='Run one or more tests (native test, test alias, '
                 'binary or script)')

        self.parser.add_argument('url',
                                 type=str,
                                 default=[],
                                 nargs='+',
                                 help='List of test IDs (aliases or paths)')

        self.parser.add_argument('-z',
                                 '--archive',
                                 action='store_true',
                                 default=False,
                                 help='Archive (ZIP) files generated by tests')

        self.parser.add_argument(
            '--keep-tmp-files',
            action='store_true',
            default=False,
            help='Keep temporary files generated by tests')

        self.parser.add_argument(
            '--force-job-id',
            dest='unique_job_id',
            type=str,
            default=None,
            help=('Forces the use of a particular job ID. Used '
                  'internally when interacting with an avocado '
                  'server. You should not use this option '
                  'unless you know exactly what you\'re doing'))

        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        self.parser.add_argument('--sysinfo',
                                 choices=('on', 'off'),
                                 default=sysinfo_default,
                                 help=('Enable or disable system information '
                                       '(hardware details, profilers, etc.). '
                                       'Current:  %(default)s'))

        out = self.parser.add_argument_group('output related arguments')

        out.add_argument('-s',
                         '--silent',
                         action='store_true',
                         default=False,
                         help='Silence stdout')

        out.add_argument('--show-job-log',
                         action='store_true',
                         default=False,
                         help=('Display only the job log on stdout. Useful '
                               'for test debugging purposes. No output will '
                               'be displayed if you also specify --silent'))

        out.add_argument('--job-log-level',
                         action='store',
                         help=("Log level of the job log. Options: "
                               "'debug', 'info', 'warning', 'error', "
                               "'critical'. Current: debug"),
                         default='debug')

        out_check = self.parser.add_argument_group('output check arguments')

        out_check.add_argument('--output-check-record',
                               type=str,
                               default='none',
                               help=('Record output streams of your tests '
                                     'to reference files (valid options: '
                                     'none (do not record output streams), '
                                     'all (record both stdout and stderr), '
                                     'stdout (record only stdout), '
                                     'stderr (record only stderr). '
                                     'Current: none'))

        out_check.add_argument(
            '--disable-output-check',
            action='store_true',
            default=False,
            help=('Disable test output (stdout/stderr) check. '
                  'If this option is selected, no output will '
                  'be checked, even if there are reference files '
                  'present for the test. '
                  'Current: False (output check enabled)'))

        if multiplexer.MULTIPLEX_CAPABLE:
            mux = self.parser.add_argument_group('multiplex arguments')
            mux.add_argument(
                '-m',
                '--multiplex-files',
                nargs='*',
                default=None,
                help='Path(s) to avocado multiplex (.yaml) file(s)')
            mux.add_argument('--filter-only',
                             nargs='*',
                             default=[],
                             help='Filter only path(s) from multiplexing')
            mux.add_argument('--filter-out',
                             nargs='*',
                             default=[],
                             help='Filter out path(s) from multiplexing')

        super(TestRunner, self).configure(self.parser)
        # Export the test runner parser back to the main parser
        parser.runner = self.parser
Example #20
    def __init__(self, basedir=None, log_packages=None, profiler=None):
        """
        Set sysinfo collectibles.

        :param basedir: Base log dir where sysinfo files will be located.
        :param log_packages: Whether to log system packages (optional because
                             logging packages is a costly operation). If not
                             given explicitly, tries to look in the config
                             files, and if not found, defaults to False.
        :param profiler: Whether to use the profiler. If not given explicitly,
                         tries to look in the config files.
        """
        if basedir is None:
            basedir = utils.path.init_dir('sysinfo')
        self.basedir = basedir

        self._installed_pkgs = None
        if log_packages is None:
            self.log_packages = settings.get_value('sysinfo.collect',
                                                   'installed_packages',
                                                   key_type='bool',
                                                   default=False)
        else:
            self.log_packages = log_packages

        commands_file = settings.get_value('sysinfo.collectibles',
                                           'commands',
                                           key_type='str',
                                           default='')
        log.info('Commands configured by file: %s', commands_file)
        self.commands = utils.genio.read_all_lines(commands_file)

        files_file = settings.get_value('sysinfo.collectibles',
                                        'files',
                                        key_type='str',
                                        default='')
        log.info('Files configured by file: %s', files_file)
        self.files = utils.genio.read_all_lines(files_file)

        if profiler is None:
            self.profiler = settings.get_value('sysinfo.collect',
                                               'profiler',
                                               key_type='bool',
                                               default=False)
        else:
            self.profiler = profiler

        profiler_file = settings.get_value('sysinfo.collectibles',
                                           'profilers',
                                           key_type='str',
                                           default='')
        self.profilers = utils.genio.read_all_lines(profiler_file)

        log.info('Profilers configured by file: %s', profiler_file)
        log.info('Profilers declared: %s', self.profilers)
        if not self.profilers:
            self.profiler = False

        if self.profiler is False:
            if not self.profilers:
                log.info('Profiler disabled: no profiler commands configured')
            else:
                log.info('Profiler disabled')

        self.start_job_collectibles = set()
        self.end_job_collectibles = set()

        self.start_test_collectibles = set()
        self.end_test_collectibles = set()

        self.hook_mapping = {
            'start_job': self.start_job_collectibles,
            'end_job': self.end_job_collectibles,
            'start_test': self.start_test_collectibles,
            'end_test': self.end_test_collectibles
        }

        self.pre_dir = utils.path.init_dir(self.basedir, 'pre')
        self.post_dir = utils.path.init_dir(self.basedir, 'post')
        self.profile_dir = utils.path.init_dir(self.basedir, 'profile')

        self._set_collectibles()
Example #21
import sys
import shutil
import time
import tempfile

from avocado.core import job_id
from avocado.utils import path as utils_path
from avocado.utils.data_structures import Borg
from avocado.settings import settings


_BASE_DIR = os.path.join(sys.modules[__name__].__file__, "..", "..", "..")
_BASE_DIR = os.path.abspath(_BASE_DIR)
_IN_TREE_TESTS_DIR = os.path.join(_BASE_DIR, 'examples', 'tests')

SETTINGS_BASE_DIR = os.path.expanduser(settings.get_value('datadir.paths', 'base_dir'))
SETTINGS_TEST_DIR = os.path.expanduser(settings.get_value('datadir.paths', 'test_dir'))
SETTINGS_DATA_DIR = os.path.expanduser(settings.get_value('datadir.paths', 'data_dir'))
SETTINGS_LOG_DIR = os.path.expanduser(settings.get_value('datadir.paths', 'logs_dir'))

SYSTEM_BASE_DIR = '/var/lib/avocado'
if 'VIRTUAL_ENV' in os.environ:
    SYSTEM_BASE_DIR = os.environ['VIRTUAL_ENV']
SYSTEM_TEST_DIR = os.path.join(SYSTEM_BASE_DIR, 'tests')
SYSTEM_DATA_DIR = os.path.join(SYSTEM_BASE_DIR, 'data')
SYSTEM_LOG_DIR = os.path.join(SYSTEM_BASE_DIR, 'job-results')

USER_BASE_DIR = os.path.expanduser('~/avocado')
USER_TEST_DIR = os.path.join(USER_BASE_DIR, 'tests')
USER_DATA_DIR = os.path.join(USER_BASE_DIR, 'data')
USER_LOG_DIR = os.path.join(USER_BASE_DIR, 'job-results')