Example #1
0
def add_basic_vt_options(parser):
    """
    Register the basic vt command line options on ``parser``.

    :param parser: an argparse parser (or argument group) instance.
    """
    parser.add_argument("--vt-config", action="store", dest="vt_config",
                        help=("Explicitly choose a cartesian config. When "
                              "choosing this, some options will be ignored "
                              "(see options below)"))
    type_help = ("Choose test type (%s). Default: %%(default)s" %
                 ", ".join(SUPPORTED_TEST_TYPES))
    parser.add_argument("--vt-type", action="store", dest="vt_type",
                        default='qemu', help=type_help)
    default_arch = settings.get_value('vt.common', 'arch', default=None)
    parser.add_argument("--vt-arch", default=default_arch,
                        help=("Choose the VM architecture. "
                              "Default: %(default)s"))
    default_machine = settings.get_value('vt.common', 'machine_type',
                                         default=defaults.DEFAULT_MACHINE_TYPE)
    parser.add_argument("--vt-machine-type", default=default_machine,
                        help=("Choose the VM machine type."
                              " Default: %(default)s"))
    parser.add_argument("--vt-guest-os", action="store", dest="vt_guest_os",
                        default=defaults.DEFAULT_GUEST_OS,
                        help=("Select the guest OS to be used. If --vt-config "
                              "is provided, this will be ignored. Default: "
                              "%(default)s"))
    parser.add_argument("--vt-no-filter", action="store", dest="vt_no_filter",
                        default="",
                        help=("List of space separated 'no' filters to be "
                              "passed to the config parser.  Default: "
                              "'%(default)s'"))
    parser.add_argument("--vt-only-filter", action="store",
                        dest="vt_only_filter", default="",
                        help=("List of space separated 'only' filters to be "
                              "passed to the config parser.  Default: "
                              "'%(default)s'"))
Example #2
0
def add_qemu_bin_vt_option(parser):
    """
    Register the --vt-qemu-bin and --vt-qemu-dst-bin options on ``parser``.

    :param parser: an argparse parser (or argument group) instance.
    """
    def _describe(binary):
        # Text shown to the user as the currently detected binary
        return "Could not find one" if binary is None else binary

    try:
        default_qemu = standalone_test.find_default_qemu_paths()[0]
    except (RuntimeError, utils_path.CmdNotFoundError):
        default_qemu = None
    qemu_bin = settings.get_value('vt.qemu', 'qemu_bin',
                                  default=default_qemu)
    parser.add_argument("--vt-qemu-bin", action="store", dest="vt_qemu_bin",
                        default=qemu_bin,
                        help=("Path to a custom qemu binary to be tested. "
                              "If --vt-config is provided and this flag is "
                              "omitted, no attempt to set the qemu binaries "
                              "will be made. Current: %s"
                              % _describe(qemu_bin)))
    qemu_dst = settings.get_value('vt.qemu', 'qemu_dst_bin',
                                  default=default_qemu)
    parser.add_argument("--vt-qemu-dst-bin", action="store",
                        dest="vt_dst_qemu_bin", default=qemu_dst,
                        help=("Path to a custom qemu binary to be tested "
                              "for the destination of a migration, overrides "
                              "--vt-qemu-bin. If --vt-config is provided and "
                              "this flag is omitted, no attempt to set the "
                              "qemu binaries will be made. Current: %s"
                              % _describe(qemu_dst)))
Example #3
0
    def run(self, args):
        """
        Configure the gdb module globals from the parsed command line args.

        :param args: the parsed argparse namespace.
        """
        if 'gdb_run_bin' in args:
            for binary in args.gdb_run_bin:
                gdb.GDB_RUN_BINARY_NAMES_EXPR.append(binary)

        if 'gdb_prerun_commands' in args:
            for commands in args.gdb_prerun_commands:
                if ':' in commands:
                    binary, commands_path = commands.split(':', 1)
                    # FIX: key the prerun commands by the binary name the
                    # user gave ("<binary>:<commands_path>"), not by the
                    # literal string 'binary'
                    gdb.GDB_PRERUN_COMMANDS[binary] = commands_path
                else:
                    # No binary given: the commands apply to all binaries
                    gdb.GDB_PRERUN_COMMANDS[''] = commands

        if 'gdb_coredump' in args:
            gdb.GDB_ENABLE_CORE = True if args.gdb_coredump == 'on' else False

        # Resolve gdb/gdbserver paths: config file value wins over the
        # binary found on the system PATH
        system_gdb_path = utils_path.find_command('gdb', '/usr/bin/gdb')
        gdb.GDB_PATH = settings.get_value('gdb.paths', 'gdb',
                                          default=system_gdb_path)
        system_gdbserver_path = utils_path.find_command('gdbserver',
                                                        '/usr/bin/gdbserver')
        gdb.GDBSERVER_PATH = settings.get_value('gdb.paths',
                                                'gdbserver',
                                                default=system_gdbserver_path)
        process.UNDEFINED_BEHAVIOR_EXCEPTION = exceptions.TestError
Example #4
0
 def __init__(self):
     """
     Load the plugin tunables from the avocado settings file.
     """
     get = settings.get_value
     # Boolean knobs read from the plugin's own config section
     self.warn_non_existing_dir = get(section=CONFIG_SECTION,
                                      key="warn_non_existing_dir",
                                      key_type=bool,
                                      default=False)
     self.warn_non_zero_status = get(section=CONFIG_SECTION,
                                     key="warn_non_zero_status",
                                     key_type=bool,
                                     default=True)
Example #5
0
    def run(self, args):
        """
        Fill in the resultsdb URLs on ``args`` from the settings file when
        they were not given on the command line.

        :param args: the parsed argparse namespace.
        """
        for attr, key in (('resultsdb_api', 'api_url'),
                          ('resultsdb_logs', 'logs_url')):
            if getattr(args, attr, None) is None:
                value = settings.get_value('plugins.resultsdb', key,
                                           default=None)
                if value is not None:
                    setattr(args, attr, value)
Example #6
0
    def _setup_job(job_id):
        """
        Resolve ``job_id`` into a (resultsdir, sourcejob) pair.

        ``job_id`` may be a results directory, a file inside a results
        directory, or a (partial) job id to be looked up in the configured
        logs directory.  Exits the process on failure.

        :param job_id: job identifier, directory or file path.
        :return: tuple (resultsdir, sourcejob).
        """
        # FIX: logdir was unbound when job_id named a directory/file and
        # the error path below was reached
        logdir = None
        if os.path.isdir(job_id):
            resultsdir = os.path.expanduser(job_id)
            job_id = ''
        elif os.path.isfile(job_id):
            resultsdir = os.path.dirname(os.path.expanduser(job_id))
            job_id = ''
        else:
            logdir = settings.get_value(section='datadir.paths',
                                        key='logs_dir', key_type='path',
                                        default=None)
            try:
                resultsdir = jobdata.get_resultsdir(logdir, job_id)
            except ValueError as exception:
                LOG_UI.error(exception)
                sys.exit(exit_codes.AVOCADO_FAIL)

        if resultsdir is None:
            LOG_UI.error("Can't find job results directory for '%s' in '%s'",
                         job_id, logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # The 'id' file inside the results dir holds the full source job id
        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'), job_id)
        if sourcejob is None:
            LOG_UI.error("Can't find matching job id '%s' in '%s' directory.",
                         job_id, resultsdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        return resultsdir, sourcejob
Example #7
0
    def _setup_job(job_id):
        """
        Resolve ``job_id`` into a (resultsdir, sourcejob) pair.

        ``job_id`` may be a results directory, a file inside a results
        directory, or a (partial) job id to be looked up in the configured
        logs directory.  Exits the process on failure.

        :param job_id: job identifier, directory or file path.
        :return: tuple (resultsdir, sourcejob).
        """
        # FIX: logdir was unbound when job_id named a directory/file and
        # the error path below was reached
        logdir = None
        if os.path.isdir(job_id):
            resultsdir = os.path.expanduser(job_id)
            job_id = ''
        elif os.path.isfile(job_id):
            resultsdir = os.path.dirname(os.path.expanduser(job_id))
            job_id = ''
        else:
            logs_dir = settings.get_value('datadir.paths', 'logs_dir',
                                          default=None)
            logdir = os.path.expanduser(logs_dir)
            resultsdir = replay.get_resultsdir(logdir, job_id)

        if resultsdir is None:
            LOG.error("Can't find job results directory for '%s' in '%s'",
                      job_id, logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # The 'id' file inside the results dir holds the full source job id
        sourcejob = replay.get_id(os.path.join(resultsdir, 'id'), job_id)
        if sourcejob is None:
            LOG.error("Can't find matching job id '%s' in '%s' directory.",
                      job_id, resultsdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        return resultsdir, sourcejob
Example #8
0
    def run(self, args):
        """
        Fill in the result upload URL and command on ``args`` from the
        settings file when they were not given on the command line.

        :param args: the parsed argparse namespace.
        """
        for attr, key in (('result_upload_url', 'url'),
                          ('result_upload_cmd', 'command')):
            if getattr(args, attr, None) is None:
                value = settings.get_value('plugins.result_upload', key,
                                           default=None)
                if value is not None:
                    setattr(args, attr, value)
Example #9
0
 def __init__(self):
     """
     Read the mail notification settings from the configuration file.
     """
     section = "plugins.job.mail"
     self.rcpt = settings.get_value(section=section,
                                    key="recipient", key_type=str,
                                    default='*****@*****.**')
     self.subject = settings.get_value(section=section,
                                       key="subject", key_type=str,
                                       default='[AVOCADO JOB NOTIFICATION]')
     self.sender = settings.get_value(section=section,
                                      key="sender", key_type=str,
                                      default='*****@*****.**')
     self.server = settings.get_value(section=section,
                                      key="server", key_type=str,
                                      default='localhost')
Example #10
0
 def __init__(self):
     """
     Set up logging and read the lock directory from the configuration.
     """
     self.log = logging.getLogger("avocado.app")
     lock_dir = settings.get_value(section="plugins.vtjoblock",
                                   key="dir",
                                   key_type=str,
                                   default='/tmp')
     # Expand a possible '~' in the configured directory
     self.lock_dir = os.path.expanduser(lock_dir)
     self.lock_file = None
Example #11
0
    def __init__(self, hostname, username=None, password=None,
                 key_filename=None, port=22, timeout=60, attempts=10,
                 env_keep=None):
        """
        Creates an instance of :class:`Remote`.

        :param hostname: the hostname.
        :param username: the username. Default: autodetect.
        :param password: the password. Default: try to use public key.
        :param key_filename: path to an identity file (Example: .pem files
            from Amazon EC2).
        :param port: remote ssh port. Default: 22.
        :param timeout: remote command timeout, in seconds. Default: 60.
        :param attempts: number of attempts to connect. Default: 10.
        :param env_keep: environment variables to keep (passed through
            :func:`_get_env_vars`); None keeps nothing.
        """
        self.hostname = hostname
        self.username = getpass.getuser() if username is None else username
        self.key_filename = key_filename
        # A password of None means "authenticate with the public key"
        self.password = password
        self.port = port
        behavior = 'remoter.behavior'
        reject_unknown = settings.get_value(behavior, 'reject_unknown_hosts',
                                            key_type=bool, default=False)
        disable_known = settings.get_value(behavior, 'disable_known_hosts',
                                           key_type=bool, default=False)
        self.env_vars = {} if env_keep is None else _get_env_vars(env_keep)
        # Fabric keeps its connection parameters in a global environment;
        # the per-attempt timeout is the total timeout split over attempts
        fabric.api.env.update(host_string=hostname,
                              user=self.username,
                              password=password,
                              key_filename=key_filename,
                              port=port,
                              timeout=timeout / attempts,
                              connection_attempts=attempts,
                              linewise=True,
                              abort_on_prompts=True,
                              abort_exception=ConnectError,
                              reject_unknown_hosts=reject_unknown,
                              disable_known_hosts=disable_known)
Example #12
0
    def run(self, args):
        """
        Print the multiplexer tree or the generated variants and exit.

        Exits with AVOCADO_FAIL on invalid option combinations,
        AVOCADO_JOB_FAIL when the mux files cannot be parsed and
        AVOCADO_ALL_OK otherwise.

        :param args: the parsed argparse namespace.
        """
        log = logging.getLogger("avocado.app")
        # Validate option combinations up front
        err = None
        if args.tree and args.mux_debug:
            err = "Option --tree is incompatible with --debug."
        elif not args.tree and args.inherit:
            err = "Option --inherit can be only used with --tree"
        if err:
            log.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)
        mux = args.mux
        try:
            mux.parse(args)
        except (IOError, ValueError) as details:
            log.error("Unable to parse mux: %s", details)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        if args.tree:
            # Verbosity: 1 shows node contents, +2 adds inherited values
            if args.contents:
                verbose = 1
            else:
                verbose = 0
            if args.inherit:
                verbose += 2
            use_utf8 = settings.get_value("runner.output", "utf8",
                                          key_type=bool, default=None)
            log.debug(tree.tree_view(mux.variants.root, verbose, use_utf8))
            sys.exit(exit_codes.AVOCADO_ALL_OK)

        log.info('Variants generated:')
        for (index, tpl) in enumerate(mux.variants):
            if not args.mux_debug:
                paths = ', '.join([x.path for x in tpl])
            else:
                # Debug mode also shows which yaml file defined each node
                color = output.TERM_SUPPORT.LOWLIGHT
                cend = output.TERM_SUPPORT.ENDC
                paths = ', '.join(["%s%s@%s%s" % (_.name, color,
                                                  getattr(_, 'yaml',
                                                          "Unknown"),
                                                  cend)
                                   for _ in tpl])
            log.debug('%sVariant %s:    %s', '\n' if args.contents else '',
                      index + 1, paths)
            if args.contents:
                env = set()
                for node in tpl:
                    # FIX: dict.iteritems() is Python 2 only; items()
                    # behaves identically for iteration on Python 2 and 3
                    for key, value in node.environment.items():
                        origin = node.environment_origin[key].path
                        env.add(("%s:%s" % (origin, key), str(value)))
                if not env:
                    continue
                # Align the "origin:key => value" records on the widest key
                fmt = '    %%-%ds => %%s' % max([len(_[0]) for _ in env])
                for record in sorted(env):
                    log.debug(fmt, *record)

        sys.exit(exit_codes.AVOCADO_ALL_OK)
Example #13
0
    def run(self, args):
        """
        Render the parsed variants, optionally dump them as JSON, and exit.

        :param args: the parsed argparse namespace.
        """
        err = None
        if args.tree and args.varianter_debug:
            err = "Option --tree is incompatible with --debug."
        elif args.inherit and not args.tree:
            err = "Option --inherit can be only used with --tree"
        if err:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)
        varianter = args.avocado_variants
        try:
            varianter.parse(args)
        except (IOError, ValueError) as details:
            LOG_UI.error("Unable to parse varianter: %s", details)
            sys.exit(exit_codes.AVOCADO_FAIL)
        use_utf8 = settings.get_value("runner.output", "utf8",
                                      key_type=bool, default=None)
        summary = args.summary or 0
        variants = args.variants or 0

        # Translate the obsolete --tree/--contents/--inherit options into
        # summary/variants verbosity (unsafe to combine them with new args)
        if args.tree:
            variants = 0
            summary += 1
            if args.contents:
                summary += 1
            if args.inherit:
                summary += 2
        elif args.contents:
            variants += 2

        # Export the serialized avocado_variants when requested
        if args.json_variants_dump is not None:
            try:
                with open(args.json_variants_dump, 'w') as variants_file:
                    json.dump(args.avocado_variants.dump(), variants_file)
            except IOError:
                LOG_UI.error("Cannot write %s", args.json_variants_dump)
                sys.exit(exit_codes.AVOCADO_FAIL)

        # Produce the textual output, line by line
        rendered = args.avocado_variants.to_str(summary=summary,
                                                variants=variants,
                                                use_utf8=use_utf8)
        for line in rendered.splitlines():
            LOG_UI.debug(line)

        sys.exit(exit_codes.AVOCADO_ALL_OK)
Example #14
0
    def run(self, args):
        """
        Fill in the resultsdb options on ``args`` from the settings file
        when they were not given on the command line.

        :param args: the parsed argparse namespace.
        """
        for attr, key in (('resultsdb_api', 'api_url'),
                          ('resultsdb_logs', 'logs_url'),
                          ('resultsdb_note_limit', 'note_size_limit')):
            if getattr(args, attr, None) is None:
                value = settings.get_value('plugins.resultsdb', key,
                                           default=None)
                if value is not None:
                    setattr(args, attr, value)
Example #15
0
    def configure(self, parser):
        """
        Add the vt lister options to the 'list' subcommand parser.

        :param parser: Main test runner parser.
        """
        list_parser = parser.subcommands.choices.get('list', None)
        if list_parser is None:
            # The 'list' subcommand is not registered; nothing to extend
            return

        group = list_parser.add_argument_group(
            'Virt-Test compat layer - Lister options')
        group.add_argument("--vt-type", action="store", dest="vt_type",
                           default='qemu',
                           help="Choose test type (%s). Default: qemu" %
                           ", ".join(SUPPORTED_TEST_TYPES))
        group.add_argument("--vt-guest-os", action="store",
                           dest="vt_guest_os", default=None,
                           help="Select the guest OS to be used (different "
                           "guests support different test lists). You can "
                           "list available guests with --vt-list-guests. "
                           "Default: %s" % defaults.DEFAULT_GUEST_OS)
        group.add_argument("--vt-list-guests", action="store_true",
                           default=False, help="List available guests")
        machine = settings.get_value('vt.common', 'machine_type',
                                     default=defaults.DEFAULT_MACHINE_TYPE)
        group.add_argument("--vt-machine-type", default=machine,
                           help="Choose the VM machine type. Default: %s" %
                           machine)
        group.add_argument("--vt-only-filter", action="store",
                           dest="vt_only_filter", default="",
                           help="List of space separated 'only' filters to be"
                           " passed to the config parser.  Default: ''")
Example #16
0
    def configure(self, parser):
        """
        Add the vt options to the 'run' subcommand parser.

        :param parser: Main test runner parser.
        """
        run_subcommand_parser = parser.subcommands.choices.get('run', None)
        if run_subcommand_parser is None:
            # The 'run' subcommand is not registered; nothing to extend
            return

        # FIX: removed the dead qemu_nw_msg local (computed from
        # SUPPORTED_NET_TYPES but never used anywhere in this method)
        vt_compat_group_common = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Common options')
        vt_compat_group_qemu = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - QEMU options')
        vt_compat_group_libvirt = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Libvirt options')

        # Shared option sets reused by other vt entry points
        add_basic_vt_options(vt_compat_group_common)
        add_qemu_bin_vt_option(vt_compat_group_qemu)
        vt_compat_group_qemu.add_argument("--vt-extra-params", nargs='*',
                                          help="List of 'key=value' pairs "
                                          "passed to cartesian parser.")
        supported_uris = ", ".join(SUPPORTED_LIBVIRT_URIS)
        msg = ("Choose test connect uri for libvirt (E.g: %s). "
               "Current: %%(default)s" % supported_uris)
        uri_current = settings.get_value('vt.libvirt', 'connect_uri',
                                         default=None)
        vt_compat_group_libvirt.add_argument("--vt-connect-uri",
                                             action="store",
                                             dest="vt_connect_uri",
                                             default=uri_current,
                                             help=msg)
Example #17
0
    def run(self, args):
        """
        Merge data from a previous (source) job into ``args`` so the
        current job replays it.

        Only acts when a replay job id was given (``args.replay_jobid``).
        Exits the process when the source job data cannot be located or
        conflicts with the requested options.

        :param args: the parsed argparse namespace (mutated in place).
        """
        if getattr(args, 'replay_jobid', None) is None:
            return

        log = logging.getLogger("avocado.app")

        # Reject option combinations that cannot be honored
        err = None
        if args.replay_teststatus and 'variants' in args.replay_ignore:
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore variants`.")
        elif args.replay_teststatus and args.reference:
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif args.remote_hostname:
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            log.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Locate the source job results directory: explicit --job-results-dir
        # wins over the configured logs directory
        if getattr(args, 'logdir', None) is not None:
            logdir = args.logdir
        else:
            logdir = settings.get_value(section='datadir.paths',
                                        key='logs_dir', key_type='path',
                                        default=None)
        try:
            resultsdir = jobdata.get_resultsdir(logdir, args.replay_jobid)
        except ValueError as exception:
            # NOTE(review): exception.message is Python 2 only — confirm the
            # target interpreter version before porting
            log.error(exception.message)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        if resultsdir is None:
            log.error("Can't find job results directory in '%s'", logdir)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        # The 'id' file inside the results dir holds the full source job id
        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory."
                   % (args.replay_jobid, resultsdir))
            log.error(msg)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Load whitelisted options from the source job, unless the user
        # already set them on the current command line
        replay_args = jobdata.retrieve_args(resultsdir)
        whitelist = ['loaders',
                     'external_runner',
                     'external_runner_testdir',
                     'external_runner_chdir',
                     'failfast']
        if replay_args is None:
            log.warn('Source job args data not found. These options will not '
                     'be loaded in this replay job: %s', ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue is not None:
                    # Command line value takes precedence over the source job
                    log.warn("Overriding the replay %s with the --%s value "
                             "given on the command line.",
                             option.replace('_', '-'),
                             option.replace('_', '-'))
                else:
                    setattr(args, option, replay_args[option])

        # Keeping this for compatibility.
        # TODO: Use replay_args['reference'] at some point in the future.
        if getattr(args, 'reference', None):
            log.warn('Overriding the replay test references with test '
                     'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                log.error('Source job test references data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                setattr(args, 'reference', references)

        # Reuse the source job configuration unless explicitly ignored
        if 'config' in args.replay_ignore:
            log.warn("Ignoring configuration from source job with "
                     "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        # Reuse the source job variants unless explicitly ignored
        if 'variants' in args.replay_ignore:
            log.warn("Ignoring variants from source job with "
                     "--replay-ignore.")
        else:
            variants = jobdata.retrieve_variants(resultsdir)
            if variants is None:
                log.error('Source job variants data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                # Ignore data manipulation. This is necessary, because
                # we replaced the unparsed object with parsed one. There
                # are other plugins running before/after this which might
                # want to alter the variants object.
                if (len(args.avocado_variants.data) or
                        args.avocado_variants.data.environment):
                    log.warning("Using src job Mux data only, use `--replay-"
                                "ignore variants` to override them.")
                setattr(args, "avocado_variants", variants)
                variants.ignore_new_data = True

        # Restrict the replay to tests with the requested statuses, if given
        if args.replay_teststatus:
            replay_map = self._create_replay_map(resultsdir,
                                                 args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                log.warn("Directory used in the replay source job '%s' does "
                         "not exist, using '.' instead", pwd)
Default values used in tests and plugin code.
"""

from avocado.core import data_dir
from avocado.core.settings import settings
from avocado.core.settings import SettingsError
from .qemu import path


#: The name or path of the QEMU binary.  Hardcoded default is 'qemu',
#: but will be overwritten by configuration value ("qemu_bin" under
#: section [virt.qemu.paths]) or by a dynamic (run time) search for a
#: suitable binary
QEMU_BIN = 'qemu'
try:
    # Precedence 1: explicit configuration file value
    QEMU_BIN = settings.get_value('virt.qemu.paths', 'qemu_bin')
except SettingsError:
    try:
        # Precedence 2: search the system for a suitable binary
        QEMU_BIN = path.get_qemu_binary()
    except path.QEMUCmdNotFoundError:
        # Precedence 3: keep the hardcoded 'qemu' default
        pass

#: The name or path of the QEMU binary used for the destination
#: instance when doing migration.  Hardcoded default is 'qemu', but
#: will be overwritten by configuration value ("qemu_bin" under section
#: [virt.qemu.paths]) or by a dynamic (run time) search for a suitable
#: binary
QEMU_DST_BIN = 'qemu'
try:
    QEMU_DST_BIN = settings.get_value('virt.qemu.paths', 'qemu_dst_bin')
except SettingsError:
Example #19
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        def str_or_none(arg):
            if arg is None:
                return "Could not find one"
            else:
                return arg
        run_subcommand_parser = parser.subcommands.choices.get('run', None)
        if run_subcommand_parser is None:
            return

        try:
            qemu_bin_path = standalone_test.find_default_qemu_paths()[0]
        except (RuntimeError, utils_path.CmdNotFoundError):
            qemu_bin_path = None

        qemu_nw_msg = "QEMU network option (%s). " % ", ".join(
            SUPPORTED_NET_TYPES)
        qemu_nw_msg += "Default: user"

        vt_compat_group_setup = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - VM Setup options')
        vt_compat_group_common = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Common options')
        vt_compat_group_qemu = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - QEMU options')
        vt_compat_group_libvirt = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Libvirt options')

        vt_compat_group_common.add_argument("--vt-config", action="store",
                                            dest="vt_config",
                                            help=("Explicitly choose a "
                                                  "cartesian config. "
                                                  "When choosing this, "
                                                  "some options will be "
                                                  "ignored (see options "
                                                  "below)"))
        vt_compat_group_common.add_argument("--vt-type", action="store",
                                            dest="vt_type",
                                            help=("Choose test type (%s). "
                                                  "Default: qemu" %
                                                  ", ".join(
                                                      SUPPORTED_TEST_TYPES)),
                                            default='qemu')
        arch = settings.get_value('vt.common', 'arch', default=None)
        vt_compat_group_common.add_argument("--vt-arch",
                                            help="Choose the VM architecture. "
                                            "Default: %s" % arch,
                                            default=arch)
        machine = settings.get_value('vt.common', 'machine_type',
                                     default=defaults.DEFAULT_MACHINE_TYPE)
        vt_compat_group_common.add_argument("--vt-machine-type",
                                            help="Choose the VM machine type. "
                                            "Default: %s" % machine,
                                            default=machine)
        vt_compat_group_common.add_argument("--vt-guest-os", action="store",
                                            dest="vt_guest_os",
                                            default=defaults.DEFAULT_GUEST_OS,
                                            help=("Select the guest OS to "
                                                  "be used. If --vt-config is "
                                                  "provided, this will be "
                                                  "ignored. Default: %s" %
                                                  defaults.DEFAULT_GUEST_OS))
        vt_compat_group_common.add_argument("--vt-no-filter", action="store",
                                            dest="vt_no_filter", default="",
                                            help=("List of space separated "
                                                  "'no' filters to be passed "
                                                  "to the config parser. "
                                                  " Default: ''"))
        vt_compat_group_common.add_argument("--vt-only-filter", action="store",
                                            dest="vt_only_filter", default="",
                                            help=("List of space separated "
                                                  "'only' filters to be passed"
                                                  " to the config parser. "
                                                  " Default: ''"))
        qemu_bin = settings.get_value('vt.qemu', 'qemu_bin',
                                      default=qemu_bin_path)
        vt_compat_group_qemu.add_argument("--vt-qemu-bin", action="store",
                                          dest="vt_qemu_bin",
                                          default=qemu_bin,
                                          help=("Path to a custom qemu binary "
                                                "to be tested. If --vt-config "
                                                "is provided and this flag is "
                                                "omitted, no attempt to set "
                                                "the qemu binaries will be "
                                                "made. Current: %s" %
                                                str_or_none(qemu_bin)))
        qemu_dst = settings.get_value('vt.qemu', 'qemu_dst_bin',
                                      default=qemu_bin_path)
        vt_compat_group_qemu.add_argument("--vt-qemu-dst-bin", action="store",
                                          dest="vt_dst_qemu_bin",
                                          default=qemu_dst,
                                          help=("Path to a custom qemu binary "
                                                "to be tested for the "
                                                "destination of a migration, "
                                                "overrides --vt-qemu-bin. "
                                                "If --vt-config is provided "
                                                "and this flag is omitted, "
                                                "no attempt to set the qemu "
                                                "binaries will be made. "
                                                "Current: %s" %
                                                str_or_none(qemu_dst)))
        vt_compat_group_qemu.add_argument("--vt-extra-params", nargs='*',
                                          help="List of 'key=value' pairs "
                                          "passed to cartesian parser.")
        supported_uris = ", ".join(SUPPORTED_LIBVIRT_URIS)
        uri_current = settings.get_value('vt.libvirt', 'connect_uri',
                                         default=None)
        vt_compat_group_libvirt.add_argument("--vt-connect-uri",
                                             action="store",
                                             dest="vt_connect_uri",
                                             default=uri_current,
                                             help=("Choose test connect uri "
                                                   "for libvirt (E.g: %s). "
                                                   "Current: %s" %
                                                   (supported_uris,
                                                    uri_current)))
Example #20
0
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2014
# Author: Lucas Meneghel Rodrigues <*****@*****.**>

"""
Default values used in tests and plugin code.
"""

from avocado.core import data_dir
from avocado.core.settings import settings
from avocado.core.settings import SettingsError
from avocado.virt.qemu import path

def _qemu_path_from_config(key, detect):
    """
    Resolve a qemu binary path, preferring the configuration file.

    :param key: settings key under the 'virt.qemu.paths' section.
    :param detect: zero-argument callable that auto-detects the binary path.
    :return: the configured path, the auto-detected path, or 'qemu' when
             neither the configuration entry nor the detection succeed.
    """
    try:
        return settings.get_value('virt.qemu.paths', key)
    except SettingsError:
        try:
            return detect()
        except path.QEMUCmdNotFoundError:
            return 'qemu'


qemu_bin = _qemu_path_from_config('qemu_bin', path.get_qemu_binary)
qemu_dst = _qemu_path_from_config('qemu_dst_bin', path.get_qemu_dst_binary)

try:
Example #21
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        Registers the positional test references, general job options, the
        'output and result format', 'output check arguments' and 'filtering
        parameters' argument groups, plus the loader options.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument("reference",
                            type=str,
                            default=[],
                            nargs='*',
                            metavar="TEST_REFERENCE",
                            help='List of test references (aliases or paths)')

        parser.add_argument("-d",
                            "--dry-run",
                            action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument('--force-job-id',
                            dest='unique_job_id',
                            type=str,
                            default=None,
                            help='Forces the use of a particular job ID. Used '
                            'internally when interacting with an avocado '
                            'server. You should not use this option '
                            'unless you know exactly what you\'re doing')

        parser.add_argument('--job-results-dir',
                            action='store',
                            dest='base_logdir',
                            default=None,
                            metavar='DIRECTORY',
                            help=('Forces the use of an alternate job '
                                  'results directory.'))

        parser.add_argument('--job-timeout',
                            action='store',
                            default=None,
                            metavar='SECONDS',
                            help='Set the maximum amount of time (in SECONDS) '
                            'that tests are allowed to execute. '
                            'Values <= zero means "no timeout". '
                            'You can also use suffixes, like: '
                            ' s (seconds), m (minutes), h (hours). ')

        parser.add_argument('--failfast',
                            choices=('on', 'off'),
                            help='Enable or disable the job interruption on '
                            'first failed test.')

        parser.add_argument('--keep-tmp',
                            choices=('on', 'off'),
                            default='off',
                            help='Keep job temporary files '
                            '(useful for avocado debugging). Defaults to off.')

        parser.add_argument('--ignore-missing-references',
                            choices=('on', 'off'),
                            help="Force the job execution, even if some of "
                            "the test references are not resolved to tests.")

        # The --sysinfo default comes from the configuration file; it is
        # normalized to the same 'on'/'off' strings used by choices.
        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo',
                            choices=('on', 'off'),
                            default=sysinfo_default,
                            help="Enable or disable "
                            "system information (hardware details, profilers, "
                            "etc.). Current:  %(default)s")

        parser.add_argument("--execution-order",
                            choices=("tests-per-variant", "variants-per-test"),
                            help="Defines the order of iterating through test "
                            "suite and test variants")

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument('-s',
                                   '--silent',
                                   action="store_true",
                                   default=argparse.SUPPRESS,
                                   help='Silence stdout')

        parser.output.add_argument('--show-job-log',
                                   action='store_true',
                                   default=False,
                                   help="Display only the job "
                                   "log on stdout. Useful for test debugging "
                                   "purposes. No output will be displayed if "
                                   "you also specify --silent")

        parser.output.add_argument("--store-logging-stream",
                                   nargs="*",
                                   default=[],
                                   metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        parser.output.add_argument("--log-test-data-directories",
                                   action="store_true",
                                   help="Logs the possible data directories "
                                   "for each test. This is helpful when "
                                   "writing new tests and not being sure "
                                   "where to put data files. Look for \""
                                   "Test data directories\" in your test log")

        out_check = parser.add_argument_group('output check arguments')

        out_check.add_argument('--output-check-record',
                               choices=('none', 'stdout', 'stderr', 'both',
                                        'combined', 'all'),
                               help="Record the output produced by each test "
                               "(from stdout and stderr) into both the "
                               "current executing result and into "
                               "reference files.  Reference files are "
                               "used on subsequent runs to determine if "
                               "the test produced the expected output or "
                               "not, and the current executing result is "
                               "used to check against a previously "
                               "recorded reference file.  Valid values: "
                               "'none' (to explicitly disable all "
                               "recording) 'stdout' (to record standard "
                               "output *only*), 'stderr' (to record "
                               "standard error *only*), 'both' (to record"
                               " standard output and error in separate "
                               "files), 'combined' (for standard output "
                               "and error in a single file). 'all' is "
                               "also a valid but deprecated option that "
                               "is a synonym of 'both'.  This option "
                               "does not have a default value, but the "
                               "Avocado test runner will record the "
                               "test under execution in the most suitable"
                               " way unless it's explicitly disabled with"
                               " value 'none'")

        out_check.add_argument('--output-check',
                               choices=('on', 'off'),
                               default='on',
                               help="Enable or disable test output (stdout/"
                               "stderr) check. If this option is off, no "
                               "output will be checked, even if there are "
                               "reference files present for the test. "
                               "Current: on (output check enabled)")

        # Loader-specific options are registered by the loader module itself.
        loader.add_loader_options(parser)

        filtering = parser.add_argument_group('filtering parameters')
        filtering.add_argument('-t',
                               '--filter-by-tags',
                               metavar='TAGS',
                               action='append',
                               help='Filter INSTRUMENTED tests based on '
                               '":avocado: tags=tag1,tag2" notation in '
                               'their class docstring')
        filtering.add_argument('--filter-by-tags-include-empty',
                               action='store_true',
                               default=False,
                               help=('Include all tests without tags during '
                                     'filtering. This effectively means they '
                                     'will be kept in the test suite found '
                                     'previously to filtering.'))
Example #22
0
    def run(self, args):
        """
        Prepare a replay of a previous job by mutating ``args`` in place.

        Fills in ``args.url``, ``args.multiplex_files``,
        ``args.replay_sourcejob`` and (optionally) ``args.replay_map`` from
        the source job's results directory, and exits the process with
        ``AVOCADO_JOB_FAIL`` when required data cannot be found.

        :param args: argparse-like namespace of the current invocation.
        """
        if getattr(args, 'replay_jobid', None) is None:
            # Not a replay run; nothing to do.
            return

        view = output.View()

        # Replaying on remote hosts is not supported.
        if args.remote_hostname is not None:
            msg = "Currently we don't replay jobs in remote hosts."
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        # Locate the source job results directory: either given explicitly
        # (--replay-datadir) or derived from the configured logs directory
        # and the replay job id.
        if args.replay_datadir is not None:
            resultsdir = args.replay_datadir
        else:
            logs_dir = settings.get_value('datadir.paths', 'logs_dir',
                                          default=None)
            self.logdir = os.path.expanduser(logs_dir)
            resultsdir = replay.get_resultsdir(self.logdir, args.replay_jobid)

        # resultsdir can only be None on the get_resultsdir() branch above,
        # so self.logdir is guaranteed to be set when this message is built.
        if resultsdir is None:
            msg = "can't find job results directory in '%s'" % self.logdir
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        sourcejob = replay.get_id(os.path.join(resultsdir, 'id'),
                                  args.replay_jobid)
        if sourcejob is None:
            msg = "can't find matching job id '%s' in '%s' directory." % \
                  (args.replay_jobid, resultsdir)
            view.notify(event='error', msg=(msg))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        setattr(args, 'replay_sourcejob', sourcejob)

        # Test urls: urls given on the command line win over the ones
        # recorded in the source job.
        if getattr(args, 'url', None):
            msg = 'Overriding the replay urls with urls provided in '\
                  'command line.'
            view.notify(event='warning', msg=(msg))
        else:
            urls = replay.retrieve_urls(resultsdir)
            if urls is None:
                msg = 'Source job urls data not found. Aborting.'
                view.notify(event='error', msg=(msg))
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                setattr(args, 'url', urls)

        # Configuration: reused from the source job unless explicitly
        # skipped with `--replay-ignore config`.
        if args.replay_ignore and 'config' in args.replay_ignore:
            msg = "Ignoring configuration from source job with " \
                  "--replay-ignore."
            view.notify(event='warning', msg=(msg))
        else:
            self.load_config(resultsdir)

        # Multiplex data: command-line files win, then the recorded ones,
        # unless skipped with `--replay-ignore mux`.
        if args.replay_ignore and 'mux' in args.replay_ignore:
            msg = "Ignoring multiplex from source job with --replay-ignore."
            view.notify(event='warning', msg=(msg))
        else:
            if getattr(args, 'multiplex_files', None) is not None:
                msg = 'Overriding the replay multiplex with '\
                      '--multiplex-file.'
                view.notify(event='warning', msg=(msg))
                # Use absolute paths to avoid problems with os.chdir
                args.multiplex_files = [os.path.abspath(_)
                                        for _ in args.multiplex_files]
            else:
                mux = replay.retrieve_mux(resultsdir)
                if mux is None:
                    msg = 'Source job multiplex data not found. Aborting.'
                    view.notify(event='error', msg=(msg))
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    setattr(args, "multiplex_files", mux)

        # Optionally restrict the replay to tests that finished with the
        # given status(es) in the source job.
        if args.replay_teststatus:
            replay_map = replay.retrieve_replay_map(resultsdir,
                                                    args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to discover test urls properly
        pwd = replay.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                view.notify(event="warning", msg="Directory used in the replay"
                            " source job '%s' does not exist, using '.' "
                            "instead" % pwd)
Example #23
0
    import imp
    setup_modules = imp.load_source('autotest_setup_modules',
                                    setup_modules_path)
    setup_modules.setup(base_path=client_dir,
                        root_module_name="autotest.client")

# The code below is used by this plugin to find the virt test directory,
# so that it can load the virttest python lib, used by the plugin code.
# If the user doesn't provide the proper configuration, the plugin will
# fail to load.
_env_virt_test_path = os.environ.get('VIRT_TEST_PATH')
if _env_virt_test_path is not None:
    VIRT_TEST_PATH = _env_virt_test_path
else:
    # The environment variable wins; only fall back to the configuration
    # file when it is not set at all.
    VIRT_TEST_PATH = settings.get_value(section='virt_test',
                                        key='virt_test_path', default=None)

if VIRT_TEST_PATH is not None:
    sys.path.append(os.path.expanduser(VIRT_TEST_PATH))

from virttest.standalone_test import SUPPORTED_TEST_TYPES
from virttest.defaults import DEFAULT_GUEST_OS
from virttest import data_dir


# Directory the test providers are downloaded into; checked right below to
# ensure at least one provider has been fetched.
_PROVIDERS_DOWNLOAD_DIR = os.path.join(data_dir.get_test_providers_dir(),
                                       'downloads')

try:
    assert len(os.listdir(_PROVIDERS_DOWNLOAD_DIR)) != 0
except (OSError, AssertionError):
Example #24
0
def add_basic_vt_options(parser):
    """
    Register the common (test-type agnostic) vt options on ``parser``.

    :param parser: an argparse parser (or argument group) to add options to.
    """
    parser.add_argument("--vt-config", action="store", dest="vt_config",
                        help="Explicitly choose a cartesian config. When "
                             "choosing this, some options will be ignored "
                             "(see options below)")
    type_help = ("Choose test type (%s). Default: %%(default)s"
                 % ", ".join(SUPPORTED_TEST_TYPES))
    parser.add_argument("--vt-type", action="store", dest="vt_type",
                        default='qemu', help=type_help)
    default_arch = settings.get_value('vt.common', 'arch', default=None)
    parser.add_argument("--vt-arch", default=default_arch,
                        help="Choose the VM architecture. "
                             "Default: %(default)s")
    default_machine = settings.get_value('vt.common', 'machine_type',
                                         default=defaults.DEFAULT_MACHINE_TYPE)
    parser.add_argument("--vt-machine-type", default=default_machine,
                        help="Choose the VM machine type. "
                             "Default: %(default)s")
    parser.add_argument("--vt-guest-os", action="store", dest="vt_guest_os",
                        default=defaults.DEFAULT_GUEST_OS,
                        help="Select the guest OS to be used. If --vt-config "
                             "is provided, this will be ignored. "
                             "Default: %(default)s")
    parser.add_argument("--vt-no-filter", action="store", dest="vt_no_filter",
                        default="",
                        help="List of space separated 'no' filters to be "
                             "passed to the config parser.  "
                             "Default: '%(default)s'")
    parser.add_argument("--vt-only-filter", action="store",
                        dest="vt_only_filter", default="",
                        help="List of space separated 'only' filters to be "
                             "passed to the config parser.  "
                             "Default: '%(default)s'")
    parser.add_argument("--vt-filter-default-filters", nargs='+',
                        help="Allows to selectively skip certain default "
                             "filters. This uses directly 'tests-shared.cfg' "
                             "and instead of '$provider/tests.cfg' and "
                             "applies following lists of default filters, "
                             "unless they are specified as arguments: "
                             "no_9p_export,no_virtio_rng,no_pci_assignable,"
                             "smallpages,default_bios,bridge,image_backend,"
                             "multihost. This can be used to eg. run "
                             "hugepages tests by filtering 'smallpages' via "
                             "this option.")
Example #25
0
 def get_settings_value(section, key, **kwargs):
     """
     Retrieve a configuration value, delegating to settings.get_value.

     :param section: configuration section name.
     :param key: configuration key inside that section.
     :param kwargs: extra keyword arguments forwarded verbatim to
                    settings.get_value (e.g. key_type, default).
     :return: the value returned by settings.get_value.
     """
     return settings.get_value(section, key, **kwargs)
Example #26
0
 def set_opt_from_settings(opt, section, key, **kwargs):
     """Seed an option's value from the matching configuration file entry."""
     # The option namespace mirrors the config location: '<section>.<key>'.
     set_opt(opt,
             '%s.%s' % (section, key),
             settings.get_value(section, key, **kwargs))
Example #27
0
    def run(self, args):
        """
        Set up the current job as a replay of a previous job.

        Fills ``args`` in (references, whitelisted options, mux data,
        replay map, ...) from the data stored in the source job's results
        directory, exiting with an error code when options conflict or
        required data is missing.

        :param args: argparse-like namespace of the current invocation.
        """
        if getattr(args, 'replay_jobid', None) is None:
            # Not a replay run; nothing to do.
            return

        log = logging.getLogger("avocado.app")

        # Reject option combinations that cannot be honored before doing
        # any actual work.
        err = None
        if args.replay_teststatus and 'mux' in args.replay_ignore:
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore mux`.")
        elif args.replay_teststatus and args.reference:
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif args.remote_hostname:
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            log.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Locate the source job results directory, from --logdir or from
        # the configured logs location.
        if getattr(args, 'logdir', None) is not None:
            logdir = args.logdir
        else:
            logdir = settings.get_value(section='datadir.paths',
                                        key='logs_dir', key_type='path',
                                        default=None)
        try:
            resultsdir = jobdata.get_resultsdir(logdir, args.replay_jobid)
        except ValueError as exception:
            # str(exception) instead of the Python-2-only ``.message``
            # attribute, which raises AttributeError on Python 3.
            log.error(str(exception))
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        if resultsdir is None:
            log.error("Can't find job results directory in '%s'", logdir)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory."
                   % (args.replay_jobid, resultsdir))
            log.error(msg)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Restore a whitelisted subset of the source job's options, unless
        # the user overrode them on this invocation's command line.
        replay_args = jobdata.retrieve_args(resultsdir)
        whitelist = ['loaders',
                     'external_runner',
                     'external_runner_testdir',
                     'external_runner_chdir',
                     'failfast']
        if replay_args is None:
            log.warning('Source job args data not found. These options will '
                        'not be loaded in this replay job: %s',
                        ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue is not None:
                    log.warning("Overriding the replay %s with the --%s "
                                "value given on the command line.",
                                option.replace('_', '-'),
                                option.replace('_', '-'))
                else:
                    setattr(args, option, replay_args[option])

        # Keeping this for compatibility.
        # TODO: Use replay_args['reference'] at some point in the future.
        if getattr(args, 'reference', None):
            log.warning('Overriding the replay test references with test '
                        'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                log.error('Source job test references data not found. '
                          'Aborting.')
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                setattr(args, 'reference', references)

        # Configuration: reused from the source job unless explicitly
        # skipped with `--replay-ignore config`.
        if 'config' in args.replay_ignore:
            log.warning("Ignoring configuration from source job with "
                        "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        # Multiplex data: reused from the source job unless explicitly
        # skipped with `--replay-ignore mux`.
        if 'mux' in args.replay_ignore:
            log.warning("Ignoring multiplex from source job with "
                        "--replay-ignore.")
        else:
            mux = jobdata.retrieve_mux(resultsdir)
            if mux is None:
                log.error('Source job multiplex data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                # Ignore data manipulation. This is necessary, because
                # we replaced the unparsed object with parsed one. There
                # are other plugins running before/after this which might
                # want to alter the mux object.
                if len(args.mux.data) or args.mux.data.environment:
                    log.warning("Using src job Mux data only, use `--replay-"
                                "ignore mux` to override them.")
                setattr(args, "mux", mux)
                mux.data_merge = ignore_call
                mux.data_inject = ignore_call

        # Optionally restrict the replay to tests that finished with the
        # given status(es) in the source job.
        if args.replay_teststatus:
            replay_map = self._create_replay_map(resultsdir,
                                                 args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                log.warning("Directory used in the replay source job '%s' "
                            "does not exist, using '.' instead", pwd)
Example #28
0
    def run_test(self, job, result, test_factory, queue, summary, job_deadline=0):
        """
        Run a test instance inside a subprocess.

        :param job: job instance; used for logging and for its
                    result events dispatcher.
        :param result: aggregate result the finished test state is checked
                       into (via ``result.check_test``).
        :param test_factory: Test factory (test class and parameters).
        :type test_factory: tuple of :class:`avocado.core.test.Test` and dict.
        :param queue: Multiprocess queue.
        :type queue: :class`multiprocessing.Queue` instance.
        :param summary: Contains types of test failures.
        :type summary: set.
        :param job_deadline: Maximum time to execute.
        :type job_deadline: int.
        """
        proc = None
        # Serializes handling of SIGTSTP so overlapping ctrl+z deliveries
        # don't interleave the stop/resume bookkeeping below.
        sigtstp = multiprocessing.Lock()

        def sigtstp_handler(signum, frame):     # pylint: disable=W0613
            """ SIGSTOP all test processes on SIGTSTP """
            if not proc:    # Ignore ctrl+z when proc not yet started
                return
            with sigtstp:
                msg = "ctrl+z pressed, %%s test (%s)" % proc.pid
                app_log_msg = '\n%s' % msg
                if self.sigstopped:
                    APP_LOG.info(app_log_msg, "resuming")
                    TEST_LOG.info(msg, "resuming")
                    process.kill_process_tree(proc.pid, signal.SIGCONT, False)
                    self.sigstopped = False
                else:
                    APP_LOG.info(app_log_msg, "stopping")
                    TEST_LOG.info(msg, "stopping")
                    process.kill_process_tree(proc.pid, signal.SIGSTOP, False)
                    self.sigstopped = True

        proc = multiprocessing.Process(target=self._run_test,
                                       args=(job, result, test_factory, queue,))
        test_status = TestStatus(job, queue)

        cycle_timeout = 1
        time_started = time.time()
        # SIGTSTP is ignored while the child starts and the handler is
        # installed only afterwards, so ctrl+z during startup is dropped
        # (the handler itself also ignores it until proc is set).
        signal.signal(signal.SIGTSTP, signal.SIG_IGN)
        proc.start()
        signal.signal(signal.SIGTSTP, sigtstp_handler)
        test_status.wait_for_early_status(proc, 60)

        # At this point, the test is already initialized and we know
        # for sure if there's a timeout set.
        timeout = test_status.early_status.get('timeout')
        timeout = float(timeout or self.DEFAULT_TIMEOUT)

        # Effective deadline: the earlier of the per-test timeout and the
        # job-wide deadline (when one is set).
        test_deadline = time_started + timeout
        if job_deadline is not None and job_deadline > 0:
            deadline = min(test_deadline, job_deadline)
        else:
            deadline = test_deadline

        ctrl_c_count = 0
        ignore_window = 2.0
        ignore_time_started = time.time()
        stage_1_msg_displayed = False
        stage_2_msg_displayed = False
        first = 0.01
        step = 0.01
        abort_reason = None
        result_dispatcher = job.result_events_dispatcher

        # Supervise the subprocess: poll for status/liveness, enforce the
        # deadline, and escalate ctrl+c handling (SIGINT, then SIGKILL).
        while True:
            try:
                if time.time() >= deadline:
                    abort_reason = "Timeout reached"
                    try:
                        os.kill(proc.pid, signal.SIGTERM)
                    except OSError:
                        pass
                    break
                wait.wait_for(lambda: not queue.empty() or not proc.is_alive(),
                              cycle_timeout, first, step)
                if test_status.interrupt:
                    break
                if proc.is_alive():
                    if ctrl_c_count == 0:
                        if (test_status.status.get('running') or
                                self.sigstopped):
                            result_dispatcher.map_method('test_progress',
                                                         False)
                        else:
                            result_dispatcher.map_method('test_progress', True)
                else:
                    break
            except KeyboardInterrupt:
                time_elapsed = time.time() - ignore_time_started
                ctrl_c_count += 1
                if ctrl_c_count == 1:
                    # First ctrl+c: ask the test to stop (SIGINT) and give
                    # it `ignore_window` seconds to finish gracefully.
                    if not stage_1_msg_displayed:
                        abort_reason = "Interrupted by ctrl+c"
                        job.log.debug("\nInterrupt requested. Waiting %d "
                                      "seconds for test to finish "
                                      "(ignoring new Ctrl+C until then)",
                                      ignore_window)
                        stage_1_msg_displayed = True
                    ignore_time_started = time.time()
                    process.kill_process_tree(proc.pid, signal.SIGINT)
                if (ctrl_c_count > 1) and (time_elapsed > ignore_window):
                    # Repeated ctrl+c after the grace window: force-kill.
                    if not stage_2_msg_displayed:
                        abort_reason = "Interrupted by ctrl+c (multiple-times)"
                        job.log.debug("Killing test subprocess %s",
                                      proc.pid)
                        stage_2_msg_displayed = True
                    process.kill_process_tree(proc.pid, signal.SIGKILL)

        # Get/update the test status (decrease timeout on abort)
        if abort_reason:
            finish_deadline = time.time() + settings.get_value(
                'runner.timeout',
                'after_interrupted',
                key_type=int,
                default=defaults.TIMEOUT_AFTER_INTERRUPTED)
        else:
            finish_deadline = deadline
        test_state = test_status.finish(proc, time_started, step,
                                        finish_deadline,
                                        result_dispatcher)

        # Try to log the timeout reason to test's results and update test_state
        if abort_reason:
            test_state = add_runner_failure(test_state, "INTERRUPTED",
                                            abort_reason)

        # don't process other tests from the list
        if ctrl_c_count > 0:
            job.log.debug('')

        # Make sure the test status is correct
        if test_state.get('status') not in status.user_facing_status:
            test_state = add_runner_failure(test_state, "ERROR", "Test reports"
                                            " unsupported test status.")

        result.check_test(test_state)
        result_dispatcher.map_method('end_test', result, test_state)
        if test_state['status'] == "INTERRUPTED":
            summary.add("INTERRUPTED")
        elif not mapping[test_state['status']]:
            summary.add("FAIL")

            if job.config.get('failfast', 'off') == 'on':
                summary.add("INTERRUPTED")
                job.interrupted_reason = "Interrupting job (failfast)."
                return False

        if ctrl_c_count > 0:
            return False
        return True
Example #29
0
    def __init__(self, options):
        """
        Parse the given options and initialize instance attributes.
        """
        self.options = options
        # A few options of the original virt-test runner make no sense
        # under avocado (which implements a better version of those
        # features), so pin them to fixed values instead of exposing them.
        self.options.vt_verbose = False
        self.options.vt_log_level = logging.DEBUG
        self.options.vt_console_level = logging.DEBUG
        self.options.vt_no_downloads = False
        self.options.vt_selinux_setup = False

        # Everything else comes from the config file: it stays
        # configurable without an overwhelming number of options.
        get = settings.get_value
        # setup section
        self.options.vt_backup_image_before_test = get(
            'vt.setup', 'backup_image_before_test', key_type=bool,
            default=True)
        self.options.vt_restore_image_after_test = get(
            'vt.setup', 'restore_image_after_test', key_type=bool,
            default=True)
        self.options.vt_keep_guest_running = get(
            'vt.setup', 'keep_guest_running', key_type=bool, default=False)
        # common section
        self.options.vt_data_dir = get('vt.common', 'data_dir', default=None)
        self.options.vt_type_specific = get(
            'vt.common', 'type_specific_only', key_type=bool, default=False)
        self.options.vt_mem = get('vt.common', 'mem', key_type=int,
                                  default=1024)
        self.options.vt_nettype = get('vt.common', 'nettype', default=None)
        self.options.vt_netdst = get('vt.common', 'netdst', default='virbr0')
        # qemu section
        self.options.vt_accel = get('vt.qemu', 'accel', default='kvm')
        self.options.vt_vhost = get('vt.qemu', 'vhost', default='off')
        self.options.vt_monitor = get('vt.qemu', 'monitor', default='human')
        self.options.vt_smp = get('vt.qemu', 'smp', default='2')
        self.options.vt_image_type = get('vt.qemu', 'image_type',
                                         default='qcow2')
        self.options.vt_nic_model = get('vt.qemu', 'nic_model',
                                        default='virtio_net')
        self.options.vt_disk_bus = get('vt.qemu', 'disk_bus',
                                       default='virtio_blk')
        self.options.vt_qemu_sandbox = get('vt.qemu', 'sandbox', default='on')
        self.options.vt_qemu_defconfig = get('vt.qemu', 'defconfig',
                                             default='yes')
        self.options.vt_malloc_perturb = get('vt.qemu', 'malloc_perturb',
                                             default='yes')

        # debug section
        self.options.vt_no_cleanup = get('vt.debug', 'no_cleanup',
                                         key_type=bool, default=False)

        self.cartesian_parser = None
Example #30
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument("reference",
                            type=str,
                            default=[],
                            nargs='*',
                            metavar="TEST_REFERENCE",
                            help='List of test references (aliases or paths)')

        parser.add_argument("-d",
                            "--dry-run",
                            action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument('--force-job-id',
                            dest='unique_job_id',
                            type=str,
                            default=None,
                            help='Forces the use of a particular job ID. Used '
                            'internally when interacting with an avocado '
                            'server. You should not use this option '
                            'unless you know exactly what you\'re doing')

        parser.add_argument('--job-results-dir',
                            action='store',
                            dest='logdir',
                            default=None,
                            metavar='DIRECTORY',
                            help=('Forces to use of an alternate job '
                                  'results directory.'))

        parser.add_argument('--job-timeout',
                            action='store',
                            default=None,
                            metavar='SECONDS',
                            help='Set the maximum amount of time (in SECONDS) '
                            'that tests are allowed to execute. '
                            'Values <= zero means "no timeout". '
                            'You can also use suffixes, like: '
                            ' s (seconds), m (minutes), h (hours). ')

        parser.add_argument('--failfast',
                            choices=('on', 'off'),
                            help='Enable or disable the job interruption on '
                            'first failed test.')

        # The --sysinfo default comes from the config file, rendered as
        # the same 'on'/'off' strings the option itself accepts.
        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo',
                            choices=('on', 'off'),
                            default=sysinfo_default,
                            help="Enable or disable "
                            "system information (hardware details, profilers, "
                            "etc.). Current:  %(default)s")

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument('-s',
                                   '--silent',
                                   action="store_true",
                                   default=argparse.SUPPRESS,
                                   help='Silence stdout')

        parser.output.add_argument('--show-job-log',
                                   action='store_true',
                                   default=False,
                                   help="Display only the job "
                                   "log on stdout. Useful for test debugging "
                                   "purposes. No output will be displayed if "
                                   "you also specify --silent")

        parser.output.add_argument("--store-logging-stream",
                                   nargs="*",
                                   default=[],
                                   metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        out_check = parser.add_argument_group('output check arguments')

        # Fix: the help text used to describe the 'stdout' choice as
        # "record only stderr"; it records only stdout.
        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help="Record output streams of your tests "
                               "to reference files (valid options: none (do "
                               "not record output streams), all (record both "
                               "stdout and stderr), stdout (record only "
                               "stdout), stderr (record only stderr). "
                               'Current: %(default)s')

        out_check.add_argument('--output-check',
                               choices=('on', 'off'),
                               default='on',
                               help="Enable or disable test output (stdout/"
                               "stderr) check. If this option is off, no "
                               "output will be checked, even if there are "
                               "reference files present for the test. "
                               "Current: on (output check enabled)")

        loader.add_loader_options(parser)

        mux = parser.add_argument_group('test parameters')
        mux.add_argument('--filter-only',
                         nargs='*',
                         default=[],
                         help='Filter only path(s) from multiplexing')
        mux.add_argument('--filter-out',
                         nargs='*',
                         default=[],
                         help='Filter out path(s) from multiplexing')
        mux.add_argument('--mux-path',
                         nargs='*',
                         default=None,
                         help="List of paths used to determine path "
                         "priority when querying for parameters")
        mux.add_argument('--mux-inject',
                         default=[],
                         nargs='*',
                         help="Inject [path:]key:node values into the "
                         "final multiplex tree.")
Example #31
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument('url', type=str, default=[], nargs='*',
                            help='List of test IDs (aliases or paths)')

        parser.add_argument("-d", "--dry-run", action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument('-z', '--archive', action='store_true', default=False,
                            help='Archive (ZIP) files generated by tests')

        parser.add_argument('--force-job-id', dest='unique_job_id',
                            type=str, default=None,
                            help=('Forces the use of a particular job ID. Used '
                                  'internally when interacting with an avocado '
                                  'server. You should not use this option '
                                  'unless you know exactly what you\'re doing'))

        parser.add_argument('--job-results-dir', action='store',
                            dest='logdir', default=None, metavar='DIRECTORY',
                            help=('Forces to use of an alternate job '
                                  'results directory.'))

        parser.add_argument('--job-timeout', action='store',
                            default=None, metavar='SECONDS',
                            help=('Set the maximum amount of time (in SECONDS) that '
                                  'tests are allowed to execute. '
                                  'Note that zero means "no timeout". '
                                  'You can also use suffixes, like: '
                                  ' s (seconds), m (minutes), h (hours). '))

        # The --sysinfo default comes from the config file, rendered as
        # the same 'on'/'off' strings the option itself accepts.
        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo', choices=('on', 'off'), default=sysinfo_default,
                            help=('Enable or disable system information '
                                  '(hardware details, profilers, etc.). '
                                  'Current:  %(default)s'))

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument(
            '-s', '--silent', action="store_true", default=argparse.SUPPRESS,
            help='Silence stdout')

        parser.output.add_argument(
            '--show-job-log', action='store_true', default=False,
            help=('Display only the job log on stdout. Useful '
                  'for test debugging purposes. No output will '
                  'be displayed if you also specify --silent'))

        parser.output.add_argument("--store-logging-stream", nargs="*",
                                   default=[], metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        out_check = parser.add_argument_group('output check arguments')

        # Fix: the help text used to describe the 'stdout' choice as
        # "record only stderr"; it records only stdout.
        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help=('Record output streams of your tests '
                                     'to reference files (valid options: '
                                     'none (do not record output streams), '
                                     'all (record both stdout and stderr), '
                                     'stdout (record only stdout), '
                                     'stderr (record only stderr). '
                                     'Current: %(default)s'))

        out_check.add_argument('--output-check', choices=('on', 'off'),
                               default='on',
                               help=('Enable or disable test output (stdout/stderr) check. '
                                     'If this option is off, no output will '
                                     'be checked, even if there are reference files '
                                     'present for the test. '
                                     'Current: on (output check enabled)'))

        loader.add_loader_options(parser)

        # Multiplexer options are only meaningful when the multiplexer
        # support is available in this build.
        if multiplexer.MULTIPLEX_CAPABLE:
            mux = parser.add_argument_group('multiplexer use on test execution')
            mux.add_argument('-m', '--multiplex', nargs='*', dest='multiplex_files',
                             default=None, metavar='FILE',
                             help='Location of one or more Avocado multiplex (.yaml) '
                             'FILE(s) (order dependent)')
            mux.add_argument('--multiplex-files', nargs='*',
                             default=None, metavar='FILE',
                             help='DEPRECATED: please use --multiplex instead')
            mux.add_argument('--filter-only', nargs='*', default=[],
                             help='Filter only path(s) from multiplexing')
            mux.add_argument('--filter-out', nargs='*', default=[],
                             help='Filter out path(s) from multiplexing')
            mux.add_argument('--mux-path', nargs='*', default=None,
                             help="List of paths used to determine path "
                             "priority when querying for parameters")
            mux.add_argument('--mux-inject', default=[], nargs='*',
                             help="Inject [path:]key:node values into the "
                             "final multiplex tree.")
Example #32
0
    def configure(self, parser):
        """
        Extend the "run" subcommand parser with Virt-Test compat options.

        :param parser: Main test runner parser; its 'run' subcommand
                       parser (if registered) receives the VT option
                       groups.
        """
        def str_or_none(arg):
            # Render a possibly-None value for display in help text.
            if arg is None:
                return "Could not find one"
            else:
                return arg

        # This plugin only decorates the "run" subcommand; bail out
        # quietly when that subcommand is not registered.
        run_subcommand_parser = parser.subcommands.choices.get('run', None)
        if run_subcommand_parser is None:
            return

        # Probe for a default qemu binary to seed option defaults;
        # None when no binary can be located.
        try:
            qemu_bin_path = standalone_test.find_default_qemu_paths()[0]
        except (RuntimeError, utils_path.CmdNotFoundError):
            qemu_bin_path = None

        # NOTE(review): qemu_nw_msg is built but never used within this
        # method -- presumably consumed by code outside this view, or
        # dead; confirm before removing.
        qemu_nw_msg = "QEMU network option (%s). " % ", ".join(
            SUPPORTED_NET_TYPES)
        qemu_nw_msg += "Default: user"

        # One argument group per VT option category.
        vt_compat_group_setup = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - VM Setup options')
        vt_compat_group_common = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Common options')
        vt_compat_group_qemu = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - QEMU options')
        vt_compat_group_libvirt = run_subcommand_parser.add_argument_group(
            'Virt-Test compat layer - Libvirt options')

        vt_compat_group_common.add_argument("--vt-config",
                                            action="store",
                                            dest="vt_config",
                                            help=("Explicitly choose a "
                                                  "cartesian config. "
                                                  "When choosing this, "
                                                  "some options will be "
                                                  "ignored (see options "
                                                  "below)"))
        vt_compat_group_common.add_argument(
            "--vt-type",
            action="store",
            dest="vt_type",
            help=("Choose test type (%s). "
                  "Default: qemu" % ", ".join(SUPPORTED_TEST_TYPES)),
            default='qemu')
        # Architecture/machine defaults come from the config file.
        arch = settings.get_value('vt.common', 'arch', default=None)
        vt_compat_group_common.add_argument("--vt-arch",
                                            help="Choose the VM architecture. "
                                            "Default: %s" % arch,
                                            default=arch)
        machine = settings.get_value('vt.common',
                                     'machine_type',
                                     default=defaults.DEFAULT_MACHINE_TYPE)
        vt_compat_group_common.add_argument("--vt-machine-type",
                                            help="Choose the VM machine type. "
                                            "Default: %s" % machine,
                                            default=machine)
        vt_compat_group_common.add_argument("--vt-guest-os",
                                            action="store",
                                            dest="vt_guest_os",
                                            default=defaults.DEFAULT_GUEST_OS,
                                            help=("Select the guest OS to "
                                                  "be used. If --vt-config is "
                                                  "provided, this will be "
                                                  "ignored. Default: %s" %
                                                  defaults.DEFAULT_GUEST_OS))
        vt_compat_group_common.add_argument("--vt-no-filter",
                                            action="store",
                                            dest="vt_no_filter",
                                            default="",
                                            help=("List of space separated "
                                                  "'no' filters to be passed "
                                                  "to the config parser. "
                                                  "If --vt-config is "
                                                  "provided, this will be "
                                                  "ignored. Default: ''"))
        # qemu binary defaults: config file value wins, falling back to
        # the probed qemu_bin_path above (which may be None).
        qemu_bin = settings.get_value('vt.qemu',
                                      'qemu_bin',
                                      default=qemu_bin_path)
        vt_compat_group_qemu.add_argument("--vt-qemu-bin",
                                          action="store",
                                          dest="vt_qemu_bin",
                                          default=qemu_bin,
                                          help=("Path to a custom qemu binary "
                                                "to be tested. If --vt-config "
                                                "is provided and this flag is "
                                                "omitted, no attempt to set "
                                                "the qemu binaries will be "
                                                "made. Current: %s" %
                                                str_or_none(qemu_bin)))
        qemu_dst = settings.get_value('vt.qemu',
                                      'qemu_dst_bin',
                                      default=qemu_bin_path)
        vt_compat_group_qemu.add_argument("--vt-qemu-dst-bin",
                                          action="store",
                                          dest="vt_dst_qemu_bin",
                                          default=qemu_dst,
                                          help=("Path to a custom qemu binary "
                                                "to be tested for the "
                                                "destination of a migration, "
                                                "overrides --vt-qemu-bin. "
                                                "If --vt-config is provided "
                                                "and this flag is omitted, "
                                                "no attempt to set the qemu "
                                                "binaries will be made. "
                                                "Current: %s" %
                                                str_or_none(qemu_dst)))
        vt_compat_group_qemu.add_argument("--vt-extra-params",
                                          nargs='*',
                                          help="List of 'key=value' pairs "
                                          "passed to cartesian parser.")
        supported_uris = ", ".join(SUPPORTED_LIBVIRT_URIS)
        uri_current = settings.get_value('vt.libvirt',
                                         'connect_uri',
                                         default=None)
        vt_compat_group_libvirt.add_argument(
            "--vt-connect-uri",
            action="store",
            dest="vt_connect_uri",
            default=uri_current,
            help=("Choose test connect uri "
                  "for libvirt (E.g: %s). "
                  "Current: %s" % (supported_uris, uri_current)))
Example #33
0
 def post(self, job):
     """
     Run the scripts configured for the 'post' (job finished) hook.
     """
     # Script location is configurable, with a system-wide default dir.
     scripts_dir = settings.get_value(key="post",
                                      section=CONFIG_SECTION,
                                      key_type='path',
                                      default="/etc/avocado/scripts/job/post.d/")
     self._run_scripts('post', scripts_dir, job)
Example #34
0
    def __init__(self, options):
        """
        Parse the given options and initialize instance attributes.
        """
        self.options = options
        # The original virt-test runner exposed a few options for which
        # avocado implements a better equivalent, so they are pinned to
        # fixed values here rather than exposed again.
        self.options.vt_verbose = False
        self.options.vt_log_level = logging.DEBUG
        self.options.vt_console_level = logging.DEBUG
        self.options.vt_no_downloads = False
        self.options.vt_selinux_setup = False

        # The remaining values are read from the config file: they stay
        # configurable without flooding the command line with options.
        # Each entry: (attribute, section, key, extra get_value kwargs).
        config_spec = (
            # setup section
            ('vt_backup_image_before_test', 'vt.setup',
             'backup_image_before_test', {'key_type': bool, 'default': True}),
            ('vt_restore_image_after_test', 'vt.setup',
             'restore_image_after_test', {'key_type': bool, 'default': True}),
            ('vt_keep_guest_running', 'vt.setup', 'keep_guest_running',
             {'key_type': bool, 'default': False}),
            # common section
            ('vt_data_dir', 'vt.common', 'data_dir', {'default': None}),
            ('vt_type_specific', 'vt.common', 'type_specific_only',
             {'key_type': bool, 'default': False}),
            ('vt_mem', 'vt.common', 'mem',
             {'key_type': int, 'default': 1024}),
            ('vt_nettype', 'vt.common', 'nettype', {'default': None}),
            ('vt_netdst', 'vt.common', 'netdst', {'default': 'virbr0'}),
            # qemu section
            ('vt_accel', 'vt.qemu', 'accel', {'default': 'kvm'}),
            ('vt_vhost', 'vt.qemu', 'vhost', {'default': 'off'}),
            ('vt_monitor', 'vt.qemu', 'monitor', {'default': 'human'}),
            ('vt_smp', 'vt.qemu', 'smp', {'default': '2'}),
            ('vt_image_type', 'vt.qemu', 'image_type', {'default': 'qcow2'}),
            ('vt_nic_model', 'vt.qemu', 'nic_model',
             {'default': 'virtio_net'}),
            ('vt_disk_bus', 'vt.qemu', 'disk_bus',
             {'default': 'virtio_blk'}),
            ('vt_qemu_sandbox', 'vt.qemu', 'sandbox', {'default': 'on'}),
            ('vt_qemu_defconfig', 'vt.qemu', 'defconfig',
             {'default': 'yes'}),
            ('vt_malloc_perturb', 'vt.qemu', 'malloc_perturb',
             {'default': 'yes'}),
            # debug section
            ('vt_no_cleanup', 'vt.debug', 'no_cleanup',
             {'key_type': bool, 'default': False}),
        )
        for attr, section, key, extra in config_spec:
            setattr(self.options, attr,
                    settings.get_value(section, key, **extra))

        self.cartesian_parser = None
Example #35
0
    def run(self, args):
        """
        Configure the current job to replay a previous one.

        Resolves the source job results directory, restores whitelisted
        options, test references, configuration and variants from it,
        and injects the replay map into ``args``. Exits the process
        with AVOCADO_FAIL on any unrecoverable inconsistency.

        :param args: parsed command line arguments namespace
        """
        # No --replay-jobid given: this plugin has nothing to do.
        if getattr(args, 'replay_jobid', None) is None:
            return

        # Reject option combinations that cannot be honored in a replay.
        err = None
        if args.replay_teststatus and 'variants' in args.replay_ignore:
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore variants`.")
        elif args.replay_teststatus and args.reference:
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif getattr(args, "remote_hostname", False):
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Resolve the base logs directory (explicit --job-results-dir
        # takes precedence over the configured default).
        if getattr(args, 'logdir', None) is not None:
            logdir = args.logdir
        else:
            logdir = settings.get_value(section='datadir.paths',
                                        key='logs_dir', key_type='path',
                                        default=None)
        try:
            resultsdir = jobdata.get_resultsdir(logdir, args.replay_jobid)
        except ValueError as exception:
            # Fix: BaseException has no ``.message`` attribute on
            # Python 3; use str(exception) instead.
            LOG_UI.error(str(exception))
            sys.exit(exit_codes.AVOCADO_FAIL)

        if resultsdir is None:
            LOG_UI.error("Can't find job results directory in '%s'", logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory."
                   % (args.replay_jobid, resultsdir))
            LOG_UI.error(msg)
            sys.exit(exit_codes.AVOCADO_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Restore whitelisted options from the source job, unless they
        # were explicitly overridden on the current command line.
        replay_args = jobdata.retrieve_args(resultsdir)
        whitelist = ['loaders',
                     'external_runner',
                     'external_runner_testdir',
                     'external_runner_chdir',
                     'failfast',
                     'ignore_missing_references',
                     'execution_order']
        if replay_args is None:
            # ``warning`` used consistently; ``warn`` is a deprecated alias.
            LOG_UI.warning('Source job args data not found. These options '
                           'will not be loaded in this replay job: %s',
                           ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue is not None:
                    LOG_UI.warning("Overriding the replay %s with the --%s "
                                   "value given on the command line.",
                                   option.replace('_', '-'),
                                   option.replace('_', '-'))
                elif option in replay_args:
                    setattr(args, option, replay_args[option])

        # Keeping this for compatibility.
        # TODO: Use replay_args['reference'] at some point in the future.
        if getattr(args, 'reference', None):
            LOG_UI.warning('Overriding the replay test references with test '
                           'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                LOG_UI.error('Source job test references data not found. '
                             'Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                setattr(args, 'reference', references)

        if 'config' in args.replay_ignore:
            LOG_UI.warning("Ignoring configuration from source job with "
                           "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        if 'variants' in args.replay_ignore:
            LOG_UI.warning("Ignoring variants from source job with "
                           "--replay-ignore.")
        else:
            variants = jobdata.retrieve_variants(resultsdir)
            if variants is None:
                LOG_UI.error('Source job variants data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                LOG_UI.warning("Using src job Mux data only, use "
                               "`--replay-ignore variants` to override "
                               "them.")
                setattr(args, "avocado_variants", variants)

        # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
        # supplied.
        if args.replay_resume:
            if not args.replay_teststatus:
                args.replay_teststatus = ["INTERRUPTED"]
            elif "INTERRUPTED" not in args.replay_teststatus:
                args.replay_teststatus.append("INTERRUPTED")
        if args.replay_teststatus:
            replay_map = self._create_replay_map(resultsdir,
                                                 args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                LOG_UI.warning("Directory used in the replay source job '%s' "
                               "does not exist, using '.' instead", pwd)
Example #36
0
#
# Print the results directory of a given Avocado job.
#
# $ python avocado-get-job-results-dir.py <job_id>
#

import sys

from avocado.core import jobdata
from avocado.core.settings import settings

if __name__ == '__main__':
    # Exactly one positional argument (the job ID) is required.
    if len(sys.argv) < 2:
        sys.stderr.write("Please inform the Job ID.\n")
        sys.exit(-1)

    logdir = settings.get_value(section='datadir.paths',
                                key='logs_dir',
                                key_type='path',
                                default=None)

    if logdir is None:
        # BUGFIX: was "sys.sterr" (typo), which raised AttributeError
        # instead of printing the intended error message.
        sys.stderr.write("Log directory not configured in Avocado settings.\n")
        sys.exit(-1)

    try:
        resultsdir = jobdata.get_resultsdir(logdir, sys.argv[1])
    except ValueError as exception:
        # Format the exception itself: works on Python 2 and 3, unlike
        # the legacy ".message" attribute (same text for ValueError).
        sys.stderr.write('%s\n' % exception)
        sys.exit(-1)
    else:
        if resultsdir is None:
            sys.stderr.write("Can't find job results directory in '%s'\n" %
                             logdir)
Example #37
0
    def run(self, args):
        """
        Populate ``args`` with data retrieved from the source job's
        results directory so the new job replays it.

        Exits the process when the source job data cannot be located or
        when incompatible command line options are combined.

        :param args: the parsed command line arguments namespace,
                     mutated in place with the replayed job's settings.
        """
        # Only act when a replay was actually requested.
        if getattr(args, 'replay_jobid', None) is None:
            return

        log = logging.getLogger("avocado.app")

        # Reject option combinations that cannot be replayed coherently.
        err = None
        if args.replay_teststatus and args.multiplex_files:
            err = ("Option --replay-test-status is incompatible with "
                   "--multiplex.")
        elif args.replay_teststatus and args.url:
            err = ("Option --replay-test-status is incompatible with "
                   "test URLs given on the command line.")
        elif args.remote_hostname:
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            log.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Locate the source job results dir: either given explicitly or
        # derived from the configured logs directory and the job id.
        if args.replay_datadir is not None:
            resultsdir = args.replay_datadir
        else:
            logs_dir = settings.get_value('datadir.paths', 'logs_dir',
                                          default=None)
            logdir = os.path.expanduser(logs_dir)
            resultsdir = replay.get_resultsdir(logdir, args.replay_jobid)

        if resultsdir is None:
            log.error("Can't find job results directory in '%s'", logdir)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        # Resolve the (possibly partial) job id against the 'id' file.
        sourcejob = replay.get_id(os.path.join(resultsdir, 'id'),
                                  args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory."
                   % (args.replay_jobid, resultsdir))
            log.error(msg)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Load whitelisted options recorded by the source job, unless the
        # user overrode them on the command line.
        replay_args = replay.retrieve_args(resultsdir)
        whitelist = ['loaders',
                     'external_runner',
                     'external_runner_testdir',
                     'external_runner_chdir']
        if replay_args is None:
            log.warn('Source job args data not found. These options will not '
                     'be loaded in this replay job: %s', ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue:
                    log.warn("Overriding the replay %s with the --%s value "
                             "given on the command line.",
                             option.replace('_', '-'),
                             option.replace('_', '-'))
                elif option in replay_args:
                    # BUGFIX: guard the lookup -- source jobs recorded by
                    # older versions may not contain every whitelisted
                    # option, which previously raised KeyError.
                    setattr(args, option, replay_args[option])

        # Keeping this for compatibility.
        # TODO: Use replay_args['url'] at some point in the future.
        if getattr(args, 'url', None):
            log.warn('Overriding the replay urls with urls provided in '
                     'command line.')
        else:
            urls = replay.retrieve_urls(resultsdir)
            if urls is None:
                log.error('Source job urls data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                setattr(args, 'url', urls)

        # Honor --replay-ignore for the recorded configuration...
        if args.replay_ignore and 'config' in args.replay_ignore:
            log.warn("Ignoring configuration from source job with "
                     "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        # ...and for the recorded multiplex data.
        if args.replay_ignore and 'mux' in args.replay_ignore:
            log.warn("Ignoring multiplex from source job with "
                     "--replay-ignore.")
        else:
            if getattr(args, 'multiplex_files', None) is not None:
                log.warn('Overriding the replay multiplex with '
                         '--multiplex-file.')
                # Use absolute paths to avoid problems with os.chdir
                args.multiplex_files = [os.path.abspath(_)
                                        for _ in args.multiplex_files]
            else:
                mux = replay.retrieve_mux(resultsdir)
                if mux is None:
                    log.error('Source job multiplex data not found. Aborting.')
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    setattr(args, "multiplex_files", mux)

        # Build the map of tests to re-run when filtering by status.
        if args.replay_teststatus:
            replay_map = replay.retrieve_replay_map(resultsdir,
                                                    args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to discover test urls properly
        pwd = replay.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                log.warn("Directory used in the replay source job '%s' does "
                         "not exist, using '.' instead", pwd)
Example #38
0
class Multiplex(CLICmd):
    """
    Implements the avocado 'multiplex' subcommand

    Parses one or more multiplex (YAML) files and either displays the
    resulting tree structure or lists every generated variant together
    with its parameters.
    """

    name = 'multiplex'
    description = 'Generate a list of dictionaries with params from a multiplex file'

    def __init__(self, *args, **kwargs):
        super(Multiplex, self).__init__(*args, **kwargs)
        # Holds [path:]key:value nodes injected via --mux-inject; merged
        # into the final multiplex tree in run().
        self._from_args_tree = tree.TreeNode()

    def configure(self, parser):
        """
        Add the multiplex subcommand arguments to the application parser.

        :param parser: the main application parser.
        """
        # Without multiplexer support there is nothing to configure.
        if multiplexer.MULTIPLEX_CAPABLE is False:
            return

        parser = super(Multiplex, self).configure(parser)
        parser.add_argument('multiplex_files',
                            nargs='+',
                            help='Path(s) to a multiplex file(s)')

        parser.add_argument('--filter-only',
                            nargs='*',
                            default=[],
                            help='Filter only path(s) from multiplexing')

        parser.add_argument('--filter-out',
                            nargs='*',
                            default=[],
                            help='Filter out path(s) from multiplexing')
        parser.add_argument('-s',
                            '--system-wide',
                            action='store_true',
                            help="Combine the files with the default "
                            "tree.")
        parser.add_argument('-c',
                            '--contents',
                            action='store_true',
                            default=False,
                            help="Shows the node content "
                            "(variables)")
        parser.add_argument('--mux-inject',
                            default=[],
                            nargs='*',
                            help="Inject [path:]key:node values into "
                            "the final multiplex tree.")
        env_parser = parser.add_argument_group("environment view options")
        env_parser.add_argument('-d',
                                '--debug',
                                action='store_true',
                                default=False,
                                help="Debug multiplexed "
                                "files.")
        tree_parser = parser.add_argument_group("tree view options")
        tree_parser.add_argument('-t',
                                 '--tree',
                                 action='store_true',
                                 default=False,
                                 help='Shows the multiplex '
                                 'tree structure')
        tree_parser.add_argument('-i',
                                 '--inherit',
                                 action="store_true",
                                 help="Show the inherited values")

    def _activate(self, args):
        """
        Populate the injected tree from ``--mux-inject`` values.

        :param args: parsed arguments; each ``mux_inject`` entry uses
                     the ``[path:]key:value`` format.
        :raises ValueError: when an entry has no value part.
        """
        # Extend default multiplex tree of --env values
        for value in getattr(args, "mux_inject", []):
            # Split into at most 3 parts: optional path, key, value.
            value = value.split(':', 2)
            if len(value) < 2:
                raise ValueError("key:value pairs required, found only %s" %
                                 (value))
            elif len(value) == 2:
                # No path given: attach the key/value to the tree root.
                self._from_args_tree.value[value[0]] = value[1]
            else:
                node = self._from_args_tree.get_node(value[0], True)
                node.value[value[1]] = value[2]

    def run(self, args):
        """
        Build the multiplex tree and show either the tree structure
        (``--tree``) or the generated variants, then exit the process.

        :param args: parsed command line arguments.
        """
        self._activate(args)
        view = output.View(app_args=args)
        # Validate mutually exclusive display options first.
        err = None
        if args.tree and args.debug:
            err = "Option --tree is incompatible with --debug."
        elif not args.tree and args.inherit:
            err = "Option --inherit can be only used with --tree"
        if err:
            view.notify(event="minor", msg=self.parser.format_help())
            view.notify(event="error", msg=err)
            sys.exit(exit_codes.AVOCADO_FAIL)
        try:
            mux_tree = multiplexer.yaml2tree(args.multiplex_files,
                                             args.filter_only, args.filter_out,
                                             args.debug)
        except IOError, details:  # NOTE: Python 2 'except' syntax
            view.notify(event='error', msg=details.strerror)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        if args.system_wide:
            mux_tree.merge(args.default_multiplex_tree)
        # Values injected via --mux-inject are merged last.
        mux_tree.merge(self._from_args_tree)
        if args.tree:
            # Verbosity encoding for tree_view: 1 shows node contents,
            # +2 additionally shows inherited values.
            if args.contents:
                verbose = 1
            else:
                verbose = 0
            if args.inherit:
                verbose += 2
            use_utf8 = settings.get_value("runner.output",
                                          "utf8",
                                          key_type=bool,
                                          default=None)
            view.notify(event='minor',
                        msg=tree.tree_view(mux_tree, verbose, use_utf8))
            sys.exit(exit_codes.AVOCADO_ALL_OK)

        variants = multiplexer.MuxTree(mux_tree)
        view.notify(event='message', msg='Variants generated:')
        for (index, tpl) in enumerate(variants):
            if not args.debug:
                paths = ', '.join([x.path for x in tpl])
            else:
                # In debug mode also show which YAML file each node
                # came from, dimmed with terminal colors.
                color = output.term_support.LOWLIGHT
                cend = output.term_support.ENDC
                paths = ', '.join([
                    "%s%s@%s%s" %
                    (_.name, color, getattr(_, 'yaml', "Unknown"), cend)
                    for _ in tpl
                ])
            view.notify(event='minor',
                        msg='%sVariant %s:    %s' %
                        (('\n' if args.contents else ''), index + 1, paths))
            if args.contents:
                # Collect (origin:key, value) pairs from all nodes of
                # this variant, de-duplicated via a set.
                env = set()
                for node in tpl:
                    for key, value in node.environment.iteritems():
                        origin = node.environment_origin[key].path
                        env.add(("%s:%s" % (origin, key), str(value)))
                if not env:
                    continue
                # Align the '=>' column to the longest key.
                fmt = '    %%-%ds => %%s' % max([len(_[0]) for _ in env])
                for record in sorted(env):
                    view.notify(event='minor', msg=fmt % record)

        sys.exit(exit_codes.AVOCADO_ALL_OK)
#
# Print the results directory of a given Avocado job.
#
# $ python avocado-get-job-results-dir.py <job_id>
#

import sys

from avocado.core import jobdata
from avocado.core.settings import settings

if __name__ == '__main__':
    # Exactly one positional argument (the job ID) is required.
    if len(sys.argv) < 2:
        sys.stderr.write("Please inform the Job ID.\n")
        sys.exit(-1)

    logdir = settings.get_value(section='datadir.paths',
                                key='logs_dir', key_type='path',
                                default=None)

    if logdir is None:
        # BUGFIX: was "sys.sterr" (typo), which raised AttributeError
        # instead of printing the intended error message.
        sys.stderr.write("Log directory not configured in Avocado settings.\n")
        sys.exit(-1)

    try:
        resultsdir = jobdata.get_resultsdir(logdir, sys.argv[1])
    except ValueError as exception:
        # Format the exception itself: works on Python 2 and 3, unlike
        # the legacy ".message" attribute (same text for ValueError).
        sys.stderr.write('%s\n' % exception)
        sys.exit(-1)
    else:
        if resultsdir is None:
            sys.stderr.write("Can't find job results directory in '%s'\n" %
                             logdir)
Example #40
0
    def run(self, config):
        """
        Populate ``config`` with the settings recorded by the source job
        so that the new job replays it.

        Exits the process when the source job data cannot be located or
        when incompatible command line options are combined.

        :param config: the job configuration dict, mutated in place.
        """
        # Only act when a replay was actually requested.
        if config.get('replay_jobid', None) is None:
            return

        # Reject option combinations that cannot be replayed coherently.
        # NOTE(review): assumes 'replay_ignore' is always a container
        # (e.g. the argparse default list) -- 'in' would raise on None;
        # confirm against the option definition.
        err = None
        if config.get('replay_teststatus') and 'variants' in config.get(
                'replay_ignore'):
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore variants`.")
        elif config.get('replay_teststatus') and config.get('references'):
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif config.get("remote_hostname", False):
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Locate the source job results dir, preferring an explicitly
        # given base log directory over the configured default.
        base_logdir = config.get('base_logdir', None)
        if base_logdir is None:
            base_logdir = settings.get_value(section='datadir.paths',
                                             key='logs_dir',
                                             key_type='path',
                                             default=None)
        try:
            resultsdir = jobdata.get_resultsdir(base_logdir,
                                                config.get('replay_jobid'))
        except ValueError as exception:
            LOG_UI.error(exception)
            sys.exit(exit_codes.AVOCADO_FAIL)

        if resultsdir is None:
            LOG_UI.error("Can't find job results directory in '%s'",
                         base_logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Resolve the (possibly partial) job id against the 'id' file.
        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   config.get('replay_jobid'))
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory." %
                   (config.get('replay_jobid'), resultsdir))
            LOG_UI.error(msg)
            sys.exit(exit_codes.AVOCADO_FAIL)
        config['replay_sourcejob'] = sourcejob

        # Load whitelisted options recorded by the source job, unless
        # the user overrode them on the command line.
        replay_config = jobdata.retrieve_job_config(resultsdir)
        whitelist = [
            'loaders', 'external_runner', 'external_runner_testdir',
            'external_runner_chdir', 'failfast', 'ignore_missing_references',
            'execution_order'
        ]
        if replay_config is None:
            LOG_UI.warn(
                'Source job config data not found. These options will '
                'not be loaded in this replay job: %s', ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = config.get(option, None)
                if optvalue is not None:
                    LOG_UI.warn(
                        "Overriding the replay %s with the --%s value "
                        "given on the command line.", option.replace('_', '-'),
                        option.replace('_', '-'))
                elif option in replay_config:
                    # Older source jobs may not have recorded every
                    # option, hence the membership guard.
                    config[option] = replay_config[option]

        # Command line test references take precedence over recorded ones.
        if config.get('references', None):
            LOG_UI.warn('Overriding the replay test references with test '
                        'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                LOG_UI.error('Source job test references data not found. '
                             'Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                config['references'] = references

        # Honor --replay-ignore for the recorded configuration...
        if 'config' in config.get('replay_ignore'):
            LOG_UI.warn("Ignoring configuration from source job with "
                        "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        # ...and for the recorded variants.
        if 'variants' in config.get('replay_ignore'):
            LOG_UI.warn("Ignoring variants from source job with "
                        "--replay-ignore.")
        else:
            variants = jobdata.retrieve_variants(resultsdir)
            if variants is None:
                LOG_UI.error('Source job variants data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                LOG_UI.warning("Using src job Mux data only, use "
                               "`--replay-ignore variants` to override "
                               "them.")
                config["avocado_variants"] = variants

        # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
        # supplied.
        if config.get('replay_resume'):
            if not config.get('replay_teststatus'):
                config['replay_teststatus'] = ["INTERRUPTED"]
            elif "INTERRUPTED" not in config.get('replay_teststatus'):
                config['replay_teststatus'].append("INTERRUPTED")
        if config.get('replay_teststatus'):
            replay_map = self._create_replay_map(
                resultsdir, config.get('replay_teststatus'))
            config['replay_map'] = replay_map

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                LOG_UI.warn(
                    "Directory used in the replay source job '%s' does"
                    " not exist, using '.' instead", pwd)
Example #41
0
    import imp
    setup_modules = imp.load_source('autotest_setup_modules',
                                    setup_modules_path)
    setup_modules.setup(base_path=client_dir,
                        root_module_name="autotest.client")

# The code below is used by this plugin to find the virt test directory,
# so that it can load the virttest python lib, used by the plugin code.
# If the user doesn't provide the proper configuration, the plugin will
# fail to load.
VIRT_TEST_PATH = None

# The environment variable takes precedence over the settings file.
if 'VIRT_TEST_PATH' in os.environ:
    VIRT_TEST_PATH = os.environ['VIRT_TEST_PATH']
else:
    VIRT_TEST_PATH = settings.get_value(section='virt_test',
                                        key='virt_test_path', default=None)

if VIRT_TEST_PATH is not None:
    sys.path.append(os.path.expanduser(VIRT_TEST_PATH))

# These imports must happen after sys.path is (possibly) extended above,
# otherwise the virttest package may not be importable.
from virttest.standalone_test import SUPPORTED_TEST_TYPES
from virttest.defaults import DEFAULT_GUEST_OS
from virttest import data_dir


# Directory where 'avocado vt-bootstrap' downloads test providers to.
_PROVIDERS_DOWNLOAD_DIR = os.path.join(data_dir.get_test_providers_dir(),
                                       'downloads')

if len(os.listdir(_PROVIDERS_DOWNLOAD_DIR)) == 0:
    raise EnvironmentError("Bootstrap missing. "
                           "Execute 'avocado vt-bootstrap' or disable this "
Example #42
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        Registers positional test references, job control options,
        output/result options, output check options, loader options and
        (when available) multiplexer options.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument('url',
                            type=str,
                            default=[],
                            nargs='*',
                            help='List of test IDs (aliases or paths)')

        parser.add_argument("-d",
                            "--dry-run",
                            action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument('-z',
                            '--archive',
                            action='store_true',
                            default=False,
                            help='Archive (ZIP) files generated by tests')

        parser.add_argument(
            '--force-job-id',
            dest='unique_job_id',
            type=str,
            default=None,
            help=('Forces the use of a particular job ID. Used '
                  'internally when interacting with an avocado '
                  'server. You should not use this option '
                  'unless you know exactly what you\'re doing'))

        parser.add_argument('--job-results-dir',
                            action='store',
                            dest='logdir',
                            default=None,
                            metavar='DIRECTORY',
                            help=('Forces to use of an alternate job '
                                  'results directory.'))

        parser.add_argument(
            '--job-timeout',
            action='store',
            default=None,
            metavar='SECONDS',
            help=('Set the maximum amount of time (in SECONDS) that '
                  'tests are allowed to execute. '
                  'Note that zero means "no timeout". '
                  'You can also use suffixes, like: '
                  ' s (seconds), m (minutes), h (hours). '))

        # The --sysinfo default reflects the current settings file value.
        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo',
                            choices=('on', 'off'),
                            default=sysinfo_default,
                            help=('Enable or disable system information '
                                  '(hardware details, profilers, etc.). '
                                  'Current:  %(default)s'))

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument('-s',
                                   '--silent',
                                   action="store_true",
                                   default=argparse.SUPPRESS,
                                   help='Silence stdout')

        parser.output.add_argument(
            '--show-job-log',
            action='store_true',
            default=False,
            help=('Display only the job log on stdout. Useful '
                  'for test debugging purposes. No output will '
                  'be displayed if you also specify --silent'))

        parser.output.add_argument("--store-logging-stream",
                                   nargs="*",
                                   default=[],
                                   metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        out_check = parser.add_argument_group('output check arguments')

        # BUGFIX: the help text previously described the 'stdout' choice
        # as "record only stderr"; it records only stdout.
        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help=('Record output streams of your tests '
                                     'to reference files (valid options: '
                                     'none (do not record output streams), '
                                     'all (record both stdout and stderr), '
                                     'stdout (record only stdout), '
                                     'stderr (record only stderr). '
                                     'Current: %(default)s'))

        out_check.add_argument(
            '--output-check',
            choices=('on', 'off'),
            default='on',
            help=('Enable or disable test output (stdout/stderr) check. '
                  'If this option is off, no output will '
                  'be checked, even if there are reference files '
                  'present for the test. '
                  'Current: on (output check enabled)'))

        loader.add_loader_options(parser)

        # Multiplexer options are only available when the multiplexer
        # support could be loaded.
        if multiplexer.MULTIPLEX_CAPABLE:
            mux = parser.add_argument_group(
                'multiplexer use on test execution')
            mux.add_argument(
                '-m',
                '--multiplex',
                nargs='*',
                dest='multiplex_files',
                default=None,
                metavar='FILE',
                help='Location of one or more Avocado multiplex (.yaml) '
                'FILE(s) (order dependent)')
            mux.add_argument('--multiplex-files',
                             nargs='*',
                             default=None,
                             metavar='FILE',
                             help='DEPRECATED: please use --multiplex instead')
            mux.add_argument('--filter-only',
                             nargs='*',
                             default=[],
                             help='Filter only path(s) from multiplexing')
            mux.add_argument('--filter-out',
                             nargs='*',
                             default=[],
                             help='Filter out path(s) from multiplexing')
            mux.add_argument('--mux-path',
                             nargs='*',
                             default=None,
                             help="List of paths used to determine path "
                             "priority when querying for parameters")
            mux.add_argument('--mux-inject',
                             default=[],
                             nargs='*',
                             help="Inject [path:]key:node values into the "
                             "final multiplex tree.")
Example #43
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument("reference", type=str, default=[], nargs='*',
                            metavar="TEST_REFERENCE",
                            help='List of test references (aliases or paths)')

        parser.add_argument("-p", "--test-parameter", action="append",
                            dest='test_parameters', default=[],
                            metavar="NAME_VALUE", type=self._test_parameter,
                            help="Parameter name and value to pass to all "
                            "tests. This is only applicable when not using a "
                            "varianter plugin. This option format must be "
                            "given in the NAME=VALUE format, and may be given "
                            "any number of times, or per parameter.")

        parser.add_argument("-d", "--dry-run", action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument("--dry-run-no-cleanup", action="store_true",
                            help="Do not automatically clean up temporary "
                            "directories used by dry-run", default=False)

        parser.add_argument('--force-job-id', dest='unique_job_id',
                            type=str, default=None,
                            help='Forces the use of a particular job ID. Used '
                            'internally when interacting with an avocado '
                            'server. You should not use this option '
                            'unless you know exactly what you\'re doing')

        parser.add_argument('--job-results-dir', action='store',
                            dest='base_logdir', default=None, metavar='DIRECTORY',
                            help=('Forces to use of an alternate job '
                                  'results directory.'))

        parser.add_argument('--job-timeout', action='store',
                            default=None, metavar='SECONDS',
                            help='Set the maximum amount of time (in SECONDS) '
                            'that tests are allowed to execute. '
                            'Values <= zero means "no timeout". '
                            'You can also use suffixes, like: '
                            ' s (seconds), m (minutes), h (hours). ')

        parser.add_argument('--failfast', choices=('on', 'off'),
                            help='Enable or disable the job interruption on '
                            'first failed test.')

        parser.add_argument('--keep-tmp', choices=('on', 'off'),
                            default='off', help='Keep job temporary files '
                            '(useful for avocado debugging). Defaults to off.')

        parser.add_argument('--ignore-missing-references', choices=('on', 'off'),
                            help="Force the job execution, even if some of "
                            "the test references are not resolved to tests.")

        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo', choices=('on', 'off'),
                            default=sysinfo_default, help="Enable or disable "
                            "system information (hardware details, profilers, "
                            "etc.). Current:  %(default)s")

        parser.add_argument("--execution-order",
                            choices=("tests-per-variant",
                                     "variants-per-test"),
                            help="Defines the order of iterating through test "
                            "suite and test variants")

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument('-s', '--silent', action="store_true",
                                   default=argparse.SUPPRESS,
                                   help='Silence stdout')

        parser.output.add_argument('--show-job-log', action='store_true',
                                   default=False, help="Display only the job "
                                   "log on stdout. Useful for test debugging "
                                   "purposes. No output will be displayed if "
                                   "you also specify --silent")

        parser.output.add_argument("--store-logging-stream", nargs="*",
                                   default=[], metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        parser.output.add_argument("--log-test-data-directories",
                                   action="store_true",
                                   help="Logs the possible data directories "
                                   "for each test. This is helpful when "
                                   "writing new tests and not being sure "
                                   "where to put data files. Look for \""
                                   "Test data directories\" in your test log")

        out_check = parser.add_argument_group('output check arguments')

        out_check.add_argument('--output-check-record',
                               choices=('none', 'stdout', 'stderr',
                                        'both', 'combined', 'all'),
                               help="Record the output produced by each test "
                                    "(from stdout and stderr) into both the "
                                    "current executing result and into  "
                                    "reference files.  Reference files are "
                                    "used on subsequent runs to determine if "
                                    "the test produced the expected output or "
                                    "not, and the current executing result is "
                                    "used to check against a previously "
                                    "recorded reference file.  Valid values: "
                                    "'none' (to explicitly disable all "
                                    "recording) 'stdout' (to record standard "
                                    "output *only*), 'stderr' (to record "
                                    "standard error *only*), 'both' (to record"
                                    " standard output and error in separate "
                                    "files), 'combined' (for standard output "
                                    "and error in a single file). 'all' is "
                                    "also a valid but deprecated option that "
                                    "is a synonym of 'both'.  This option "
                                    "does not have a default value, but the "
                                    "Avocado test runner will record the "
                                    "test under execution in the most suitable"
                                    " way unless it's explicitly disabled with"
                                    " value 'none'")

        out_check.add_argument('--output-check', choices=('on', 'off'),
                               default='on',
                               help="Enable or disable test output (stdout/"
                               "stderr) check. If this option is off, no "
                               "output will be checked, even if there are "
                               "reference files present for the test. "
                               "Current: on (output check enabled)")

        loader.add_loader_options(parser)

        filtering = parser.add_argument_group('filtering parameters')
        filtering.add_argument('-t', '--filter-by-tags', metavar='TAGS',
                               action='append',
                               help='Filter INSTRUMENTED tests based on '
                               '":avocado: tags=tag1,tag2" notation in '
                               'their class docstring')
        filtering.add_argument('--filter-by-tags-include-empty',
                               action='store_true', default=False,
                               help=('Include all tests without tags during '
                                     'filtering. This effectively means they '
                                     'will be kept in the test suite found '
                                     'previously to filtering.'))
        filtering.add_argument('--filter-by-tags-include-empty-key',
                               action='store_true', default=False,
                               help=('Include all tests that do not have a '
                                     'matching key in its key:val tags. This '
                                     'effectively means those tests will be '
                                     'kept in the test suite found previously '
                                     'to filtering.'))
Example #44
0
 def pre(self, job):
     """
     Run the pre-job scripts found in the configured directory.

     :param job: the job about to run, handed over to the scripts runner.
     """
     # Use key_type='path' for consistency with the sibling post() hook,
     # so user/variable expansion is applied to the configured directory.
     path = settings.get_value(section=CONFIG_SECTION,
                               key="pre", key_type='path',
                               default="/etc/avocado/scripts/job/pre.d/")
     self._run_scripts('pre', path, job)
Example #45
0
 def __init__(self):
     """Set up the plugin: application logger and configured sleep length."""
     self.log = logging.getLogger("avocado.app")
     # Sleep duration comes from the 'plugins.job.sleep' settings section,
     # falling back to 3 seconds when not configured.
     configured_seconds = settings.get_value(section="plugins.job.sleep",
                                             key="seconds",
                                             key_type=int,
                                             default=3)
     self.seconds = configured_seconds
Example #46
0
                self.result[
                    "compute_node_status"] = self._check_compute_node_status()

            if self.failed_test > 0 and self.raise_health_check_excp:
                raise HealthCheckFail("health check failed")

        finally:
            self.logger.info("Health check result:")
            self.logger.info(self.result)
            self.logger.info("finish health check in host: %s" % self.host_ip)
            self.logger.info("=" * 50)


if __name__ == "__main__":
    # Stand-alone entry point for running the host health check directly.
    # Build a Cartesian config parser from the base avocado-vt test config
    # located under the configured data directory.
    parser = cartesian_config.Parser()
    cfg = os.path.join(settings.get_value('datadir.paths', 'base_dir'),
                       'config/tests.cfg')
    parser.parse_file(cfg)
    # All parameter dictionaries generated from the Cartesian config.
    dicts = parser.get_dicts()
    # Per-check switches consumed by the health checker; values are the
    # strings "true"/"false" (Cartesian-config convention), not booleans.
    # Only the ceph check is enabled here — presumably the other checks are
    # toggled on per-deployment; TODO(review) confirm with the caller side.
    params = {
        "health_check_cpu": "false",
        "health_check_memory": "false",
        "health_check_disk": "false",
        "health_check_ceph": "true",
        "health_check_process": "false",
        "health_check_vm_count": "false",
        "health_check_service_log": "false",
        "health_check_service": "false",
        "health_check_cluster_status": "false",
        "health_check_compute_node_status": "false"
    }
Example #47
0
    def configure(self, parser):
        """
        Add the subparser for the run action.

        :param parser: Main test runner parser.
        """
        parser = super(Run, self).configure(parser)

        parser.add_argument("reference", type=str, default=[], nargs='*',
                            metavar="TEST_REFERENCE",
                            help='List of test references (aliases or paths)')

        parser.add_argument("-d", "--dry-run", action="store_true",
                            help="Instead of running the test only "
                            "list them and log their params.")

        parser.add_argument('--force-job-id', dest='unique_job_id',
                            type=str, default=None,
                            help='Forces the use of a particular job ID. Used '
                            'internally when interacting with an avocado '
                            'server. You should not use this option '
                            'unless you know exactly what you\'re doing')

        parser.add_argument('--job-results-dir', action='store',
                            dest='logdir', default=None, metavar='DIRECTORY',
                            help=('Forces to use of an alternate job '
                                  'results directory.'))

        parser.add_argument('--job-timeout', action='store',
                            default=None, metavar='SECONDS',
                            help='Set the maximum amount of time (in SECONDS) '
                            'that tests are allowed to execute. '
                            'Values <= zero means "no timeout". '
                            'You can also use suffixes, like: '
                            ' s (seconds), m (minutes), h (hours). ')

        parser.add_argument('--failfast', choices=('on', 'off'),
                            help='Enable or disable the job interruption on '
                            'first failed test.')

        parser.add_argument('--keep-tmp', choices=('on', 'off'),
                            default='off', help='Keep job temporary files '
                            '(useful for avocado debugging). Defaults to off.')

        parser.add_argument('--ignore-missing-references', choices=('on', 'off'),
                            help="Force the job execution, even if some of "
                            "the test references are not resolved to tests.")

        # The sysinfo default comes from configuration ('bool' type), then is
        # mapped to the 'on'/'off' strings the command line option expects.
        sysinfo_default = settings.get_value('sysinfo.collect',
                                             'enabled',
                                             key_type='bool',
                                             default=True)
        sysinfo_default = 'on' if sysinfo_default is True else 'off'
        parser.add_argument('--sysinfo', choices=('on', 'off'),
                            default=sysinfo_default, help="Enable or disable "
                            "system information (hardware details, profilers, "
                            "etc.). Current:  %(default)s")

        parser.add_argument("--execution-order",
                            choices=("tests-per-variant",
                                     "variants-per-test"),
                            help="Defines the order of iterating through test "
                            "suite and test variants")

        parser.output = parser.add_argument_group('output and result format')

        parser.output.add_argument('-s', '--silent', action="store_true",
                                   default=argparse.SUPPRESS,
                                   help='Silence stdout')

        parser.output.add_argument('--show-job-log', action='store_true',
                                   default=False, help="Display only the job "
                                   "log on stdout. Useful for test debugging "
                                   "purposes. No output will be displayed if "
                                   "you also specify --silent")

        parser.output.add_argument("--store-logging-stream", nargs="*",
                                   default=[], metavar="STREAM[:LEVEL]",
                                   help="Store given logging STREAMs in "
                                   "$JOB_RESULTS_DIR/$STREAM.$LEVEL.")

        out_check = parser.add_argument_group('output check arguments')

        # Fixed the help text: 'stdout' records standard output (the text
        # previously said it recorded stderr for the 'stdout' choice).
        out_check.add_argument('--output-check-record',
                               choices=('none', 'all', 'stdout', 'stderr'),
                               default='none',
                               help="Record output streams of your tests "
                               "to reference files (valid options: none (do "
                               "not record output streams), all (record both "
                               "stdout and stderr), stdout (record only "
                               "stdout), stderr (record only stderr). "
                               'Current: %(default)s')

        out_check.add_argument('--output-check', choices=('on', 'off'),
                               default='on',
                               help="Enable or disable test output (stdout/"
                               "stderr) check. If this option is off, no "
                               "output will be checked, even if there are "
                               "reference files present for the test. "
                               "Current: on (output check enabled)")

        loader.add_loader_options(parser)

        filtering = parser.add_argument_group('filtering parameters')
        filtering.add_argument('-t', '--filter-by-tags', metavar='TAGS',
                               action='append',
                               help='Filter INSTRUMENTED tests based on '
                               '":avocado: tags=tag1,tag2" notation in '
                               'their class docstring')
        filtering.add_argument('--filter-by-tags-include-empty',
                               action='store_true', default=False,
                               help=('Include all tests without tags during '
                                     'filtering. This effectively means they '
                                     'will be kept in the test suite found '
                                     'previously to filtering.'))
Example #48
0
    def run(self, args):
        """
        Merge the replay source job data into the current run's ``args``.

        Validates the replay options, locates the source job results
        directory, then restores (unless overridden on the command line or
        excluded via --replay-ignore) the whitelisted options, the test
        references, the configuration, and the variants of the source job.

        :param args: the parsed command line arguments namespace (mutated
                     in place with the restored replay data).
        """
        if getattr(args, 'replay_jobid', None) is None:
            return

        err = None
        if args.replay_teststatus and 'variants' in args.replay_ignore:
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore variants`.")
        elif args.replay_teststatus and args.reference:
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif getattr(args, "remote_hostname", False):
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        base_logdir = getattr(args, 'base_logdir', None)
        if base_logdir is None:
            base_logdir = settings.get_value(section='datadir.paths',
                                             key='logs_dir', key_type='path',
                                             default=None)
        try:
            resultsdir = jobdata.get_resultsdir(base_logdir, args.replay_jobid)
        except ValueError as exception:
            # str(exception) instead of exception.message: the .message
            # attribute was removed from exceptions in Python 3
            LOG_UI.error(str(exception))
            sys.exit(exit_codes.AVOCADO_FAIL)

        if resultsdir is None:
            LOG_UI.error("Can't find job results directory in '%s'", base_logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory."
                   % (args.replay_jobid, resultsdir))
            LOG_UI.error(msg)
            sys.exit(exit_codes.AVOCADO_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        replay_args = jobdata.retrieve_args(resultsdir)
        # Options restored from the source job only when not explicitly
        # given on the current command line.
        whitelist = ['loaders',
                     'external_runner',
                     'external_runner_testdir',
                     'external_runner_chdir',
                     'failfast',
                     'ignore_missing_references',
                     'execution_order']
        if replay_args is None:
            LOG_UI.warn('Source job args data not found. These options will '
                        'not be loaded in this replay job: %s',
                        ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue is not None:
                    LOG_UI.warn("Overriding the replay %s with the --%s value "
                                "given on the command line.",
                                option.replace('_', '-'),
                                option.replace('_', '-'))
                elif option in replay_args:
                    setattr(args, option, replay_args[option])

        if getattr(args, 'reference', None):
            LOG_UI.warn('Overriding the replay test references with test '
                        'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                LOG_UI.error('Source job test references data not found. '
                             'Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                setattr(args, 'reference', references)

        if 'config' in args.replay_ignore:
            LOG_UI.warn("Ignoring configuration from source job with "
                        "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        if 'variants' in args.replay_ignore:
            LOG_UI.warn("Ignoring variants from source job with "
                        "--replay-ignore.")
        else:
            variants = jobdata.retrieve_variants(resultsdir)
            if variants is None:
                LOG_UI.error('Source job variants data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                LOG_UI.warning("Using src job Mux data only, use "
                               "`--replay-ignore variants` to override "
                               "them.")
                setattr(args, "avocado_variants", variants)

        # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
        # supplied.
        if args.replay_resume:
            if not args.replay_teststatus:
                args.replay_teststatus = ["INTERRUPTED"]
            elif "INTERRUPTED" not in args.replay_teststatus:
                args.replay_teststatus.append("INTERRUPTED")
        if args.replay_teststatus:
            replay_map = self._create_replay_map(resultsdir,
                                                 args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                LOG_UI.warn("Directory used in the replay source job '%s' does"
                            " not exist, using '.' instead", pwd)
import re

# avocado imports
from avocado.core import exceptions
from avocado.core.settings import settings
from virttest import error_context

# custom imports
pass

###############################################################################
# DEFINITIONS
###############################################################################

# Top-level path of the test suite, taken from the 'i2n.common' settings
# section (None when not configured).
testsuite_top_path = settings.get_value('i2n.common',
                                        'suite_path',
                                        default=None)
# assumes the guest mirrors the host suite layout — TODO(review) confirm
guest_path = testsuite_top_path
# NOTE(review): hard-coded to the python3.6 site-packages location; this
# breaks on hosts with a different interpreter version — consider deriving
# the path from the running interpreter instead.
source_avocado_path = "/usr/lib/python3.6/site-packages/avocado/utils"
destination_avocado_path = "/tmp/utils/avocado"

###############################################################################
# HELPERS
###############################################################################


def deploy_avocado(vm, params, test):
    """
    Deploy the Autotest package to a vm.

    :param vm: vm to deploy to (must be compatible)
Example #50
0
 def post(self, job):
     """
     Execute the post-job scripts from the configured directory.

     :param job: the job that just finished, handed to the scripts runner.
     """
     default_dir = "/etc/avocado/scripts/job/post.d/"
     scripts_dir = settings.get_value(section=CONFIG_SECTION,
                                      key="post", key_type='path',
                                      default=default_dir)
     self._run_scripts('post', scripts_dir, job)
Example #51
0
    def run(self, args):
        """
        Render the multiplex tree or list the variants it generates.

        With --tree, prints the tree view and exits; otherwise iterates the
        generated variants, logging each variant's paths and, with
        --contents, its environment key/value pairs.

        :param args: the parsed command line arguments namespace.
        """
        self._activate(args)
        log = logging.getLogger("avocado.app")
        err = None
        if args.tree and args.debug:
            err = "Option --tree is incompatible with --debug."
        elif not args.tree and args.inherit:
            err = "Option --inherit can be only used with --tree"
        if err:
            log.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)
        try:
            mux_tree = multiplexer.yaml2tree(args.multiplex_files,
                                             args.filter_only, args.filter_out,
                                             args.debug)
        except IOError as details:
            log.error(details.strerror)
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        if args.system_wide:
            mux_tree.merge(args.default_avocado_params)
        mux_tree.merge(self._from_args_tree)
        if args.tree:
            if args.contents:
                verbose = 1
            else:
                verbose = 0
            if args.inherit:
                verbose += 2
            use_utf8 = settings.get_value("runner.output",
                                          "utf8",
                                          key_type=bool,
                                          default=None)
            log.debug(tree.tree_view(mux_tree, verbose, use_utf8))
            sys.exit(exit_codes.AVOCADO_ALL_OK)

        variants = multiplexer.MuxTree(mux_tree)
        log.info('Variants generated:')
        for (index, tpl) in enumerate(variants):
            if not args.debug:
                paths = ', '.join([x.path for x in tpl])
            else:
                color = output.TERM_SUPPORT.LOWLIGHT
                cend = output.TERM_SUPPORT.ENDC
                paths = ', '.join([
                    "%s%s@%s%s" %
                    (_.name, color, getattr(_, 'yaml', "Unknown"), cend)
                    for _ in tpl
                ])
            log.debug('%sVariant %s:    %s', '\n' if args.contents else '',
                      index + 1, paths)
            if args.contents:
                env = set()
                for node in tpl:
                    # .items() instead of .iteritems(): iteritems() was
                    # removed in Python 3; items() works on both 2 and 3
                    for key, value in node.environment.items():
                        origin = node.environment_origin[key].path
                        env.add(("%s:%s" % (origin, key), str(value)))
                if not env:
                    continue
                fmt = '    %%-%ds => %%s' % max([len(_[0]) for _ in env])
                for record in sorted(env):
                    log.debug(fmt, *record)

        sys.exit(exit_codes.AVOCADO_ALL_OK)
Example #52
0
 def __init__(self):
     """Read the configured sleep duration (seconds, defaults to 3)."""
     duration = settings.get_value(section="plugins.job.sleep",
                                   key="seconds",
                                   key_type=int,
                                   default=3)
     self.seconds = duration