Example #1
def full_tests_params_and_str(param_dict, tests_str, use_tests_default):
    """
    Add default tests parameters and string for those missing from the command line.

    :param param_dict: runtime parameters used for extra customization
    :type param_dict: {str, str} or None
    :param str tests_str: command line variant restrictions
    :param bool use_tests_default: whether to use default primary restriction
    :returns: complete tests parameters and string
    :rtype: (:py:class:`Params`, str)
    :raises: :py:class:`ValueError` if the default primary restriction is not
             valid (i.e. not among the available ones)
    """
    tests_config = param.Reparsable()
    tests_config.parse_next_batch(base_file="groups-base.cfg",
                                  ovrwrt_file=param.tests_ovrwrt_file(),
                                  ovrwrt_dict=param_dict)
    tests_params = tests_config.get_params()
    if use_tests_default:
        default = tests_params.get("default_only", "all")
        available_restrictions = param.all_restrictions()
        if default not in available_restrictions:
            raise ValueError("Invalid primary restriction 'only=%s'! It has to be one "
                             "of %s" % (default, ", ".join(available_restrictions)))
        tests_str += "only %s\n" % default
    log.debug("Parsed tests string '%s'", tests_str)
    return tests_params, tests_str
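
A minimal, self-contained sketch of the same defaulting logic, assuming only a plain parameter dict and a list of available restrictions (no param module, Reparsable, or config files involved); the helper name is hypothetical:

def apply_default_restriction(tests_params, tests_str, available_restrictions):
    # pick the configured default primary restriction, falling back to "all"
    default = tests_params.get("default_only", "all")
    if default not in available_restrictions:
        raise ValueError("Invalid primary restriction 'only=%s'! It has to be one "
                         "of %s" % (default, ", ".join(available_restrictions)))
    # append the default as the primary "only" restriction
    return tests_str + "only %s\n" % default

# e.g. apply_default_restriction({}, "", ["all", "normal", "minimal"]) returns "only all\n"
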
Example #2
def full_vm_params_and_strs(param_dict, vm_strs, use_vms_default):
    """
    Add default vm parameters and strings for those missing from the command line.

    :param param_dict: runtime parameters used for extra customization
    :type param_dict: {str, str} or None
    :param vm_strs: command line vm-specific names and variant restrictions
    :type vm_strs: {str, str}
    :param use_vms_default: whether to use default variant restriction for a
                            particular vm
    :type use_vms_default: {str, bool}
    :returns: complete vm parameters and strings
    :rtype: (:py:class:`Params`, {str, str})
    :raises: :py:class:`ValueError` if no command line or default variant
             restriction could be found for some vm
    """
    vms_config = param.Reparsable()
    vms_config.parse_next_batch(base_file="guest-base.cfg",
                                ovrwrt_file=param.vms_ovrwrt_file(),
                                ovrwrt_dict=param_dict)
    vms_params = vms_config.get_params()
    for vm_name in param.all_vms():
        if use_vms_default[vm_name]:
            default = vms_params.get("default_only_%s" % vm_name)
            if not default:
                raise ValueError("No default variant restriction found for %s!" % vm_name)
            vm_strs[vm_name] += "only %s\n" % default
    log.debug("Parsed vm strings '%s'", vm_strs)
    return vms_params, vm_strs
Example #3
    def finish(self, proc, started, step, deadline, result_dispatcher):
        """
        Wait for the test process to finish and report its status, or an error
        status if the status cannot be obtained by the deadline.

        :param proc: The test's process
        :param started: Time when the test started
        :param step: Step between checks for the status
        :param deadline: Test execution deadline
        :param result_dispatcher: Result dispatcher (for test_progress
               notifications)
        """
        # Wait for either process termination or test status
        wait.wait_for(lambda: not proc.is_alive() or self.status, 1, 0, step)
        config = settings.as_dict()
        if self.status:  # status exists, wait for process to finish
            timeout_process_alive = config.get('runner.timeout.process_alive')
            deadline = min(deadline, time.monotonic() + timeout_process_alive)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: not proc.is_alive(), 1, 0, step):
                    return self._add_status_failures(self.status)
            err = "Test reported status but did not finish"
        else:  # proc finished, wait for late status delivery
            timeout_process_died = config.get('runner.timeout.process_died')
            deadline = min(deadline, time.monotonic() + timeout_process_died)
            while time.monotonic() < deadline:
                result_dispatcher.map_method('test_progress', False)
                if wait.wait_for(lambda: self.status, 1, 0, step):
                    # Status delivered after the test process finished, pass
                    return self._add_status_failures(self.status)
            err = "Test died without reporting the status."
        # At this point there were failures, fill the new test status
        TEST_LOG.debug("Original status: %s", str(self.status))
        test_state = self.early_status
        test_state['time_start'] = started
        test_state['time_elapsed'] = time.monotonic() - started
        test_state['fail_reason'] = err
        test_state['status'] = exceptions.TestAbortError.status
        test_state['fail_class'] = exceptions.TestAbortError.__name__
        test_state['traceback'] = 'Traceback not available'
        try:
            with open(test_state['logfile'], 'r') as log_file_obj:
                test_state['text_output'] = log_file_obj.read()
        except IOError:
            test_state["text_output"] = "Not available, file not created yet"
        TEST_LOG.error('ERROR %s -> TestAbortError: %s.', err,
                       test_state['name'])
        if proc.is_alive():
            TEST_LOG.warning("Killing hanged test process %s", proc.pid)
            os.kill(proc.pid, signal.SIGTERM)
            if not wait.wait_for(lambda: not proc.is_alive(), 1, 0, 0.01):
                os.kill(proc.pid, signal.SIGKILL)
                end_time = time.monotonic() + 60
                while time.monotonic() < end_time:
                    if not proc.is_alive():
                        break
                    time.sleep(0.1)
                else:
                    raise exceptions.TestError("Unable to destroy test's "
                                               "process (%s)" % proc.pid)
        return self._add_status_failures(test_state)
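
The cleanup at the end of finish() follows a common escalation pattern: ask with SIGTERM first, then force with SIGKILL and wait a bounded time before giving up. A standalone sketch of that pattern, assuming a multiprocessing.Process and plain exceptions instead of the avocado wait and exceptions helpers:

import os
import signal
import time
import multiprocessing

def terminate_process(proc, grace=1.0, kill_timeout=60.0):
    """Try SIGTERM first, then escalate to SIGKILL and wait a bounded time."""
    if not proc.is_alive():
        return
    os.kill(proc.pid, signal.SIGTERM)
    deadline = time.monotonic() + grace
    while time.monotonic() < deadline:
        if not proc.is_alive():
            return
        time.sleep(0.01)
    os.kill(proc.pid, signal.SIGKILL)
    deadline = time.monotonic() + kill_timeout
    while time.monotonic() < deadline:
        if not proc.is_alive():
            return
        time.sleep(0.1)
    raise RuntimeError("Unable to destroy test's process (%s)" % proc.pid)

if __name__ == "__main__":
    # example: terminate a process that would otherwise sleep for minutes
    victim = multiprocessing.Process(target=time.sleep, args=(300,))
    victim.start()
    terminate_process(victim)
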
Example #4
def params_from_cmd(config):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param config: command line arguments
    :type config: {str, str}
    :raises: :py:class:`ValueError` if a vm selected on the command line is not
             available in the configuration (and thus not supported), or if
             internal tests are restricted from the command line
    """
    sys.path.insert(1, os.path.join(param.suite_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    available_vms = param.all_vms()
    available_restrictions = param.all_restrictions()

    # defaults usage vs command line overriding
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    with_selected_vms = list(available_vms)

    # the run string includes only pure parameters
    param_dict = {}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str, vm_strs = "", {vm: "" for vm in available_vms}

    # main tokenizing loop
    for cmd_param in config["params"]:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            log.error("Skipping malformed parameter on the command line '%s' - "
                      "must be of the form <key>=<val>", cmd_param)
            continue
        (key, value) = re_param.group(1, 2)
        if key == "only" or key == "no":
            # detect if this is the primary restriction to escape defaults
            if value in available_restrictions:
                use_tests_default = False
            # else this is an auxiliary restriction
            else:
                with_nontrivial_restrictions = True
            # main test restriction part
            tests_str += "%s %s\n" % (key, value)
        elif key.startswith("only_") or key.startswith("no_"):
            for vm_name in available_vms:
                if re.match("(only|no)_%s" % vm_name, key):
                    # escape defaults for this vm and use the command line
                    use_vms_default[vm_name] = False
                    # main vm restriction part
                    vm_strs[vm_name] += "%s %s\n" % (key.replace("_%s" % vm_name, ""), value)
        # NOTE: comma in a parameter sense implies the same as space in config file
        elif key == "vms":
            # NOTE: no restrictions of the required vms are allowed during tests since
            # these are specified by each test (allowed only for manual setup steps)
            with_selected_vms[:] = value.split(",")
            for vm_name in with_selected_vms:
                if vm_name not in available_vms:
                    raise ValueError("The vm '%s' is not among the supported vms: "
                                     "%s" % (vm_name, ", ".join(available_vms)))
        else:
            # NOTE: comma on the command line is space in a config file
            value = value.replace(",", " ")
            param_dict[key] = value
    config["param_dict"] = param_dict
    log.debug("Parsed param dict '%s'", param_dict)

    # get minimal configurations and parse defaults if no command line arguments
    config["vms_params"], config["vm_strs"] = full_vm_params_and_strs(param_dict, vm_strs,
                                                                      use_vms_default)
    config["vms_params"]["vms"] = " ".join(with_selected_vms)
    config["available_vms"] = vm_strs.copy()
    for vm_name in available_vms:
        # the keys of vm strings must be equivalent to the selected vms
        if vm_name not in with_selected_vms:
            del config["vm_strs"][vm_name]
    config["tests_params"], config["tests_str"] = full_tests_params_and_str(param_dict, tests_str,
                                                                            use_tests_default)
    config["available_restrictions"] = available_restrictions

    # control against invoking only runnable tests and empty Cartesian products
    control_config = param.Reparsable()
    control_config.parse_next_batch(base_file="sets.cfg",
                                    ovrwrt_file=param.tests_ovrwrt_file(),
                                    ovrwrt_str=config["tests_str"],
                                    ovrwrt_dict=config["param_dict"])
    control_parser = control_config.get_parser()
    if with_nontrivial_restrictions:
        log.info("%s tests with nontrivial restriction %s",
                 len(list(control_parser.get_dicts())), config["tests_str"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    config["prefix"] = ""

    # log into files for each major level the way it was done for autotest
    config["run.store_logging_stream"] = [":10", ":20", ":30", ":40"]

    # attach environment processing hooks
    env_process_hooks()
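
The tokenizing loop above reduces to classifying key=value tokens into test restrictions, per-vm restrictions, the vm selection, and plain parameters. A stripped-down sketch of that classification, assuming hypothetical fixed vm names and only the standard library:

import re

def tokenize(params, available_vms=("vm1", "vm2")):
    tests_str, param_dict = "", {}
    vm_strs = {vm: "" for vm in available_vms}
    selected_vms = list(available_vms)
    for cmd_param in params:
        match = re.match(r"(\w+)=(.*)", cmd_param)
        if match is None:
            continue  # the real loop logs an error for malformed tokens
        key, value = match.group(1, 2)
        if key in ("only", "no"):
            tests_str += "%s %s\n" % (key, value)           # test restriction
        elif key.startswith(("only_", "no_")):
            for vm in available_vms:
                if re.match("(only|no)_%s" % vm, key):      # per-vm restriction
                    vm_strs[vm] += "%s %s\n" % (key.replace("_%s" % vm, ""), value)
        elif key == "vms":
            selected_vms[:] = value.split(",")              # vm selection
        else:
            param_dict[key] = value.replace(",", " ")       # plain parameter
    return tests_str, vm_strs, param_dict, selected_vms

# e.g. tokenize(["only=normal", "only_vm1=CentOS", "vms=vm1", "mem=2048"])
# -> ("only normal\n", {"vm1": "only CentOS\n", "vm2": ""}, {"mem": "2048"}, ["vm1"])
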
Example #5
def params_from_cmd(args):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param args: command line arguments
    :type args: :py:class:`argparse.Namespace`
    """
    root_path = settings.get_value('i2n.common', 'suite_path', default=None)
    sys.path.insert(1, os.path.join(root_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    args.available_vms = available_vms = param.all_vms()
    args.selected_vms = selected_vms = list(available_vms)
    # If the command line restrictions don't contain any of our primary restrictions
    # (all|normal|gui|nongui|minimal|none), we add "only <default>" to the list, where
    # <default> is the primary restriction definition found in the configs. If the
    # configs don't define any default either, we ultimately add "only all". Any
    # further restrictions like "only=curl" can only come from the command line.
    primary_tests_restrictions = [
        "all", "normal", "gui", "nongui", "minimal", "none"
    ]
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str = ""
    vm_strs = {}
    # the run string includes only pure parameters
    param_str = ""
    for cmd_param in args.params:
        try:
            (key, value) = re.findall(r"(\w+)=(.*)", cmd_param)[0]
            if key == "only" or key == "no":
                # detect if this is the primary restriction to escape defaults
                if value in primary_tests_restrictions:
                    use_tests_default = False
                # detect if this is a second (i.e. additional) restriction
                if tests_str != "":
                    with_nontrivial_restrictions = True
                # main test restriction part
                tests_str += "%s %s\n" % (key, value)
            elif key.startswith("only_") or key.startswith("no_"):
                for vm_name in available_vms:
                    if re.match("(only|no)_%s" % vm_name, key):
                        # escape defaults for this vm and use the command line
                        use_vms_default[vm_name] = False
                        # detect new restricted vm
                        if vm_name not in vm_strs:
                            vm_strs[vm_name] = ""
                        # main vm restriction part
                        vm_strs[vm_name] += "%s %s\n" % (key.replace(
                            "_%s" % vm_name, ""), value)
            # NOTE: comma in a parameter sense implies the same as space in config file
            elif key == "vms":
                # NOTE: no restrictions of the required vms are allowed during tests since
                # these are specified by each test (allowed only for manual setup steps)
                selected_vms[:] = value.split(",")
                for vm_name in selected_vms:
                    if vm_name not in available_vms:
                        raise ValueError(
                            "The vm '%s' is not among the supported vms: "
                            "%s" % (vm_name, ", ".join(available_vms)))
            else:
                # NOTE: comma on the command line is space in a config file
                value = value.replace(",", " ")
                param_str += "%s = %s\n" % (key, value)
        except IndexError:
            pass
    args.param_str = param_str
    log.debug("Parsed param string '%s'", param_str)

    # get minimal configurations and parse defaults if no command line arguments
    tests_params = param.prepare_params(base_file="groups-base.cfg",
                                        ovrwrt_file=param.tests_ovrwrt_file,
                                        ovrwrt_str=param_str)
    tests_str += param_str
    if use_tests_default:
        default = tests_params.get("default_only", "all")
        if default not in primary_tests_restrictions:
            raise ValueError(
                "Invalid primary restriction 'only=%s'! It has to be one "
                "of %s" % (default, ", ".join(primary_tests_restrictions)))
        tests_str += "only %s\n" % default
    args.tests_str = tests_str
    log.debug("Parsed tests string '%s'", tests_str)

    vms_params = param.prepare_params(
        base_file="guest-base.cfg",
        ovrwrt_dict={"vms": " ".join(selected_vms)},
        ovrwrt_file=param.vms_ovrwrt_file,
        ovrwrt_str=param_str)
    for vm_name in available_vms:
        # some selected vms might not be restricted on the command line so check again
        if vm_name not in vm_strs:
            vm_strs[vm_name] = ""
        vm_strs[vm_name] += param_str
        if use_vms_default[vm_name]:
            default = vms_params.get("default_only_%s" % vm_name)
            if default is None:
                raise ValueError(
                    "No default variant restriction found for %s!" % vm_name)
            vm_strs[vm_name] += "only %s\n" % default
    args.vm_strs = vm_strs
    log.debug("Parsed vm strings '%s'", vm_strs)

    # control against invoking internal tests
    control_parser = param.prepare_parser(base_file="sets.cfg",
                                          ovrwrt_file=param.tests_ovrwrt_file,
                                          ovrwrt_str=tests_str)
    if with_nontrivial_restrictions:
        for d in control_parser.get_dicts():
            if ".internal." in d["name"] or ".original." in d["name"]:
                # the user should have gotten empty Cartesian product by now but check just in case
                raise ValueError(
                    "You cannot restrict to internal tests from the command line.\n"
                    "Please use the provided manual steps or automated setup policies "
                    "to run an internal test %s." % d["name"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    args.prefix = ""

    # log into files for each major level the way it was done for autotest
    args.store_logging_stream = [":10", ":20", ":30", ":40"]
Example #6
def params_from_cmd(config):
    """
    Take care of command line overwriting, parameter preparation,
    setup and cleanup chains, and paths/utilities for all host controls.

    :param config: command line arguments
    :type config: {str, str}
    :raises: :py:class:`ValueError` if a vm selected on the command line is not
             available in the configuration (and thus not supported), or if
             internal tests are restricted from the command line

    .. todo:: Any dynamically created config keys here are usually entire data
        structures like dictionaries and lists and only used internally during
        the run which makes them unfit for displaying to the user and putting
        in a namespace scope like the officially registered plugin settings.
        Let's wait to see if the multi-suite support in avocado would establish
        some standards for doing this first. Until then, the user won't directly
        interact with these keys anyway.
    """
    suite_path = settings.as_dict().get('i2n.common.suite_path', ".")
    sys.path.insert(1, os.path.join(suite_path, "utils"))

    # validate typed vm names and possible vm specific restrictions
    available_vms = param.all_objects("vms")
    available_restrictions = param.all_restrictions()

    # defaults usage vs command line overriding
    use_tests_default = True
    with_nontrivial_restrictions = False
    use_vms_default = {vm_name: True for vm_name in available_vms}
    with_selected_vms = list(available_vms)

    # the run string includes only pure parameters
    param_dict = {}
    # the tests string includes the test restrictions while the vm strings include the ones for the vm variants
    tests_str, vm_strs = "", collections.OrderedDict([(vm, "")
                                                      for vm in available_vms])

    # main tokenizing loop
    for cmd_param in config["params"]:
        re_param = re.match(r"(\w+)=(.*)", cmd_param)
        if re_param is None:
            ui.error(
                "Found malformed parameter on the command line '%s' - "
                "must be of the form <key>=<val>", cmd_param)
            sys.exit(1)
        (key, value) = re_param.group(1, 2)
        if key == "only" or key == "no":
            # detect if this is the primary restriction to escape defaults
            for variant in re.split(r",|\.|\.\.", value):
                if variant in available_restrictions:
                    use_tests_default = False
                # else this is an auxiliary restriction
                else:
                    with_nontrivial_restrictions = True
            # main test restriction part
            tests_str += "%s %s\n" % (key, value)
        elif key.startswith("only_") or key.startswith("no_"):
            for vm_name in available_vms:
                if re.match("(only|no)_%s" % vm_name, key):
                    # escape defaults for this vm and use the command line
                    use_vms_default[vm_name] = False
                    # main vm restriction part
                    vm_strs[vm_name] += "%s %s\n" % (key.replace(
                        "_%s" % vm_name, ""), value)
        # NOTE: comma in a parameter sense implies the same as space in config file
        elif key == "vms":
            # NOTE: no restrictions of the required vms are allowed during tests since
            # these are specified by each test (allowed only for manual setup steps)
            with_selected_vms[:] = value.split(",")
            for vm_name in with_selected_vms:
                if vm_name not in available_vms:
                    raise ValueError(
                        "The vm '%s' is not among the supported vms: "
                        "%s" % (vm_name, ", ".join(available_vms)))
        else:
            # NOTE: comma on the command line is space in a config file
            value = value.replace(",", " ")
            param_dict[key] = value
    config["param_dict"] = param_dict
    log.debug("Parsed param dict '%s'", param_dict)

    # get minimal configurations and parse defaults if no command line arguments
    config["vms_params"], config["vm_strs"] = full_vm_params_and_strs(
        param_dict, vm_strs, use_vms_default)
    config["vms_params"]["vms"] = " ".join(with_selected_vms)
    config["available_vms"] = vm_strs.copy()
    for vm_name in available_vms:
        # the keys of vm strings must be equivalent to the selected vms
        if vm_name not in with_selected_vms:
            del config["vm_strs"][vm_name]
    config["tests_params"], config["tests_str"] = full_tests_params_and_str(
        param_dict, tests_str, use_tests_default)
    config["available_restrictions"] = available_restrictions

    # control against invoking only runnable tests and empty Cartesian products
    control_config = param.Reparsable()
    control_config.parse_next_batch(base_file="sets.cfg",
                                    ovrwrt_file=param.tests_ovrwrt_file(),
                                    ovrwrt_str=config["tests_str"],
                                    ovrwrt_dict=config["param_dict"])
    control_parser = control_config.get_parser()
    if with_nontrivial_restrictions:
        log.info("%s tests with nontrivial restriction %s",
                 len(list(control_parser.get_dicts())), config["tests_str"])

    # prefix for all tests of the current run making it possible to perform multiple runs in one command
    config["prefix"] = ""

    # log into files for each major level the way it was done for autotest
    config["run.store_logging_stream"] = [":10", ":20", ":30", ":40"]

    # set default off and on state backends
    from .states import lvm, qcow2, lxc, btrfs, ramfile, pool, vmnet
    ss.BACKENDS = {
        "lvm": lvm.LVMBackend,
        "qcow2": qcow2.QCOW2Backend,
        "lxc": lxc.LXCBackend,
        "btrfs": btrfs.BtrfsBackend,
        "pool": pool.QCOW2PoolBackend,
        "qcow2vt": qcow2.QCOW2VTBackend,
        "ramfile": ramfile.RamfileBackend,
        "vmnet": vmnet.VMNetBackend
    }

    # attach environment processing hooks
    env_process_hooks()
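
Compared to Example #4, the only/no handling here splits composite values, so each variant in a token like only=normal..tutorial1 is checked individually against the primary restrictions. A minimal sketch of just that decision, using a hypothetical helper and restriction list:

import re

def classify_restriction(value, available_restrictions=("all", "normal", "minimal")):
    """Return (use_tests_default, with_nontrivial_restrictions) for one only/no value."""
    use_default, nontrivial = True, False
    for variant in re.split(r",|\.|\.\.", value):
        if variant in available_restrictions:
            use_default = False    # an explicit primary restriction was given
        else:
            nontrivial = True      # an auxiliary (non-primary) restriction
    return use_default, nontrivial

# e.g. classify_restriction("normal..tutorial1") -> (False, True)
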