Example #1
def _get_config_filter():
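    """
    Build the config_filter list used when globbing the test provider
    subdirectories for *.cfg files (cf. the data_dir.SubdirGlobList calls
    in create_subtests_cfg below).
    """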
    config_filter = [
        '__init__',
    ]
    for provider_subdir in asset.get_test_provider_subdirs():
        config_filter.append(os.path.join(provider_subdir, 'cfg'))
    return config_filter
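
The returned list is the filter argument handed to data_dir.SubdirGlobList when *.cfg files are collected, as create_subtests_cfg does further down. A minimal sketch of that use, assuming virt-test style import locations (the 'generic' provider argument is only illustrative):

from virttest import asset, data_dir  # assumed import locations

config_filter = _get_config_filter()
# Collect every *.cfg under the generic providers, applying the filter,
# mirroring how create_subtests_cfg builds its shared_file_list
cfg_files = []
for subdir in asset.get_test_provider_subdirs('generic'):
    cfg_files += data_dir.SubdirGlobList(subdir, '*.cfg', config_filter)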
Example #2
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the output and logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify that we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs('generic'):
                            subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                                bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                                bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        # Reset for every test type so a missing module raises
                        # the TestError below instead of reusing a stale dir
                        subtest_dir = None
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s"
                                             % error_message)

                except Exception as e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
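
The heart of the nested loops above is a search-and-load step: for each value in the "type" parameter, find the matching <type>.py among the collected subtest directories, import it with imp, then call its entry point. A condensed sketch of just that pattern (load_test_module and its RuntimeError are illustrative names, not part of virt-test; on Python 3 the deprecated imp module would be replaced by importlib):

import imp
import os

def load_test_module(t_type, subtest_dirs):
    # Look for <t_type>.py in each candidate directory, as run_once does
    for d in subtest_dirs:
        if os.path.isfile(os.path.join(d, "%s.py" % t_type)):
            handle, path, desc = imp.find_module(t_type, [d])
            try:
                return imp.load_module(t_type, handle, path, desc)
            finally:
                handle.close()
    raise RuntimeError("Could not find test module %s.py in %s"
                       % (t_type, subtest_dirs))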
Example #3
def create_subtests_cfg(t_type):
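    """
    Auto-generate backends/<t_type>/cfg/subtests.cfg by combining the cfg
    files of backend-specific providers, generic providers and drop-in tests.
    """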
    root_dir = data_dir.get_root_dir()

    specific_test_list = []
    specific_file_list = []
    specific_subdirs = asset.get_test_provider_subdirs(t_type)
    provider_names_specific = asset.get_test_provider_names(t_type)

    provider_info_specific = []
    for specific_provider in provider_names_specific:
        provider_info_specific.append(asset.get_test_provider_info(specific_provider))

    for subdir in specific_subdirs:
        specific_test_list += data_dir.SubdirGlobList(subdir,
                                                      '*.py',
                                                      test_filter)
        specific_file_list += data_dir.SubdirGlobList(subdir,
                                                      '*.cfg',
                                                      config_filter)

    shared_test_list = []
    shared_file_list = []
    shared_subdirs = asset.get_test_provider_subdirs('generic')
    provider_names_shared = asset.get_test_provider_names('generic')

    provider_info_shared = []
    for shared_provider in provider_names_shared:
        provider_info_shared.append(asset.get_test_provider_info(shared_provider))

    if t_type != 'lvsb':
        for subdir in shared_subdirs:
            shared_test_list += data_dir.SubdirGlobList(subdir,
                                                        '*.py',
                                                        test_filter)
            shared_file_list += data_dir.SubdirGlobList(subdir,
                                                        '*.cfg',
                                                        config_filter)

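    # Turn each discovered test file path into a "provider.testname" entry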
    all_specific_test_list = []
    for test in specific_test_list:
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append("%s.%s" %
                                          (provider_name,
                                           basename.split(".")[0]))
    all_shared_test_list = []
    for test in shared_test_list:
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append("%s.%s" %
                                        (provider_name,
                                         basename.split(".")[0]))

    all_specific_test_list.sort()
    all_shared_test_list.sort()
    all_test_list = set(all_specific_test_list + all_shared_test_list)

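    # Classify the cfg files per provider, pulling out those configured as
    # first/last subtests and recording every declared test type as non drop-in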
    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []

    for shared_file in shared_file_list:
        provider_name = None
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        with open(shared_file, 'r') as shared_file_obj:
            for line in shared_file_obj:
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = next(cartesian_parser.get_dicts())
                    values = td['type'].split(" ")
                    for value in values:
                        if value not in non_dropin_tests:
                            non_dropin_tests.append("%s.%s" %
                                                    (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    shared_file_list = tmp

    tmp = []
    for shared_file in specific_file_list:
        provider_name = None
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        with open(shared_file, 'r') as shared_file_obj:
            for line in shared_file_obj:
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = next(cartesian_parser.get_dicts())
                    values = td['type'].split(" ")
                    for value in values:
                        if value not in non_dropin_tests:
                            non_dropin_tests.append("%s.%s" %
                                                    (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    specific_file_list = tmp

    non_dropin_tests.sort()
    non_dropin_tests = set(non_dropin_tests)
    dropin_tests = all_test_list - non_dropin_tests
    dropin_file_list = []
    tmp_dir = data_dir.get_tmp_dir()
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)

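    # Tests that have a .py module but no matching cfg entry are drop-ins;
    # write a minimal auto-generated cfg snippet for each of them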
    for dropin_test in dropin_tests:
        provider = dropin_test.split(".")[0]
        d_type = dropin_test.split(".")[-1]
        autogen_cfg_path = os.path.join(tmp_dir,
                                        '%s.cfg' % dropin_test)
        autogen_cfg_file = open(autogen_cfg_path, 'w')
        autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
        autogen_cfg_file.write("- %s:\n" % dropin_test)
        autogen_cfg_file.write("    virt_test_type = %s\n" % t_type)
        autogen_cfg_file.write("    type = %s\n" % d_type)
        autogen_cfg_file.close()
        dropin_file_list.append([provider, autogen_cfg_path])

    dropin_file_list_2 = []
    dropin_tests = os.listdir(os.path.join(data_dir.get_root_dir(), "dropin"))
    dropin_cfg_path = os.path.join(tmp_dir, 'dropin.cfg')
    dropin_cfg_file = open(dropin_cfg_path, 'w')
    dropin_cfg_file.write("# Auto generated snippet for dropin tests\n")
    dropin_cfg_file.write("- dropin:\n")
    dropin_cfg_file.write("    variants:\n")
    for dropin_test in dropin_tests:
        if dropin_test == "README":
            continue
        dropin_cfg_file.write("        - %s:\n" % dropin_test)
        dropin_cfg_file.write("            virt_test_type = %s\n" % t_type)
        dropin_cfg_file.write("            type = dropin\n")
        dropin_cfg_file.write("            start_vm = no\n")
        dropin_cfg_file.write("            dropin_path = %s\n" % dropin_test)
    dropin_cfg_file.close()
    dropin_file_list_2.append(['io-github-autotest-qemu', dropin_cfg_path])

    subtests_cfg = os.path.join(root_dir, 'backends', t_type, 'cfg',
                                'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    subtests_file.write(
        "# Do not edit, auto generated file from subtests config\n")

    subtests_file.write("variants subtest:\n")
    write_subtests_files(first_subtest_file, subtests_file)
    write_subtests_files(specific_file_list, subtests_file, t_type)
    write_subtests_files(shared_file_list, subtests_file)
    write_subtests_files(dropin_file_list, subtests_file)
    write_subtests_files(dropin_file_list_2, subtests_file)
    write_subtests_files(last_subtest_file, subtests_file)

    subtests_file.close()
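
For reference, each drop-in test gets a snippet shaped by the write() calls above; with the io-github-autotest-qemu provider used in this example, a hypothetical test named usb_hotplug and t_type = 'qemu', the generated <dropin_test>.cfg would read:

# Drop-in test - auto generated snippet
- io-github-autotest-qemu.usb_hotplug:
    virt_test_type = qemu
    type = usb_hotplug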