def create_config_files(options):
    """
    Check if the appropriate configuration files are present.

    If the files are not present, create them.

    :param options: Parsed command-line options (optparse Values object).
    """
    shared_dir = os.path.dirname(data_dir.get_data_dir())
    test_dir = os.path.dirname(shared_dir)

    if options.vt_type:
        test_dir = data_dir.get_backend_dir(options.vt_type)
    elif options.vt_config:
        parent_config_dir = os.path.dirname(options.vt_config)
        parent_config_dir = os.path.dirname(parent_config_dir)
        options.vt_type = parent_config_dir
        test_dir = os.path.join(test_dir, parent_config_dir)

    if not os.path.exists(os.path.join(test_dir, "cfg")):
        print_stdout("Setup error: %s does not exist" %
                     os.path.join(test_dir, "cfg"))
        print_stdout("Perhaps you have not specified -t?")
        sys.exit(1)
    # lvsb test doesn't use shared configs
    if options.vt_type != 'lvsb':
        bootstrap.create_config_files(test_dir, shared_dir, interactive=False)
        bootstrap.create_guest_os_cfg(options.vt_type)
    bootstrap.create_subtests_cfg(options.vt_type)
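
For context, a minimal sketch of how this helper might be driven from a
parsed command line; the option names mirror the attributes read above, but
the parser setup itself is an assumption, not taken from the source:

import optparse

parser = optparse.OptionParser()
parser.add_option("--vt-type", dest="vt_type", default=None)
parser.add_option("--vt-config", dest="vt_config", default=None)
options, _ = parser.parse_args(["--vt-type", "qemu"])
create_config_files(options)  # hypothetical invocation for the qemu backend
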
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the output and logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify if we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs('generic'):
                            subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                                bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                                bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        # Reset per test type so a stale match from a
                        # previous iteration cannot mask a missing module
                        subtest_dir = None
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s"
                                             % error_message)

                except Exception:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
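
The module discovery above boils down to locating "<type>.py" in a list of
candidate directories and importing it with the imp API. A self-contained
sketch of that pattern (stdlib only; the helper name is illustrative):

import imp
import os

def load_subtest_module(t_type, candidate_dirs):
    # Search each directory for <t_type>.py and import the first match
    for d in candidate_dirs:
        if os.path.isfile(os.path.join(d, "%s.py" % t_type)):
            f, path, desc = imp.find_module(t_type, [d])
            try:
                return imp.load_module(t_type, f, path, desc)
            finally:
                f.close()
    raise ImportError("no %s.py found in %s" % (t_type, candidate_dirs))
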
Example #3
    logging.info("")
    step += 1
    logging.info("%d - Verifying directories", step)
    datadir = data_dir.get_data_dir()
    shared_dir = os.path.dirname(datadir)
    sub_dir_list = ["images", "isos", "steps_data", "gpg"]
    for sub_dir in sub_dir_list:
        sub_dir_path = os.path.join(datadir, sub_dir)
        if not os.path.isdir(sub_dir_path):
            logging.debug("Creating %s", sub_dir_path)
            os.makedirs(sub_dir_path)
        else:
            logging.debug("Dir %s exists, not creating",
                          sub_dir_path)

    test_dir = data_dir.get_backend_dir(options.vt_type)
    if options.vt_type == 'libvirt':
        step = create_config_files(test_dir, shared_dir, interactive, step,
                                   force_update=options.vt_update_config)
        create_subtests_cfg(options.vt_type)
        create_guest_os_cfg(options.vt_type)
        # Don't bother checking if changes can't be made
        if os.getuid() == 0:
            verify_selinux(datadir,
                           os.path.join(datadir, 'images'),
                           os.path.join(datadir, 'isos'),
                           data_dir.get_tmp_dir(),
                           interactive, options.vt_selinux_setup)

    # lvsb test doesn't use any shared configs
    elif options.vt_type == 'lvsb':
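
A side note on the directory-verification step above: the isdir() check
followed by makedirs() leaves a small race window between test and creation.
A sketch of an EEXIST-tolerant variant (stdlib only; the function name is
illustrative):

import errno
import os

def ensure_dir(path):
    # Create path if missing; tolerate a concurrent creator
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
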
Example #4
def bootstrap_tests(options):
    """
    Bootstrap process (download the appropriate JeOS file to data dir).

    This function will check whether the JeOS is in the right location of the
    data dir; if not, it will download it non-interactively.

    :param options: Parsed command-line options (optparse Values object).
    """
    if options.type:
        test_dir = data_dir.get_backend_dir(options.type)
    elif options.config:
        parent_config_dir = os.path.dirname(os.path.dirname(options.config))
        parent_config_dir = os.path.dirname(parent_config_dir)
        options.type = parent_config_dir
        test_dir = os.path.abspath(parent_config_dir)

    if options.type == 'qemu':
        check_modules = arch.get_kvm_module_list()
    else:
        check_modules = None
    online_docs_url = "https://github.com/autotest/virt-test/wiki"

    if not options.config:
        restore_image = not options.keep_image
    else:
        restore_image = False

    kwargs = {'test_name': options.type,
              'test_dir': test_dir,
              'base_dir': data_dir.get_data_dir(),
              'default_userspace_paths': None,
              'check_modules': check_modules,
              'online_docs_url': online_docs_url,
              'download_image': not options.no_downloads,
              'selinux': options.selinux_setup,
              'restore_image': restore_image,
              'interactive': False,
              'update_providers': options.update_providers}

    # Time we tolerate (3 s) before printing a message asking the user to wait
    tolerance = 3
    failed = False
    wait_message_printed = False

    bg = utils.InterruptedThread(bootstrap.bootstrap, kwargs=kwargs)
    t_begin = time.time()
    bg.start()

    while bg.is_alive():
        t_elapsed = time.time() - t_begin
        if t_elapsed > tolerance and not wait_message_printed:
            print_stdout("Running setup. Please wait...")
            wait_message_printed = True
            # if bootstrap takes too long, we temporarily make stdout verbose
            # again, so the user can see what's taking so long
            sys.stdout.restore()
        time.sleep(0.1)

    # in case stdout was restored above, redirect it again
    sys.stdout.redirect()

    reason = None
    try:
        bg.join()
    except Exception as e:
        failed = True
        reason = e
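
utils.InterruptedThread is project-specific (its join() also re-raises any
exception from the worker). The wait-message loop itself can be reproduced
with plain stdlib threading, as in this sketch:

import threading
import time

def run_with_wait_notice(func, tolerance=3.0, poll=0.1):
    # Run func in the background; print a notice if it exceeds tolerance
    worker = threading.Thread(target=func)
    t_begin = time.time()
    worker.start()
    printed = False
    while worker.is_alive():
        if time.time() - t_begin > tolerance and not printed:
            print("Running setup. Please wait...")
            printed = True
        time.sleep(poll)
    worker.join()
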
Example #5
def bootstrap_tests(options):
    """
    Bootstrap process (download the appropriate JeOS file to data dir).

    This function will check whether the JeOS is in the right location of the
    data dir; if not, it will download it non-interactively.

    :param options: Parsed command-line options (optparse Values object).
    """
    if options.type:
        test_dir = data_dir.get_backend_dir(options.type)
    elif options.config:
        parent_config_dir = os.path.dirname(os.path.dirname(options.config))
        parent_config_dir = os.path.dirname(parent_config_dir)
        options.type = parent_config_dir
        test_dir = os.path.abspath(parent_config_dir)

    if options.type == 'qemu':
        check_modules = arch.get_kvm_module_list()
    else:
        check_modules = None
    online_docs_url = "https://github.com/autotest/virt-test/wiki"

    if not options.config:
        restore_image = not options.keep_image
    else:
        restore_image = False

    os_info = defaults.get_default_guest_os_info()

    kwargs = {
        'test_name': options.type,
        'test_dir': test_dir,
        'base_dir': data_dir.get_data_dir(),
        'default_userspace_paths': None,
        'check_modules': check_modules,
        'online_docs_url': online_docs_url,
        'download_image': not options.no_downloads,
        'selinux': options.selinux_setup,
        'restore_image': restore_image,
        'interactive': False,
        'update_providers': options.update_providers,
        'guest_os': options.guest_os or os_info['variant']
    }

    # Time we tolerate (3 s) before printing a message asking the user to wait
    tolerance = 3
    failed = False
    wait_message_printed = False

    bg = utils.InterruptedThread(bootstrap.bootstrap, kwargs=kwargs)
    t_begin = time.time()
    bg.start()

    while bg.is_alive():
        t_elapsed = time.time() - t_begin
        if t_elapsed > tolerance and not wait_message_printed:
            print_stdout("Running setup. Please wait...")
            wait_message_printed = True
            # if bootstrap takes too long, we temporarily make stdout verbose
            # again, so the user can see what's taking so long
            sys.stdout.restore()
        time.sleep(0.1)

    # in case stdout was restored above, redirect it again
    sys.stdout.redirect()

    reason = None
    try:
        bg.join()
    except Exception as e:
        failed = True
        reason = e
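
Note the one functional difference from the previous example: the 'guest_os'
entry (and the os_info lookup that feeds it), which falls back to the default
variant whenever options.guest_os is None or empty, because the or operator
tests truthiness rather than only None. A tiny illustration (the variant
string is hypothetical):

guest_os = ""                       # e.g. not given on the command line
default_variant = "jeos-21-64"      # hypothetical default from defaults
print(guest_os or default_variant)  # -> jeos-21-64
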
Example #6
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        for key in sorted(params.keys()):
            logging.debug("    %s = %s", key, params[key])

        # Warn about this special condition in the output and logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify if we have the corresponding source file for it
                        for generic_subdir in asset.get_test_provider_subdirs(
                                'generic'):
                            subtest_dirs += data_dir.SubdirList(
                                generic_subdir, bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(
                                params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(
                                specific_subdir, bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "'type = %s' and 'provider = %s' "
                        "on this cartesian dict", params.get("type"),
                        params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        # Reset per test type so a stale match from a
                        # previous iteration cannot mask a missing module
                        subtest_dir = None
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception as e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception as e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
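
The funcatexit.run_exitfuncs() calls above drain per-scope cleanup callbacks
registered while the test ran. The real module is more elaborate, but a
minimal sketch of such a registry (all names illustrative) looks like this:

_exit_funcs = {}

def register(scope, func, *args):
    # Queue a cleanup callback under the given scope (e.g. the test type)
    _exit_funcs.setdefault(scope, []).append((func, args))

def run_exitfuncs(scope):
    # Run and discard all callbacks for scope; return joined error text
    errors = []
    for func, args in _exit_funcs.pop(scope, []):
        try:
            func(*args)
        except Exception as e:
            errors.append(str(e))
    return "; ".join(errors) or None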