Example #1
def cleanup_env(parser, options):
    """
    Clean up virt-test temporary files.

    :param parser: Cartesian parser with run parameters.
    :param options: Test runner options object.
    """
    if options.vt_no_cleanup:
        logging.info("Option --no-cleanup requested, not cleaning temporary "
                     "files and VM processes...")
        logging.info("")
    else:
        logging.info("Cleaning tmp files and VM processes...")
        d = parser.get_dicts().next()
        env_filename = os.path.join(data_dir.get_root_dir(),
                                    options.vt_type, d.get("env", "env"))
        env = utils_env.Env(filename=env_filename, version=Test.env_version)
        env.destroy()
        # Kill all tail threads, which the env constructor recreates.
        aexpect.kill_tail_threads()
        aexpect.clean_tmp_files()
        utils_net.clean_tmp_files()
        data_dir.clean_tmp_files()
        qemu_vm.clean_tmp_files()
        logging.info("")
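
Example #1 pairs env.destroy() with aexpect.kill_tail_threads() so that the
background reader ("tail") threads never outlive the environment they were
logging for. Below is a minimal standalone sketch of the same cleanup idea,
assuming aexpect's ShellSession with an output_func-driven tail thread; the
run_and_cleanup helper and the "bash" command are illustrative, not taken
from the examples on this page.

import logging

import aexpect


def run_and_cleanup(cmd="uname -r"):
    # Passing output_func starts a background tail thread that forwards
    # session output to the logger.
    session = aexpect.ShellSession("bash", output_func=logging.debug,
                                   output_prefix="[bash] ")
    try:
        return session.cmd_output(cmd)
    finally:
        session.close()
        # Reap any tail threads that the session (or an earlier crash)
        # left behind.
        aexpect.kill_tail_threads()
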
Example #2
def postprocess(test, params, env):
    """
    Postprocess all VMs and images according to the instructions in params.

    :param test: An Autotest test object.
    :param params: Dict containing all VM and image parameters.
    :param env: The environment (a dict-like object).
    """
    error_context.context("postprocessing")
    err = ""

    # Terminate the tcpdump thread
    env.stop_tcpdump()

    # Kill all aexpect tail threads
    aexpect.kill_tail_threads()

    # Execute any post_commands
    if params.get("post_command"):
        try:
            process_command(test, params, env, params.get("post_command"),
                            int(params.get("post_command_timeout", "600")),
                            params.get("post_command_noncritical") == "yes")
        except Exception, details:
            err += "\nPostprocess command: %s" % str(details).replace(
                '\n', '\n  ')
            logging.error(details)
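
Example #2 (continued by Examples #3 and #4 below) follows an accumulate-and-report
convention: every cleanup step runs even if an earlier one failed, and the
failures are collected into err instead of aborting immediately. A compact
sketch of that pattern, where run_cleanup_steps and its steps argument are
hypothetical helpers rather than part of the framework shown here:

def run_cleanup_steps(steps):
    """Run each (name, callable) step; collect failures and raise once."""
    errors = []
    for name, func in steps:
        try:
            func()
        except Exception as details:  # cleanup must keep going
            errors.append("%s: %s" % (name, details))
    if errors:
        raise RuntimeError("Postprocess failures:\n  " + "\n  ".join(errors))
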
Example #3
                  glob.glob(os.path.join(test.debugdir, '*.webm'))):
            os.unlink(f)

    # Kill all unresponsive VMs
    if params.get("kill_unresponsive_vms") == "yes":
        for vm in env.get_all_vms():
            if vm.is_alive():
                try:
                    session = vm.login()
                    session.close()
                except (remote.LoginError, virt_vm.VMError), e:
                    logging.warn(e)
                    vm.destroy(gracefully=False)

    # Kill all aexpect tail threads
    aexpect.kill_tail_threads()

    # Terminate tcpdump if no VMs are alive
    living_vms = [vm for vm in env.get_all_vms() if vm.is_alive()]
    if not living_vms and "tcpdump" in env:
        env["tcpdump"].close()
        del env["tcpdump"]

    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        h.cleanup()
        if params.get("vm_type") == "libvirt":
            libvirt_vm.libvirtd_restart()

    if params.get("setup_thp") == "yes":
        thp = test_setup.TransparentHugePageConfig(test, params)
Example #4
    # Kill VMs with deleted disks
    for vm in env.get_all_vms():
        destroy = False
        vm_params = params.object_params(vm.name)
        for image in vm_params.objects('images'):
            if params.object_params(image).get('remove_image') == 'yes':
                destroy = True
        if destroy and not vm.is_dead():
            logging.debug('Image of VM %s was removed, destroying it.', vm.name)
            vm.destroy()

    # Terminate the tcpdump thread
    env.stop_tcpdump()

    # Kill all aexpect tail threads
    aexpect.kill_tail_threads()

    living_vms = [vm for vm in env.get_all_vms() if vm.is_alive()]
    # Close all monitor socket connections of living VMs.
    for vm in living_vms:
        if hasattr(vm, "monitors"):
            for m in vm.monitors:
                try:
                    m.close()
                except Exception:
                    pass
        # Close the serial console session, as it helps keep the number
        # of file descriptors used by virt-test honest.
        vm.cleanup_serial_console()

    if params.get("setup_hugepages") == "yes":
Example #5
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None

        try:
            try:
                try:
                    subtest_dirs = []
                    tests_dir = self.testdir

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    # Verify that we have the corresponding source file for it
                    subtest_dirs += data_dir.SubdirList(
                        self.testdir, bootstrap.test_filter)
                    specific_testdir = os.path.join(self.bindir,
                                                    params.get("vm_type"),
                                                    "tests")
                    subtest_dirs += data_dir.SubdirList(
                        specific_testdir, bootstrap.test_filter)
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "param 'type = %s' on this cartesian dict",
                        params.get("type"))
                    #ting comment t_types = testname
                    t_types = params.get("type").split()
                    test_modules = {}
                    for t_type in t_types:
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type, test_module in test_modules.items():
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        #ting add
                        """
                        #origin code
                        env_process.postprocess(self, params, env)
                        """
                        if params.get("vm_type") != "FT_kvm":
                            env_process.postprocess(self, params, env)
                        else:
                            aexpect.kill_tail_threads()
                            vms = env.get_all_vms()
                            for vm in vms:
                                vm.destroy()
                        #end add
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
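
Example #5 nests try/finally blocks so that env.save() runs after every phase
and a postprocessing exception only becomes the final result when the test
body itself passed. A stripped-down sketch of that ordering, where
run_with_env, preprocess, body and postprocess are generic placeholders
rather than the virt-test API:

import logging


def run_with_env(env, preprocess, body, postprocess):
    test_passed = False
    try:
        try:
            preprocess(env)
        finally:
            env.save()  # persist env even if preprocessing blew up
        body(env)
        test_passed = True
    finally:
        try:
            postprocess(env)
        except Exception as details:
            if test_passed:
                raise  # the postprocess error is the only failure left
            logging.error("Exception raised during postprocessing: %s",
                          details)
        finally:
            env.save()  # always persist the final env state
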
Example #6
def main():
    """
    A tool to execute the same tasks on pre-defined scenarios/
    profiles and store the results together with metadata in
    a suitable structure for compare-perf to compare them.
    """
    args = _parse_args()
    logging_setup(args)

    log = logging.getLogger("controller")
    # create results (or re-use if asked for)
    if os.path.exists(args.output):
        CONTEXT.set_root(
            args.output, "Removing previously existing results: "
            f"{args.output}")
        shutil.rmtree(args.output)
    else:
        CONTEXT.set_root(args.output, f"Creating results: {args.output}")
    try:
        os.makedirs(args.output)
    except FileExistsError:
        pass
    create_metadata(args.output, args)

    hosts = None
    try:
        # Initialize all hosts
        hosts = Controller(args, log)
        _test_defs = list(tests.get(test, extra) for test, extra in args.tests)
        # provision, fetch assets, ...
        hosts.setup()
        try:
            CONTEXT.set(0, "__sysinfo_before__")
            hosts.fetch_logs(CONTEXT.get())
        except Exception as exc:  # pylint: disable=W0703
            utils.record_failure(CONTEXT.get(), exc)
        for profile, profile_args in args.profiles:
            CONTEXT.set_level(0)
            # Check whether this profile changes the test set
            test_defs = profile_test_defs(profile_args, _test_defs)
            # Apply the profile and set `hosts.workers` to contain the list
            # of IP addrs to be used in tests. It might retry on failure.
            for i in range(args.retry_tests):
                try:
                    workers = hosts.apply_profile(profile, profile_args)
                    break
                except exceptions.StepFailed:
                    try:
                        hosts.revert_profile()
                    except Exception:  # pylint: disable=W0703
                        pass
            else:
                log.error(
                    "ERROR applying profile %s, all tests will be "
                    "SKIPPED!", profile)
                continue

            # Run all tests under current profile
            profile_path = os.path.join(args.output, hosts.profile)
            for test, extra in test_defs:
                for i in range(args.retry_tests):
                    try:
                        hosts.run_test(test, workers, extra)
                        break
                    except (AssertionError, aexpect.ExpectError,
                            aexpect.ShellError, RuntimeError) as details:
                        msg = (f"test {test.test}@{hosts.profile} attempt {i} "
                               f"execution failure: {details}")
                        utils.record_failure(os.path.join(
                            profile_path, test.test, str(i)),
                                             details,
                                             details=msg)
                else:
                    log.error("ERROR running %s@%s, test will be SKIPPED!",
                              test.test, hosts.profile)
            # Fetch logs
            try:
                CONTEXT.set(1, "__sysinfo__")
                hosts.fetch_logs(CONTEXT.get())
            except Exception as exc:  # pylint: disable=W0703
                utils.record_failure(os.path.join(args.output, hosts.profile),
                                     exc)
            # Revert profile changes. In case a manual reboot is required,
            # return non-zero.
            CONTEXT.set_level(1, "Reverting profile")
            hosts.revert_profile()
        # Remove unnecessary files
        hosts.cleanup()
        aexpect.kill_tail_threads()
    except Exception as exc:
        CONTEXT.set_level(0, "Handling root exception")
        utils.record_failure(CONTEXT.get(), exc)
        if args.keep_tmp_files:
            log.error("Exception %s, asked not to cleanup by --keep-tmp-files",
                      exc)
        else:
            log.error("Exception %s, cleaning up resources", exc)
            if hosts:
                hosts.cleanup()
        if len(threading.enumerate()) > 1:
            threads = threading.enumerate()
            if any("pydevd.Reader" in str(_) for _ in threads):
                logging.warning(
                    "Background threads %s present but 'pydev' "
                    "thread detected, not killing anything", threads)
            else:
                log.warning("Background threads present, killing: %s",
                            threading.enumerate())
                aexpect.kill_tail_threads()
                os.kill(0, 15)
        raise
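
Example #6 only resorts to killing the whole process group when leftover
background threads exist and none of them belongs to the pydev debugger.
A standalone sketch of that check built on the standard threading module;
kill_leftover_threads is an illustrative name, while the aexpect and os.kill
calls mirror the example above:

import os
import signal
import threading

import aexpect


def kill_leftover_threads(log):
    background = [t for t in threading.enumerate()
                  if t is not threading.main_thread()]
    if not background:
        return
    if any("pydevd.Reader" in str(t) for t in background):
        log.warning("Debugger thread detected, not killing anything: %s",
                    background)
        return
    log.warning("Background threads present, killing: %s", background)
    aexpect.kill_tail_threads()
    os.kill(0, signal.SIGTERM)  # signal the whole process group
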