Example #1
def cleanup_env(parser, options):
    """
    Clean up virt-test temporary files.

    :param parser: Cartesian parser with run parameters.
    :param options: Test runner options object.
    """
    if options.vt_no_cleanup:
        logging.info("Option --no-cleanup requested, not cleaning temporary "
                     "files and VM processes...")
        logging.info("")
    else:
        logging.info("Cleaning tmp files and VM processes...")
        d = parser.get_dicts().next()
        env_filename = os.path.join(data_dir.get_root_dir(),
                                    options.vt_type, d.get("env", "env"))
        env = utils_env.Env(filename=env_filename, version=Test.env_version)
        env.destroy()
        # Kill all tail threads, which the env constructor recreates.
        aexpect.kill_tail_threads()
        aexpect.clean_tmp_files()
        utils_net.clean_tmp_files()
        data_dir.clean_tmp_files()
        qemu_vm.clean_tmp_files()
        logging.info("")
Example #2
    def test_locking(self):
        """
        1) Create an env file.
        2) Create a thread that creates a dict as one of env's elements, and
           keeps updating it, using the env save_lock attribute.
        3) Try to save the environment.
        """
        termination_event = threading.Event()
        env = utils_env.Env(filename=self.envfilename)

        def update_env(env):
            @utils_env.lock_safe
            def _update_env(env, key, value):
                env["changing_dict"][key] = value

            if not "changing_dict" in env:
                env["changing_dict"] = {}
            while True:
                key = "%s" % utils_misc.generate_random_string(length=10)
                value = "%s" % utils_misc.generate_random_string(length=10)
                _update_env(env, key, value)
                if termination_event.isSet():
                    break

        changing_thread = threading.Thread(target=update_env, args=(env, ))
        changing_thread.start()
        time.sleep(0.3)
        try:
            env.save()
        finally:
            termination_event.set()
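
Step 2 depends on the `utils_env.lock_safe` decorator serializing mutations against `env.save()`. A plausible sketch, assuming the env exposes a `threading.Lock` as its `save_lock` attribute and that the decorated function takes the env as its first argument:

import functools

def lock_safe(function):
    # Hold the env's save_lock for the duration of the call, so a
    # concurrent env.save() never observes a half-updated dict.
    @functools.wraps(function)
    def wrapper(env, *args, **kwargs):
        env.save_lock.acquire()
        try:
            return function(env, *args, **kwargs)
        finally:
            env.save_lock.release()
    return wrapper
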
Example #3
    def test_register_syncserver(self):
        """
        1) Create an env file.
        2) Create a SyncListenServer object and register it in the env.
        3) Get that SyncListenServer with get_syncserver.
        4) Verify that both objects are the same.
        """
        env = utils_env.Env()
        sync1 = FakeSyncListenServer(port=333)
        env.register_syncserver(333, sync1)
        sync2 = env.get_syncserver(333)
        assert sync1 == sync2
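
`FakeSyncListenServer` only needs enough state to be registered and compared. A minimal, illustrative stub (the real SyncListenServer opens a listening socket, which a unit-test fake deliberately avoids):

import time

class FakeSyncListenServer(object):
    # A unique `instance` tag plus the port: that is all the env tests
    # compare. `address` and `tmpdir` mirror the assumed real signature.
    def __init__(self, address='', port=123, tmpdir=None):
        self.instance = "%s-%s" % (port, time.time())
        self.port = port

    def close(self):
        pass
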
Example #4
    def test_save(self):
        """
        1) Verify that calling env.save() with no filename where env doesn't
           specify a filename will throw an EnvSaveError.
        2) Register a VM in environment, save env to a file, recover env from
           that file, get the vm and verify that the instance attribute of the
           2 objects is the same.
        3) Register a SyncListenServer and don't save env. Restore env from
           file and try to get the syncserver, verify it doesn't work.
        4) Now save env to a file, restore env from file and verify that
           the syncserver can be found there, and that the sync server
           instance attribute is equal to the initial sync server instance.
        """
        fname = "/dev/shm/EnvUnittest"
        env = utils_env.Env()

        self.assertRaises(utils_env.EnvSaveError, env.save, {})

        params = utils_params.Params({"main_vm": 'rhel7-migration'})
        vm1 = FakeVm(params['main_vm'], params)
        vm1.is_alive()
        env.register_vm(params['main_vm'], vm1)
        env.save(filename=fname)
        env2 = utils_env.Env(filename=fname)
        vm2 = env2.get_vm(params['main_vm'])
        vm2.is_alive()
        assert vm1.instance == vm2.instance

        sync1 = FakeSyncListenServer(port=222)
        env.register_syncserver(222, sync1)
        env3 = utils_env.Env(filename=fname)
        syncnone = env3.get_syncserver(222)
        assert syncnone is None

        env.save(filename=fname)
        env4 = utils_env.Env(filename=fname)
        sync2 = env4.get_syncserver(222)
        assert sync2.instance == sync1.instance
        if os.path.isfile(fname):
            os.unlink(fname)
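
`FakeVm` can be equally thin; what test_save actually compares is the `instance` attribute, which must survive the pickle round trip. A sketch under that assumption (the real helper would presumably subclass virt_vm.BaseVM):

import time

class FakeVm(object):
    # `instance` is a unique tag created once at construction, so
    # vm1.instance == vm2.instance after save/load proves the same VM
    # record came back from the file.
    def __init__(self, name, params):
        self.name = name
        self.params = params
        self.instance = "%s-%s" % (name, time.time())

    def is_alive(self):
        # Called by the tests only to mimic how the runner probes VMs.
        return True
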
Example #5
    def worker(self, index, run_test_func):
        """
        The worker function.

        Waits for commands from the scheduler and processes them.

        @param index: The index of this worker (in the range 0..num_workers-1).
        @param run_test_func: A function to be called to run a test
                (e.g. job.run_test).
        """
        r = self.s2w_r[index]
        w = self.w2s_w[index]
        self_dict = self.worker_dicts[index]

        # Inform the scheduler this worker is ready
        w.write("ready\n")

        while True:
            cmd = r.readline().split()
            if not cmd:
                continue

            # The scheduler wants this worker to run a test
            if cmd[0] == "run":
                test_index = int(cmd[1])
                test = self.tests[test_index].copy()
                test.update(self_dict)
                test_iterations = int(test.get("iterations", 1))
                status = run_test_func("kvm",
                                       params=test,
                                       tag=test.get("shortname"),
                                       iterations=test_iterations)
                w.write("done %s %s\n" % (test_index, status))
                w.write("ready\n")

            # The scheduler wants this worker to free its used resources
            elif cmd[0] == "cleanup":
                env_filename = os.path.join(self.bindir, self_dict["env"])
                env = utils_env.Env(env_filename)
                for obj in env.values():
                    if isinstance(obj, virt_vm.BaseVM):
                        obj.destroy()
                    elif isinstance(obj, aexpect.Spawn):
                        obj.close()
                env.save()
                w.write("cleanup_done\n")
                w.write("ready\n")

            # There's no more work for this worker
            elif cmd[0] == "terminate":
                break
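
The worker implements its half of a line-oriented pipe protocol ("ready", "run <index>", "done <index> <status>", "cleanup", "cleanup_done", "terminate"). The scheduler's half is not shown above; a hypothetical sketch of dispatching a single test, assuming mirrored pipe ends `self.w2s_r` and `self.s2w_w`:

def dispatch_test(self, index, test_index):
    # Hypothetical counterpart of worker(): wait for "ready", ask the
    # worker to run one test, then read back its "done" report.
    r = self.w2s_r[index]   # assumed worker-to-scheduler read end
    w = self.s2w_w[index]   # assumed scheduler-to-worker write end
    while r.readline().strip() != "ready":
        pass
    w.write("run %d\n" % test_index)
    reply = r.readline().split()   # e.g. ["done", "3", "True"]
    return reply[0] == "done" and reply[2] == "True"
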
Example #6
    def test_register_vm(self):
        """
        1) Create an env object.
        2) Create a VM and register it in the env.
        3) Get the vm back from the env.
        4) Verify that the 2 objects are the same.
        """
        env = utils_env.Env()
        params = utils_params.Params({"main_vm": 'rhel7-migration'})
        vm1 = FakeVm(params['main_vm'], params)
        vm1.is_alive()
        env.register_vm(params['main_vm'], vm1)
        vm2 = env.get_vm(params['main_vm'])
        vm2.is_alive()
        assert vm1 == vm2
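
Registration is plain keyed storage on the dict-like env. A plausible stand-in for the VM accessors exercised here and in Examples #8 and #9 (the class name and the "vm__" key prefix are illustrative assumptions):

class MiniEnvVMs(dict):
    def register_vm(self, name, vm):
        # Namespace VM entries so they cannot collide with other env
        # contents such as sync servers.
        self["vm__%s" % name] = vm

    def unregister_vm(self, name):
        del self["vm__%s" % name]

    def get_vm(self, name):
        return self.get("vm__%s" % name)

    def get_all_vms(self):
        # Only VM entries are returned, which is why get_all_vms() in
        # Example #9 never includes a registered SyncListenServer.
        return [v for k, v in self.items() if k.startswith("vm__")]
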
Example #7
    def test_unregister_syncserver(self):
        """
        1) Create an env file.
        2) Create and register 2 SyncListenServers in the env.
        3) Get one of the SyncListenServers from the env.
        4) Unregister one of the SyncListenServers.
        5) Verify that the unregistered SyncListenServer can no longer be
           retrieved with get_syncserver().
        """
        env = utils_env.Env()
        sync1 = FakeSyncListenServer(port=333)
        env.register_syncserver(333, sync1)
        sync2 = FakeSyncListenServer(port=444)
        env.register_syncserver(444, sync2)
        sync3 = env.get_syncserver(333)
        assert sync1 == sync3
        env.unregister_syncserver(444)
        sync4 = env.get_syncserver(444)
        assert sync4 is None
Example #8
    def test_unregister_vm(self):
        """
        1) Create an env object.
        2) Register 2 vms in the env.
        3) Verify both vms are in the env.
        4) Remove one of those vms.
        5) Verify that the removed vm is no longer in the env.
        """
        env = utils_env.Env()
        params = utils_params.Params({"main_vm": 'rhel7-migration'})
        vm1 = FakeVm(params['main_vm'], params)
        vm1.is_alive()
        vm2 = FakeVm('vm2', params)
        vm2.is_alive()
        env.register_vm(params['main_vm'], vm1)
        env.register_vm('vm2', vm2)
        assert vm1 in env.get_all_vms()
        assert vm2 in env.get_all_vms()
        env.unregister_vm('vm2')
        assert vm1 in env.get_all_vms()
        assert vm2 not in env.get_all_vms()
Example #9
    def test_get_all_vms(self):
        """
        1) Create an env object.
        2) Create 2 vms and register them in the env.
        3) Create a SyncListenServer and register it in the env.
        4) Verify that the 2 vms are in the output of get_all_vms.
        5) Verify that the sync server is not in the output of get_all_vms.
        """
        env = utils_env.Env()
        params = utils_params.Params({"main_vm": 'rhel7-migration'})
        vm1 = FakeVm(params['main_vm'], params)
        vm1.is_alive()
        vm2 = FakeVm('vm2', params)
        vm2.is_alive()
        env.register_vm(params['main_vm'], vm1)
        env.register_vm('vm2', vm2)
        sync1 = FakeSyncListenServer(port=333)
        env.register_syncserver(333, sync1)
        assert vm1 in env.get_all_vms()
        assert vm2 in env.get_all_vms()
        assert sync1 not in env.get_all_vms()
Example #10
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Warn of this special condition in related location in output & logs
        if os.getuid() == 0 and params.get('nettype', 'user') == 'user':
            logging.warning("")
            logging.warning("Testing with nettype='user' while running "
                            "as root may produce unexpected results!!!")
            logging.warning("")

        # Open the environment file
        env_filename = os.path.join(
            data_dir.get_backend_dir(params.get("vm_type")),
            params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None
        t_type = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(subtestdir,
                                                            bootstrap.test_filter)

                    provider = params.get("provider", None)

                    if provider is None:
                        # Verify that we have the corresponding source files
                        for generic_subdir in asset.get_test_provider_subdirs('generic'):
                            subtest_dirs += data_dir.SubdirList(generic_subdir,
                                                                bootstrap.test_filter)

                        for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")):
                            subtest_dirs += data_dir.SubdirList(specific_subdir,
                                                                bootstrap.test_filter)
                    else:
                        provider_info = asset.get_test_provider_info(provider)
                        for key in provider_info['backends']:
                            subtest_dirs += data_dir.SubdirList(
                                provider_info['backends'][key]['path'],
                                bootstrap.test_filter)

                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug("Searching for test modules that match "
                                  "'type = %s' and 'provider = %s' "
                                  "on this cartesian dict",
                                  params.get("type"), params.get("provider", None))

                    t_types = params.get("type").split()
                    # Make sure we can load provider_lib in tests
                    for s in subtest_dirs:
                        if os.path.dirname(s) not in sys.path:
                            sys.path.insert(0, os.path.dirname(s))

                    test_modules = {}
                    for t_type in t_types:
                        # Reset so a module missing for a later t_type is
                        # reported instead of reusing the previous dir.
                        subtest_dir = None
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = utils_misc.get_test_entrypoint_func(
                            t_type, test_module)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s"
                                             % error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error("Exception raised during "
                                      "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
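
Unlike Example #11, which hardcodes a `run_%s` lookup, this variant resolves the entry point through `utils_misc.get_test_entrypoint_func`. A hedged sketch of such a resolver that accepts either convention:

def get_test_entrypoint_func(name, module):
    # Illustrative only: prefer a plain run() function, falling back to
    # the legacy per-test run_<name>() entry point used in Example #11.
    if hasattr(module, "run"):
        return module.run
    return getattr(module, "run_%s" % name)
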
Example #11
    def run_once(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise error.TestNAError("Test dependency failed")

        # Report virt test version
        logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        logging.info("Starting test %s", self.tag)
        logging.debug("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            logging.debug("    %s = %s", key, params[key])

        # Open the environment file
        env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)

        test_passed = False
        t_types = None

        try:
            try:
                try:
                    subtest_dirs = []

                    other_subtests_dirs = params.get("other_tests_dirs", "")
                    for d in other_subtests_dirs.split():
                        d = os.path.join(*d.split("/"))
                        subtestdir = os.path.join(self.bindir, d, "tests")
                        if not os.path.isdir(subtestdir):
                            raise error.TestError("Directory %s does not "
                                                  "exist" % (subtestdir))
                        subtest_dirs += data_dir.SubdirList(
                            subtestdir, bootstrap.test_filter)

                    # Verify that we have the corresponding source files
                    subtest_dirs += data_dir.SubdirList(
                        self.testdir, bootstrap.test_filter)
                    specific_testdir = os.path.join(self.bindir,
                                                    params.get("vm_type"),
                                                    "tests")
                    subtest_dirs += data_dir.SubdirList(
                        specific_testdir, bootstrap.test_filter)
                    subtest_dir = None

                    # Get the test routine corresponding to the specified
                    # test type
                    logging.debug(
                        "Searching for test modules that match "
                        "param 'type = %s' on this cartesian dict",
                        params.get("type"))
                    t_types = params.get("type").split()
                    test_modules = {}
                    for t_type in t_types:
                        # Reset so a module missing for a later t_type is
                        # reported instead of reusing the previous dir.
                        subtest_dir = None
                        for d in subtest_dirs:
                            module_path = os.path.join(d, "%s.py" % t_type)
                            if os.path.isfile(module_path):
                                logging.debug("Found subtest module %s",
                                              module_path)
                                subtest_dir = d
                                break
                        if subtest_dir is None:
                            msg = ("Could not find test file %s.py on test"
                                   "dirs %s" % (t_type, subtest_dirs))
                            raise error.TestError(msg)
                        # Load the test module
                        f, p, d = imp.find_module(t_type, [subtest_dir])
                        test_modules[t_type] = imp.load_module(t_type, f, p, d)
                        f.close()

                    # Preprocess
                    try:
                        params = env_process.preprocess(self, params, env)
                    finally:
                        env.save()

                    # Run the test function
                    # Iterate over t_types, not test_modules.items(), so the
                    # tests run in the declared order.
                    for t_type in t_types:
                        test_module = test_modules[t_type]
                        run_func = getattr(test_module, "run_%s" % t_type)
                        try:
                            run_func(self, params, env)
                            self.verify_background_errors()
                        finally:
                            env.save()
                    test_passed = True
                    error_message = funcatexit.run_exitfuncs(env, t_type)
                    if error_message:
                        raise error.TestWarn("funcatexit failed with: %s" %
                                             error_message)

                except Exception, e:
                    if t_type is not None:
                        error_message = funcatexit.run_exitfuncs(env, t_type)
                        if error_message:
                            logging.error(error_message)
                    try:
                        env_process.postprocess_on_error(self, params, env)
                    finally:
                        env.save()
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        logging.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    env.save()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            logging.info("Aborting job (%s)", e)
            if params.get("vm_type") == "qemu":
                for vm in env.get_all_vms():
                    if vm.is_dead():
                        continue
                    logging.info("VM '%s' is alive.", vm.name)
                    for m in vm.monitors:
                        logging.info("It has a %s monitor unix socket at: %s",
                                     m.protocol, m.filename)
                    logging.info("The command line used to start it was:\n%s",
                                 vm.make_qemu_command())
                raise error.JobError("Abort requested (%s)" % e)
Example #12
    print_header("DEBUG LOG: %s" % debuglog)

    last_index = -1

    logging.info("Starting test job at %s", test_start_time)
    logging.info("")

    logging.info(version.get_pretty_version_info())
    logging.info("")

    logging.debug("Cleaning up previous job tmp files")
    d = parser.get_dicts().next()
    env_filename = os.path.join(data_dir.get_root_dir(), options.type,
                                d.get("env", "env"))
    env = utils_env.Env(env_filename, Test.env_version)
    env.destroy()
    try:
        address_pool_files = glob.glob("/tmp/address_pool*")
        for address_pool_file in address_pool_files:
            os.remove(address_pool_file)
        aexpect_tmp = "/tmp/aexpect_spawn/"
        if os.path.isdir(aexpect_tmp):
            shutil.rmtree(aexpect_tmp)
    except (IOError, OSError):
        pass
    logging.debug("")

    if options.restore_image_between_tests:
        logging.debug("Creating first backup of guest image")
        qemu_img = storage.QemuImg(d, data_dir.get_data_dir(), "image")
Example #13
def run_tests(parser, options):
    """
    Runs the sequence of KVM tests based on the list of dictionaries
    generated by the configuration system, handling dependencies.

    @param parser: Config parser object.
    @param options: Test runner options object.
    @return: True if all tests passed, False if any of them failed.
    """
    debugdir = os.path.join(data_dir.get_root_dir(), 'logs',
                            'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S'))
    if not os.path.isdir(debugdir):
        os.makedirs(debugdir)
    debuglog = os.path.join(debugdir, "debug.log")
    configure_file_logging(debuglog)

    print_stdout(bcolors.HEADER +
                 "DATA DIR: %s" % data_dir.get_backing_data_dir() +
                 bcolors.ENDC)

    print_header("DEBUG LOG: %s" % debuglog)

    last_index = -1

    logging.info("Starting test job at %s" % time.strftime('%Y-%m-%d %H:%M:%S'))
    logging.info("")
    logging.debug("Options received from the command line:")
    utils_misc.display_attributes(options)
    logging.debug("")

    logging.debug("Cleaning up previous job tmp files")
    d = parser.get_dicts().next()
    env_filename = os.path.join(data_dir.get_root_dir(),
                                options.type, d.get("env", "env"))
    env = utils_env.Env(env_filename, Test.env_version)
    env.destroy()
    try:
        address_pool_files = glob.glob("/tmp/address_pool*")
        for address_pool_file in address_pool_files:
            os.remove(address_pool_file)
        aexpect_tmp = "/tmp/aexpect_spawn/"
        if os.path.isdir(aexpect_tmp):
            shutil.rmtree(aexpect_tmp)
    except (IOError, OSError):
        pass
    logging.debug("")

    if options.restore_image_between_tests:
        logging.debug("Creating first backup of guest image")
        qemu_img = storage.QemuImg(d, data_dir.get_data_dir(), "image")
        qemu_img.backup_image(d, data_dir.get_data_dir(), 'backup', True)
        logging.debug("")

    if options.type == 'qemu':
        logging.info("We're running the qemu test with:")
        logging.info("qemu binary: %s" % d.get('qemu_binary'))
        logging.info("qemu img binary: %s" % d.get('qemu_img_binary'))
        logging.info("qemu io binary: %s" % d.get('qemu_io_binary'))
        logging.info("")

    logging.info("Defined test set:")
    for i, d in enumerate(parser.get_dicts()):
        if options.config is None and options.type in TEST_TYPES_STRIP_NAMES:
            shortname = ".".join(d['name'].split(".")[12:])
        else:
            shortname = ".".join(d['shortname'].split("."))

        logging.info("Test %4d:  %s" % (i + 1, shortname))
        last_index += 1

    if last_index == -1:
        print_stdout("No tests generated by config file %s" % parser.filename)
        print_stdout("Please check the file for errors (bad variable names, "
                     "wrong indentation)")
        sys.exit(-1)
    logging.info("")

    n_tests = last_index + 1
    print_header("TESTS: %s" % n_tests)

    status_dct = {}
    failed = False
    # Add a parameter that decides whether to set up the host env in the
    # test case. For some special tests we only set up the host in the
    # first case and clean it up in the last one. When host env setup is
    # needed, host_setup_flag takes one of the following values:
    #    0(00): do nothing
    #    1(01): setup env
    #    2(10): cleanup env
    #    3(11): setup and cleanup env
    index = 0
    setup_flag = 1
    cleanup_flag = 2
    for dct in parser.get_dicts():
        if options.config is None and options.type in TEST_TYPES_STRIP_NAMES:
            shortname = ".".join(d['name'].split(".")[12:])
        else:
            shortname = ".".join(d['shortname'].split("."))

        if index == 0:
            if dct.get("host_setup_flag", None) is not None:
                flag = int(dct["host_setup_flag"])
                dct["host_setup_flag"] = flag | setup_flag
            else:
                dct["host_setup_flag"] = setup_flag
        if index == last_index:
            if dct.get("host_setup_flag", None) is not None:
                flag = int(dct["host_setup_flag"])
                dct["host_setup_flag"] = flag | cleanup_flag
            else:
                dct["host_setup_flag"] = cleanup_flag
        index += 1

        # Add kvm module status
        dct["kvm_default"] = utils_misc.get_module_params(
                                             dct.get("sysfs_dir", "sys"), "kvm")

        if dct.get("skip") == "yes":
            continue

        dependencies_satisfied = True
        for dep in dct.get("dep"):
            for test_name in status_dct.keys():
                if dep not in test_name:
                    continue

                if not status_dct[test_name]:
                    dependencies_satisfied = False
                    break

        current_status = False
        if dependencies_satisfied:
            t = Test(dct, options)
            t.set_debugdir(debugdir)

            pretty_index = "(%d/%d)" % (index, n_tests)
            print_stdout("%s %s:" % (pretty_index, t.tag), end=False)

            try:
                try:
                    t_begin = time.time()
                    t.start_file_logging()
                    current_status = t.run_once()
                    logging.info("PASS %s" % t.tag)
                    logging.info("")
                    t.stop_file_logging()
                finally:
                    t_end = time.time()
                    t_elapsed = t_end - t_begin
            except error.TestNAError, reason:
                logging.info("SKIP %s -> %s: %s", t.tag,
                             reason.__class__.__name__, reason)
                logging.info("")
                t.stop_file_logging()
                print_skip()
                status_dct[dct.get("name")] = False
                continue
            except error.TestWarn, reason:
                logging.info("WARN %s -> %s: %s", t.tag,
                             reason.__class__.__name__,
                             reason)
                logging.info("")
                t.stop_file_logging()
                print_warn(t_elapsed)
                status_dct[dct.get("name")] = True
                continue
            except Exception, reason:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.error("")
                tb_info = traceback.format_exception(exc_type, exc_value,
                                                     exc_traceback.tb_next)
                tb_info = "".join(tb_info)
                for e_line in tb_info.splitlines():
                    logging.error(e_line)
                logging.error("")
                logging.error("FAIL %s -> %s: %s", t.tag,
                              reason.__class__.__name__,
                              reason)
                logging.info("")
                t.stop_file_logging()
                current_status = False
Example #14
def run_tests(parser, options):
    """
    Runs the sequence of KVM tests based on the list of dictionaries
    generated by the configuration system, handling dependencies.

    @param parser: Config parser object.
    @param options: Test runner options object.
    @return: True if all tests passed, False if any of them failed.
    """
    debugdir = os.path.join(data_dir.get_root_dir(), 'logs',
                            'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S'))
    if not os.path.isdir(debugdir):
        os.makedirs(debugdir)
    debuglog = os.path.join(debugdir, "debug.log")
    configure_file_logging(debuglog)

    print_stdout(bcolors.HEADER +
                 "DATA DIR: %s" % data_dir.get_backing_data_dir() +
                 bcolors.ENDC)

    print_header("DEBUG LOG: %s" % debuglog)

    last_index = -1

    for i, d in enumerate(parser.get_dicts()):
        if options.config is None:
            shortname = ".".join(d['name'].split(".")[12:])
        else:
            shortname = ".".join(d['shortname'].split("."))

        logging.info("Test %4d:  %s" % (i + 1, shortname))
        last_index += 1

    if last_index == -1:
        print_stdout("No tests generated by config file %s" % parser.filename)
        print_stdout("Please check the file for errors (bad variable names, "
                     "wrong indentation)")
        sys.exit(-1)

    # Clean environment file
    d = parser.get_dicts().next()
    env_filename = os.path.join(data_dir.get_root_dir(), options.type,
                                d.get("env", "env"))
    env = utils_env.Env(env_filename, Test.env_version)
    env.destroy()

    n_tests = last_index + 1
    print_header("TESTS: %s" % n_tests)

    status_dct = {}
    failed = False
    # Add a parameter that decides whether to set up the host env in the
    # test case. For some special tests we only set up the host in the
    # first case and clean it up in the last one. When host env setup is
    # needed, host_setup_flag takes one of the following values:
    #    0(00): do nothing
    #    1(01): setup env
    #    2(10): cleanup env
    #    3(11): setup and cleanup env
    index = 0
    setup_flag = 1
    cleanup_flag = 2
    for dct in parser.get_dicts():
        if options.config is None:
            shortname = ".".join(d['name'].split(".")[12:])
        else:
            shortname = ".".join(d['shortname'].split("."))

        if index == 0:
            if dct.get("host_setup_flag", None) is not None:
                flag = int(dct["host_setup_flag"])
                dct["host_setup_flag"] = flag | setup_flag
            else:
                dct["host_setup_flag"] = setup_flag
        if index == last_index:
            if dct.get("host_setup_flag", None) is not None:
                flag = int(dct["host_setup_flag"])
                dct["host_setup_flag"] = flag | cleanup_flag
            else:
                dct["host_setup_flag"] = cleanup_flag
        index += 1

        # Add kvm module status
        dct["kvm_default"] = utils_misc.get_module_params(
            dct.get("sysfs_dir", "sys"), "kvm")

        if dct.get("skip") == "yes":
            continue

        dependencies_satisfied = True
        for dep in dct.get("dep"):
            for test_name in status_dct.keys():
                if dep not in test_name:
                    continue

                if not status_dct[test_name]:
                    dependencies_satisfied = False
                    break

        current_status = False
        if dependencies_satisfied:
            t = Test(dct, options)
            t.set_debugdir(debugdir)

            pretty_index = "(%d/%d)" % (index, n_tests)
            print_stdout("%s %s:" % (pretty_index, t.tag), end=False)

            try:
                try:
                    t_begin = time.time()
                    t.start_file_logging()
                    current_status = t.run_once()
                    logging.info("PASS")
                    t.stop_file_logging()
                finally:
                    t_end = time.time()
                    t_elapsed = t_end - t_begin
            except error.TestNAError, reason:
                logging.info("SKIP -> %s: %s", reason.__class__.__name__,
                             reason)
                t.stop_file_logging()
                print_skip()
                status_dct[dct.get("name")] = False
                continue
            except error.TestWarn, reason:
                logging.info("WARN -> %s: %s", reason.__class__.__name__,
                             reason)
                t.stop_file_logging()
                print_warn(t_elapsed)
                status_dct[dct.get("name")] = True
                continue
            except Exception, reason:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.error("")
                tb_info = traceback.format_exception(exc_type, exc_value,
                                                     exc_traceback.tb_next)
                tb_info = "".join(tb_info)
                for e_line in tb_info.splitlines():
                    logging.error(e_line)
                logging.error("")
                logging.error("FAIL -> %s: %s", reason.__class__.__name__,
                              reason)
                t.stop_file_logging()
                current_status = False
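
One detail shared by the error handlers in Examples #13 and #14: passing `exc_traceback.tb_next` to format_exception skips the run_tests frame itself, so the logged traceback starts inside the failing test. A self-contained illustration:

import sys
import traceback

def format_inner_traceback():
    # Format the current exception while dropping the caller's own
    # frame, mirroring the handlers above.
    exc_type, exc_value, exc_tb = sys.exc_info()
    return "".join(traceback.format_exception(exc_type, exc_value,
                                              exc_tb.tb_next))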