def print_guest_list(options):
    """
    Helper function to pretty print the guest list.

    Builds a Cartesian parser from the guest-os config for the chosen test
    type and writes one line per guest variant, highlighting entries whose
    backing image file is missing. Output goes through a paginator, if
    possible (inspired on git).

    :param options: OptParse object with cmdline options.
    """
    cfg = os.path.join(data_dir.get_root_dir(), options.type,
                       "cfg", "guest-os.cfg")
    cartesian_parser = cartesian_config.Parser()
    cartesian_parser.parse_file(cfg)
    pipe = get_paginator()
    index = 0
    pipe.write("Searched %s for guest images\n" %
               os.path.join(data_dir.get_data_dir(), 'images'))
    pipe.write("Available guests:")
    pipe.write("\n\n")
    for params in cartesian_parser.get_dicts():
        index += 1
        image_name = storage.get_image_filename(params,
                                                data_dir.get_data_dir())
        # Drop the leading variant component to get the short guest name.
        shortname = ".".join(params['name'].split(".")[1:])
        if os.path.isfile(image_name):
            out = (bcolors.blue + str(index) + bcolors.end + " " +
                   shortname + "\n")
        else:
            # Flag guests whose image file is not present on disk.
            out = (bcolors.blue + str(index) + bcolors.end + " " +
                   shortname + " " + bcolors.yellow +
                   "(missing %s)" % os.path.basename(image_name) +
                   bcolors.end + "\n")
        pipe.write(out)
def cleanup_env(parser, options):
    """
    Clean up virt-test temporary files.

    :param parser: Cartesian parser with run parameters.
    :param options: Test runner options object.
    """
    if not options.vt_no_cleanup:
        logging.info("Cleaning tmp files and VM processes...")
        run_params = parser.get_dicts().next()
        env_filename = os.path.join(data_dir.get_root_dir(),
                                    options.vt_type,
                                    run_params.get("env", "env"))
        utils_env.Env(filename=env_filename,
                      version=Test.env_version).destroy()
        # Kill all tail_threads which env constructor recreate.
        aexpect.kill_tail_threads()
        # Remove leftover temp files from each helper module, in turn.
        aexpect.clean_tmp_files()
        utils_net.clean_tmp_files()
        data_dir.clean_tmp_files()
        qemu_vm.clean_tmp_files()
        logging.info("")
    else:
        logging.info("Option --no-cleanup requested, not cleaning temporary "
                     "files and VM processes...")
        logging.info("")
def get_qemu_io_binary(params):
    """
    Get the path to the qemu-io binary currently in use.

    :param params: Dict of params; "vm_type" selects the backend dir and
                   "qemu_io_binary" the binary name (default "qemu").
    :return: Path to the qemu-io binary.
    """
    return get_path(
        os.path.join(data_dir.get_root_dir(), params.get("vm_type")),
        params.get("qemu_io_binary", "qemu"))
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: OptParse object with cmdline options; options.config
                    selects how the test tag is derived.
    """
    self.params = utils_params.Params(params)
    # Base dirs: virt-test root, generic tests, shared data and the
    # backend-specific build dir (selected by the vm_type parameter).
    self.bindir = data_dir.get_root_dir()
    self.testdir = os.path.join(self.bindir, 'tests')
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.builddir = os.path.join(self.bindir, params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, 'src')
    if not os.path.isdir(self.srcdir):
        os.makedirs(self.srcdir)
    self.tmpdir = os.path.join(self.bindir, 'tmp')
    if not os.path.isdir(self.tmpdir):
        os.makedirs(self.tmpdir)
    self.iteration = 0
    if options.config is None:
        # Default config: strip the first 12 variant components from the
        # full variant name to build a shorter display tag.
        self.tag = ".".join(params['name'].split(".")[12:])
    else:
        self.tag = ".".join(params['shortname'].split("."))
    # The following are filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used to collect errors raised by background threads.
    self.background_errors = Queue.Queue()
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: OptParse object with cmdline options.
    """
    self.params = utils_params.Params(params)
    self.bindir = data_dir.get_root_dir()
    self.testdir = os.path.join(self.bindir, 'tests')
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.builddir = os.path.join(self.bindir, params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, 'src')
    self.tmpdir = os.path.join(self.bindir, 'tmp')
    # Create the build source dir and the tmp dir on first use.
    for needed_dir in (self.srcdir, self.tmpdir):
        if not os.path.isdir(needed_dir):
            os.makedirs(needed_dir)
    self.iteration = 0
    if options.config is None and options.type in TEST_TYPES_STRIP_NAMES:
        # Default configs carry a long variant prefix; strip it for the tag.
        self.tag = ".".join(params['name'].split(".")[12:])
    else:
        self.tag = ".".join(params['shortname'].split("."))
    # Filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    self.background_errors = Queue.Queue()
def get_qemu_io_binary(params):
    """
    Get the path to the qemu-io binary currently in use.

    :param params: Dict of params; "vm_type" selects the backend dir and
                   "qemu_io_binary" the binary name (default "qemu").
    :return: Path to the qemu-io binary.
    """
    return get_path(os.path.join(data_dir.get_root_dir(),
                                 params.get("vm_type")),
                    params.get("qemu_io_binary", "qemu"))
def __init__(self, test, params, image_name, blkdebug_cfg="",
             prompt=r"qemu-io>\s*$", log_filename=None, io_options="",
             log_func=None):
    """
    Set up a qemu-io session wrapper.

    :param test: Test object (kept for interface compatibility).
    :param params: Dict of params; "vm_type" selects the backend dir and
                   "qemu_io_binary" the binary name (default "qemu-io").
    :param image_name: Path of the image qemu-io operates on.
    :param blkdebug_cfg: Optional blkdebug config file path.
    :param prompt: Regexp matching the qemu-io interactive prompt.
    :param log_filename: If set, session output is logged to this file
                         (suffixed with a random string).
    :param io_options: Extra options passed to qemu-io.
    :param log_func: Optional function used to log qemu-io commands.
    """
    self.type = ""
    if log_filename:
        # Randomize so parallel sessions do not clobber each other's logs.
        log_filename += "-" + utils_misc.generate_random_string(4)
        self.output_func = utils_misc.log_line
        self.output_params = (log_filename, )
    else:
        self.output_func = None
        self.output_params = ()
    self.output_prefix = ""
    self.prompt = prompt
    # Assigned only once here (the original assigned it twice).
    self.blkdebug_cfg = blkdebug_cfg
    base_dir = utils_misc.get_path(data_dir.get_root_dir(),
                                   params.get("vm_type"))
    self.qemu_io_cmd = utils_misc.get_path(
        base_dir, params.get("qemu_io_binary", "qemu-io"))
    self.io_options = io_options
    self.run_command = False
    self.image_name = image_name
    self.log_func = log_func
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: OptParse object with cmdline options; options.config
                    and options.type select how the test tag is derived.
    """
    self.params = utils_params.Params(params)
    # Base dirs: virt-test root, generic tests, shared data and the
    # backend-specific build dir (selected by the vm_type parameter).
    self.bindir = data_dir.get_root_dir()
    self.testdir = os.path.join(self.bindir, "tests")
    self.virtdir = os.path.join(self.bindir, "shared")
    self.builddir = os.path.join(self.bindir, params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, "src")
    if not os.path.isdir(self.srcdir):
        os.makedirs(self.srcdir)
    self.tmpdir = os.path.join(self.bindir, "tmp")
    if not os.path.isdir(self.tmpdir):
        os.makedirs(self.tmpdir)
    self.iteration = 0
    if options.config is None and options.type in TEST_TYPES_STRIP_NAMES:
        # Default configs carry a long variant prefix; strip the first 12
        # components from the full variant name for the display tag.
        self.tag = ".".join(params["name"].split(".")[12:])
    else:
        self.tag = ".".join(params["shortname"].split("."))
    # The following are filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used to collect errors raised by background threads.
    self.background_errors = Queue.Queue()
def __init__(self, test, params, image_name, blkdebug_cfg="",
             prompt=r"qemu-io>\s*$", log_filename=None, io_options="",
             log_func=None):
    """
    Set up a qemu-io session wrapper.

    :param test: Test object (kept for interface compatibility).
    :param params: Dict of params; "vm_type" selects the backend dir and
                   "qemu_io_binary" the binary name (default "qemu-io").
    :param image_name: Path of the image qemu-io operates on.
    :param blkdebug_cfg: Optional blkdebug config file path.
    :param prompt: Regexp matching the qemu-io interactive prompt.
    :param log_filename: If set, session output is logged to this file
                         (suffixed with a random string).
    :param io_options: Extra options passed to qemu-io.
    :param log_func: Optional function used to log qemu-io commands.
    """
    self.type = ""
    if log_filename:
        # Randomize so parallel sessions do not clobber each other's logs.
        log_filename += "-" + utils_misc.generate_random_string(4)
        self.output_func = utils_misc.log_line
        self.output_params = (log_filename,)
    else:
        self.output_func = None
        self.output_params = ()
    self.output_prefix = ""
    # PEP 8 spacing fixed; blkdebug_cfg assigned only once (the original
    # assigned it a second, redundant time further down).
    self.prompt = prompt
    self.blkdebug_cfg = blkdebug_cfg
    base_dir = utils_misc.get_path(data_dir.get_root_dir(),
                                   params.get("vm_type"))
    self.qemu_io_cmd = utils_misc.get_path(base_dir,
                                           params.get("qemu_io_binary",
                                                      "qemu-io"))
    self.io_options = io_options
    self.run_command = False
    self.image_name = image_name
    self.log_func = log_func
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: Options object; options.vt_config selects how the
                    test tag is derived.
    """
    self.params = utils_params.Params(params)
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    # Backend build dir lives under 'backends/<vm_type>' in this layout.
    self.builddir = os.path.join(self.bindir, 'backends',
                                 params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, 'src')
    if not os.path.isdir(self.srcdir):
        os.makedirs(self.srcdir)
    self.tmpdir = os.path.join(self.bindir, 'tmp')
    if not os.path.isdir(self.tmpdir):
        os.makedirs(self.tmpdir)
    self.iteration = 0
    if options.vt_config:
        self.tag = params.get("shortname")
    else:
        # NOTE(review): assumes the short-name map always carries a
        # "subtests.cfg" entry — confirm against the parser setup.
        self.tag = params.get("_short_name_map_file")["subtests.cfg"]
    # The following are filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used to collect errors raised by background threads.
    self.background_errors = Queue.Queue()
def get_qemu_best_cpu_model(params):
    """
    Try to find out the best CPU model available for qemu.

    This function can't be in qemu_vm, because it is used in env_process,
    where there's no vm object available yet, and env content is
    synchronized in multi host testing.

    1) Get host CPU model
    2) Verify if host CPU model is in the list of supported qemu cpu models
    3) If so, return host CPU model
    4) If not, return the default cpu model set in params, if none defined,
       return 'qemu64'.
    """
    root_dir = data_dir.get_root_dir()
    qemu_binary = get_path(os.path.join(root_dir, params.get("vm_type")),
                           params.get("qemu_binary", "qemu"))
    supported_models = get_qemu_cpu_models(qemu_binary)
    # Pick the first host model that qemu also knows about.
    for candidate in get_host_cpu_models():
        if candidate in supported_models:
            return candidate
    # Nothing matched: fall back to the configured default.
    return params.get("default_cpu_model", "qemu64")
def _variant_only_file(filename): """ Parse file containing flat list of items to append on an 'only' filter """ if not os.path.isabs(filename): filename = os.path.realpath(os.path.join(data_dir.get_root_dir(), filename)) return ", ".join([_.strip() for _ in open(filename) if not _.lstrip().startswith('#')])
def get_guest_name_parser(options):
    """
    Build a Cartesian parser over the machine and guest-os definitions.

    :param options: OptParse object; uses type, arch, machine_type and
                    guest_os to restrict the variant space.
    :return: cartesian_config.Parser instance with filters applied.
    """
    cfgdir = os.path.join(data_dir.get_root_dir(), options.type, "cfg")
    cartesian_parser = cartesian_config.Parser()
    for cfg_name in ("machines.cfg", "guest-os.cfg"):
        cartesian_parser.parse_file(os.path.join(cfgdir, cfg_name))
    # Apply each user-supplied restriction, when present.
    for restriction in (options.arch, options.machine_type,
                        options.guest_os):
        if restriction:
            cartesian_parser.only_filter(restriction)
    return cartesian_parser
def __init__(self, params, root_dir, tag):
    """
    Init the default value for image object.

    :param params: Dictionary containing the test parameters.
    :param root_dir: Base directory for relative filenames.
    :param tag: Image tag defined in parameter images.
    """
    storage.QemuImg.__init__(self, params, root_dir, tag)
    # qemu img binary can be found in the test dir, not data_dir
    qemu_img_base_dir = os.path.join(data_dir.get_root_dir(),
                                     params.get("vm_type"))
    self.image_cmd = utils_misc.get_path(qemu_img_base_dir,
                                         params.get("qemu_img_binary",
                                                    "qemu-img"))
def verify_bsod(self, scrdump_file):
    """
    Check a guest screendump against a BSOD reference image.

    :param scrdump_file: Path to the screendump file to inspect.
    :raise VMDeadKernelCrashError: If the screendump is similar to the
                                   configured BSOD reference image.
    """
    # For windows guest
    # Only runs when the dump exists, BSOD checking is enabled via params,
    # and PIL (ppm_utils.Image) is importable.
    if (os.path.exists(scrdump_file) and
            self.params.get("check_guest_bsod", "no") == 'yes' and
            ppm_utils.Image is not None):
        ref_img_path = self.params.get("bsod_reference_img", "")
        bsod_base_dir = os.path.join(data_dir.get_root_dir(),
                                     "shared", "deps", "bsod_img")
        ref_img = utils_misc.get_path(bsod_base_dir, ref_img_path)
        if ppm_utils.have_similar_img(scrdump_file, ref_img):
            err_msg = "Windows Guest appears to have suffered a BSOD,"
            err_msg += " please check %s against %s." % (scrdump_file,
                                                         ref_img)
            raise VMDeadKernelCrashError(err_msg)
def get_guest_name_list(options):
    """
    Return (and lazily cache) the list of known guest short names.

    :param options: OptParse object; options.type picks the backend dir.
    :return: List of guest short names parsed from guest-os.cfg.
    """
    global GUEST_NAME_LIST
    if GUEST_NAME_LIST is None:
        cfg = os.path.join(data_dir.get_root_dir(), options.type,
                           "cfg", "guest-os.cfg")
        cartesian_parser = cartesian_config.Parser()
        cartesian_parser.parse_file(cfg)
        # Drop the leading variant component to get the short guest name.
        GUEST_NAME_LIST = [".".join(params['name'].split(".")[1:])
                           for params in cartesian_parser.get_dicts()]
    return GUEST_NAME_LIST
def run_tests(parser, options):
    """
    Runs the sequence of KVM tests based on the list of dictionaries
    generated by the configuration system, handling dependencies.

    @param parser: Config parser object.
    @param options: OptParse object with runner options.
    @return: True, if all tests ran passed, False if any of them failed.
    """
    test_start_time = time.strftime('%Y-%m-%d-%H.%M.%S')
    logdir = options.logdir or os.path.join(data_dir.get_root_dir(), 'logs')
    debugdir = os.path.join(logdir, 'run-%s' % test_start_time)
    latestdir = os.path.join(logdir, "latest")
    if not os.path.isdir(debugdir):
        os.makedirs(debugdir)
    try:
        os.unlink(latestdir)
    except OSError:
        # The "latest" link may not exist on the first run; that is fine.
        pass
def run_tests(parser, options):
    """
    Runs the sequence of KVM tests based on the list of dictionaries
    generated by the configuration system, handling dependencies.

    :param parser: Config parser object.
    :param options: OptParse object with runner options.
    :return: True, if all tests ran passed, False if any of them failed.
    """
    test_start_time = time.strftime('%Y-%m-%d-%H.%M.%S')
    logdir = options.logdir or os.path.join(data_dir.get_root_dir(), 'logs')
    debugdir = os.path.join(logdir, 'run-%s' % test_start_time)
    latestdir = os.path.join(logdir, "latest")
    if not os.path.isdir(debugdir):
        os.makedirs(debugdir)
    try:
        os.unlink(latestdir)
    except OSError:
        # The "latest" link may not exist on the first run; that is fine.
        pass
def cleanup_env(parser, options):
    """
    Clean up virt-test temporary files.

    :param parser: Cartesian parser with run parameters.
    :param options: Test runner options object.
    """
    logging.info("Cleaning virt-test temp files...")
    run_params = parser.get_dicts().next()
    env_filename = os.path.join(data_dir.get_root_dir(), options.type,
                                run_params.get("env", "env"))
    utils_env.Env(filename=env_filename,
                  version=Test.env_version).destroy()
    # Remove leftover temp files from each helper module, in turn.
    aexpect.clean_tmp_files()
    utils_net.clean_tmp_files()
    data_dir.clean_tmp_files()
    qemu_vm.clean_tmp_files()
    logging.info("")
def verify_kernel_crash(self): """ Find kernel crash message on the VM serial console. :raise: VMDeadKernelCrashError, in case a kernel crash message was found. """ panic_re = [r"BUG:.*---\[ end trace .* \]---"] panic_re.append(r"----------\[ cut here.* BUG .*\[ end trace .* \]---") panic_re.append(r"general protection fault:.* RSP.*>") panic_re = "|".join(panic_re) if self.serial_console is not None: data = self.serial_console.get_output() match = re.search(panic_re, data, re.DOTALL | re.MULTILINE | re.I) if match is not None: raise VMDeadKernelCrashError(match.group(0)) #For windows guest if (self.params.get("check_guest_bsod", "no") == 'yes' and ppm_utils.Image is not None): try: scrdump_file = os.path.join("/tmp", "scrdump-img.ppm") ref_img_path = self.params.get("bsod_reference_img", "") bsod_base_dir = os.path.join(data_dir.get_root_dir(), "shared", "deps", "bsod_img") ref_img = utils_misc.get_path(bsod_base_dir, ref_img_path) try: self.screendump(filename=scrdump_file, debug=False) except Exception, err: logging.warn("Cannot catch guest screendump, %s" % err) pass if (os.path.exists(scrdump_file) and ppm_utils.have_similar_img(scrdump_file, ref_img)): err_msg = "Windows Guest appears to have suffered a BSOD," err_msg += " please check test video." raise VMDeadKernelCrashError(err_msg) finally:
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: OptParse object with cmdline options.
    """
    self.params = utils_params.Params(params)
    # Base dirs: virt-test root, generic tests, shared data and the
    # backend-specific build dir (selected by the vm_type parameter).
    self.bindir = data_dir.get_root_dir()
    self.testdir = os.path.join(self.bindir, 'tests')
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.builddir = os.path.join(self.bindir, params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, 'src')
    if not os.path.isdir(self.srcdir):
        os.makedirs(self.srcdir)
    self.tmpdir = os.path.join(self.bindir, 'tmp')
    if not os.path.isdir(self.tmpdir):
        os.makedirs(self.tmpdir)
    self.iteration = 0
    # Derive the display tag for this test from the variant name.
    tag_index = get_tag_index(options, params)
    self.tag = get_tag(params, tag_index)
    # Filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    # Queue used to collect errors raised by background threads.
    self.background_errors = Queue.Queue()
def __init__(self, params, options):
    """
    Initialize the test object.

    :param params: Dictionary of test parameters.
    :param options: OptParse object with cmdline options.
    """
    self.params = utils_params.Params(params)
    self.bindir = data_dir.get_root_dir()
    self.testdir = os.path.join(self.bindir, 'tests')
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.builddir = os.path.join(self.bindir, params.get("vm_type"))
    self.srcdir = os.path.join(self.builddir, 'src')
    self.tmpdir = os.path.join(self.bindir, 'tmp')
    # Create the build source dir and the tmp dir on first use.
    for needed_dir in (self.srcdir, self.tmpdir):
        if not os.path.isdir(needed_dir):
            os.makedirs(needed_dir)
    self.iteration = 0
    # Derive the display tag for this test from the variant name.
    self.tag = get_tag(params, get_tag_index(options, params))
    # Filled in later by the runner.
    self.debugdir = None
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    self.background_errors = Queue.Queue()
rather than in a git working copy), then we fall back on reading the contents of the RELEASE-VERSION file. """ __all__ = ("get_git_version", "get_version", "get_top_commit", "get_current_branch", "get_pretty_version_info") import os import sys import common from autotest.client import utils from autotest.client.shared import error import data_dir _ROOT_PATH = data_dir.get_root_dir() RELEASE_VERSION_PATH = os.path.join(_ROOT_PATH, 'RELEASE-VERSION') global _GIT_VERSION_CACHE, _VERSION_CACHE, _TOP_COMMIT_CACHE global _CURRENT_BRANCH_CACHE, _PRETTY_VERSION_CACHE _GIT_VERSION_CACHE = None _VERSION_CACHE = None _TOP_COMMIT_CACHE = None _CURRENT_BRANCH_CACHE = None _PRETTY_VERSION_CACHE = None def _execute_git_command(command): """ As git is sensitive to the $CWD, change to the top dir to execute git cmds.
:copyright: 2012 Red Hat, Inc. """ __author__ = """Lukas Doktor ([email protected])""" import re import unittest import os import common from autotest.client.shared.test_utils import mock from qemu_devices import qdevices, qbuses, qcontainer from qemu_devices.utils import DeviceHotplugError, DeviceRemoveError import data_dir import qemu_monitor UNITTEST_DATA_DIR = os.path.join(data_dir.get_root_dir(), "virttest", "unittest_data") # Dummy variables # qemu-1.5.0 human monitor help output QEMU_HMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__hmp_help")).read() # qemu-1.5.0 QMP monitor commands output QEMU_QMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__qmp_help")).read() # qemu-1.5.0 -help QEMU_HELP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__help")).read() # qemu-1.5.0 -devices ? QEMU_DEVICES = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__devices_help")).read() # qemu-1.5.0 -M ? QEMU_MACHINE = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__machine_help")).read() class ParamsDict(dict):
def run_tests(parser, options): """ Runs the sequence of KVM tests based on the list of dctionaries generated by the configuration system, handling dependencies. @param parser: Config parser object. @return: True, if all tests ran passed, False if any of them failed. """ debugdir = os.path.join(data_dir.get_root_dir(), 'logs', 'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S')) if not os.path.isdir(debugdir): os.makedirs(debugdir) debuglog = os.path.join(debugdir, "debug.log") configure_file_logging(debuglog) print_stdout(bcolors.HEADER + "DATA DIR: %s" % data_dir.get_backing_data_dir() + bcolors.ENDC) print_header("DEBUG LOG: %s" % debuglog) last_index = -1 logging.info("Starting test job at %s", time.strftime('%Y-%m-%d %H:%M:%S')) logging.info("") logging.debug("Cleaning up previous job tmp files") d = parser.get_dicts().next() env_filename = os.path.join(data_dir.get_root_dir(), options.type, d.get("env", "env")) env = utils_env.Env(env_filename, Test.env_version) env.destroy() try: address_pool_files = glob.glob("/tmp/address_pool*") for address_pool_file in address_pool_files: os.remove(address_pool_file) aexpect_tmp = "/tmp/aexpect_spawn/" if os.path.isdir(aexpect_tmp): shutil.rmtree("/tmp/aexpect_spawn/") except (IOError, OSError): pass logging.debug("") if options.restore_image_between_tests: logging.debug("Creating first backup of guest image") qemu_img = storage.QemuImg(d, data_dir.get_data_dir(), "image") qemu_img.backup_image(d, data_dir.get_data_dir(), 'backup', True) logging.debug("") tag_index = get_tag_index(options, d) for line in get_cartesian_parser_details(parser).splitlines(): logging.info(line) logging.info("Defined test set:") for i, d in enumerate(parser.get_dicts()): shortname = get_tag(d, tag_index) logging.info("Test %4d: %s", i + 1, shortname) last_index += 1 if last_index == -1: print_stdout("No tests generated by config file %s" % parser.filename) print_stdout("Please check the file for errors (bad variable names, " "wrong indentation)") 
sys.exit(-1) logging.info("") n_tests = last_index + 1 n_tests_failed = 0 n_tests_skipped = 0 print_header("TESTS: %s" % n_tests) status_dct = {} failed = False # Add the parameter decide if setup host env in the test case # For some special tests we only setup host in the first and last case # When we need to setup host env we need the host_setup_flag as following: # 0(00): do nothing # 1(01): setup env # 2(10): cleanup env # 3(11): setup and cleanup env index = 0 setup_flag = 1 cleanup_flag = 2 job_start_time = time.time() for dct in parser.get_dicts(): shortname = get_tag(dct, tag_index) if index == 0: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | setup_flag else: dct["host_setup_flag"] = setup_flag if index == last_index: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | cleanup_flag else: dct["host_setup_flag"] = cleanup_flag index += 1 # Add kvm module status dct["kvm_default"] = utils_misc.get_module_params( dct.get("sysfs_dir", "sys"), "kvm") if dct.get("skip") == "yes": continue dependencies_satisfied = True for dep in dct.get("dep"): for test_name in status_dct.keys(): if not dep in test_name: continue if not status_dct[test_name]: dependencies_satisfied = False break current_status = False if dependencies_satisfied: t = Test(dct, options) t.set_debugdir(debugdir) pretty_index = "(%d/%d)" % (index, n_tests) print_stdout("%s %s:" % (pretty_index, t.tag), end=False) try: try: t_begin = time.time() t.start_file_logging() current_status = t.run_once() logging.info("PASS %s", t.tag) logging.info("") t.stop_file_logging() finally: t_end = time.time() t_elapsed = t_end - t_begin except error.TestError, reason: n_tests_failed += 1 logging.info("ERROR %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() print_error(t_elapsed) status_dct[dct.get("name")] = False continue except 
error.TestNAError, reason: n_tests_skipped += 1 logging.info("SKIP %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() print_skip() status_dct[dct.get("name")] = False continue except error.TestWarn, reason: logging.info("WARN %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() print_warn(t_elapsed) status_dct[dct.get("name")] = True continue
def create_guest_os_cfg(t_type):
    """
    Generate <t_type>/cfg/guest-os.cfg from the shared guest-os tree.

    :param t_type: Test type (backend subdirectory name).
    """
    root_dir = data_dir.get_root_dir()
    guest_os_cfg_dir = os.path.join(root_dir, "shared", "cfg", "guest-os")
    guest_os_cfg_path = os.path.join(root_dir, t_type, "cfg",
                                     "guest-os.cfg")
    # Context manager ensures the cfg file is flushed and closed even on
    # error (the original leaked the file handle).
    with open(guest_os_cfg_path, "w") as guest_os_cfg_file:
        get_directory_structure(guest_os_cfg_dir, guest_os_cfg_file)
""" __author__ = """Lukas Doktor ([email protected])""" import re import unittest import os import common from autotest.client.shared.test_utils import mock from qemu_devices import qdevices, qbuses, qcontainer from qemu_devices.utils import DeviceHotplugError, DeviceRemoveError import data_dir import qemu_monitor UNITTEST_DATA_DIR = os.path.join( data_dir.get_root_dir(), "virttest", "unittest_data") # Dummy variables # qemu-1.5.0 human monitor help output QEMU_HMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__hmp_help")).read() # qemu-1.5.0 QMP monitor commands output QEMU_QMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__qmp_help")).read() # qemu-1.5.0 -help QEMU_HELP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__help")).read() # qemu-1.5.0 -devices ? QEMU_DEVICES = open( os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__devices_help")).read() # qemu-1.5.0 -M ? QEMU_MACHINE = open( os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__machine_help")).read()
def run_tests(parser, options): """ Runs the sequence of KVM tests based on the list of dctionaries generated by the configuration system, handling dependencies. @param parser: Config parser object. @return: True, if all tests ran passed, False if any of them failed. """ debugdir = os.path.join(data_dir.get_root_dir(), 'logs', 'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S')) if not os.path.isdir(debugdir): os.makedirs(debugdir) debuglog = os.path.join(debugdir, "debug.log") configure_file_logging(debuglog) print_stdout(bcolors.HEADER + "DATA DIR: %s" % data_dir.get_backing_data_dir() + bcolors.ENDC) print_header("DEBUG LOG: %s" % debuglog) last_index = -1 for i, d in enumerate(parser.get_dicts()): if options.config is None: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) logging.info("Test %4d: %s" % (i + 1, shortname)) last_index += 1 if last_index == -1: print_stdout("No tests generated by config file %s" % parser.filename) print_stdout("Please check the file for errors (bad variable names, " "wrong indentation)") sys.exit(-1) # Clean environment file d = parser.get_dicts().next() env_filename = os.path.join(data_dir.get_root_dir(), options.type, d.get("env", "env")) env = utils_env.Env(env_filename, Test.env_version) env.destroy() n_tests = last_index + 1 print_header("TESTS: %s" % n_tests) status_dct = {} failed = False # Add the parameter decide if setup host env in the test case # For some special tests we only setup host in the first and last case # When we need to setup host env we need the host_setup_flag as following: # 0(00): do nothing # 1(01): setup env # 2(10): cleanup env # 3(11): setup and cleanup env index = 0 setup_flag = 1 cleanup_flag = 2 for dct in parser.get_dicts(): if options.config is None: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) if index == 0: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) 
dct["host_setup_flag"] = flag | setup_flag else: dct["host_setup_flag"] = setup_flag if index == last_index: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | cleanup_flag else: dct["host_setup_flag"] = cleanup_flag index += 1 # Add kvm module status dct["kvm_default"] = utils_misc.get_module_params( dct.get("sysfs_dir", "sys"), "kvm") if dct.get("skip") == "yes": continue dependencies_satisfied = True for dep in dct.get("dep"): for test_name in status_dct.keys(): if not dep in test_name: continue if not status_dct[test_name]: dependencies_satisfied = False break current_status = False if dependencies_satisfied: t = Test(dct, options) t.set_debugdir(debugdir) pretty_index = "(%d/%d)" % (index, n_tests) print_stdout("%s %s:" % (pretty_index, t.tag), end=False) try: try: t_begin = time.time() t.start_file_logging() current_status = t.run_once() logging.info("PASS") t.stop_file_logging() finally: t_end = time.time() t_elapsed = t_end - t_begin except error.TestNAError, reason: logging.info("SKIP -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() print_skip() status_dct[dct.get("name")] = False continue except error.TestWarn, reason: logging.info("WARN -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() print_warn(t_elapsed) status_dct[dct.get("name")] = True continue except Exception, reason: exc_type, exc_value, exc_traceback = sys.exc_info() logging.error("") tb_info = traceback.format_exception(exc_type, exc_value, exc_traceback.tb_next) tb_info = "".join(tb_info) for e_line in tb_info.splitlines(): logging.error(e_line) logging.error("") logging.error("FAIL -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() current_status = False
def run_tests(parser, options): """ Runs the sequence of KVM tests based on the list of dctionaries generated by the configuration system, handling dependencies. @param parser: Config parser object. @return: True, if all tests ran passed, False if any of them failed. """ debugdir = os.path.join(data_dir.get_root_dir(), 'logs', 'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S')) if not os.path.isdir(debugdir): os.makedirs(debugdir) debuglog = os.path.join(debugdir, "debug.log") configure_file_logging(debuglog) print_stdout(bcolors.HEADER + "DATA DIR: %s" % data_dir.get_backing_data_dir() + bcolors.ENDC) print_header("DEBUG LOG: %s" % debuglog) last_index = -1 for i, d in enumerate(parser.get_dicts()): if options.config is None: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) logging.info("Test %4d: %s" % (i + 1, shortname)) last_index += 1 if last_index == -1: print_stdout("No tests generated by config file %s" % parser.filename) print_stdout("Please check the file for errors (bad variable names, " "wrong indentation)") sys.exit(-1) # Clean environment file d = parser.get_dicts().next() env_filename = os.path.join(data_dir.get_root_dir(), options.type, d.get("env", "env")) env = utils_env.Env(env_filename, Test.env_version) env.destroy() n_tests = last_index + 1 print_header("TESTS: %s" % n_tests) status_dct = {} failed = False # Add the parameter decide if setup host env in the test case # For some special tests we only setup host in the first and last case # When we need to setup host env we need the host_setup_flag as following: # 0(00): do nothing # 1(01): setup env # 2(10): cleanup env # 3(11): setup and cleanup env index = 0 setup_flag = 1 cleanup_flag = 2 for dct in parser.get_dicts(): if options.config is None: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) if index == 0: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) 
dct["host_setup_flag"] = flag | setup_flag else: dct["host_setup_flag"] = setup_flag if index == last_index: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | cleanup_flag else: dct["host_setup_flag"] = cleanup_flag index += 1 # Add kvm module status dct["kvm_default"] = utils_misc.get_module_params( dct.get("sysfs_dir", "sys"), "kvm") if dct.get("skip") == "yes": continue dependencies_satisfied = True for dep in dct.get("dep"): for test_name in status_dct.keys(): if not dep in test_name: continue if not status_dct[test_name]: dependencies_satisfied = False break current_status = False if dependencies_satisfied: t = Test(dct, options) t.set_debugdir(debugdir) print_stdout("%s:" % t.tag, end=False) try: try: t_begin = time.time() t.start_file_logging() current_status = t.run_once() logging.info("PASS") t.stop_file_logging() finally: t_end = time.time() t_elapsed = t_end - t_begin except error.TestNAError, reason: logging.info("SKIP -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() print_skip() status_dct[dct.get("name")] = False continue except error.TestWarn, reason: logging.info("WARN -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() print_warn(t_elapsed) status_dct[dct.get("name")] = True continue except Exception, reason: exc_type, exc_value, exc_traceback = sys.exc_info() logging.error("") tb_info = traceback.format_exception(exc_type, exc_value, exc_traceback.tb_next) tb_info = "".join(tb_info) for e_line in tb_info.splitlines(): logging.error(e_line) logging.error("") logging.error("FAIL -> %s: %s", reason.__class__.__name__, reason) t.stop_file_logging() current_status = False
def create_subtests_cfg(t_type):
    """
    Generate <root>/<t_type>/cfg/subtests.cfg from the test directories.

    Collects backend-specific and shared test modules (*.py) and config
    files (*.cfg), classifies config files into first/middle/last groups
    (driven by the module level first_subtest/last_subtest maps),
    auto-generates config snippets for "drop-in" tests (test modules with
    no cfg referencing them), and writes everything out in order.

    :param t_type: Backend test type (directory name under the root dir,
                   e.g. 'qemu').
    """
    root_dir = data_dir.get_root_dir()
    specific_test = os.path.join(root_dir, t_type, 'tests')
    specific_test_list = glob.glob(os.path.join(specific_test, '*.py'))
    shared_test = os.path.join(root_dir, 'tests')
    shared_test_list = glob.glob(os.path.join(shared_test, '*.py'))

    # Test names are the module basenames without the .py extension.
    all_specific_test_list = []
    for test in specific_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append(basename.split(".")[0])
    all_shared_test_list = []
    for test in shared_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append(basename.split(".")[0])

    all_specific_test_list.sort()
    all_shared_test_list.sort()
    all_test_list = set(all_specific_test_list + all_shared_test_list)

    specific_test_cfg = os.path.join(root_dir, t_type, 'tests', 'cfg')
    shared_test_cfg = os.path.join(root_dir, 'tests', 'cfg')
    shared_file_list = glob.glob(os.path.join(shared_test_cfg, "*.cfg"))
    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []
    for shared_file in shared_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if not line.startswith("#"):
                    # Every test type mentioned by some cfg file already has
                    # a config, hence is not a drop-in test.
                    try:
                        (key, value) = line.split("=")
                        if key.strip() == 'type':
                            value = value.strip()
                            value = value.split(" ")
                            for v in value:
                                if v not in non_dropin_tests:
                                    non_dropin_tests.append(v)
                    except ValueError:
                        # Not a simple 'key = value' line; best-effort skip.
                        # (Was a bare except; narrowed to the unpack error.)
                        pass
        finally:
            # BUG FIX: the file object was never closed (handle leak).
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            # BUG FIX: dedup used to test the bare name against a list of
            # full paths (always False); test the value actually stored.
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    shared_file_list = tmp
    shared_file_list.sort()

    specific_file_list = glob.glob(os.path.join(specific_test_cfg, "*.cfg"))
    tmp = []
    for shared_file in specific_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if not line.startswith("#"):
                    try:
                        (key, value) = line.split("=")
                        if key.strip() == 'type':
                            value = value.strip()
                            value = value.split(" ")
                            for v in value:
                                if v not in non_dropin_tests:
                                    non_dropin_tests.append(v)
                    except ValueError:
                        pass
        finally:
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    specific_file_list = tmp
    specific_file_list.sort()

    non_dropin_tests.sort()
    non_dropin_tests = set(non_dropin_tests)
    # Drop-in tests: modules on disk that no cfg file ever references.
    dropin_tests = all_test_list - non_dropin_tests
    dropin_file_list = []
    tmp_dir = data_dir.get_tmp_dir()
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    for dropin_test in dropin_tests:
        autogen_cfg_path = os.path.join(tmp_dir, '%s.cfg' % dropin_test)
        autogen_cfg_file = open(autogen_cfg_path, 'w')
        try:
            autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
            autogen_cfg_file.write("- %s:\n" % dropin_test)
            autogen_cfg_file.write(" virt_test_type = %s\n" % t_type)
            autogen_cfg_file.write(" type = %s\n" % dropin_test)
        finally:
            autogen_cfg_file.close()
        dropin_file_list.append(autogen_cfg_path)

    subtests_cfg = os.path.join(root_dir, t_type, 'cfg', 'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    try:
        subtests_file.write("# Do not edit, auto generated file from subtests config\n")
        subtests_file.write("variants:\n")
        # Order matters: first group, backend-specific, shared, drop-ins,
        # and finally the last group.
        write_subtests_files(first_subtest_file, subtests_file)
        write_subtests_files(specific_file_list, subtests_file, t_type)
        write_subtests_files(shared_file_list, subtests_file)
        write_subtests_files(dropin_file_list, subtests_file)
        write_subtests_files(last_subtest_file, subtests_file)
    finally:
        # BUG FIX: ensure the output file is closed even on error.
        subtests_file.close()
def create_subtests_cfg(t_type):
    """
    Generate <root>/<t_type>/cfg/subtests.cfg from the test directories.

    Collects backend-specific and shared test modules (*.py) and config
    files (*.cfg) via data_dir.SubdirGlobList, classifies config files
    into first/middle/last groups (driven by the module level
    first_subtest/last_subtest maps), auto-generates config snippets for
    "drop-in" tests (test modules with no cfg referencing them), and
    writes everything out in order. 'lvsb' backends skip the shared VM
    tests entirely.

    :param t_type: Backend test type (directory name under the root dir,
                   e.g. 'qemu', 'lvsb').
    """
    root_dir = data_dir.get_root_dir()
    specific_test = os.path.join(root_dir, t_type, 'tests')
    specific_test_list = data_dir.SubdirGlobList(specific_test,
                                                 '*.py',
                                                 test_filter)
    shared_test = os.path.join(root_dir, 'tests')
    if t_type == 'lvsb':
        shared_test_list = []
    else:
        shared_test_list = data_dir.SubdirGlobList(shared_test,
                                                   '*.py',
                                                   test_filter)

    # Test names are the module basenames without the .py extension.
    all_specific_test_list = []
    for test in specific_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append(basename.split(".")[0])
    all_shared_test_list = []
    for test in shared_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append(basename.split(".")[0])

    all_specific_test_list.sort()
    all_shared_test_list.sort()
    all_test_list = set(all_specific_test_list + all_shared_test_list)

    specific_test_cfg = os.path.join(root_dir, t_type, 'tests', 'cfg')
    shared_test_cfg = os.path.join(root_dir, 'tests', 'cfg')
    # lvsb tests can't use VM shared tests
    if t_type == 'lvsb':
        shared_file_list = []
    else:
        shared_file_list = data_dir.SubdirGlobList(shared_test_cfg,
                                                   "*.cfg",
                                                   config_filter)
    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []
    for shared_file in shared_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        # BUG FIX: membership test used to check t_type
                        # (never in the list), so values were appended
                        # unconditionally; check the value itself, as the
                        # specific-file loop below already does.
                        if value not in non_dropin_tests:
                            non_dropin_tests.append(value)
        finally:
            # BUG FIX: the file object was never closed (handle leak).
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            # BUG FIX: dedup used to test the bare name against a list of
            # full paths (always False); test the value actually stored.
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    shared_file_list = tmp
    shared_file_list.sort()

    specific_file_list = data_dir.SubdirGlobList(specific_test_cfg,
                                                 "*.cfg",
                                                 config_filter)
    tmp = []
    for shared_file in specific_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        if value not in non_dropin_tests:
                            non_dropin_tests.append(value)
        finally:
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    specific_file_list = tmp
    specific_file_list.sort()

    non_dropin_tests.sort()
    non_dropin_tests = set(non_dropin_tests)
    # Drop-in tests: modules on disk that no cfg file ever references.
    dropin_tests = all_test_list - non_dropin_tests
    dropin_file_list = []
    tmp_dir = data_dir.get_tmp_dir()
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    for dropin_test in dropin_tests:
        autogen_cfg_path = os.path.join(tmp_dir, '%s.cfg' % dropin_test)
        autogen_cfg_file = open(autogen_cfg_path, 'w')
        try:
            autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
            autogen_cfg_file.write("- %s:\n" % dropin_test)
            autogen_cfg_file.write(" virt_test_type = %s\n" % t_type)
            autogen_cfg_file.write(" type = %s\n" % dropin_test)
        finally:
            autogen_cfg_file.close()
        dropin_file_list.append(autogen_cfg_path)

    subtests_cfg = os.path.join(root_dir, t_type, 'cfg', 'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    try:
        subtests_file.write(
            "# Do not edit, auto generated file from subtests config\n")
        subtests_file.write("variants:\n")
        # Order matters: first group, backend-specific, shared, drop-ins,
        # and finally the last group.
        write_subtests_files(first_subtest_file, subtests_file)
        write_subtests_files(specific_file_list, subtests_file, t_type)
        write_subtests_files(shared_file_list, subtests_file)
        write_subtests_files(dropin_file_list, subtests_file)
        write_subtests_files(last_subtest_file, subtests_file)
    finally:
        # BUG FIX: ensure the output file is closed even on error.
        subtests_file.close()
:author: Lukas Doktor <*****@*****.**> :copyright: 2012 Red Hat, Inc. """ __author__ = """Lukas Doktor ([email protected])""" import re import unittest import os import common from autotest.client.shared.test_utils import mock import qemu_devices import data_dir import qemu_monitor UNITTEST_DATA_DIR = os.path.join(data_dir.get_root_dir(), "virttest", "unittest_data") # Dummy variables # qemu-1.5.0 human monitor help output QEMU_HMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__hmp_help")).read() # qemu-1.5.0 QMP monitor commands output QEMU_QMP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__qmp_help")).read() # qemu-1.5.0 -help QEMU_HELP = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__help")).read() # qemu-1.5.0 -devices ? QEMU_DEVICES = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__devices_help")).read() # qemu-1.5.0 -M ? QEMU_MACHINE = open(os.path.join(UNITTEST_DATA_DIR, "qemu-1.5.0__machine_help")).read()
def run_tests(parser, options): """ Runs the sequence of KVM tests based on the list of dctionaries generated by the configuration system, handling dependencies. @param parser: Config parser object. @return: True, if all tests ran passed, False if any of them failed. """ debugdir = os.path.join(data_dir.get_root_dir(), 'logs', 'run-%s' % time.strftime('%Y-%m-%d-%H.%M.%S')) if not os.path.isdir(debugdir): os.makedirs(debugdir) debuglog = os.path.join(debugdir, "debug.log") configure_file_logging(debuglog) print_stdout(bcolors.HEADER + "DATA DIR: %s" % data_dir.get_backing_data_dir() + bcolors.ENDC) print_header("DEBUG LOG: %s" % debuglog) last_index = -1 logging.info("Starting test job at %s" % time.strftime('%Y-%m-%d %H:%M:%S')) logging.info("") logging.debug("Options received from the command line:") utils_misc.display_attributes(options) logging.debug("") logging.debug("Cleaning up previous job tmp files") d = parser.get_dicts().next() env_filename = os.path.join(data_dir.get_root_dir(), options.type, d.get("env", "env")) env = utils_env.Env(env_filename, Test.env_version) env.destroy() try: address_pool_files = glob.glob("/tmp/address_pool*") for address_pool_file in address_pool_files: os.remove(address_pool_file) aexpect_tmp = "/tmp/aexpect_spawn/" if os.path.isdir(aexpect_tmp): shutil.rmtree("/tmp/aexpect_spawn/") except (IOError, OSError): pass logging.debug("") if options.restore_image_between_tests: logging.debug("Creating first backup of guest image") qemu_img = storage.QemuImg(d, data_dir.get_data_dir(), "image") qemu_img.backup_image(d, data_dir.get_data_dir(), 'backup', True) logging.debug("") if options.type == 'qemu': logging.info("We're running the qemu test with:") logging.info("qemu binary: %s" % d.get('qemu_binary')) logging.info("qemu img binary: %s" % d.get('qemu_img_binary')) logging.info("qemu io binary: %s" % d.get('qemu_io_binary')) logging.info("") logging.info("Defined test set:") for i, d in enumerate(parser.get_dicts()): if 
options.config is None and options.type in TEST_TYPES_STRIP_NAMES: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) logging.info("Test %4d: %s" % (i + 1, shortname)) last_index += 1 if last_index == -1: print_stdout("No tests generated by config file %s" % parser.filename) print_stdout("Please check the file for errors (bad variable names, " "wrong indentation)") sys.exit(-1) logging.info("") n_tests = last_index + 1 print_header("TESTS: %s" % n_tests) status_dct = {} failed = False # Add the parameter decide if setup host env in the test case # For some special tests we only setup host in the first and last case # When we need to setup host env we need the host_setup_flag as following: # 0(00): do nothing # 1(01): setup env # 2(10): cleanup env # 3(11): setup and cleanup env index = 0 setup_flag = 1 cleanup_flag = 2 for dct in parser.get_dicts(): if options.config is None and options.type in TEST_TYPES_STRIP_NAMES: shortname = ".".join(d['name'].split(".")[12:]) else: shortname = ".".join(d['shortname'].split(".")) if index == 0: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | setup_flag else: dct["host_setup_flag"] = setup_flag if index == last_index: if dct.get("host_setup_flag", None) is not None: flag = int(dct["host_setup_flag"]) dct["host_setup_flag"] = flag | cleanup_flag else: dct["host_setup_flag"] = cleanup_flag index += 1 # Add kvm module status dct["kvm_default"] = utils_misc.get_module_params( dct.get("sysfs_dir", "sys"), "kvm") if dct.get("skip") == "yes": continue dependencies_satisfied = True for dep in dct.get("dep"): for test_name in status_dct.keys(): if not dep in test_name: continue if not status_dct[test_name]: dependencies_satisfied = False break current_status = False if dependencies_satisfied: t = Test(dct, options) t.set_debugdir(debugdir) pretty_index = "(%d/%d)" % (index, n_tests) print_stdout("%s %s:" % 
(pretty_index, t.tag), end=False) try: try: t_begin = time.time() t.start_file_logging() current_status = t.run_once() logging.info("PASS %s" % t.tag) logging.info("") t.stop_file_logging() finally: t_end = time.time() t_elapsed = t_end - t_begin except error.TestNAError, reason: logging.info("SKIP %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() print_skip() status_dct[dct.get("name")] = False continue except error.TestWarn, reason: logging.info("WARN %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() print_warn(t_elapsed) status_dct[dct.get("name")] = True continue except Exception, reason: exc_type, exc_value, exc_traceback = sys.exc_info() logging.error("") tb_info = traceback.format_exception(exc_type, exc_value, exc_traceback.tb_next) tb_info = "".join(tb_info) for e_line in tb_info.splitlines(): logging.error(e_line) logging.error("") logging.error("FAIL %s -> %s: %s", t.tag, reason.__class__.__name__, reason) logging.info("") t.stop_file_logging() current_status = False
def create_guest_os_cfg(t_type):
    """
    Generate the guest-os.cfg file for a given test backend.

    Walks the shared guest-os cfg directory structure and writes the
    resulting config to the backend's guest-os.cfg path.

    :param t_type: Backend test type passed to
                   data_dir.get_backend_cfg_path (e.g. 'qemu').
    """
    root_dir = data_dir.get_root_dir()
    guest_os_cfg_dir = os.path.join(root_dir, 'shared', 'cfg', 'guest-os')
    guest_os_cfg_path = data_dir.get_backend_cfg_path(t_type, 'guest-os.cfg')
    guest_os_cfg_file = open(guest_os_cfg_path, 'w')
    try:
        get_directory_structure(guest_os_cfg_dir, guest_os_cfg_file)
    finally:
        # BUG FIX: the output file was never closed (handle leak, and the
        # last buffered writes could be delayed).
        guest_os_cfg_file.close()
def create_subtests_cfg(t_type):
    """
    Generate <root>/backends/<t_type>/cfg/subtests.cfg from test providers.

    Collects backend-specific and generic test modules (*.py) and config
    files (*.cfg) from every registered test provider, qualifies each test
    name as '<provider>.<name>', classifies config files into
    first/middle/last groups (driven by the module level
    first_subtest/last_subtest maps), auto-generates config snippets for
    "drop-in" tests (modules with no cfg referencing them) plus the
    contents of the top-level 'dropin' directory, and writes everything
    out in order. 'lvsb' backends skip the generic tests entirely.

    :param t_type: Backend test type (e.g. 'qemu', 'lvsb'), used to look
                   up test providers and the output cfg path.
    """
    root_dir = data_dir.get_root_dir()

    specific_test_list = []
    specific_file_list = []
    specific_subdirs = asset.get_test_provider_subdirs(t_type)
    provider_names_specific = asset.get_test_provider_names(t_type)
    provider_info_specific = []
    for specific_provider in provider_names_specific:
        provider_info_specific.append(
            asset.get_test_provider_info(specific_provider))

    for subdir in specific_subdirs:
        specific_test_list += data_dir.SubdirGlobList(subdir,
                                                      '*.py',
                                                      test_filter)
        specific_file_list += data_dir.SubdirGlobList(subdir,
                                                      '*.cfg',
                                                      config_filter)

    shared_test_list = []
    shared_file_list = []
    shared_subdirs = asset.get_test_provider_subdirs('generic')
    provider_names_shared = asset.get_test_provider_names('generic')
    provider_info_shared = []
    for shared_provider in provider_names_shared:
        provider_info_shared.append(
            asset.get_test_provider_info(shared_provider))

    # lvsb backends can't use the generic VM tests.
    if not t_type == 'lvsb':
        for subdir in shared_subdirs:
            shared_test_list += data_dir.SubdirGlobList(subdir,
                                                        '*.py',
                                                        test_filter)
            shared_file_list += data_dir.SubdirGlobList(subdir,
                                                        '*.cfg',
                                                        config_filter)

    # Qualify each test module name with the provider it came from
    # ('<provider>.<module>'), matching on the provider's base path.
    all_specific_test_list = []
    for test in specific_test_list:
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in test:
                provider_name = p['name']
                break
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append("%s.%s" %
                                          (provider_name,
                                           basename.split(".")[0]))

    all_shared_test_list = []
    for test in shared_test_list:
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in test:
                provider_name = p['name']
                break
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append("%s.%s" %
                                        (provider_name,
                                         basename.split(".")[0]))

    all_specific_test_list.sort()
    all_shared_test_list.sort()
    all_test_list = set(all_specific_test_list + all_shared_test_list)

    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []
    for shared_file in shared_file_list:
        provider_name = None
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        # BUG FIX: membership test used to check t_type
                        # (never in the list), so the qualified name was
                        # appended unconditionally; check the composed
                        # name that is actually stored.
                        dropin_name = "%s.%s" % (provider_name, value)
                        if dropin_name not in non_dropin_tests:
                            non_dropin_tests.append(dropin_name)
        finally:
            # BUG FIX: the file object was never closed (handle leak).
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    shared_file_list = tmp

    tmp = []
    for shared_file in specific_file_list:
        provider_name = None
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        # BUG FIX: used to test the bare value against a
                        # list of qualified names (never matched); compose
                        # the name first, then test it.
                        dropin_name = "%s.%s" % (provider_name, value)
                        if dropin_name not in non_dropin_tests:
                            non_dropin_tests.append(dropin_name)
        finally:
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    specific_file_list = tmp

    non_dropin_tests.sort()
    non_dropin_tests = set(non_dropin_tests)
    # Drop-in tests: modules on disk that no cfg file ever references.
    dropin_tests = all_test_list - non_dropin_tests
    dropin_file_list = []
    tmp_dir = data_dir.get_tmp_dir()
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    for dropin_test in dropin_tests:
        provider = dropin_test.split(".")[0]
        d_type = dropin_test.split(".")[-1]
        autogen_cfg_path = os.path.join(tmp_dir, '%s.cfg' % dropin_test)
        autogen_cfg_file = open(autogen_cfg_path, 'w')
        try:
            autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
            autogen_cfg_file.write("- %s:\n" % dropin_test)
            autogen_cfg_file.write(" virt_test_type = %s\n" % t_type)
            autogen_cfg_file.write(" type = %s\n" % d_type)
        finally:
            autogen_cfg_file.close()
        dropin_file_list.append([provider, autogen_cfg_path])

    # Second drop-in source: the contents of the top level 'dropin' dir.
    dropin_file_list_2 = []
    dropin_tests = os.listdir(os.path.join(data_dir.get_root_dir(), "dropin"))
    dropin_cfg_path = os.path.join(tmp_dir, 'dropin.cfg')
    dropin_cfg_file = open(dropin_cfg_path, 'w')
    try:
        dropin_cfg_file.write("# Auto generated snippet for dropin tests\n")
        dropin_cfg_file.write("- dropin:\n")
        dropin_cfg_file.write(" variants:\n")
        for dropin_test in dropin_tests:
            if dropin_test == "README":
                continue
            dropin_cfg_file.write(" - %s:\n" % dropin_test)
            dropin_cfg_file.write(" virt_test_type = %s\n" % t_type)
            dropin_cfg_file.write(" type = dropin\n")
            dropin_cfg_file.write(" start_vm = no\n")
            dropin_cfg_file.write(" dropin_path = %s\n" % dropin_test)
    finally:
        dropin_cfg_file.close()
    dropin_file_list_2.append(['io-github-autotest-qemu', dropin_cfg_path])

    subtests_cfg = os.path.join(root_dir, 'backends', t_type, 'cfg',
                                'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    try:
        subtests_file.write(
            "# Do not edit, auto generated file from subtests config\n")
        subtests_file.write("variants subtest:\n")
        # Order matters: first group, backend-specific, generic, drop-ins,
        # dropin-dir entries, and finally the last group.
        write_subtests_files(first_subtest_file, subtests_file)
        write_subtests_files(specific_file_list, subtests_file, t_type)
        write_subtests_files(shared_file_list, subtests_file)
        write_subtests_files(dropin_file_list, subtests_file)
        write_subtests_files(dropin_file_list_2, subtests_file)
        write_subtests_files(last_subtest_file, subtests_file)
    finally:
        # BUG FIX: ensure the output file is closed even on error.
        subtests_file.close()
"DATA DIR: %s" % data_dir.get_backing_data_dir() + bcolors.ENDC) print_header("DEBUG LOG: %s" % debuglog) last_index = -1 logging.info("Starting test job at %s", test_start_time) logging.info("") logging.info(version.get_pretty_version_info()) logging.info("") logging.debug("Cleaning up previous job tmp files") d = parser.get_dicts().next() env_filename = os.path.join(data_dir.get_root_dir(), options.type, d.get("env", "env")) env = utils_env.Env(env_filename, Test.env_version) env.destroy() try: address_pool_files = glob.glob("/tmp/address_pool*") for address_pool_file in address_pool_files: os.remove(address_pool_file) aexpect_tmp = "/tmp/aexpect_spawn/" if os.path.isdir(aexpect_tmp): shutil.rmtree("/tmp/aexpect_spawn/") except (IOError, OSError): pass logging.debug("") if options.restore_image_between_tests:
def create_subtests_cfg(t_type):
    """
    Generate <root>/<t_type>/cfg/subtests.cfg from the test directories.

    Collects backend-specific and shared test modules (*.py) and config
    files (*.cfg) via data_dir.SubdirGlobList, classifies config files
    into first/middle/last groups (driven by the module level
    first_subtest/last_subtest maps), auto-generates config snippets for
    "drop-in" tests (test modules with no cfg referencing them), and
    writes everything out in order.

    :param t_type: Backend test type (directory name under the root dir,
                   e.g. 'qemu').
    """
    root_dir = data_dir.get_root_dir()
    specific_test = os.path.join(root_dir, t_type, 'tests')
    specific_test_list = data_dir.SubdirGlobList(specific_test,
                                                 '*.py',
                                                 test_filter)
    shared_test = os.path.join(root_dir, 'tests')
    shared_test_list = data_dir.SubdirGlobList(shared_test,
                                               '*.py',
                                               test_filter)

    # Test names are the module basenames without the .py extension.
    all_specific_test_list = []
    for test in specific_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append(basename.split(".")[0])
    all_shared_test_list = []
    for test in shared_test_list:
        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append(basename.split(".")[0])

    all_specific_test_list.sort()
    all_shared_test_list.sort()
    all_test_list = set(all_specific_test_list + all_shared_test_list)

    specific_test_cfg = os.path.join(root_dir, t_type, 'tests', 'cfg')
    shared_test_cfg = os.path.join(root_dir, 'tests', 'cfg')
    shared_file_list = data_dir.SubdirGlobList(shared_test_cfg,
                                               "*.cfg",
                                               config_filter)
    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []
    for shared_file in shared_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        # BUG FIX: membership test used to check t_type
                        # (never in the list), so values were appended
                        # unconditionally; check the value itself, as the
                        # specific-file loop below already does.
                        if value not in non_dropin_tests:
                            non_dropin_tests.append(value)
        finally:
            # BUG FIX: the file object was never closed (handle leak).
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            # BUG FIX: dedup used to test the bare name against a list of
            # full paths (always False); test the value actually stored.
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    shared_file_list = tmp
    shared_file_list.sort()

    specific_file_list = data_dir.SubdirGlobList(specific_test_cfg,
                                                 "*.cfg",
                                                 config_filter)
    tmp = []
    for shared_file in specific_file_list:
        shared_file_obj = open(shared_file, 'r')
        try:
            for line in shared_file_obj.readlines():
                line = line.strip()
                if line.startswith("type"):
                    cartesian_parser = cartesian_config.Parser()
                    cartesian_parser.parse_string(line)
                    td = cartesian_parser.get_dicts().next()
                    values = td['type'].split(" ")
                    for value in values:
                        if value not in non_dropin_tests:
                            non_dropin_tests.append(value)
        finally:
            shared_file_obj.close()
        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if shared_file not in first_subtest_file:
                first_subtest_file.append(shared_file)
        elif shared_file_name in last_subtest[t_type]:
            if shared_file not in last_subtest_file:
                last_subtest_file.append(shared_file)
        else:
            if shared_file not in tmp:
                tmp.append(shared_file)
    specific_file_list = tmp
    specific_file_list.sort()

    non_dropin_tests.sort()
    non_dropin_tests = set(non_dropin_tests)
    # Drop-in tests: modules on disk that no cfg file ever references.
    dropin_tests = all_test_list - non_dropin_tests
    dropin_file_list = []
    tmp_dir = data_dir.get_tmp_dir()
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
    for dropin_test in dropin_tests:
        autogen_cfg_path = os.path.join(tmp_dir, '%s.cfg' % dropin_test)
        autogen_cfg_file = open(autogen_cfg_path, 'w')
        try:
            autogen_cfg_file.write("# Drop-in test - auto generated snippet\n")
            autogen_cfg_file.write("- %s:\n" % dropin_test)
            autogen_cfg_file.write(" virt_test_type = %s\n" % t_type)
            autogen_cfg_file.write(" type = %s\n" % dropin_test)
        finally:
            autogen_cfg_file.close()
        dropin_file_list.append(autogen_cfg_path)

    subtests_cfg = os.path.join(root_dir, t_type, 'cfg', 'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    try:
        subtests_file.write(
            "# Do not edit, auto generated file from subtests config\n")
        subtests_file.write("variants:\n")
        # Order matters: first group, backend-specific, shared, drop-ins,
        # and finally the last group.
        write_subtests_files(first_subtest_file, subtests_file)
        write_subtests_files(specific_file_list, subtests_file, t_type)
        write_subtests_files(shared_file_list, subtests_file)
        write_subtests_files(dropin_file_list, subtests_file)
        write_subtests_files(last_subtest_file, subtests_file)
    finally:
        # BUG FIX: ensure the output file is closed even on error.
        subtests_file.close()