def setUp(self):
    '''
    Build ebizzy
    Source: http://liquidtelecom.dl.sourceforge.net/project/ebizzy/ebizzy/0.3/ebizzy-0.3.tar.gz
    '''
    sm = SoftwareManager()
    if not sm.check_installed("gcc") and not sm.install("gcc"):
        self.error("Gcc is needed for the test to be run")
    tarball = self.fetch_asset('http://liquidtelecom.dl.sourceforge.net'
                               '/project/ebizzy/ebizzy/0.3'
                               '/ebizzy-0.3.tar.gz')
    data_dir = os.path.abspath(self.datadir)
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.srcdir = os.path.join(self.srcdir, version)
    patch = self.params.get(
        'patch', default='Fix-build-issues-with-ebizzy.patch')
    os.chdir(self.srcdir)
    p1 = 'patch -p0 < %s/%s' % (data_dir, patch)
    process.run(p1, shell=True)
    process.run('[ -x configure ] && ./configure', shell=True)
    build.make(self.srcdir)
def __init__(self, brname=None):
    self.brname = brname
    self.interfaces = []
    process.run("modprobe veth", shell=True)
    if self.brname and self.checkifbridgeexist():
        print("adding bridge " + self.brname)
        self.createbridge()
def mk_cgroup_cgcreate(self, pwd=None, cgroup=None):
    """
    Make a cgroup by executing the cgcreate command

    :params: cgroup: name of the cgroup to be created
    :return: last cgroup index
    """
    try:
        parent_cgroup = self.get_cgroup_name(pwd)
        if cgroup is None:
            # Renamed from 'range' to avoid shadowing the builtin
            charset = "abcdefghijklmnopqrstuvwxyz0123456789"
            sub_cgroup = "cgroup-" + "".join(
                random.sample(charset + charset.upper(), 6))
        else:
            sub_cgroup = cgroup
        if parent_cgroup is None:
            cgroup = sub_cgroup
        else:
            # Parent cgroup:test. Created cgroup:test1.
            # Whole cgroup name is "test/test1"
            cgroup = os.path.join(parent_cgroup, sub_cgroup)
        if self.__get_cgroup_pwd(cgroup) in self.cgroups:
            raise exceptions.TestFail("%s exists!" % cgroup)
        cgcreate_cmd = "cgcreate -g %s:%s" % (self.module, cgroup)
        process.run(cgcreate_cmd, ignore_status=False)
        pwd = self.__get_cgroup_pwd(cgroup)
        self.cgroups.append(pwd)
        return len(self.cgroups) - 1
    except process.CmdError:
        raise exceptions.TestFail("Make cgroup by cgcreate failed!")
def cleanup(self, force_stop=False):
    error_context.context("Cleaning up test NFS share", logging.info)
    process.run("umount -l -f %s" % self.mnt_dir, shell=True)
    process.run("exportfs -u %s:%s" % (self.nfs_ip, self.nfs_dir),
                shell=True)
    if force_stop:
        self.stop_service()
def cgset_property(self, prop, value, pwd=None, check=True, checkprop=None):
    """
    Sets the property value by cgset command

    :param prop: property name (file)
    :param value: desired value
    :param pwd: cgroup directory
    :param check: check the value after setup / override checking value
    :param checkprop: override prop when checking the value
    """
    if pwd is None:
        pwd = self.root
    if isinstance(pwd, int):
        pwd = self.cgroups[pwd]
    try:
        cgroup = self.get_cgroup_name(pwd)
        cgset_cmd = "cgset -r %s='%s' %s" % (prop, value, cgroup)
        process.run(cgset_cmd, ignore_status=False)
    except process.CmdError as detail:
        raise exceptions.TestFail(
            "Modify %s failed!:\n%s" % (prop, detail))
    if check is not False:
        if check is True:
            check = value
        if checkprop is None:
            checkprop = prop
        _values = self.get_property(checkprop,
                                    self.get_cgroup_index(cgroup))
        # Sanitize non printable characters before check
        check = " ".join(check.split())
        if check not in _values:
            raise exceptions.TestError("cg.set_property(): Setting failed: "
                                       "desired = %s, real values = %s"
                                       % (repr(check), repr(_values)))
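Taken together, the two cgroup helpers above suggest a driver pattern like the following sketch; the wrapper object `cg` and the `cpu.shares` property are illustrative assumptions, not part of the snippets.

# Hypothetical usage of the helpers above; 'cg' stands in for whatever
# object carries mk_cgroup_cgcreate() and cgset_property().
index = cg.mk_cgroup_cgcreate(cgroup="test_child")  # returns the new cgroup's index
# cgset_property() accepts the returned index (or a directory path) as pwd:
cg.cgset_property("cpu.shares", "512", pwd=index)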
def setUp(self):
    self.tlbflush_max_entries = self.params.get('entries', default=200)
    self.tlbflush_iteration = self.params.get('iterations', default=50)
    self.nr_threads = self.params.get('nr_threads', default=50)

    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.error("Fail to install gcc required for this test.")
    data_dir = os.path.abspath(self.datadir)
    shutil.copyfile(os.path.join(data_dir, 'tlbflush.c'),
                    os.path.join(self.srcdir, 'tlbflush.c'))
    os.chdir(self.srcdir)
    tlbflush_patch = 'patch -p1 < %s' % (
        os.path.join(data_dir, 'tlbflush.patch'))
    process.run(tlbflush_patch, shell=True)
    cmd = ('gcc -DFILE_SIZE=$((128*1048576)) -g -O2 tlbflush.c '
           '-lpthread -o tlbflush')
    process.run(cmd, shell=True)
def clone_image(params, vm_name, image_name, root_dir):
    """
    Clone master image to vm specific file.

    :param params: Dictionary containing the test parameters.
    :param vm_name: Vm name.
    :param image_name: Master image name.
    :param root_dir: Base directory for relative filenames.
    """
    if not params.get("image_name_%s_%s" % (image_name, vm_name)):
        m_image_name = params.get("image_name", "image")
        vm_image_name = "%s_%s" % (m_image_name, vm_name)
        if params.get("clone_master", "yes") == "yes":
            image_params = params.object_params(image_name)
            image_params["image_name"] = vm_image_name
            m_image_fn = get_image_filename(params, root_dir)
            image_fn = get_image_filename(image_params, root_dir)
            force_clone = params.get("force_image_clone", "no")
            if not os.path.exists(image_fn) or force_clone == "yes":
                logging.info("Clone master image for vms.")
                process.run(params.get("image_clone_command")
                            % (m_image_fn, image_fn))
        params["image_name_%s_%s" % (image_name, vm_name)] = vm_image_name
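The snippet delegates the actual copy to an `image_clone_command` template taken from params; a plausible template and its effect are sketched below (the command string is an assumption, not taken from the snippet).

# Assumed example value, for illustration only.
params["image_clone_command"] = "cp -f %s %s"  # master image -> per-vm copy
# With image_name "image" and vm_name "vm1", clone_image() would then run
# "cp -f <root_dir>/image.<fmt> <root_dir>/image_vm1.<fmt>" and record
# params["image_name_image_vm1"] = "image_vm1" so later lookups skip the clone.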
def test10SosReport(self):
    self.login()
    self.wait_id("sidebar")
    self.wait_id("tools-panel", cond=invisible)
    self.click(self.wait_link('Tools', cond=clickable))
    self.wait_id("tools-panel")
    self.click(self.wait_link('Diagnostic report', cond=clickable))
    self.wait_frame("sosreport")
    self.wait_text("This tool will collect system configuration and diagnostic")
    self.click(self.wait_xpath('//button[@data-target="#sos"]',
                               cond=clickable))
    self.wait_id("sos")
    self.wait_text("Generating report")
    process.run("pgrep sosreport", shell=True)
    # The duration of report generation depends on the target system - as
    # long as sosreport is active, we don't want to time out. It is also
    # important to call some selenium method here to ensure that the
    # connection to the HUB will not be lost.
    with Timeout(seconds=240,
                 error_message="Timeout: sosreport did not finish"):
        while True:
            try:
                process.run("pgrep sosreport", shell=True)
                self.wait_text("Generating report", overridetry=5)
            except Exception:
                break
            time.sleep(1)
    element = self.wait_id("sos-download")
    self.wait_xpath('//button[contains(text(), "%s")]' % "Download",
                    cond=clickable, baseelement=element)
    self.click(self.wait_id("sos-cancel", cond=clickable))
    self.wait_text("This tool will collect system configuration and diagnostic")
    self.mainframe()
    self.error = False
def test_scpandssh(self):
    '''
    check scp and ssh
    '''
    cmd = "ssh %s@%s \"echo hi\"" % (self.user, self.peer)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("unable to ssh into peer machine")
    process.run("dd if=/dev/zero of=/tmp/tempfile bs=1024000000 count=1",
                shell=True)
    time.sleep(15)
    md_val1 = hashlib.md5(open('/tmp/tempfile', 'rb').read()).hexdigest()
    time.sleep(5)
    cmd = "timeout 600 scp /tmp/tempfile %s@%s:/tmp" % \
        (self.user, self.peer)
    ret = process.system(cmd, shell=True, verbose=True,
                         ignore_status=True)
    time.sleep(15)
    if ret != 0:
        self.fail("unable to copy into peer machine")
    cmd = "timeout 600 scp %s@%s:/tmp/tempfile /tmp" % \
        (self.user, self.peer)
    ret = process.system(cmd, shell=True, verbose=True,
                         ignore_status=True)
    time.sleep(15)
    if ret != 0:
        self.fail("unable to copy from peer machine")
    md_val2 = hashlib.md5(open('/tmp/tempfile', 'rb').read()).hexdigest()
    time.sleep(5)
    if md_val1 != md_val2:
        self.fail("md5 mismatch: file was corrupted during scp round-trip")
def test_basic_workflow(self):
    """
    Check the basic workflow works using ramdisk
    """
    ramdisk_filename = vg_ramdisk_dir = loop_device = None
    vg_name = "avocado_testing_vg_e5kj3erv11a"
    lv_name = "avocado_testing_lv_lk0ff33al5h"
    ramdisk_basedir = os.path.join(self.tmpdir, "foo", "bar")
    mount_loc = os.path.join(self.tmpdir, "lv_mount_location")
    os.mkdir(mount_loc)
    try:
        # Create ramdisk vg
        self.assertFalse(os.path.exists(ramdisk_basedir))
        self.assertFalse(lv_utils.vg_check(vg_name))
        spec = lv_utils.vg_ramdisk(False, vg_name, 10, ramdisk_basedir,
                                   "sparse_file")
        ramdisk_filename, vg_ramdisk_dir, vg_name, loop_device = spec
        # Check it was created properly
        self.assertTrue(ramdisk_filename)
        self.assertTrue(vg_ramdisk_dir)
        self.assertTrue(vg_name)
        self.assertTrue(loop_device)
        self.assertTrue(os.path.exists(ramdisk_basedir))
        self.assertTrue(glob.glob(os.path.join(ramdisk_basedir, "*")))
        self.assertTrue(lv_utils.vg_check(vg_name))
        vgs = lv_utils.vg_list()
        self.assertIn(vg_name, vgs)
        # Can't create existing vg
        self.assertRaises(lv_utils.LVException, lv_utils.vg_create,
                          vg_name, loop_device)
        # Create and check LV
        lv_utils.lv_create(vg_name, lv_name, 1)
        lv_utils.lv_check(vg_name, lv_name)
        self.assertIn(vg_name,
                      process.run("lvs --all", sudo=True).stdout_text)
        self.assertIn(lv_name, lv_utils.lv_list())
        lv_utils.lv_mount(vg_name, lv_name, mount_loc, "ext2")
        lv_utils.lv_umount(vg_name, lv_name)
        lv_utils.lv_remove(vg_name, lv_name)
        self.assertNotIn(lv_name, lv_utils.lv_list())
        # Cleanup ramdisk vgs
        lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                    vg_name, loop_device)
        self.assertTrue(os.path.exists(ramdisk_basedir))
        self.assertFalse(glob.glob(os.path.join(ramdisk_basedir, "*")))
    except BaseException as details:
        try:
            process.run("mountpoint %s && umount %s"
                        % (mount_loc, mount_loc), shell=True, sudo=True)
        except BaseException as details:
            print("Fail to unmount LV: %s" % details)
        try:
            lv_utils.lv_remove(vg_name, lv_name)
        except BaseException as details:
            print("Fail to cleanup LV: %s" % details)
        try:
            lv_utils.vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir,
                                        vg_name, loop_device)
        except BaseException as details:
            print("Fail to cleanup vg_ramdisk: %s" % details)
def test_run_replay_remotefail(self):
    """
    Runs a replay job using remote plugin (not supported).
    """
    cmd_line = ('avocado run passtest.py '
                '-m examples/tests/sleeptest.py.data/sleeptest.yaml '
                '--job-results-dir %s --sysinfo=off --json -'
                % self.tmpdir)
    result = process.run(cmd_line, ignore_status=True)
    jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
    idfile = os.path.join(jobdir, 'id')
    with open(idfile, 'r') as f:
        jobid = f.read().strip('\n')
    cmd_line = ('avocado run --replay %s --remote-hostname '
                'localhost --job-results-dir %s --sysinfo=off'
                % (jobid, self.tmpdir))
    expected_rc = exit_codes.AVOCADO_FAIL
    result = process.run(cmd_line, ignore_status=True)
    self.assertEqual(result.exit_status, expected_rc,
                     "Command %s did not return rc "
                     "%d:\n%s" % (cmd_line, expected_rc, result))
    msg = b"Currently we don't replay jobs in remote hosts."
    self.assertIn(msg, result.stderr)
def setUp(self):
    '''
    Build interbench
    Source: http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2
    '''
    sm_manager = SoftwareManager()
    for pkg in ['gcc', 'patch']:
        if (not sm_manager.check_installed(pkg) and
                not sm_manager.install(pkg)):
            self.cancel("%s is needed for the test to be run" % pkg)

    disk_free_mb = (disk.freespace(self.teststmpdir) / 1024) / 1024
    if memory.memtotal() / 1024 > disk_free_mb:
        self.cancel('Disk space is less than total memory. Skipping test')
    tarball = self.fetch_asset('http://slackware.cs.utah.edu/pub/kernel'
                               '.org/pub/linux/kernel/people/ck/apps/'
                               'interbench/interbench-0.31.tar.gz')
    data_dir = os.path.abspath(self.datadir)
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.sourcedir = os.path.join(self.srcdir, version)

    # Patch for make file
    os.chdir(self.sourcedir)
    makefile_patch = 'patch -p1 < %s ' % (
        os.path.join(data_dir, 'makefile_fix.patch'))
    process.run(makefile_patch, shell=True)
    build.make(self.sourcedir)
def test(self):
    failures = False
    os.chdir(self.teststmpdir)
    if not self.test_list:
        self.log.info('Running all tests')
        args = ''
        if self.exclude or self.gen_exclude:
            args = ' -E %s' % self.exclude_file
        cmd = './check %s -g auto' % args
        result = process.run(cmd, ignore_status=True, verbose=True)
        if result.exit_status == 0:
            self.log.info('OK: All Tests passed.')
        else:
            msg = self._parse_error_message(result.stdout)
            self.log.info('ERR: Test(s) failed. Message: %s', msg)
            failures = True
    else:
        self.log.info('Running only specified tests')
        for test in self.test_list:
            test = '%s/%s' % (self.fs_to_test, test)
            cmd = './check %s' % test
            result = process.run(cmd, ignore_status=True, verbose=True)
            if result.exit_status == 0:
                self.log.info('OK: Test %s passed.', test)
            else:
                msg = self._parse_error_message(result.stdout)
                self.log.info('ERR: %s failed. Message: %s', test, msg)
                failures = True
    if failures:
        self.fail('One or more tests failed. Please check the logs.')
def install_packages(self):
    '''
    Install necessary packages
    '''
    smm = SoftwareManager()
    detected_distro = distro.detect()
    self.log.info("Test is running on %s", detected_distro.name)
    if not smm.check_installed("ksh") and not smm.install("ksh"):
        self.cancel('ksh is needed for the test to be run')
    if detected_distro.name == "Ubuntu":
        if not smm.check_installed("python-paramiko") and not \
                smm.install("python-paramiko"):
            self.cancel('python-paramiko is needed for the test to be run')
        ubuntu_url = self.params.get('ubuntu_url', default=None)
        debs = self.params.get('debs', default=None)
        if not ubuntu_url or not debs:
            self.cancel("No ubuntu url or deb list specified")
        for deb in debs:
            deb_url = os.path.join(ubuntu_url, deb)
            deb_install = self.fetch_asset(deb_url, expire='7d')
            shutil.copy(deb_install, self.workdir)
            process.system("dpkg -i %s/%s" % (self.workdir, deb),
                           ignore_status=True, sudo=True)
    else:
        url = self.params.get('url', default=None)
        if not url:
            self.cancel("No url specified")
        rpm_install = self.fetch_asset(url, expire='7d')
        shutil.copy(rpm_install, self.workdir)
    os.chdir(self.workdir)
    process.run('chmod +x ibmtools')
    process.run('./ibmtools --install --managed')
def test_mount(self):
    mount_point = self.params.get("mountpoint",
                                  default=self.DEFAULT_MOUNT_POINT)
    if mount_point in open("/proc/mounts").read():
        process.run("umount %s" % mount_point)
    process.run("mount %s" % mount_point)
    self.assertIn(mount_point, open("/proc/mounts").read())
def setUp(self):
    self.test_file = self.params.get('tmp_file', default='/tmp/dummy')
    self.duration = self.params.get('duration', default='30')
    self.threads = self.params.get(
        'threads', default=cpu.online_cpus_count())
    self.size = self.params.get(
        'memory_to_test', default=int(0.9 * memory.meminfo.MemFree.m))

    smm = SoftwareManager()
    for package in ['gcc', 'libtool', 'autoconf', 'automake', 'make']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Failed to install %s, which is needed for"
                        " the test to be run" % package)

    if not os.path.exists(self.test_file):
        try:
            os.mknod(self.test_file)
        except OSError:
            self.cancel("Skipping test since test file creation failed")
    loc = ["https://github.com/stressapptest/"
           "stressapptest/archive/master.zip"]
    tarball = self.fetch_asset("stressapp.zip", locations=loc,
                               expire='7d')
    archive.extract(tarball, self.workdir)
    self.sourcedir = os.path.join(self.workdir, 'stressapptest-master')
    os.chdir(self.sourcedir)
    process.run('./configure', shell=True)
    build.make(self.sourcedir)
def setUp(self):
    '''
    Build FileBench
    Source: https://github.com/filebench/filebench/releases/download/1.5-alpha3/filebench-1.5-alpha3.tar.gz
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    deps = ['libtool', 'automake', 'autoconf', 'bison', 'gcc', 'flex']
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.error(package + ' is needed for the test to be run')
    self._testfile = self.params.get('testfile', default='fileserver.f')
    tarball = self.fetch_asset('https://github.com/filebench/'
                               'filebench/releases/download/1.5-alpha3/'
                               'filebench-1.5-alpha3.tar.gz', expire='7d')
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.srcdir = os.path.join(self.srcdir, version)
    os.chdir(self.srcdir)
    process.run('./configure', shell=True, sudo=True)
    build.make(self.srcdir)
    build.make(self.srcdir, extra_args='install')
    # Setup test file
    t_dir = '/usr/local/share/filebench/workloads/'
    shutil.copyfile(os.path.join(t_dir, self._testfile),
                    os.path.join(self.srcdir, self._testfile))
def __init__(self, disk):
    try:
        import guestfs
    except ImportError:
        install_cmd = "yum -y install python-libguestfs"
        try:
            process.run(install_cmd)
            import guestfs
        except Exception:
            raise exceptions.TestSkipError('We need python-libguestfs (or '
                                           'the equivalent for your '
                                           'distro) for this particular '
                                           'feature (modifying guest '
                                           'files with libguestfs)')
    self.g = guestfs.GuestFS()
    self.disk = disk
    self.g.add_drive(disk)
    libvirtd = SpecificServiceManager("libvirtd")
    libvirtd_status = libvirtd.status()
    if libvirtd_status is None:
        raise exceptions.TestError('libvirtd: service not found')
    if (not libvirtd_status) and (not libvirtd.start()):
        raise exceptions.TestError('libvirtd: failed to start')
    logging.debug("Launch the disk %s, wait...", self.disk)
    self.g.launch()
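Once launch() returns, the handle is used through the regular libguestfs Python API; a minimal sketch of a typical follow-up (the file path and content are illustrative assumptions):

# Hypothetical follow-up after self.g.launch() has returned.
roots = self.g.inspect_os()              # detect installed operating systems
if roots:
    self.g.mount(roots[0], "/")          # mount the guest's root filesystem
    self.g.write("/tmp/marker", "data")  # modify a file inside the guest
self.g.umount_all()
self.g.shutdown()                        # close the libguestfs appliance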
def setUp(self):
    '''
    Build Stutter Test
    Source: https://github.com/gaowanlong/stutter/archive/master.zip
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.error('Gcc is needed for the test to be run')
    locations = ["https://github.com/gaowanlong/stutter/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("stutter.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.srcdir = os.path.join(self.srcdir, 'stutter-master')
    mem_byte = str(memory.memtotal())
    print(mem_byte)
    self._memory = self.params.get('memory', default=mem_byte)
    self._iteration = self.params.get('iteration', default='10')
    self._logdir = self.params.get('logdir', default='/var/tmp/logdir')
    self._rundir = self.params.get('rundir', default='/tmp')
    process.run('mkdir -p %s' % self._logdir)
    # export env variable, used by test script
    os.environ['MEMTOTAL_BYTES'] = self._memory
    os.environ['ITERATIONS'] = self._iteration
    os.environ['LOGDIR_RESULTS'] = self._logdir
    os.environ['TESTDISK_DIR'] = self._rundir
    build.make(self.srcdir)
def setUp(self):
    '''
    Build Bcc Test
    Source: https://github.com/iovisor/bcc
    '''
    # Check for basic utilities
    detected_distro = distro.detect().name.lower()
    smm = SoftwareManager()
    # TODO: Add support for other distributions
    if not detected_distro == "ubuntu":
        self.cancel("Unsupported OS %s" % detected_distro)
    for package in ['bison', 'build-essential', 'cmake', 'flex',
                    'libedit-dev', 'libllvm3.8', 'llvm-3.8-dev',
                    'libclang-3.8-dev', 'python', 'zlib1g-dev',
                    'libelf-dev', 'clang-format-3.8', 'python-netaddr',
                    'python-pyroute2', 'arping', 'iperf', 'netperf',
                    'ethtool']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Failed to install %s, which is needed for"
                        " the test to be run" % package)
    locations = ["https://github.com/iovisor/bcc/archive/master.zip"]
    tarball = self.fetch_asset("bcc.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.sourcedir = os.path.join(self.srcdir, 'bcc-master')
    os.makedirs('%s/build' % self.sourcedir)
    self.builddir = '%s/build' % self.sourcedir
    os.chdir(self.builddir)
    process.run('cmake .. -DCMAKE_INSTALL_PREFIX=/usr', shell=True)
    build.make(self.builddir)
def recover(self, params=None):
    """
    Recover test environment
    """
    cpu_enable = True if self.cpu_status else False
    utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    if os.path.exists(tmp_c_file):
        os.remove(tmp_c_file)
    if os.path.exists(tmp_exe_file):
        os.remove(tmp_exe_file)
    if 'memory_pid' in params:
        pid = int(params.get('memory_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        process.run("swapon -a", shell=True)
    if 'cpu_pid' in params:
        pid = int(params.get('cpu_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        tmp_sh_file = params.get("tmp_sh_file")
        if os.path.exists(tmp_sh_file):
            os.remove(tmp_sh_file)
    virsh.destroy(self.vm_name)
    if len(self.snp_list) < len(self.current_snp_list):
        self.diff_snp_list = list(set(self.current_snp_list) -
                                  set(self.snp_list))
        for item in self.diff_snp_list:
            virsh.snapshot_delete(self.vm_name, item)
    remove_machine_cgroup()
def vg_ramdisk(vg_name, ramdisk_vg_size, ramdisk_basedir,
               ramdisk_sparse_filename):
    """
    Create vg on top of ram memory to speed up lv performance.
    """
    error_context.context("Creating virtual group on top of ram memory",
                          logging.info)
    vg_size = ramdisk_vg_size
    vg_ramdisk_dir = os.path.join(ramdisk_basedir, vg_name)
    ramdisk_filename = os.path.join(vg_ramdisk_dir,
                                    ramdisk_sparse_filename)

    vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, vg_name, "")
    result = ""
    if not os.path.exists(vg_ramdisk_dir):
        os.mkdir(vg_ramdisk_dir)
    try:
        logging.info("Mounting tmpfs")
        result = process.run("mount -t tmpfs tmpfs " + vg_ramdisk_dir)

        logging.info("Converting and copying /dev/zero")
        cmd = ("dd if=/dev/zero of=" + ramdisk_filename +
               " bs=1M count=1 seek=" + vg_size)
        result = process.run(cmd, verbose=True)

        logging.info("Finding free loop device")
        result = process.run("losetup --find", verbose=True)
    except process.CmdError as ex:
        logging.error(ex)
        vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, vg_name, "")
        raise ex
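The snippet stops right after locating a free loop device; in helpers of this shape the continuation typically attaches the sparse file to the loop device and builds a PV/VG on top of it. A hedged sketch of that continuation (assumed, not part of the snippet above):

# Hypothetical continuation; 'result' holds the "losetup --find" output
# (older avocado versions expose it as result.stdout instead of stdout_text).
loop_device = result.stdout_text.strip()
process.run("losetup %s %s" % (loop_device, ramdisk_filename), verbose=True)
process.run("pvcreate -y %s" % loop_device, verbose=True)
process.run("vgcreate %s %s" % (vg_name, loop_device), verbose=True)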
def addtarget(self, name):
    process.run("targetcli /backstores/fileio/ create "
                "file_or_dev=/var/tmp/%s_%s.target size=10G sparse=true "
                "name=%s_%s" % (self.domain, name, self.domain, name),
                shell=True)
    process.run("targetcli /iscsi/ create %s.%s:%s"
                % (self.prefix, self.domain, name), shell=True)
    process.run("targetcli /iscsi/%s.%s:%s/tpg1/acls/ create %s"
                % (self.prefix, self.domain, name, self.initiatorname),
                shell=True)
    process.run("targetcli /iscsi/%s.%s:%s/tpg1/luns/ create "
                "/backstores/fileio/%s_%s"
                % (self.prefix, self.domain, name, self.domain, name),
                shell=True)
    process.run("targetcli saveconfig", shell=True)
    self.targetlist.append(name)
def setUp(self):
    '''
    Build linsched Test
    Source: https://github.com/thejinxters/linux-scheduler-testing
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    deps = ['gcc', 'make', 'patch']
    if distro.detect().name == "SuSE":
        deps.append('git-core')
    else:
        deps.append('git')
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel(
                "Fail to install %s required for this test." % package)
    self.args = self.params.get('args', default='pi 100')
    git.get_repo('https://github.com/thejinxters/linux-scheduler-testing',
                 destination_dir=self.srcdir)
    os.chdir(self.srcdir)
    fix_patch = 'patch -p1 < %s' % (
        os.path.join(self.datadir, 'fix.patch'))
    process.run(fix_patch, shell=True, ignore_status=True)
    build.make(self.srcdir)
def test_tap_parser(self):
    perl_script = script.TemporaryScript("tap_parser.pl",
                                         PERL_TAP_PARSER_SNIPPET
                                         % self.tmpdir)
    perl_script.save()
    os.chdir(basedir)
    process.run("perl %s" % perl_script)
def test(self): """ Performs ezfio test on the block device'. """ os.chdir(self.ezfio_path) cmd = "./ezfio.py -d %s -o %s -u %s --yes" % (self.device, self.outputdir, self.utilization) process.run(cmd, shell=True)
def setUp(self): """ Build xfstest Source: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git """ sm = SoftwareManager() packages = ['xfslibs-dev', 'uuid-dev', 'libtool-bin', 'e2fsprogs', 'automake', 'gcc', 'libuuid1', 'quota', 'attr', 'libattr1-dev', 'make', 'libacl1-dev', 'xfsprogs', 'libgdbm-dev', 'gawk', 'fio', 'dbench', 'uuid-runtime'] for package in packages: if not sm.check_installed(package) and not sm.install(package): self.error("Fail to install %s required for this test." % package) self.test_range = self.params.get('test_range', default=None) if self.test_range is None: self.fail('Please provide a test_range.') self.skip_dangerous = self.params.get('skip_dangerous', default=True) git.get_repo('git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git', destination_dir=self.srcdir) data_dir = os.path.abspath(self.datadir) shutil.copyfile(os.path.join(data_dir, 'group'), os.path.join(self.srcdir, 'group')) build.make(self.srcdir) self.available_tests = self._get_available_tests() self.test_list = self._create_test_list() self.log.info("Tests available in srcdir: %s", ", ".join(self.available_tests)) process.run('useradd fsgqa', sudo=True) process.run('useradd 123456-fsgqa', sudo=True)
def lv_take_snapshot(vg_name, lv_name, lv_snapshot_name, lv_snapshot_size):
    """
    Take a snapshot of the original logical volume.
    """
    error_context.context("Taking snapshot from original logical volume",
                          logging.info)
    if not vg_check(vg_name):
        raise exceptions.TestError("Volume group could not be found")
    if lv_check(vg_name, lv_snapshot_name):
        raise exceptions.TestError("Snapshot already exists")
    if not lv_check(vg_name, lv_name):
        raise exceptions.TestError("Snapshot's origin could not be found")

    cmd = ("lvcreate --size " + lv_snapshot_size + " --snapshot " +
           " --name " + lv_snapshot_name + " /dev/" + vg_name + "/" +
           lv_name)
    try:
        result = process.run(cmd)
    except process.CmdError as ex:
        if ('Logical volume "%s" already exists in volume group "%s"'
                % (lv_snapshot_name, vg_name) in ex.result.stderr and
                re.search(re.escape(lv_snapshot_name + " [active]"),
                          process.run("lvdisplay").stdout)):
            # the above conditions detect if merge of snapshot was postponed
            logging.warning(("Logical volume %s is still active! " +
                             "Attempting to deactivate..."), lv_name)
            lv_reactivate(vg_name, lv_name)
            result = process.run(cmd)
        else:
            raise ex
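For context, the "[active]" state the except-branch checks for arises when a snapshot merge was postponed by LVM; a minimal sketch of the merge operation itself (assumed, not from the snippet above):

# Hypothetical merge of a snapshot back into its origin volume.
process.run("lvconvert --merge /dev/%s/%s" % (vg_name, lv_snapshot_name))
# If the origin volume is in use, LVM defers the merge until the next
# activation, which is exactly the situation the except-branch detects.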
def setUp(self):
    '''
    Build IOZone
    Source: http://www.iozone.org/src/current/iozone3_434.tar
    '''
    self.base_dir = os.path.abspath(self.basedir)
    smm = SoftwareManager()
    for package in ['gcc', 'make', 'patch']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("%s is needed for the test to be run" % package)

    tarball = self.fetch_asset(
        'http://www.iozone.org/src/current/iozone3_434.tar')
    archive.extract(tarball, self.teststmpdir)
    version = os.path.basename(tarball.split('.tar')[0])
    self.sourcedir = os.path.join(self.teststmpdir, version)

    make_dir = os.path.join(self.sourcedir, 'src', 'current')
    os.chdir(make_dir)
    patch = self.params.get('patch', default='makefile.patch')
    patch = os.path.join(self.datadir, patch)
    process.run('patch -p3 < %s' % patch, shell=True)

    d_distro = distro.detect()
    arch = d_distro.arch
    if arch == 'ppc':
        build.make(make_dir, extra_args='linux-powerpc')
    elif arch == 'ppc64' or arch == 'ppc64le':
        build.make(make_dir, extra_args='linux-powerpc64')
    elif arch == 'x86_64':
        build.make(make_dir, extra_args='linux-AMD64')
    else:
        build.make(make_dir, extra_args='linux')
def test_get_diskspace(self):
    """
    Use scsi_debug device to check disk size
    """
    pre = glob.glob("/dev/sd*")
    try:
        process.system("modprobe scsi_debug", sudo=True)
        disks = set(glob.glob("/dev/sd*")).difference(pre)
        self.assertEqual(len(disks), 1, "pre: %s\npost: %s"
                         % (disks, glob.glob("/dev/sd*")))
        disk = disks.pop()
        # scsi_debug defaults to an 8 MiB device (8388608 bytes)
        self.assertEqual(lv_utils.get_diskspace(disk), "8388608")
    except BaseException:
        for _ in range(10):
            res = process.run("rmmod scsi_debug", ignore_status=True,
                              sudo=True)
            if not res.exit_status:
                print("scsi_debug removed")
                break
        else:
            print("Fail to remove scsi_debug: %s" % res)
    for _ in range(10):
        res = process.run("rmmod scsi_debug", ignore_status=True,
                          sudo=True)
        if not res.exit_status:
            break
    else:
        self.fail("Fail to remove scsi_debug after testing: %s" % res)
        if vm_cpu_list != cpu_list:
            raise error.TestFail("vm node %s cpu list %s not expected"
                                 % (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected."
                                         % topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
        for mt_path in mount_path:
            try:
                process.run("umount %s" % mt_path, shell=True)
            except process.CmdError:
                logging.warning("umount %s failed" % mt_path)
def event_stat(self, cmd):
    return process.run('%s%s' % (self.perf_stat, cmd),
                       ignore_status=True)
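A call sketch for the helper above, assuming `self.perf_stat` holds a command prefix such as 'perf stat ' (the event name is an arbitrary example):

# Hypothetical invocation; '-e cycles' is an illustrative event.
result = self.event_stat('-e cycles sleep 1')
if result.exit_status != 0:
    self.log.info("perf stat failed: %s", result.stderr_text)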
def test_domain_chip_offset(self):
    cmd = ("perf stat -r 10 -x ' ' "
           "-e hv_24x7/domain=2,offset=0xe0,core=0/ sleep 1")
    process.run(cmd)
def run(test, params, env): """ Integration test of backup and backing_chain. Steps: 1. create a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. create some data on vdb 5. start a pull mode incremental backup 6. repeat step 5 to 7 7. before the last round of backup job, do a blockcommit/pull/copy 8. check the full/incremental backup file data """ def run_blk_cmd(): """ Run blockcommit/blockpull/blockcopy command. """ def run_blockpull(): """ Run blockpull command. """ if from_to == "mid_to_top": cmd_option = ("--base {0}[{1}] --wait").format( original_disk_target, middle_layer1_index) elif from_to == "base_to_top": cmd_option = ("--base {0}[{1}] --wait").format( original_disk_target, base_layer_index) virsh.blockpull(vm_name, original_disk_target, cmd_option, debug=True, ignore_status=False) def run_blockcommit(): """ Run blockcommit command. """ if from_to == "top_to_base": # Do blockcommit from top layer to base layer cmd_option = ( "--top {0}[{1}] --base {0}[{2}] --active --pivot " "--wait".format(original_disk_target, top_layer_index, base_layer_index)) elif from_to == "mid_to_mid": # Do blockcommit from middle layer to another middle layer if len(indice) < 4: test.fail( "At lease 4 layers required for the test 'mid_to_mid'") cmd_option = ("--top {0}[{1}] --base {0}[{2}] " "--wait".format(original_disk_target, middle_layer1_index, middle_layer2_index)) elif from_to == "top_to_mid": # Do blockcommit from top layer to middle layer cmd_option = ( "--top {0}[{1}] --base {0}[{2}] --active --pivot " "--wait".format(original_disk_target, top_layer_index, middle_layer1_index)) elif from_to == "mid_to_base": # Do blockcommit from middle layer to base layer cmd_option = ("--top {0}[{1}] --base {0}[{2}] " "--wait".format(original_disk_target, middle_layer1_index, base_layer_index)) virsh.blockcommit(vm_name, original_disk_target, cmd_option, debug=True, ignore_stauts=False) def run_blockcopy(): """ Run blockcopy command. 
""" copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2") cmd_option = "--wait --verbose --transient-job --pivot" if blockcopy_method == "shallow_copy": cmd_option += " --shallow" if blockcopy_reuse == "reuse_external": cmd_option += " --reuse-external" if blockcopy_method == "shallow_copy": create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s" create_img_cmd %= (backend_img, copy_dest) else: create_img_cmd = "qemu-img create -f qcow2 %s %s" create_img_cmd %= (copy_dest, original_disk_size) process.run(create_img_cmd, shell=True, ignore_status=False) virsh.blockcopy(vm_name, original_disk_target, copy_dest, cmd_option, debug=True, ignore_status=False) # Get disk backing store indice info in vm disk xml cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk") cur_test_disk_xml = '' for disk_xml in cur_disk_xmls: if disk_xml.target['dev'] == original_disk_target: cur_test_disk_xml = disk_xml logging.debug("Current disk xml for %s is:\n %s", original_disk_target, cur_test_disk_xml) break indice = re.findall(r".*index=['|\"](\d+)['|\"].*", str(cur_test_disk_xml)) logging.debug("backing store indice for %s is: %s", original_disk_target, indice) if len(indice) < 3: test.fail("At least 3 layers required for the test.") top_layer_index = indice[0] middle_layer1_index = indice[1] middle_layer2_index = indice[-2] base_layer_index = indice[-1] logging.debug( "Following backing store will be used: %s", "top:%s; middle_1: %s, middle_2:%s, base: %s" % (top_layer_index, middle_layer1_index, middle_layer2_index, base_layer_index)) # Start the block command if blockcommand == "blockpull": run_blockpull() if blockcommand == "blockcommit": run_blockcommit() if blockcommand == "blockcopy": run_blockcopy() def create_shutoff_snapshot(original_img, snapshot_img): """ Create shutoff snapshot, which means the disk snapshot is not controlled by libvirt, but created directly by qemu command. :param original_img: The image we will take shutoff snapshot for. :param snapshot_img: The newly created shutoff snapshot image. 
""" cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img) img_info = process.run(cmd, shell=True, ignore_status=False).stdout_text json_data = json.loads(img_info) cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format( original_img, snapshot_img) process.run(cmd, shell=True, ignore_status=False) try: bitmaps = json_data['format-specific']['data']['bitmaps'] for bitmap in bitmaps: bitmap_flags = bitmap['flags'] bitmap_name = bitmap['name'] if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags: cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format( snapshot_img, bitmap_name) process.run(cmd, shell=True, ignore_status=False) except Exception as bitmap_error: logging.debug("Cannot add bitmap to new image, skip it: %s", bitmap_error) # Cancel the test if libvirt version is too low if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") # vm's original disk config original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") # pull mode backup config scratch_type = params.get("scratch_type", "file") nbd_protocol = params.get("nbd_protocol", "tcp") nbd_tcp_port = params.get("nbd_tcp_port", "10809") # test config backup_rounds = int(params.get("backup_rounds", 4)) shutoff_snapshot = "yes" == params.get("shutoff_snapshot") blockcommand = params.get("blockcommand") from_to = params.get("from_to") blockcopy_method = params.get("blockcopy_method") blockcopy_reuse = params.get("blockcopy_reuse") backup_error = "yes" == params.get("backup_error") tmp_dir = data_dir.get_tmp_dir() try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() disks_not_tested = list(vmxml.get_disk_all().keys()) logging.debug("Not tested disks are: %s", disks_not_tested) utils_backup.enable_inc_backup_for_vm(vm) # Destroy vm before test if vm.is_alive(): vm.destroy(gracefully=False) # Prepare the disk to be backuped. 
disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "%s_image.qcow2" % original_disk_target disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target else: logging.cancel("The disk type '%s' not supported in this script.", original_disk_type) disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_file_list = [] snapshot_list = [] cur_disk_xml = disk_xml cur_disk_path = disk_path cur_disk_params = disk_params backend_img = "" for backup_index in range(backup_rounds): # Do external snapshot if shutoff_snapshot: virsh.detach_disk(vm.name, original_disk_target, extra="--persistent", ignore_status=False, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index) shutoff_snapshot_path = os.path.join(tmp_dir, shutoff_snapshot_name) create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path) cur_disk_params["source_file"] = shutoff_snapshot_path cur_disk_xml = libvirt.create_disk_xml(cur_disk_params) virsh.attach_device(vm.name, cur_disk_xml, flagstr="--config", ignore_status=False, debug=True) vm.start() vm.wait_for_login().close() cur_disk_path = shutoff_snapshot_path else: snapshot_name = "snap_%s" % str(backup_index) snapshot_option = "" snapshot_file_name = os.path.join(tmp_dir, snapshot_name) for disk_name in disks_not_tested: snapshot_option += "--diskspec %s,snapshot=no " % disk_name snapshot_option += "--diskspec %s,file=%s" % ( original_disk_target, snapshot_file_name) virsh.snapshot_create_as(vm_name, "%s --disk-only %s" % (snapshot_name, snapshot_option), debug=True) snapshot_list.append(snapshot_name) # Prepare backup xml backup_params = {"backup_mode": "pull"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {"name": "localhost", "port": nbd_tcp_port} backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Prepare nbd scratch file/dev params scratch_params = {"attrs": {}} scratch_file_name = "scratch_file_%s" % backup_index scratch_file_path = os.path.join(tmp_dir, scratch_file_name) scratch_params["attrs"]["file"] = scratch_file_path logging.debug("scratch_params: %s", scratch_params) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) 
backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) backup_file_path = os.path.join( tmp_dir, "backup_file_%s.qcow2" % str(backup_index)) backup_file_list.append(backup_file_path) nbd_params = { "nbd_protocol": nbd_protocol, "nbd_hostname": "localhost", "nbd_export": nbd_export_name, "nbd_tcp_port": nbd_tcp_port } if not is_incremental: # Do full backup utils_backup.pull_full_backup_to_file(nbd_params, backup_file_path) logging.debug("Full backup to: %s", backup_file_path) else: # Do incremental backup utils_backup.pull_incremental_backup_to_file( nbd_params, backup_file_path, nbd_bitmap_name, original_disk_size) virsh.domjobabort(vm_name, debug=True) # Start to run the blockcommit/blockpull cmd before the last round # of backup job, this is to test if the block command will keep the # dirty bitmap data. 
if backup_index == backup_rounds - 2: run_blk_cmd() cur_disk_path = vm.get_blk_devices( )[original_disk_target]['source'] if backup_index == backup_rounds - 3: backend_img = vm.get_blk_devices( )[original_disk_target]['source'] # Get current active image for the test disk vm_disks = vm.get_blk_devices() current_active_image = vm_disks[original_disk_target]['source'] logging.debug("The current active image for '%s' is '%s'", original_disk_target, current_active_image) for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True, ignore_status=False) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % ( current_active_image, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_file in backup_file_list: if not utils_backup.cmp_backup_data(original_data_file, backup_file): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (current_active_image, backup_file)) else: logging.debug("'%s' contains correct backup data", backup_file) except utils_backup.BackupBeginError as details: if backup_error: logging.debug("Backup failed as expected.") else: test.fail(details) finally: # Remove checkpoints' metadata again to make sure vm has no checkpoints if "checkpoint_list" in locals(): for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, options="--metadata") # Remove snapshots if "snapshot_list" in locals(): for snapshot_name in snapshot_list: virsh.snapshot_delete(vm_name, "%s --metadata" % snapshot_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() for file_name in os.listdir(tmp_dir): file_path = os.path.join(tmp_dir, file_name) if 'env' not in file_path: if os.path.isfile(file_path): os.remove(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path)
def trigger_events(dom, events_list=[]):
    """
    Trigger various events in events_list

    :param dom: the vm objects corresponding to the domain
    :return: the expected output that virsh event command prints out
    """
    expected_events_list = []
    save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
    print(dom.name)
    xmlfile = dom.backup_xml()
    new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
    dest_path = os.path.join(data_dir.get_data_dir(), "copy")

    try:
        for event in events_list:
            logging.debug("Current event is: %s", event)
            if event in ['start', 'restore', 'create', 'edit', 'define',
                         'undefine', 'crash', 'device-removal-failed',
                         'watchdog', 'io-error', 'domrename']:
                if dom.is_alive():
                    dom.destroy()
                if event in ['create', 'define']:
                    dom.undefine()
            else:
                if not dom.is_alive():
                    dom.start()
                    dom.wait_for_login().close()
                    if event == "resume":
                        dom.pause()
            if event == "undefine":
                virsh.undefine(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Undefined Removed")
            elif event == "create":
                virsh.create(xmlfile, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Started Booted")
            elif event == "destroy":
                virsh.destroy(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Destroyed")
            elif event == "define":
                virsh.define(xmlfile, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Defined Added")
            elif event == "start":
                virsh.start(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Started Booted")
                dom.wait_for_login().close()
            elif event == "suspend":
                virsh.suspend(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Suspended Paused")
                if not libvirt_version.version_compare(5, 3, 0):
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
            elif event == "resume":
                virsh.resume(dom.name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
            elif event == "save":
                virsh.save(dom.name, save_path, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Suspended Paused")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Saved")
            elif event == "restore":
                if not os.path.exists(save_path):
                    logging.error("%s not exist", save_path)
                else:
                    virsh.restore(save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Restored")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Snapshot")
            elif event == "edit":
                # Check whether 'description' element exists.
                domxml = virsh.dumpxml(dom.name).stdout.strip()
                find_desc = parseString(domxml).getElementsByTagName(
                    "description")
                if find_desc == []:
                    # If not exists, add one for it.
                    logging.info("Adding <description> to guest")
                    virsh.desc(dom.name, "--config",
                               "Added desc for testvm", **virsh_dargs)
                # The edit operation is to delete 'description' element.
                edit_cmd = [r":g/<description.*<\/description>/d"]
                utlv.exec_virsh_edit(dom.name, edit_cmd)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Defined Updated")
            elif event == "shutdown":
                if signal_name is None:
                    virsh.shutdown(dom.name, **virsh_dargs)
                    # Wait a few seconds for shutdown finish
                    time.sleep(3)
                    if utils_misc.compare_qemu_version(2, 9, 0):
                        # Shutdown reason distinguished from qemu_2.9.0-9
                        expected_events_list.append(
                            "'lifecycle' for %s: Shutdown Finished after"
                            " guest request")
                else:
                    os.kill(dom.get_pid(), getattr(signal, signal_name))
                    if utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append(
                            "'lifecycle' for %s: Shutdown Finished after"
                            " host request")
                if not utils_misc.compare_qemu_version(2, 9, 0):
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Shutdown Finished")
                wait_for_shutoff(dom)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Shutdown")
            elif event == "crash":
                if not vmxml.xmltreefile.find('devices').findall('panic'):
                    # Set panic device
                    panic_dev = Panic()
                    panic_dev.model = panic_model
                    panic_dev.addr_type = addr_type
                    panic_dev.addr_iobase = addr_iobase
                    vmxml.add_device(panic_dev)
                vmxml.on_crash = "coredump-restart"
                vmxml.sync()
                logging.info("Guest xml now is: %s", vmxml)
                dom.start()
                session = dom.wait_for_login()
                # Stop kdump in the guest
                session.cmd("systemctl stop kdump", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                try:
                    # Crash the guest
                    session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                except (ShellTimeoutError,
                        ShellProcessTerminatedError) as details:
                    logging.info(details)
                session.close()
                expected_events_list.append("'lifecycle' for %s:"
                                            " Crashed Panicked")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Resumed Unpaused")
            elif event == "reset":
                virsh.reset(dom.name, **virsh_dargs)
                expected_events_list.append("'reboot' for %s")
            elif event == "vcpupin":
                virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.vcpupin0: 0")
            elif event == "emulatorpin":
                virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                expected_events_list.append("'tunable' for %s:"
                                            "\n\tcputune.emulatorpin: 0")
            elif event == "setmem":
                mem_size = int(params.get("mem_size", 512000))
                virsh.setmem(dom.name, mem_size, **virsh_dargs)
                expected_events_list.append("'balloon-change' for %s:")
            elif event == "device-added-removed":
                add_disk(dom.name, new_disk, 'vdb', '')
                expected_events_list.append("'device-added' for %s:"
                                            " virtio-disk1")

                def _check_disk(target):
                    return target not in dom.get_blk_devices()

                utils_misc.wait_for(lambda: not _check_disk('vdb'), 10, 3)
                virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                expected_events_list.append("'device-removed' for %s:"
                                            " virtio-disk1")
                iface_xml_obj = create_iface_xml()
                iface_xml_obj.xmltreefile.write()
                virsh.detach_device(dom.name, iface_xml_obj.xml,
                                    **virsh_dargs)
                expected_events_list.append("'device-removed' for %s:"
                                            " net0")
                time.sleep(2)
                virsh.attach_device(dom.name, iface_xml_obj.xml,
                                    **virsh_dargs)
                expected_events_list.append("'device-added' for %s:"
                                            " net0")
            elif event == "block-threshold":
                add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                logging.debug(process.run('qemu-img info %s -U' % new_disk))
                virsh.domblkthreshold(vm_name, 'vdb', '100M')
                session = dom.wait_for_login()
                session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && "
                            "ls /mnt && dd if=/dev/urandom of=/mnt/bigfile "
                            "bs=1M count=300 && sync")
                time.sleep(5)
                session.close()
                expected_events_list.append("'block-threshold' for %s:"
                                            " dev: vdb(%s) 104857600"
                                            " 29368320")
                virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
            elif event == "change-media":
                target_device = "hdc"
                device_target_bus = params.get("device_target_bus", "ide")
                disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                logging.info("disk_blk %s", disk_blk)
                if target_device not in disk_blk:
                    logging.info("Adding cdrom to guest")
                    if dom.is_alive():
                        dom.destroy()
                    add_disk(dom.name, new_disk, target_device,
                             ("--type cdrom --sourcetype file --driver qemu "
                              + "--config --targetbus %s"
                              % device_target_bus))
                    dom.start()
                all_options = new_disk + " --insert"
                virsh.change_media(dom.name, target_device,
                                   all_options, **virsh_dargs)
                expected_events_list.append("'tray-change' for %s disk"
                                            + " .*%s.*:" % device_target_bus
                                            + " opened")
                expected_events_list.append("'tray-change' for %s disk"
                                            + " .*%s.*:" % device_target_bus
                                            + " closed")
                all_options = new_disk + " --eject"
                virsh.change_media(dom.name, target_device,
                                   all_options, **virsh_dargs)
                expected_events_list.append("'tray-change' for %s disk"
                                            + " .*%s.*:" % device_target_bus
                                            + " opened")
            elif event == "hwclock":
                session = dom.wait_for_login()
                try:
                    session.cmd("hwclock --systohc", timeout=60)
                except (ShellTimeoutError,
                        ShellProcessTerminatedError) as details:
                    logging.info(details)
                session.close()
                expected_events_list.append("'rtc-change' for %s:")
            elif event == "metadata_set":
                metadata_uri = params.get("metadata_uri")
                metadata_key = params.get("metadata_key")
                metadata_value = params.get("metadata_value")
                virsh.metadata(dom.name, metadata_uri, options="",
                               key=metadata_key,
                               new_metadata=metadata_value, **virsh_dargs)
                if not libvirt_version.version_compare(7, 10, 0):
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    expected_events_list.append("'metadata-change' for %s: "
                                                "type element, uri"
                                                " http://app.org/")
            elif event == "metadata_edit":
                metadata_uri = "http://herp.derp/"
                metadata_key = "herp"
                metadata_value = ("<derp xmlns:foobar='http://foo.bar/'>"
                                  "foo<bar></bar></derp>")
                virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                         metadata_key, "--edit")
                session = aexpect.ShellSession("sudo -s")
                logging.info("Running command: %s", virsh_cmd)
                try:
                    session.sendline(virsh_cmd)
                    session.sendline(r":insert")
                    session.sendline(metadata_value)
                    session.sendline(".")
                    session.send('ZZ')
                    remote.handle_prompts(session, None, None,
                                          r"[\#\$]\s*$", debug=True,
                                          timeout=60)
                except Exception as e:
                    test.error("Error occurred: %s" % e)
                session.close()
                # Check metadata after edit
                virsh.metadata(dom.name, metadata_uri, options="",
                               key=metadata_key, **virsh_dargs)
                if not libvirt_version.version_compare(7, 10, 0):
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    expected_events_list.append("'metadata-change' for %s: "
                                                "type element, uri"
                                                " http://app.org/")
            elif event == "metadata_remove":
                virsh.metadata(dom.name, metadata_uri, options="--remove",
                               key=metadata_key, **virsh_dargs)
                if not libvirt_version.version_compare(7, 10, 0):
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                else:
                    expected_events_list.append("'metadata-change' for %s: "
                                                "type element, uri"
                                                " http://app.org/")
            elif event == "blockcommit":
                disk_path = dom.get_blk_devices()['vda']['source']
                virsh.snapshot_create_as(dom.name,
                                         "s1 --disk-only --no-metadata",
                                         **virsh_dargs)
                snapshot_path = dom.get_blk_devices()['vda']['source']
                virsh.blockcommit(dom.name, "vda",
                                  "--active --pivot", **virsh_dargs)
                expected_events_list.append("'block-job' for %s: "
                                            "Active Block Commit for "
                                            + "%s" % snapshot_path
                                            + " ready")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Active Block Commit for"
                                            " vda ready")
                expected_events_list.append("'block-job' for %s: "
                                            "Active Block Commit for "
                                            + "%s" % disk_path
                                            + " completed")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Active Block Commit for"
                                            " vda completed")
                os.unlink(snapshot_path)
            elif event == "blockcopy":
                disk_path = dom.get_blk_devices()['vda']['source']
                dom.undefine()
                virsh.blockcopy(dom.name, "vda", dest_path,
                                "--pivot", **virsh_dargs)
                expected_events_list.append("'block-job' for %s: "
                                            "Block Copy for "
                                            + "%s" % disk_path
                                            + " ready")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Block Copy for vda ready")
                expected_events_list.append("'block-job' for %s: "
                                            "Block Copy for "
                                            + "%s" % dest_path
                                            + " completed")
                expected_events_list.append("'block-job-2' for %s: "
                                            "Block Copy for vda completed")
            elif event == "detach-dimm":
                prepare_vmxml_mem(vmxml)
                tg_size = params.get("dimm_size")
                tg_sizeunit = params.get("dimm_unit")
                dimm_xml = utils_hotplug.create_mem_xml(tg_size, None,
                                                        None, tg_sizeunit)
                virsh.attach_device(dom.name, dimm_xml.xml,
                                    flagstr="--config", **virsh_dargs)
                vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                logging.debug("Current vmxml with plugged dimm dev is %s\n"
                              % vmxml_dimm)
                virsh.start(dom.name, **virsh_dargs)
                dom.wait_for_login().close()
                result = virsh.detach_device(dom.name, dimm_xml.xml,
                                             debug=True, ignore_status=True)
                expected_fails = params.get("expected_fails")
                utlv.check_result(result, expected_fails)
                vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                logging.debug("Current vmxml after hot-unplug dimm is %s\n"
                              % vmxml_live)
                expected_events_list.append("'device-removal-failed'"
                                            " for %s: dimm0")
            elif event == "watchdog":
                vmxml.remove_all_device_by_type('watchdog')
                watchdog_dev = Watchdog()
                watchdog_dev.model_type = params.get("watchdog_model")
                action = params.get("action")
                watchdog_dev.action = action
                vmxml.add_device(watchdog_dev)
                vmxml.sync()
                logging.debug("Current vmxml with watchdog dev is %s\n"
                              % vmxml)
                virsh.start(dom.name, **virsh_dargs)
                session = dom.wait_for_login()
                watchdog_dev.try_modprobe(session)
                try:
                    session.cmd("echo 0 > /dev/watchdog")
                except (ShellTimeoutError,
                        ShellProcessTerminatedError) as details:
                    test.fail("Failed to trigger watchdog: %s" % details)
                session.close()
                # watchdog acts slowly, waiting for it.
                time.sleep(30)
                expected_events_list.append("'watchdog' for %s: "
                                            + "%s" % action)
                if action == 'pause':
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Watchdog")
                    virsh.resume(dom.name, **virsh_dargs)
                else:
                    # action == 'reset'
                    expected_events_list.append("'reboot' for %s")
            elif event == "io-error":
                part_size = params.get("part_size")
                resume_event = params.get("resume_event")
                suspend_event = params.get("suspend_event")
                process.run("truncate -s %s %s"
                            % (part_size, small_part), shell=True)
                utlv.mkfs(small_part, part_format)
                utils_misc.mount(small_part, mount_point, None)
                add_disk(dom.name, new_disk, 'vdb',
                         '--subdriver qcow2 --config', 'qcow2')
                dom.start()
                session = dom.wait_for_login()
                session.cmd("mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && "
                            "ls /mnt && dd if=/dev/zero of=/mnt/test.img "
                            "bs=1M count=50", ignore_all_errors=True)
                time.sleep(5)
                session.close()
                expected_events_list.append("'io-error' for %s: "
                                            + "%s" % new_disk
                                            + r" \(virtio-disk1\) pause")
                expected_events_list.append("'io-error-reason' for %s: "
                                            + "%s" % new_disk
                                            + r" \(virtio-disk1\) pause"
                                            " due to enospc")
                expected_events_list.append(suspend_event)
                process.run("df -hT")
                virsh.resume(dom.name, **virsh_dargs)
                time.sleep(5)
                expected_events_list.append(resume_event)
                expected_events_list.append("'io-error' for %s: "
                                            + "%s" % new_disk
                                            + r" \(virtio-disk1\) pause")
                expected_events_list.append("'io-error-reason' for %s: "
                                            + "%s" % new_disk
                                            + r" \(virtio-disk1\) pause"
                                            " due to enospc")
                expected_events_list.append(suspend_event)
                ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                if ret.stdout.strip() != "paused (I/O error)":
                    test.fail("Domain state should still be paused due to"
                              " I/O error!")
            elif event == "domrename":
                virsh.domrename(dom.name, dom_newname, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s:"
                                            " Undefined Renamed")
                expected_events_list.append("'lifecycle' for %s:"
                                            " Defined Renamed")
            elif event == "kill_qemu":
                os.kill(dom.get_pid(), getattr(signal, signal_name))
                expected_events_list.append("'lifecycle' for %s:"
                                            " Stopped Failed")
            else:
                test.error("Unsupported event: %s" % event)
            # Event may not be received immediately
            time.sleep(3)
    finally:
        if os.path.exists(save_path):
            os.unlink(save_path)
        if os.path.exists(new_disk):
            os.unlink(new_disk)
        if os.path.exists(dest_path):
            os.unlink(dest_path)
    return [(dom.name, event) for event in expected_events_list]
def run(test, params, env): """ convert specific kvm guest to rhev """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: test.error('Missing command: virt-v2v') # Guest name might be changed, we need a new variant to save the original # name vm_name = params['original_vm_name'] = params.get('main_vm', 'EXAMPLE') unprivileged_user = params.get('unprivileged_user') target = params.get('target') input_mode = params.get('input_mode') input_file = params.get('input_file') output_mode = params.get('output_mode') output_format = params.get('output_format') os_pool = output_storage = params.get('output_storage', 'default') bridge = params.get('bridge') network = params.get('network') address_cache = env.get('address_cache') v2v_timeout = int(params.get('v2v_timeout', 1200)) status_error = 'yes' == params.get('status_error', 'no') skip_vm_check = params.get('skip_vm_check', 'no') skip_virsh_pre_conn = params.get('skip_virsh_pre_conn', 'no') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = libvirt.PoolVolumeTest(test, params) checkpoint = params.get('checkpoint', '') datastore = params.get('datastore') esxi_host = params.get('esx_hostname') esxi_password = params.get('esxi_password') hypervisor = params.get("hypervisor") input_transport = params.get("input_transport") vmx_nfs_src = params.get("vmx_nfs_src") # for construct rhv-upload option in v2v cmd output_method = params.get("output_method") rhv_upload_opts = params.get("rhv_upload_opts") storage_name = params.get('storage_name') # for get ca.crt file from ovirt engine rhv_passwd = params.get("rhv_upload_passwd") rhv_passwd_file = params.get("rhv_upload_passwd_file") ovirt_engine_passwd = params.get("ovirt_engine_password") ovirt_hostname = params.get("ovirt_engine_url").split( '/')[2] if params.get("ovirt_engine_url") else None ovirt_ca_file_path = params.get("ovirt_ca_file_path") local_ca_file_path = params.get("local_ca_file_path") vpx_dc = params.get("vpx_dc") vpx_hostname = params.get("vpx_hostname") vpx_password = params.get("vpx_password") src_uri_type = params.get('src_uri_type') v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on' ] else '' v2v_sasl = '' if params.get('v2v_opts'): # Add a blank by force v2v_opts += ' ' + params.get("v2v_opts") error_list = [] # create different sasl_user name for different job if output_mode == 'rhev': params.update({ 'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3) }) logging.info('sals user name is %s' % params.get("sasl_user")) if output_method == 'rhv_upload': # Create password file for '-o rhv_upload' to connect to ovirt with open(rhv_passwd_file, 'w') as f: f.write(rhv_passwd) # Copy ca file from ovirt to local remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd, ovirt_ca_file_path, local_ca_file_path) def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def check_BSOD(): """ Check if boot up into BSOD """ bar = 0.999 match_img = params.get('image_to_match') screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir() if match_img is None: test.error('No BSOD screenshot to match!') cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"' if process.run(cmd_man_page, shell=True).exit_status != 0: log_fail('Man page not contain boot failure msg') for i in range(100): virsh.screenshot(vm_name, 
screenshot) similar = ppm_utils.image_histogram_compare(screenshot, match_img) if similar > bar: logging.info('Meet BSOD with similarity %s' % similar) return time.sleep(1) log_fail('No BSOD as expected') def check_result(result, status_error): """ Check virt-v2v command result """ def vm_check(): """ Checking the VM """ if output_mode == 'json' and not check_json_output(params): test.fail('check json output failed') if output_mode == 'local' and not check_local_output(params): test.fail('check local output failed') if output_mode in ['null', 'json', 'local']: return # Create vmchecker before virsh.start so that the vm can be undefined # if started failed. vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion if params.get('skip_vm_check') != 'yes': if checkpoint != 'win2008r2_ostk': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") if checkpoint == 'win2008r2_ostk': check_BSOD() # Merge 2 error lists error_list.extend(vmchecker.errors) libvirt.check_exit_status(result, status_error) output = result.stdout_text + result.stderr_text if not status_error: vm_check() log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: if checkpoint == 'regular_user_sudo': regular_sudo_config = '/etc/sudoers.d/v2v_test' with open(regular_sudo_config, 'w') as fd: fd.write('%s ALL=(ALL) NOPASSWD: ALL' % unprivileged_user) # create user try: pwd.getpwnam(unprivileged_user) except KeyError: process.system("useradd %s" % unprivileged_user) # generate ssh-key rsa_private_key_path = '/home/%s/.ssh/id_rsa' % unprivileged_user rsa_public_key_path = '/home/%s/.ssh/id_rsa.pub' % unprivileged_user process.system('su - %s -c \'ssh-keygen -t rsa -q -N "" -f %s\'' % (unprivileged_user, rsa_private_key_path)) with open(rsa_public_key_path) as fd: pub_key = fd.read() v2v_params = { 'main_vm': vm_name, 'target': target, 'v2v_opts': v2v_opts, 'os_storage': output_storage, 'network': network, 'bridge': bridge, 'input_mode': input_mode, 'input_file': input_file, 'new_name': 'ova_vm_' + utils_misc.generate_random_string(3), 'datastore': datastore, 'esxi_host': esxi_host, 'esxi_password': esxi_password, 'input_transport': input_transport, 'vmx_nfs_src': vmx_nfs_src, 'output_method': output_method, 'os_storage_name': storage_name, 'os_pool': os_pool, 'rhv_upload_opts': rhv_upload_opts, 'params': params } if input_mode == 'vmx': v2v_params.update({ 'new_name': vm_name + utils_misc.generate_random_string(3), 'hypervisor': hypervisor, 'vpx_dc': vpx_dc, 'password': vpx_password if src_uri_type != 'esx' else esxi_password, 'hostname': vpx_hostname, 'skip_virsh_pre_conn': skip_virsh_pre_conn }) if checkpoint == 'regular_user_sudo': v2v_params.update({'pub_key': pub_key}) # copy ova from nfs storage before v2v conversion if input_mode == 'ova': src_dir = params.get('ova_dir') dest_dir = params.get('ova_copy_dir') if os.path.isfile(src_dir) and not os.path.exists(dest_dir): os.makedirs(dest_dir, exist_ok=True) if os.path.isdir(src_dir) and os.path.exists(dest_dir): shutil.rmtree(dest_dir) if os.path.isdir(src_dir): 
shutil.copytree(src_dir, dest_dir) else: shutil.copy(src_dir, dest_dir) logging.info('Copy ova from %s to %s', src_dir, dest_dir) if output_format: v2v_params.update({'of_format': output_format}) # Create libvirt dir pool if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') # Build rhev related options if output_mode == 'rhev': # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) if output_mode == 'local': v2v_params['os_directory'] = data_dir.get_tmp_dir() if checkpoint == 'ova_relative_path': logging.debug('Current dir: %s', os.getcwd()) ova_dir = params.get('ova_dir') logging.info('Change to dir: %s', ova_dir) os.chdir(ova_dir) # Set libguestfs environment variable os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint == 'permission': os.environ['LIBGUESTFS_BACKEND'] = '' process.run('echo $LIBGUESTFS_BACKEND', shell=True) v2v_result = utils_v2v.v2v_cmd(v2v_params) if 'new_name' in v2v_params: vm_name = params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: # Cleanup constant files utils_v2v.cleanup_constant_files(params) if input_mode == 'ova' and os.path.exists(dest_dir): shutil.rmtree(dest_dir) if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'rhev' and v2v_sasl: v2v_sasl.cleanup() v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'regular_user_sudo' and os.path.exists( regular_sudo_config): os.remove(regular_sudo_config) if unprivileged_user: process.system("userdel -fr %s" % unprivileged_user) if input_mode == 'vmx' and input_transport == 'ssh': process.run("killall ssh-agent")
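# Note on the environment handling above: LIBGUESTFS_BACKEND=direct makes
# libguestfs launch its appliance by running qemu directly instead of going
# through libvirt, which keeps virt-v2v independent of the local libvirtd;
# the 'permission' checkpoint clears the variable on purpose to exercise
# the default backend selection.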
def test_download_image_fail(self): cmd_line = "%s --config %s vmimage get --distro=SHOULD_NEVER_EXIST " \ "999 --arch zzz_64" % (AVOCADO, self.config_file.name) result = process.run(cmd_line, ignore_status=True) self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
def run_sig_segv(params, libvirtd, vm):
    """
    Kill libvirtd with signal SEGV.
    """
    process.run('pkill --signal 11 libvirtd')
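# Equivalent sketch using os.kill instead of pkill (assumes a helper such
# as utils_libvirtd providing the daemon PID; the function below is
# illustrative, not part of the test suite):
import os
import signal


def kill_pid_with_segv(pid):
    """Send SIGSEGV (signal 11) to the given process, as pkill does above."""
    os.kill(pid, signal.SIGSEGV)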
def test_tap_parser(self): perl_script = script.TemporaryScript("tap_parser.pl", PERL_TAP_PARSER_SNIPPET % self.tmpdir) perl_script.save() process.run("perl %s" % perl_script)
def run(test, params, env): """ Test nbd disk option. 1.Prepare backend storage 2.Use nbd to export the backend storage with or without TLS 3.Prepare a disk xml indicating to the backend storage 4.Start VM with disk hotplug/coldplug 5.Start snapshot or save/restore operations on ndb disk 6.Check some behaviours on VM 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def check_disk_save_restore(save_file): """ Check domain save and restore operation. :param save_file: the path to saved file """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Check domain snapshot operations. """ # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) device_target = 'vda' snapshot_name1_option = "--diskspec %s,file=%s,snapshot=external --disk-only --atomic" % ( device_target, snapshot_name1_file) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name1, snapshot_name1_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name1 not in snap_lists: test.fail("Snapshot %s doesn't exist" % snapshot_name1) # Check file can be created after snapshot def _check_file_create(filename): """ Check whether file with specified filename exists or not. :param filename: finename """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) cmd = ("echo" " teststring > /tmp/{0}".format(filename)) status, output = session.cmd_status_output(cmd) if status != 0: test.fail("Failed to touch one file on VM internal") except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise finally: if session: session.close() _check_file_create("disk.txt") # Create memory snapshot. snapshot_name2_mem_option = "--memspec file=%s,snapshot=external" % ( snapshot_name2_mem_file) snapshot_name2_disk_option = "--diskspec %s,file=%s,snapshot=external --atomic" % ( device_target, snapshot_name2_disk_file) snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option, snapshot_name2_disk_option) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name2, snapshot_name2_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name2 not in snap_lists: test.fail("Snapshot: %s doesn't exist" % snapshot_name2) _check_file_create("mem.txt") def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. 
""" try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdb") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") image_path = params.get("emulated_image") # Get config parameters status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") tls_enabled = "yes" == params.get("enable_tls", "no") enable_private_key_encryption = "yes" == params.get( "enable_private_key_encryption", "no") private_key_encrypt_passphrase = params.get("private_key_password") domain_operation = params.get("domain_operation") secret_uuid = None # Get snapshot attributes. snapshot_name1 = params.get("snapshot_name1") snapshot_name1_file = params.get("snapshot_name1_file") snapshot_name2 = params.get("snapshot_name2") snapshot_name2_mem_file = params.get("snapshot_name2_mem_file") snapshot_name2_disk_file = params.get("snapshot_name2_disk_file") # Initialize one NbdExport object nbd = None # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") export_name = params.get("export_name", None) deleteExisted = "yes" == params.get("deleteExisted", "yes") tls_bit = "no" if tls_enabled: tls_bit = "yes" # Create secret if enable_private_key_encryption: # this feature is enabled after libvirt 6.6.0 if not libvirt_version.version_compare(6, 6, 0): test.cancel( "current libvirt version doesn't support client private key encryption" ) utils_secret.clean_up_secrets() private_key_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", private_key_sec_uuid) private_key_sec_passwd = params.get("private_key_password", "redhat") ret = virsh.secret_set_value(private_key_sec_uuid, private_key_sec_passwd, encode=True, use_file=True, debug=True) libvirt.check_exit_status(ret) secret_uuid = private_key_sec_uuid # Initialize special test environment config for snapshot operations. if domain_operation == "snap_shot": first_disk = vm.get_first_disk_devices() image_path = first_disk['source'] device_target = 'vda' # Remove previous xml disks = vmxml.get_devices(device_type="disk") for disk_ in disks: if disk_.target['dev'] == device_target: vmxml.del_device(disk_) break # Create NbdExport object nbd = NbdExport( image_path, image_format=device_format, port=nbd_server_port, export_name=export_name, tls=tls_enabled, deleteExisted=deleteExisted, private_key_encrypt_passphrase=private_key_encrypt_passphrase, secret_uuid=secret_uuid) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd", "tls": "%s" % tls_bit} if export_name: source_attrs_dict.update({"name": "%s" % export_name}) disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update( {"hosts": [{ "name": nbd_server_host, "port": nbd_server_port }]}) # Add disk xml. disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": 'raw'} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) # Hotplug disk. 
if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) # Check save and restore operation and its result if domain_operation == 'save_restore': save_file = "/tmp/%s.save" % vm_name check_disk_save_restore(save_file) # Check attached nbd disk if check_partitions and not status_error: logging.debug("wait seconds for starting in checking vm part") time.sleep(2) if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") # Check snapshot operation and its result if domain_operation == 'snap_shot': check_snapshot() # Unplug disk. if hotplug_disk: result = virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(result, status_error) finally: if enable_private_key_encryption: utils_secret.clean_up_secrets() # Clean up backend storage and TLS try: if nbd: nbd.cleanup() # Clean up snapshots if exist if domain_operation == 'snap_shot': snap_lists = virsh.snapshot_list(vm_name, debug=True) for snap_name in snap_lists: virsh.snapshot_delete(vm_name, snap_name, "--metadata", debug=True, ignore_status=True) # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata")
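# For context, serving a disk image over NBD (roughly what the NbdExport
# helper used above wraps, minus the TLS and secret handling) can be done
# with qemu-nbd. This is a sketch under those assumptions, not the
# helper's actual implementation:
from avocado.utils import process


def start_nbd_export(image_path, port="10001", export_name="nbdtest",
                     image_format="raw"):
    """Serve image_path on the given TCP port until the server is killed."""
    cmd = ("qemu-nbd --persistent --format %s --port %s --export-name %s %s"
           % (image_format, port, export_name, image_path))
    server = process.SubProcess(cmd)
    server.start()
    return server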
def run(test, params, env): """ Test of virt-edit. 1) Get and init parameters for test. 2) Prepare environment. 3) Run virt-edit command and get result. 5) Recover environment. 6) Check result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE") remote_user = params.get("virt_edit_remote_user", "root") remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE") connect_uri = params.get("virt_edit_connect_uri") if connect_uri is not None: uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host) if uri.count("EXAMPLE"): test.cancel("Please config host and passwd first.") # Config ssh autologin for it ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22) else: uri = libvirt_vm.normalize_connect_uri( params.get("connect_uri", "default")) start_vm = params.get("start_vm", "no") vm_ref = params.get("virt_edit_vm_ref", vm_name) file_ref = params.get("virt_edit_file_ref", "/etc/hosts") created_img = params.get("virt_edit_created_img", "/tmp/foo.img") foo_line = params.get("foo_line", "") options = params.get("virt_edit_options") options_suffix = params.get("virt_edit_options_suffix") status_error = params.get("status_error", "no") backup_extension = params.get("virt_edit_backup_extension") test_format = params.get("virt_edit_format") # virt-edit should not be used when vm is running. # (for normal test) if vm.is_alive() and start_vm == "no": vm.destroy(gracefully=True) dom_disk_dict = vm.get_disk_devices() # TODO dom_uuid = vm.get_uuid() # Disk format: raw or qcow2 disk_format = None # If object is a disk file path is_disk = False if vm_ref == "domdisk": if len(dom_disk_dict) != 1: test.error("Only one disk device should exist on " "%s:\n%s." % (vm_name, dom_disk_dict)) disk_detail = list(dom_disk_dict.values())[0] vm_ref = disk_detail['source'] logging.info("disk to be edit:%s", vm_ref) if test_format: # Get format:raw or qcow2 info = process.run("qemu-img info %s" % vm_ref, shell=True).stdout_text for line in info.splitlines(): comps = line.split(':') if comps[0].count("format"): disk_format = comps[-1].strip() break if disk_format is None: test.error("Cannot get disk format:%s" % info) is_disk = True elif vm_ref == "domname": vm_ref = vm_name elif vm_ref == "domuuid": vm_ref = dom_uuid elif vm_ref == "createdimg": vm_ref = created_img process.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img, shell=True) is_disk = True # Decide whether pass a exprt for virt-edit command. if foo_line != "": expr = "s/$/%s/" % foo_line else: expr = "" if backup_extension is not None: if options is None: options = "" options += " -b %s" % backup_extension # Stop libvirtd if test need. libvirtd = params.get("libvirtd", "on") if libvirtd == "off": utils_libvirtd.libvirtd_stop() # Run test result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk=is_disk, disk_format=disk_format, options=options, extra=options_suffix, expr=expr, connect_uri=uri, debug=True) status = result.exit_status # Recover libvirtd. if libvirtd == "off": utils_libvirtd.libvirtd_start() process.run("rm -f %s" % created_img, shell=True) # Remove backup file in vm if it exists if backup_extension is not None: backup_file = file_ref + backup_extension cleanup_file_in_vm(test, vm, backup_file) status_error = (status_error == "yes") if status != 0: if not status_error: test.fail("Command executed failed.") else: if (expr != "" and (not login_to_check_foo_line(test, vm, file_ref, foo_line))): test.fail("Virt-edit to add %s in %s failed." 
"Test failed." % (foo_line, file_ref))
def run(test, params, env):
    """
    Run various regression tests and check whether libvirt daemon crashes.
    """
    func_name = 'run_' + params.get("func_name", "default")
    post_func_name = 'post_' + params.get("func_name", "default")
    repeat = int(params.get("repeat", "1"))
    vm_name = params.get("main_vm", "virt-tests-vm1")
    bug_url = params.get("bug_url", None)
    vm = env.get_vm(vm_name)
    # Run virtlogd in the foreground
    try:
        path.find_command('virtlogd')
        process.run("systemctl stop virtlogd", ignore_status=True)
        process.run("virtlogd -d")
    except path.CmdNotFoundError:
        pass
    libvirtd = LibvirtdSession(gdb=True)
    process.run("rm -rf /var/run/libvirt/libvirt-*", shell=True,
                ignore_status=True)
    try:
        libvirtd.start()
        run_func = globals()[func_name]
        for i in range(repeat):
            run_func(params, libvirtd, vm)
        stopped = libvirtd.wait_for_stop(timeout=5)
        if stopped:
            logging.debug('Backtrace:')
            for line in libvirtd.back_trace():
                logging.debug(line)
            if bug_url:
                logging.error("You might have hit a regression bug. "
                              "Please see %s" % bug_url)
            test.fail("Libvirtd stopped with %s" %
                      libvirtd.bundle['stop-info'])
        if post_func_name in globals():
            post_func = globals()[post_func_name]
            post_func(params, libvirtd, vm)
    finally:
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
            process.run('systemctl restart libvirtd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
def run(test, params, env): """ Test virsh interface related commands. (1) If using given exist interface for testing(eg. lo or ethX): 1.1 Dumpxml for the interface(with --inactive option) 1.2 Destroy the interface 1.3 Undefine the interface (2) Define an interface from XML file (3) List interfaces with '--inactive' optioin (4) Start the interface (5) List interfaces with no option (6) Dumpxml for the interface (7) Get interface MAC address by interface name (8) Get interface name by interface MAC address (9) Delete interface if not use the exist interface for testing 9.1 Destroy the interface 9.2 Undefine the interface Caveat, this test may affect the host network, so using the loopback(lo) device by default. You can specify the interface which you want, but be careful. """ iface_name = params.get("iface_name", "ENTER.BRIDGE.NAME") iface_xml = params.get("iface_xml") iface_type = params.get("iface_type", "ethernet") iface_pro = params.get("iface_pro", "") iface_eth = params.get("iface_eth", "") iface_tag = params.get("iface_tag", "0") if iface_type == "vlan": iface_name = iface_eth + "." + iface_tag iface_eth_using = "yes" == params.get("iface_eth_using", "no") ping_ip = params.get("ping_ip", "localhost") use_exist_iface = "yes" == params.get("use_exist_iface", "no") status_error = "yes" == params.get("status_error", "no") net_restart = "yes" == params.get("iface_net_restart", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") if ping_ip.count("ENTER"): test.cancel("Please input a valid ip address") if iface_name.count("ENTER"): test.cancel("Please input a existing bridge/ethernet name") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user', "EXAMPLE") if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") virsh_dargs = {'debug': True} list_dumpxml_dargs = {'debug': True} if params.get('setup_libvirt_polkit') == 'yes': if not list_dumpxml_acl: virsh_dargs['uri'] = uri virsh_dargs['unprivileged_user'] = unprivileged_user else: list_dumpxml_dargs['uri'] = uri list_dumpxml_dargs['unprivileged_user'] = unprivileged_user list_dumpxml_dargs['ignore_status'] = False # acl api negative testing params write_save_status_error = "yes" == params.get("write_save_status_error", "no") start_status_error = "yes" == params.get("start_status_error", "no") stop_status_error = "yes" == params.get("stop_status_error", "no") delete_status_error = "yes" == params.get("delete_status_error", "no") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) if vm: xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_script = NETWORK_SCRIPT + iface_name iface_script_bk = os.path.join(data_dir.get_tmp_dir(), "iface-%s.bk" % iface_name) net_bridge = utils_net.Bridge() if use_exist_iface: if iface_type == "bridge": if iface_name not in net_bridge.list_br(): test.error("Bridge '%s' not exists" % iface_name) ifaces = net_bridge.get_structure()[iface_name] if len(ifaces) < 1: # In this situation, dhcp maybe cannot get ip address # Unless you use static, we'd better skip such case test.cancel("Bridge '%s' has no interface" " bridged, perhaps cannot get" " ipaddress" % iface_name) net_iface = utils_net.Interface(name=iface_name) iface_is_up = True list_option = "--all" if use_exist_iface: if not libvirt.check_iface(iface_name, "exists", "--all"): test.error("Interface 
'%s' not exists" % iface_name) iface_xml = os.path.join(data_dir.get_tmp_dir(), "iface.xml.tmp") iface_is_up = net_iface.is_up() else: # Note, if not use the interface which already exists, iface_name must # be equal to the value specified in XML file if libvirt.check_iface(iface_name, "exists", "--all"): test.error("Interface '%s' already exists" % iface_name) if not iface_xml: test.error("XML file is needed.") iface_xml = os.path.join(data_dir.get_tmp_dir(), iface_xml) create_xml_file(iface_xml, params) # Stop NetworkManager as which may conflict with virsh iface commands try: NM = utils_path.find_command("NetworkManager") except utils_path.CmdNotFoundError: logging.debug("No NetworkManager service.") NM = None NM_is_running = False if NM is not None: NM_service = service.Factory.create_service("NetworkManager") NM_is_running = NM_service.status() if NM_is_running: NM_service.stop() # run test cases try: if use_exist_iface: # back up the interface script process.run("cp %s %s" % (iface_script, iface_script_bk), shell=True) # step 1.1 # dumpxml for interface if list_dumpxml_acl: virsh.iface_list(**list_dumpxml_dargs) xml = virsh.iface_dumpxml(iface_name, "--inactive", to_file=iface_xml, **list_dumpxml_dargs) # Step 1.2 # Destroy interface if iface_is_up: result = virsh.iface_destroy(iface_name, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and stop_status_error): # acl_test negative test libvirt.check_exit_status(result, stop_status_error) virsh.iface_destroy(iface_name, debug=True) else: libvirt.check_exit_status(result, status_error) # Step 1.3 # Undefine interface result = virsh.iface_undefine(iface_name, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and delete_status_error): # acl_test negative test libvirt.check_exit_status(result, delete_status_error) virsh.iface_undefine(iface_name, debug=True) else: libvirt.check_exit_status(result, status_error) if not status_error: if libvirt.check_iface(iface_name, "exists", list_option): test.fail("%s is still present." % iface_name) # Step 2 # Define interface result = virsh.iface_define(iface_xml, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and write_save_status_error): # acl_test negative test libvirt.check_exit_status(result, write_save_status_error) virsh.iface_define(iface_xml, debug=True) elif iface_type == "bond" and not ping_ip: libvirt.check_exit_status(result, True) return else: libvirt.check_exit_status(result, status_error) if net_restart: network = service.Factory.create_service("network") network.restart() # After network restart, (ethernet)interface will be started if (not net_restart and iface_type in ("bridge", "ethernet")) or\ (not use_exist_iface and iface_type in ("vlan", "bond")): # Step 3 # List inactive interfaces list_option = "--inactive" if not status_error: if not libvirt.check_iface(iface_name, "exists", list_option): test.fail("Fail to find %s." 
% iface_name) # Step 4 # Start interface result = virsh.iface_start(iface_name, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and start_status_error): # acl_test negative test libvirt.check_exit_status(result, start_status_error) virsh.iface_start(iface_name, debug=True) elif (not net_restart and not use_exist_iface and (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or iface_type == "bridge" and iface_pro == "dhcp")): libvirt.check_exit_status(result, True) else: libvirt.check_exit_status(result, status_error) if not status_error: iface_ip = net_iface.get_ip() ping_ip = ping_ip if not iface_ip else iface_ip if ping_ip: if not libvirt.check_iface(iface_name, "ping", ping_ip): test.fail("Ping %s fail." % ping_ip) # Step 5 # List active interfaces if use_exist_iface or\ (iface_pro != "dhcp" and iface_type == "bridge") or\ (iface_eth_using and iface_type == "vlan"): list_option = "" if not status_error: if not libvirt.check_iface(iface_name, "exists", list_option): test.fail("Fail to find %s in active " "interface list" % iface_name) if vm: if vm.is_alive(): vm.destroy() iface_index = 0 iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name) # Before test, detach all interfaces in guest for mac in iface_mac_list: iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac) type = iface_info.get('type') virsh.detach_interface( vm_name, "--type %s --mac %s" " --config" % (type, mac)) # After detach interface, vm.virtnet also need update, the # easy way is free these mac addresses before start VM vm.free_mac_address(iface_index) iface_index += 1 virsh.attach_interface( vm_name, "--type %s --source %s" " --config" % (iface_type, iface_name)) vm.start() try: # Test if guest can be login vm.wait_for_login() except remote.LoginError: test.fail("Cannot login guest with %s" % iface_name) # Step 6 # Dumpxml for interface if list_dumpxml_acl: virsh.iface_list(**list_dumpxml_dargs) xml = virsh.iface_dumpxml(iface_name, "", to_file="", **list_dumpxml_dargs) logging.debug("Interface '%s' XML:\n%s", iface_name, xml) # Step 7 # Get interface MAC address by name result = virsh.iface_mac(iface_name, debug=True) libvirt.check_exit_status(result, status_error) if not status_error and result.stdout.strip(): if not libvirt.check_iface(iface_name, "mac", result.stdout.strip()): test.fail("Mac address check fail") # Step 8 # Get interface name by MAC address # Bridge's Mac equal to bridged interface's mac if iface_type not in ("bridge", "vlan") and result.stdout.strip(): iface_mac = net_iface.get_mac() result = virsh.iface_name(iface_mac, debug=True) libvirt.check_exit_status(result, status_error) # Step 9 if not use_exist_iface: # Step 9.0 # check if interface's state is active before destroy if libvirt.check_iface(iface_name, "state", "--all"): # Step 9.1 # Destroy interface result = virsh.iface_destroy(iface_name, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and stop_status_error): # acl_test negative test libvirt.check_exit_status(result, stop_status_error) virsh.iface_destroy(iface_name, debug=True) elif (not net_restart and iface_type == "ethernet" and iface_pro in ["", "dhcp"] or iface_type == "bridge" and iface_pro == "dhcp"): libvirt.check_exit_status(result, True) else: libvirt.check_exit_status(result, status_error) # Step 9.2 # Undefine interface result = virsh.iface_undefine(iface_name, **virsh_dargs) if (params.get('setup_libvirt_polkit') == 'yes' and delete_status_error): # acl_test negative test libvirt.check_exit_status(result, delete_status_error) 
virsh.iface_undefine(iface_name, debug=True) else: libvirt.check_exit_status(result, status_error) list_option = "--all" if not status_error: if libvirt.check_iface(iface_name, "exists", list_option): test.fail("%s is still present." % iface_name) finally: if os.path.exists(iface_xml): os.remove(iface_xml) if os.path.exists(iface_script): os.remove(iface_script) if use_exist_iface: if not os.path.exists(iface_script): process.run("mv %s %s" % (iface_script_bk, iface_script), shell=True) if iface_is_up and\ not libvirt.check_iface(iface_name, "exists", ""): # Need reload script process.run("ifup %s" % iface_name, shell=True) elif not iface_is_up and libvirt.check_iface( iface_name, "exists", ""): net_iface.down() if vm: xml_bak.sync() else: if libvirt.check_iface(iface_name, "exists", "--all"): # Remove the interface try: utils_net.bring_down_ifname(iface_name) except utils_net.TAPBringDownError: pass if iface_type == "bridge": if iface_name in net_bridge.list_br(): try: net_bridge.del_bridge(iface_name) except IOError: pass if NM_is_running: NM_service.start()
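# For reference, a minimal interface XML of the kind create_xml_file() is
# expected to write for iface_type == "ethernet"; the device name and
# protocol settings below are illustrative, the real values come from the
# test parameters:
ETHERNET_IFACE_XML = """
<interface type='ethernet' name='eth0'>
  <start mode='onboot'/>
  <protocol family='ipv4'>
    <dhcp/>
  </protocol>
</interface>
"""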
def run(test, params, env): """ Convert specific esx guest """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') vpx_hostname = params.get('vpx_hostname') esx_ip = params.get('esx_hostname') vpx_dc = params.get('vpx_dc') vm_name = params.get('main_vm') output_mode = params.get('output_mode') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = libvirt.PoolVolumeTest(test, params) v2v_timeout = int(params.get('v2v_timeout', 1200)) status_error = 'yes' == params.get('status_error', 'no') address_cache = env.get('address_cache') checkpoint = params.get('checkpoint', '') error_list = [] remote_host = vpx_hostname def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def check_device_exist(check, virsh_session_id): """ Check if device exist after convertion """ xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout if check == 'cdrom': if "device='cdrom'" not in xml: log_fail('CDROM no longer exists') def check_vmtools(vmcheck, check): """ Check whether vmware tools packages have been removed, or vmware-tools service has stopped :param vmcheck: VMCheck object for vm checking :param check: Checkpoint of different cases :return: None """ if check == 'vmtools': logging.info('Check if packages been removed') pkgs = vmcheck.session.cmd('rpm -qa').strip() removed_pkgs = params.get('removed_pkgs').strip().split(',') if not removed_pkgs: test.error('Missing param "removed_pkgs"') for pkg in removed_pkgs: if pkg in pkgs: log_fail('Package "%s" not removed' % pkg) elif check == 'vmtools_service': logging.info('Check if service stopped') vmtools_service = params.get('service_name') status = utils_misc.get_guest_service_status( vmcheck.session, vmtools_service) logging.info('Service %s status: %s', vmtools_service, status) if status != 'inactive': log_fail('Service "%s" is not stopped' % vmtools_service) def check_modprobe(vmcheck): """ Check whether content of /etc/modprobe.conf meets expectation """ content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip() logging.debug(content) cfg_content = params.get('cfg_content') if not cfg_content: test.error('Missing content for search') logging.info('Search "%s" in /etc/modprobe.conf', cfg_content) pattern = '\s+'.join(cfg_content.split()) if not re.search(pattern, content): log_fail('Not found "%s"' % cfg_content) def check_device_map(vmcheck): """ Check if the content of device.map meets expectation. 
""" logging.info(vmcheck.session.cmd('fdisk -l').strip()) device_map = params.get('device_map_path') content = vmcheck.session.cmd('cat %s' % device_map) logging.debug('Content of device.map:\n%s', content) logging.info('Found device: %d', content.count('/dev/')) logging.info('Found virtio device: %d', content.count('/dev/vd')) if content.count('/dev/') != content.count('/dev/vd'): log_fail('Content of device.map not correct') else: logging.info('device.map has been remaped to "/dev/vd*"') def check_result(result, status_error): """ Check virt-v2v command result """ libvirt.check_exit_status(result, status_error) output = result.stdout + result.stderr if checkpoint == 'empty_cdrom': if status_error: log_fail('Virsh dumpxml failed for empty cdrom image') elif not status_error: if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') elif output_mode == 'libvirt': virsh.start(vm_name, debug=True) # Check guest following the checkpoint document after convertion logging.info('Checking common checkpoints for v2v') vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if checkpoint not in ['GPO_AV', 'ovmf']: ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") # Check specific checkpoints if checkpoint == 'cdrom': virsh_session = utils_sasl.VirshSessionSASL(params) virsh_session_id = virsh_session.get_id() check_device_exist('cdrom', virsh_session_id) if checkpoint.startswith('vmtools'): check_vmtools(vmchecker.checker, checkpoint) if checkpoint == 'modprobe': check_modprobe(vmchecker.checker) if checkpoint == 'device_map': check_device_map(vmchecker.checker) # Merge 2 error lists error_list.extend(vmchecker.errors) log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: v2v_params = { 'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name, 'vpx_dc': vpx_dc, 'esx_ip': esx_ip, 'new_name': vm_name + utils_misc.generate_random_string(4), 'v2v_opts': '-v -x', 'input_mode': 'libvirt', 'storage': params.get('output_storage', 'default'), 'network': params.get('network'), 'bridge': params.get('bridge'), 'target': params.get('target') } os.environ['LIBGUESTFS_BACKEND'] = 'direct' v2v_uri = utils_v2v.Uri('esx') remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip) # Create password file for access to ESX hypervisor vpx_passwd = params.get("vpx_password") logging.debug("vpx password is %s" % vpx_passwd) vpx_passwd_file = params.get("vpx_passwd_file") with open(vpx_passwd_file, 'w') as pwd_f: pwd_f.write(vpx_passwd) v2v_params['v2v_opts'] += " --password-file %s" % vpx_passwd_file if params.get('output_format'): v2v_params.update({'output_format': params['output_format']}) # Rename guest with special name while converting to rhev if '#' in vm_name and output_mode == 'rhev': v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_') # Create SASL user on the ovirt host if output_mode == 'rhev': user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) # Create libvirt dir pool if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'ovmf': 
utils_package.package_install('OVMF') if checkpoint == 'root_ask': v2v_params['v2v_opts'] += ' --root ask' v2v_params['custom_inputs'] = params.get('choice', '1') if checkpoint.startswith('root_') and checkpoint != 'root_ask': root_option = params.get('root_option') v2v_params['v2v_opts'] += ' --root %s' % root_option if checkpoint == 'copy_to_local': esx_password = params.get('esx_password') esx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "esx_passwd") logging.info('Prepare esx password file') with open(esx_passwd_file, 'w') as pwd_f: pwd_f.write(esx_password) esx_uri = 'esx://root@%s/?no_verify=1' % esx_ip copy_cmd = 'virt-v2v-copy-to-local -ic %s %s --password-file %s' %\ (esx_uri, vm_name, esx_passwd_file) process.run(copy_cmd) v2v_params['input_mode'] = 'libvirtxml' v2v_params['input_file'] = '%s.xml' % vm_name if checkpoint == 'with_proxy': http_proxy = params.get('esx_http_proxy') https_proxy = params.get('esx_https_proxy') logging.info('Set http_proxy=%s, https_proxy=%s', http_proxy, https_proxy) os.environ['http_proxy'] = http_proxy os.environ['https_proxy'] = https_proxy if checkpoint == 'empty_cdrom': virsh_dargs = { 'uri': remote_uri, 'remote_ip': remote_host, 'remote_user': '******', 'remote_pwd': vpx_passwd, 'debug': True } remote_virsh = virsh.VirshPersistent(**virsh_dargs) v2v_result = remote_virsh.dumpxml(vm_name) else: v2v_result = utils_v2v.v2v_cmd(v2v_params) if 'new_name' in v2v_params: vm_name = params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'with_proxy': logging.info('Unset http_proxy&https_proxy') os.environ.pop('http_proxy') os.environ.pop('https_proxy') # Cleanup constant files utils_v2v.cleanup_constant_files(params)
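# Note: for an ESX host managed by vCenter, utils_v2v.Uri('esx') builds a
# libvirt vpx URI of the shape
# "vpx://root@<vpx_hostname>/<vpx_dc>/<esx_hostname>?no_verify=1",
# which is what remote_uri above holds; the exact query string depends on
# the helper's defaults.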
def restart(self, service_name): process.run("systemctl restart %s.service" % (service_name))
"utils", ("core", "plugins"), ("avocado.rst", "modules.rst"), ), "Internal (Core) APIs": ("core", "Internal APIs that may be of interest to " "Avocado hackers.", "core", ("utils", "plugins"), ("avocado.rst", "modules.rst")), "Extension (plugin) APIs": ("plugins", "Extension APIs that may be of interest to " "plugin writers.", "plugins", ("core", "utils"), ("avocado.rst", "modules.rst")) } # clean up all previous rst files. RTD is known to keep them from previous runs process.run("find %s -name '*.rst' -delete" % base_api_output_dir) for (section, params) in API_SECTIONS.iteritems(): output_dir = os.path.join(base_api_output_dir, params[2]) exclude_dirs = [os.path.join(api_source_dir, d) for d in params[3]] exclude_dirs = " ".join(exclude_dirs) files_to_remove = [ os.path.join(base_api_output_dir, output_dir, d) for d in params[4] ] # generate all rst files if apidoc: cmd = apidoc_template % locals() process.run(cmd) # remove unnecessary ones for f in files_to_remove: os.unlink(f)
def restart(self, service_name): process.run("/etc/init.d/%s restart" % (service_name))
def status(self, service_name): process.run("systemctl show %s.service" % (service_name))
def run(test, params, env): """ Test svirt in adding disk to VM. (1).Init variables for test. (2).Config qemu conf if need (3).Label the VM and disk with proper label. (4).Start VM and check the context. (5).Destroy VM and check the context. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing") # Get variables about seclabel for VM. sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic") sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux") sec_label = params.get("svirt_start_destroy_vm_sec_label", None) sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None) security_driver = params.get("security_driver", None) security_default_confined = params.get("security_default_confined", None) security_require_confined = params.get("security_require_confined", None) no_sec_model = 'yes' == params.get("no_sec_model", 'no') xattr_check = 'yes' == params.get("xattr_check", 'no') sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes") sec_dict = {'type': sec_type, 'relabel': sec_relabel} sec_dict_list = [] def _set_sec_model(model): """ Set sec_dict_list base on given sec model type """ sec_dict_copy = sec_dict.copy() sec_dict_copy['model'] = model if sec_type != "none": if sec_type == "dynamic" and sec_baselabel: sec_dict_copy['baselabel'] = sec_baselabel else: sec_dict_copy['label'] = sec_label sec_dict_list.append(sec_dict_copy) if not no_sec_model: if "," in sec_model: sec_models = sec_model.split(",") for model in sec_models: _set_sec_model(model) else: _set_sec_model(sec_model) else: sec_dict_list.append(sec_dict) logging.debug("sec_dict_list is: %s" % sec_dict_list) poweroff_with_destroy = ("destroy" == params.get( "svirt_start_destroy_vm_poweroff", "destroy")) # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Get variables about image. img_label = params.get('svirt_start_destroy_disk_label') # Backup disk Labels. disks = vm.get_disk_devices() backup_labels_of_disks = {} backup_ownership_of_disks = {} for disk in list(disks.values()): disk_path = disk['source'] backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file( filename=disk_path) stat_re = os.stat(disk_path) backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) # Backup selinux of host. backup_sestatus = utils_selinux.get_status() qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() def _resolve_label(label_string): labels = label_string.split(":") label_type = labels[2] if len(labels) == 4: label_range = labels[3] elif len(labels) > 4: label_range = "%s:%s" % (labels[3], labels[4]) else: label_range = None return (label_type, label_range) def _check_label_equal(label1, label2): label1s = label1.split(":") label2s = label2.split(":") for i in range(len(label1s)): if label1s[i] != label2s[i]: return False return True try: # Set disk label (img_label_type, img_label_range) = _resolve_label(img_label) for disk in list(disks.values()): disk_path = disk['source'] dir_path = "%s(/.*)?" 
% os.path.dirname(disk_path) try: utils_selinux.del_defcon(img_label_type, pathregex=dir_path) except Exception as err: logging.debug("Delete label failed: %s", err) # Using semanage set context persistently utils_selinux.set_defcon(context_type=img_label_type, pathregex=dir_path, context_range=img_label_range) utils_selinux.verify_defcon(pathname=disk_path, readonly=False, forcedesc=True) if sec_relabel == "no" and sec_type == 'none': os.chown(disk_path, 107, 107) # Set selinux of host. utils_selinux.set_status(host_sestatus) # Set qemu conf if security_driver: qemu_conf.set_string('security_driver', security_driver) if security_default_confined: qemu_conf.security_default_confined = security_default_confined if security_require_confined: qemu_conf.security_require_confined = security_require_confined if (security_driver or security_default_confined or security_require_confined): logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() # Set the context of the VM. vmxml.set_seclabel(sec_dict_list) vmxml.sync() logging.debug("the domain xml is: %s" % vmxml.xmltreefile) # restart libvirtd libvirtd.restart() # Start VM to check the VM is able to access the image or not. try: # Need another guest to test the xattr added by libvirt if xattr_check: blklist = virsh.domblklist(vm_name, debug=True) target_disk = re.findall(r"[v,s]d[a-z]", blklist.stdout.strip())[0] guest_name = "%s_%s" % (vm_name, '1') cmd = "virt-clone --original %s --name %s " % (vm_name, guest_name) cmd += "--auto-clone --skip-copy=%s" % target_disk process.run(cmd, shell=True, verbose=True) vm.start() # Start VM successfully. # VM with seclabel can access the image with the context. if status_error: test.fail("Test succeeded in negative case.") # Start another vm with the same disk image. # The xattr will not be changed. if xattr_check: virsh.start(guest_name, ignore_status=True, debug=True) # Check the label of VM and image when VM is running. vm_context = utils_selinux.get_context_of_process(vm.get_pid()) if (sec_type == "static") and (not vm_context == sec_label): test.fail("Label of VM is not expected after " "starting.\n" "Detail: vm_context=%s, sec_label=%s" % (vm_context, sec_label)) disk_context = utils_selinux.get_context_of_file( filename=list(disks.values())[0]['source']) if (sec_relabel == "no") and (not disk_context == img_label): test.fail("Label of disk is not expected after VM " "starting.\n" "Detail: disk_context=%s, img_label=%s." % (disk_context, img_label)) if sec_relabel == "yes" and not no_sec_model: vmxml = VMXML.new_from_dumpxml(vm_name) imagelabel = vmxml.get_seclabel()[0]['imagelabel'] # the disk context is 'system_u:object_r:svirt_image_t:s0', # when VM started, the MLS/MCS Range will be added automatically. # imagelabel turns to be 'system_u:object_r:svirt_image_t:s0:cxx,cxxx' # but we shouldn't check the MCS range. if not _check_label_equal(disk_context, imagelabel): test.fail("Label of disk is not relabeled by " "VM\nDetal: disk_context=" "%s, imagelabel=%s" % (disk_context, imagelabel)) expected_results = "trusted.libvirt.security.ref_dac=\"1\"\n" expected_results += "trusted.libvirt.security.ref_selinux=\"1\"" cmd = "getfattr -m trusted.libvirt.security -d %s " % list( disks.values())[0]['source'] utils_test.libvirt.check_cmd_output(cmd, content=expected_results) # Check the label of disk after VM being destroyed. 
if poweroff_with_destroy: vm.destroy(gracefully=False) else: vm.wait_for_login() vm.shutdown() filename = list(disks.values())[0]['source'] img_label_after = utils_selinux.get_context_of_file(filename) stat_re = os.stat(filename) ownership_of_disk = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) logging.debug("The ownership of disk after guest starting is:\n") logging.debug(ownership_of_disk) logging.debug("The ownership of disk before guest starting is:\n") logging.debug(backup_ownership_of_disks[filename]) if not (sec_relabel == "no" and sec_type == 'none'): if not libvirt_version.version_compare(5, 6, 0): if img_label_after != img_label: # Bug 547546 - RFE: the security drivers must remember original # permissions/labels and restore them after # https://bugzilla.redhat.com/show_bug.cgi?id=547546 err_msg = "Label of disk is not restored in VM shutting down.\n" err_msg += "Detail: img_label_after=%s, " % img_label_after err_msg += "img_label_before=%s.\n" % img_label err_msg += "More info in https://bugzilla.redhat.com/show_bug" err_msg += ".cgi?id=547546" test.fail(err_msg) elif (img_label_after != img_label or ownership_of_disk != backup_ownership_of_disks[filename]): err_msg = "Label of disk is not restored in VM shutting down.\n" err_msg += "Detail: img_label_after=%s, %s " % ( img_label_after, ownership_of_disk) err_msg += "img_label_before=%s, %s\n" % ( img_label, backup_ownership_of_disks[filename]) test.fail(err_msg) # The xattr should be cleaned after guest shutoff. cmd = "getfattr -m trusted.libvirt.security -d %s " % list( disks.values())[0]['source'] utils_test.libvirt.check_cmd_output(cmd, content="") except virt_vm.VMStartError as e: # Starting VM failed. # VM with seclabel can not access the image with the context. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) finally: # clean up for path, label in list(backup_labels_of_disks.items()): # Using semanage set context persistently dir_path = "%s(/.*)?" % os.path.dirname(path) (img_label_type, img_label_range) = _resolve_label(label) try: utils_selinux.del_defcon(img_label_type, pathregex=dir_path) except Exception as err: logging.debug("Delete label failed: %s", err) utils_selinux.set_defcon(context_type=img_label_type, pathregex=dir_path, context_range=img_label_range) utils_selinux.verify_defcon(pathname=path, readonly=False, forcedesc=True) for path, label in list(backup_ownership_of_disks.items()): label_list = label.split(":") os.chown(path, int(label_list[0]), int(label_list[1])) backup_xml.sync() if xattr_check: virsh.undefine(guest_name, ignore_status=True) utils_selinux.set_status(backup_sestatus) if (security_driver or security_default_confined or security_require_confined): qemu_conf.restore() libvirtd.restart()
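# Background for the getfattr checks above: while a guest runs, libvirt
# (5.6.0 and newer, hence the version_compare branch) records the original
# DAC/SELinux ownership of each image in trusted.libvirt.security.*
# extended attributes (the ref_dac/ref_selinux values are reference
# counts), and removes them again once the last guest using the image
# shuts down, which is why the post-shutdown check expects empty output.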
def stop(self, service_name): process.run("systemctl stop %s.service" % (service_name))
def test_gdb_multiple_prerun_commands(self): os.chdir(basedir) cmd_line = ( './scripts/avocado run --job-results-dir %s --sysinfo=off --gdb-prerun-commands=/dev/null ' '--gdb-prerun-commands=foo:/dev/null passtest' % self.tmpdir) process.run(cmd_line)
def stop(self, service_name): process.run("/etc/init.d/%s stop" % (service_name))
def run_subtest(self): """Run daos_test with a subtest argument.""" subtest = self.get_test_param("daos_test") num_clients = self.get_test_param("num_clients") if num_clients is None: num_clients = self.params.get("num_clients", '/run/daos_tests/*') scm_size = self.params.get("scm_size", '/run/pool/*') nvme_size = self.params.get("nvme_size", '/run/pool/*') args = self.get_test_param("args", "") stopped_ranks = self.get_test_param("stopped_ranks", []) pools_created = self.get_test_param("pools_created", 1) self.increment_timeout(POOL_TIMEOUT_INCREMENT * pools_created) dmg = self.get_dmg_command() dmg_config_file = dmg.yaml.filename if self.hostlist_clients: dmg.copy_certificates( get_log_file("daosCA/certs"), self.hostlist_clients) dmg.copy_configuration(self.hostlist_clients) cmd = " ".join( [ "-x", "=".join(["D_LOG_FILE", get_log_file(self.client_log)]), "--map-by node", "-x", "D_LOG_MASK=DEBUG", "-x", "DD_MASK=mgmt,io,md,epc,rebuild", "-x", "COVFILE=/tmp/test.cov", self.daos_test, "-n", dmg_config_file, "".join(["-", subtest]), str(args) ] ) job_cmd = ExecutableCommand(namespace=None, command=cmd) job = get_job_manager(self, "Orterun", job_cmd, mpi_type="openmpi") # Assign the test to run job.hostfile.update(self.hostfile_clients) job.processes.update(num_clients) job_str = str(job) env = {} env['CMOCKA_XML_FILE'] = os.path.join(self.outputdir, "%g_cmocka_results.xml") env['CMOCKA_MESSAGE_OUTPUT'] = "xml" env['POOL_SCM_SIZE'] = "{}".format(scm_size) if not nvme_size: nvme_size = 0 env['POOL_NVME_SIZE'] = "{}".format(nvme_size) # Update the expected status for each ranks that will be stopped by this # test to avoid a false failure during tearDown(). if "random" in stopped_ranks: # Set each expected rank state to be either stopped or running for manager in self.server_managers: manager.update_expected_states( None, ["Joined", "Stopped", "Excluded"]) else: # Set the specific expected rank state to stopped for rank in stopped_ranks: for manager in self.server_managers: manager.update_expected_states( rank, ["Stopped", "Excluded"]) try: process.run(job_str, env=env) except process.CmdError as result: if result.result.exit_status != 0: # fake a JUnit failure output self.create_results_xml(self.subtest_name, result, "Failed to run {}.".format( self.daos_test)) self.fail( "{0} failed with return code={1}.\n".format( job_str, result.result.exit_status))
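# Note on CMOCKA_XML_FILE above: cmocka replaces a "%g" in the file name
# with the test-group name, so every daos_test group gets its own results
# XML under self.outputdir when CMOCKA_MESSAGE_OUTPUT=xml.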
def run(test, params, env): """ convert specific kvm guest to rhev """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') hypervisor = params.get("hypervisor") vm_name = params.get('main_vm', 'EXAMPLE') target = params.get('target') remote_host = params.get('remote_host', 'EXAMPLE') input_mode = params.get("input_mode") output_mode = params.get('output_mode') output_format = params.get('output_format') source_user = params.get("username", "root") storage = params.get('output_storage') storage_name = params.get('storage_name') bridge = params.get('bridge') network = params.get('network') ntp_server = params.get('ntp_server') vpx_dc = params.get("vpx_dc") esx_ip = params.get("esx_hostname") address_cache = env.get('address_cache') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = utlv.PoolVolumeTest(test, params) v2v_opts = params.get('v2v_opts', '-v -x') v2v_timeout = int(params.get('v2v_timeout', 3600)) skip_check = 'yes' == params.get('skip_check', 'no') status_error = 'yes' == params.get('status_error', 'no') checkpoint = params.get('checkpoint', '') debug_kernel = 'debug_kernel' == checkpoint backup_list = ['floppy', 'floppy_devmap', 'fstab_cdrom', 'sata_disk', 'network_rtl8139', 'network_e1000', 'spice', 'spice_encrypt', 'spice_qxl', 'spice_cirrus', 'vnc_qxl', 'vnc_cirrus', 'blank_2nd_disk', 'listen_none', 'listen_socket', 'only_net', 'only_br'] error_list = [] # Prepare step for different hypervisor if hypervisor == "esx": source_ip = params.get("vpx_hostname") source_pwd = params.get("vpx_password") vpx_passwd_file = params.get("vpx_passwd_file") # Create password file to access ESX hypervisor with open(vpx_passwd_file, 'w') as f: f.write(source_pwd) elif hypervisor == "xen": source_ip = params.get("xen_hostname") source_pwd = params.get("xen_host_passwd") # Set up ssh access using ssh-agent and authorized_keys ssh_key.setup_ssh_key(source_ip, source_user, source_pwd) try: utils_misc.add_identities_into_ssh_agent() except Exception as e: process.run("ssh-agent -k") test.error("Fail to setup ssh-agent \n %s" % str(e)) elif hypervisor == "kvm": source_ip = None source_pwd = None else: test.cancel("Unspported hypervisor: %s" % hypervisor) # Create libvirt URI v2v_uri = utils_v2v.Uri(hypervisor) remote_uri = v2v_uri.get_uri(source_ip, vpx_dc, esx_ip) logging.debug("libvirt URI for converting: %s", remote_uri) # Make sure the VM exist before convert v2v_virsh = None close_virsh = False if hypervisor == 'kvm': v2v_virsh = virsh else: virsh_dargs = {'uri': remote_uri, 'remote_ip': source_ip, 'remote_user': source_user, 'remote_pwd': source_pwd, 'debug': True} v2v_virsh = virsh.VirshPersistent(**virsh_dargs) close_virsh = True if not v2v_virsh.domain_exists(vm_name): test.error("VM '%s' not exist" % vm_name) def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def vm_shell(func): """ Decorator of shell session to vm """ def wrapper(*args, **kwargs): vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get('address_cache')) if vm.is_dead(): logging.info('VM is down. 
Starting it now.') vm.start() session = vm.wait_for_login() kwargs['session'] = session kwargs['vm'] = vm func(*args, **kwargs) if session: session.close() vm.shutdown() return wrapper def check_disks(vmcheck): """ Check disk counts inside the VM """ # Initialize windows boot up os_type = params.get("os_type", "linux") expected_disks = int(params.get("ori_disks", "1")) logging.debug("Expect %s disks im VM after convert", expected_disks) # Get disk counts if os_type == "linux": cmd = "lsblk |grep disk |wc -l" disks = int(vmcheck.session.cmd(cmd).strip()) else: cmd = r"echo list disk > C:\list_disk.txt" vmcheck.session.cmd(cmd) cmd = r"diskpart /s C:\list_disk.txt" output = vmcheck.session.cmd(cmd).strip() logging.debug("Disks in VM: %s", output) disks = len(re.findall('Disk\s\d', output)) logging.debug("Find %s disks in VM after convert", disks) if disks == expected_disks: logging.info("Disk counts is expected") else: log_fail("Disk counts is wrong") def check_vmlinuz_initramfs(v2v_output): """ Check if vmlinuz matches initramfs on multi-kernel case """ logging.debug('Checking if vmlinuz matches initramfs') kernel_strs = re.findall('(\* kernel.*?\/boot\/config){1,}', v2v_output, re.DOTALL) if len(kernel_strs) == 0: test.error("Not find kernel information") # Remove duplicate items by set logging.debug('Boots and kernel info: %s' % set(kernel_strs)) for str_i in set(kernel_strs): # Fine all versions kernel_vers = re.findall('((?:\d+\.){1,}\d+-(?:\d+\.){1,}\w+)', str_i) logging.debug('kernel related versions: %s' % kernel_vers) # kernel_vers = [kernel, vmlinuz, initramfs] and they should be same if len(kernel_vers) < 3 or len(set(kernel_vers)) != 1: log_fail("kernel versions does not match: %s" % kernel_vers) def check_boot_kernel(vmcheck): """ Check if converted vm use the latest kernel """ _, current_kernel = vmcheck.run_cmd('uname -r') if 'debug' in current_kernel: log_fail('Current kernel is a debug kernel: %s' % current_kernel) # 'sort -V' can satisfy our testing, even though it's not strictly perfect. 
# The last one is always the latest kernel version kernel_normal_list = vmcheck.run_cmd('rpm -q kernel | sort -V')[1].strip().splitlines() status, kernel_debug = vmcheck.run_cmd('rpm -q kernel-debug') if status != 0: test.error('Not found kernel-debug package') all_kernel_list = kernel_normal_list + kernel_debug.strip().splitlines() logging.debug('All kernels: %s' % all_kernel_list) if len(all_kernel_list) < 3: test.error('Needs at least 2 normal kernels and 1 debug kernel in VM') # The latest non-debug kernel must be kernel_normal_list[-1] if current_kernel.strip() != kernel_normal_list[-1].lstrip('kernel-'): log_fail('Check boot kernel failed') def check_floppy_exist(vmcheck): """ Check if floppy exists after convertion """ blk = vmcheck.session.cmd('lsblk') logging.info(blk) if not re.search('fd0', blk): log_fail('Floppy not found') def attach_removable_media(type, source, dev): bus = {'cdrom': 'ide', 'floppy': 'fdc', 'disk': 'virtio'} args = {'driver': 'qemu', 'subdriver': 'raw', 'sourcetype': 'file', 'type': type, 'targetbus': bus[type]} if type == 'cdrom': args.update({'mode': 'readonly'}) config = '' # Join all options together to get command line for key in list(args.keys()): config += ' --%s %s' % (key, args[key]) config += ' --current' virsh.attach_disk(vm_name, source, dev, extra=config) def change_disk_bus(dest): """ Change all disks' bus type to $dest """ bus_list = ['ide', 'sata', 'virtio'] if dest not in bus_list: test.error('Bus type not support') dev_prefix = ['h', 's', 'v'] dev_table = dict(list(zip(bus_list, dev_prefix))) logging.info('Change disk bus to %s' % dest) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() index = 0 for disk in list(disks.values()): if disk.get('device') != 'disk': continue target = disk.find('target') target.set('bus', dest) target.set('dev', dev_table[dest] + 'd' + string.ascii_lowercase[index]) disk.remove(disk.find('address')) index += 1 vmxml.sync() def change_network_model(model): """ Change network model to $model """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) network_list = vmxml.get_iface_all() for node in list(network_list.values()): if node.get('type') == 'network': node.find('model').set('type', model) vmxml.sync() def attach_network_card(model): """ Attach network card based on model """ if model not in ('e1000', 'virtio', 'rtl8139'): test.error('Network model not support') options = {'type': 'network', 'source': 'default', 'model': model} line = '' for key in options: line += ' --' + key + ' ' + options[key] line += ' --current' logging.debug(virsh.attach_interface(vm_name, option=line)) def check_multi_netcards(mac_list, virsh_instance): """ Check if number and type of network cards meet expectation """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) iflist = vmxml.get_iface_all() logging.debug('MAC list before v2v: %s' % mac_list) logging.debug('MAC list after v2v: %s' % list(iflist.keys())) if set(mac_list).difference(list(iflist.keys())): log_fail('Missing network interface') for mac in iflist: if iflist[mac].find('model').get('type') != 'virtio': log_fail('Network not convert to virtio') @vm_shell def insert_floppy_devicemap(**kwargs): """ Add an entry of floppy to device.map """ session = kwargs['session'] line = '(fd0) /dev/fd0' devmap = '/boot/grub/device.map' if session.cmd_status('ls %s' % devmap): devmap = '/boot/grub2/device.map' cmd_exist = 'grep \'(fd0)\' %s' % devmap cmd_set = 'sed -i \'2i%s\' %s' % (line, devmap) if session.cmd_status(cmd_exist): 
session.cmd(cmd_set) def make_label(session): """ Label a volume, swap or root volume """ # swaplabel for rhel7 with xfs, e2label for rhel6 or ext* cmd_map = {'root': 'e2label %s ROOT', 'swap': 'swaplabel -L SWAPPER %s'} if not session.cmd_status('swaplabel --help'): blk = 'swap' elif not session.cmd_status('which e2label'): blk = 'root' else: test.error('No tool to make label') entry = session.cmd('blkid|grep %s' % blk).strip() path = entry.split()[0].strip(':') cmd_label = cmd_map[blk] % path if 'LABEL' not in entry: session.cmd(cmd_label) return blk @vm_shell def specify_fstab_entry(type, **kwargs): """ Specify entry in fstab file """ type_list = ['cdrom', 'uuid', 'label', 'sr0', 'invalid'] if type not in type_list: test.error('Not support %s in fstab' % type) session = kwargs['session'] # Specify cdrom device if type == 'cdrom': line = '/dev/cdrom /media/CDROM auto exec' if 'grub2' in utils_misc.get_bootloader_cfg(session): line += ',nofail' line += ' 0 0' logging.debug('fstab entry is "%s"', line) cmd = [ 'mkdir -p /media/CDROM', 'mount /dev/cdrom /media/CDROM', 'echo "%s" >> /etc/fstab' % line ] for i in range(len(cmd)): session.cmd(cmd[i]) elif type == 'sr0': line = params.get('fstab_content') session.cmd('echo "%s" >> /etc/fstab' % line) elif type == 'invalid': line = utils_misc.generate_random_string(6) session.cmd('echo "%s" >> /etc/fstab' % line) else: map = {'uuid': 'UUID', 'label': 'LABEL'} logging.info(type) if session.cmd_status('cat /etc/fstab|grep %s' % map[type]): # Specify device by UUID if type == 'uuid': entry = session.cmd( 'blkid -s UUID|grep swap').strip().split() # Replace path for UUID origin = entry[0].strip(':') replace = entry[1].replace('"', '') # Specify device by label elif type == 'label': blk = make_label(session) entry = session.cmd('blkid|grep %s' % blk).strip() # Remove " from LABEL="****" replace = entry.split()[1].strip().replace('"', '') # Replace the original id/path with label origin = entry.split()[0].strip(':') cmd_fstab = "sed -i 's|%s|%s|' /etc/fstab" % (origin, replace) session.cmd(cmd_fstab) fstab = session.cmd_output('cat /etc/fstab') logging.debug('Content of /etc/fstab:\n%s', fstab) def create_large_file(session, left_space): """ Create a large file to make left space of root less than $left_space MB """ cmd_df = "df -m / --output=avail" df_output = session.cmd(cmd_df).strip() logging.debug('Command output: %s', df_output) avail = int(df_output.strip().split('\n')[-1]) logging.info('Available space: %dM' % avail) if avail > left_space - 1: tmp_dir = data_dir.get_tmp_dir() if session.cmd_status('ls %s' % tmp_dir) != 0: session.cmd('mkdir %s' % tmp_dir) large_file = os.path.join(tmp_dir, 'file.large') cmd_create = 'dd if=/dev/zero of=%s bs=1M count=%d' % \ (large_file, avail - left_space + 2) session.cmd(cmd_create, timeout=v2v_timeout) logging.info('Available space: %sM' % session.cmd(cmd_df).strip()) @vm_shell def corrupt_rpmdb(**kwargs): """ Corrupt rpm db """ session = kwargs['session'] # If __db.* exist, remove them, then touch _db.001 to corrupt db. 
if not session.cmd_status('ls /var/lib/rpm/__db.001'): session.cmd('rm -f /var/lib/rpm/__db.*') session.cmd('touch /var/lib/rpm/__db.001') if not session.cmd_status('yum update'): test.error('Corrupt rpmdb failed') @vm_shell def grub_serial_terminal(**kwargs): """ Edit the serial and terminal lines of grub.conf """ session = kwargs['session'] vm = kwargs['vm'] grub_file = utils_misc.get_bootloader_cfg(session) if 'grub2' in grub_file: test.cancel('Skip this case on grub2') cmd = "sed -i '1iserial -unit=0 -speed=115200\\n" cmd += "terminal -timeout=10 serial console' %s" % grub_file session.cmd(cmd) @vm_shell def set_selinux(value, **kwargs): """ Set selinux stat of guest """ session = kwargs['session'] current_stat = session.cmd_output('getenforce').strip() logging.debug('Current selinux status: %s', current_stat) if current_stat != value: cmd = "sed -E -i 's/(^SELINUX=).*?/\\1%s/' /etc/selinux/config" % value logging.info('Set selinux stat with command %s', cmd) session.cmd(cmd) @vm_shell def get_firewalld_status(**kwargs): """ Return firewalld service status of vm """ session = kwargs['session'] # Example: Active: active (running) since Fri 2019-03-15 01:03:39 CST; 3min 48s ago firewalld_status = session.cmd('systemctl status firewalld.service|grep Active:', ok_status=[0, 3]).strip() # Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld: %s', firewalld_status) params[checkpoint] = firewalld_status def check_firewalld_status(vmcheck, expect_status): """ Check if status of firewalld meets expectation """ firewalld_status = vmcheck.session.cmd('systemctl status ' 'firewalld.service|grep Active:', ok_status=[0, 3]).strip() # Exclude the time string because time changes if vm restarts firewalld_status = re.search('Active:\s\w*\s\(\w*\)', firewalld_status).group() logging.info('Status of firewalld after v2v: %s', firewalld_status) if firewalld_status != expect_status: log_fail('Status of firewalld changed after conversion') @vm_shell def vm_cmd(cmd_list, **kwargs): """ Excecute a list of commands on guest. """ session = kwargs['session'] for cmd in cmd_list: logging.info('Send command "%s"', cmd) # 'chronyc waitsync' needs more than 2mins to sync clock, # We set timeout to 300s will not have side-effects for other # commands. status, output = session.cmd_status_output(cmd, timeout=300) logging.debug('Command output:\n%s', output) if status != 0: test.error('Command "%s" failed' % cmd) logging.info('All commands executed') def check_time_keep(vmcheck): """ Check time drift after convertion. 
""" logging.info('Check time drift') output = vmcheck.session.cmd('chronyc tracking') logging.debug(output) if 'Not synchronised' in output: log_fail('Time not synchronised') lst_offset = re.search('Last offset *?: *(.*) ', output).group(1) drift = abs(float(lst_offset)) logging.debug('Time drift is: %f', drift) if drift > 3: log_fail('Time drift exceeds 3 sec') def check_boot(): """ Check if guest can boot up after configuration """ try: vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get('address_cache')) if vm.is_alive(): vm.shutdown() logging.info('Booting up %s' % vm_name) vm.start() vm.wait_for_login() vm.shutdown() logging.info('%s is down' % vm_name) except Exception as e: test.error('Bootup guest and login failed: %s', str(e)) def check_result(result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout + result.stderr if skip_check: logging.info('Skip checking vm after conversion') elif not status_error: if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt(params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if params.get('skip_check') != 'yes': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=vmchecker.virsh_instance) logging.debug(vmxml) if checkpoint == 'multi_kernel': check_boot_kernel(vmchecker.checker) check_vmlinuz_initramfs(output) if checkpoint == 'floppy': # Convert to rhv will remove all removeable devices(floppy, cdrom) if output_mode in ['local', 'libvirt']: check_floppy_exist(vmchecker.checker) if checkpoint == 'multi_disks': check_disks(vmchecker.checker) if checkpoint == 'multi_netcards': check_multi_netcards(params['mac_address'], vmchecker.virsh_instance) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': vmchecker.check_graphics(params[checkpoint]) else: graph_type = checkpoint.split('_')[0] vmchecker.check_graphics({'type': graph_type}) video_type = vmxml.get_devices('video')[0].model_type if video_type.lower() != 'qxl': log_fail('Video expect QXL, actual %s' % video_type) if checkpoint.startswith('listen'): listen_type = vmxml.get_devices('graphics')[0].listen_type logging.info('listen type is: %s', listen_type) if listen_type != checkpoint.split('_')[-1]: log_fail('listen type changed after conversion') if checkpoint.startswith('selinux'): status = vmchecker.checker.session.cmd( 'getenforce').strip().lower() logging.info('Selinux status after v2v:%s', status) if status != checkpoint[8:]: log_fail('Selinux status not match') if checkpoint == 'guest_firewalld_status': check_firewalld_status(vmchecker.checker, params[checkpoint]) if checkpoint in ['ntpd_on', 'sync_ntp']: check_time_keep(vmchecker.checker) # Merge 2 error lists error_list.extend(vmchecker.errors) log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: v2v_params = { 'target': target, 'hypervisor': hypervisor, 'main_vm': vm_name, 'input_mode': input_mode, 'network': network, 'bridge': bridge, 'storage': storage, 'hostname': source_ip, 'v2v_opts': v2v_opts, 
'new_name': vm_name + utils_misc.generate_random_string(3)} if vpx_dc: v2v_params.update({"vpx_dc": vpx_dc}) if esx_ip: v2v_params.update({"esx_ip": esx_ip}) output_format = params.get('output_format') if output_format: v2v_params.update({'output_format': output_format}) # Build rhev related options if output_mode == 'rhev': # Create different sasl_user name for different job params.update({'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3)}) logging.info('sals user name is %s' % params.get("sasl_user")) # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) if output_mode == 'local': v2v_params['storage'] = data_dir.get_tmp_dir() if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') # Set libguestfs environment variable os.environ['LIBGUESTFS_BACKEND'] = 'direct' # Save origin graphic type for result checking if source is KVM if hypervisor == 'kvm': ori_vm_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) params['ori_graphic'] = ori_vm_xml.xmltreefile.find( 'devices').find('graphics').get('type') backup_xml = None # Only kvm guest's xml needs to be backup currently if checkpoint in backup_list and hypervisor == 'kvm': backup_xml = ori_vm_xml if checkpoint == 'multi_disks': new_xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) disk_count = 0 for disk in list(new_xml.get_disk_all().values()): if disk.get('device') == 'disk': disk_count += 1 if disk_count <= 1: test.error('Not enough disk devices') params['ori_disks'] = disk_count if checkpoint == 'sata_disk': change_disk_bus('sata') if checkpoint.startswith('floppy'): img_path = data_dir.get_tmp_dir() + '/floppy.img' utlv.create_local_disk('floppy', img_path) attach_removable_media('floppy', img_path, 'fda') if checkpoint == 'floppy_devmap': insert_floppy_devicemap() if checkpoint.startswith('fstab'): if checkpoint == 'fstab_cdrom': img_path = data_dir.get_tmp_dir() + '/cdrom.iso' utlv.create_local_disk('iso', img_path) attach_removable_media('cdrom', img_path, 'hdc') specify_fstab_entry(checkpoint[6:]) if checkpoint == 'running': virsh.start(vm_name) logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'paused': virsh.start(vm_name, '--paused') logging.info('VM state: %s' % virsh.domstate(vm_name).stdout.strip()) if checkpoint == 'serial_terminal': grub_serial_terminal() check_boot() if checkpoint == 'no_space': @vm_shell def take_space(**kwargs): create_large_file(kwargs['session'], 20) take_space() if checkpoint.startswith('host_no_space'): session = aexpect.ShellSession('sh') create_large_file(session, 1000) if checkpoint == 'host_no_space_setcache': logging.info('Set LIBGUESTFS_CACHEDIR=/home') os.environ['LIBGUESTFS_CACHEDIR'] = '/home' if checkpoint == 'corrupt_rpmdb': corrupt_rpmdb() if checkpoint.startswith('network'): change_network_model(checkpoint[8:]) if checkpoint == 'multi_netcards': params['mac_address'] = [] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) network_list = vmxml.get_iface_all() for mac in network_list: if network_list[mac].get('type') in ['bridge', 'network']: params['mac_address'].append(mac) if len(params['mac_address']) < 2: test.error('Not enough network interface') 
logging.debug('MAC address: %s' % params['mac_address']) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': spice_passwd = {'type': 'spice', 'passwd': params.get('spice_passwd', 'redhat')} vm_xml.VMXML.set_graphics_attr(vm_name, spice_passwd) params[checkpoint] = {'type': 'spice', 'passwdValidTo': '1970-01-01T00:00:01'} else: graphic_video = checkpoint.split('_') graphic = graphic_video[0] logging.info('Set graphic type to %s', graphic) vm_xml.VMXML.set_graphics_attr(vm_name, {'type': graphic}) if len(graphic_video) > 1: video_type = graphic_video[1] logging.info('Set video type to %s', video_type) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) video = vmxml.xmltreefile.find( 'devices').find('video').find('model') video.set('type', video_type) # cirrus doesn't support 'ram' and 'vgamem' attribute if video_type == 'cirrus': [video.attrib.pop(attr_i) for attr_i in [ 'ram', 'vgamem'] if attr_i in video.attrib] vmxml.sync() if checkpoint.startswith('listen'): listen_type = checkpoint.split('_')[-1] vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) listen = vmxml.xmltreefile.find( 'devices').find('graphics').find('listen') listen.set('type', listen_type) vmxml.sync() if checkpoint == 'host_selinux_on': params['selinux_stat'] = utils_selinux.get_status() utils_selinux.set_status('enforcing') if checkpoint.startswith('selinux'): set_selinux(checkpoint[8:]) if checkpoint.startswith('host_firewalld'): service_mgr = service.ServiceManager() logging.info('Backing up firewall services status') params['bk_firewalld_status'] = service_mgr.status('firewalld') if 'start' in checkpoint: service_mgr.start('firewalld') if 'stop' in checkpoint: service_mgr.stop('firewalld') if checkpoint == 'guest_firewalld_status': get_firewalld_status() if checkpoint == 'remove_securetty': logging.info('Remove /etc/securetty file from guest') cmd = ['rm -f /etc/securetty'] vm_cmd(cmd) if checkpoint == 'ntpd_on': logging.info('Set service chronyd on') cmd = ['yum -y install chrony', 'systemctl start chronyd', 'chronyc add server %s' % ntp_server] vm_cmd(cmd) if checkpoint == 'sync_ntp': logging.info('Sync time with %s', ntp_server) cmd = ['yum -y install chrony', 'systemctl start chronyd', 'chronyc add server %s' % ntp_server, 'chronyc waitsync'] vm_cmd(cmd) if checkpoint == 'blank_2nd_disk': disk_path = os.path.join(data_dir.get_tmp_dir(), 'blank.img') logging.info('Create blank disk %s', disk_path) process.run('truncate -s 1G %s' % disk_path) logging.info('Attach blank disk to vm') attach_removable_media('disk', disk_path, 'vdc') logging.debug(virsh.dumpxml(vm_name)) if checkpoint in ['only_net', 'only_br']: logging.info('Detatch all networks') virsh.detach_interface(vm_name, 'network --current', debug=True) logging.info('Detatch all bridges') virsh.detach_interface(vm_name, 'bridge --current', debug=True) if checkpoint == 'only_net': logging.info('Attach network') virsh.attach_interface( vm_name, 'network default --current', debug=True) v2v_params.pop('bridge') if checkpoint == 'only_br': logging.info('Attatch bridge') virsh.attach_interface( vm_name, 'bridge virbr0 --current', debug=True) v2v_params.pop('network') if checkpoint == 'no_libguestfs_backend': os.environ.pop('LIBGUESTFS_BACKEND') if checkpoint == 'file_image': vm = env.get_vm(vm_name) disk = vm.get_first_disk_devices() logging.info('Disk type is %s', disk['type']) if disk['type'] != 'file': test.error('Guest is not with file image') v2v_result = utils_v2v.v2v_cmd(v2v_params) if v2v_params.get('new_name'): vm_name = 
params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: if close_virsh: v2v_virsh.close_session() if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if backup_xml: backup_xml.sync() if params.get('selinux_stat') and params['selinux_stat'] != 'disabled': utils_selinux.set_status(params['selinux_stat']) if 'bk_firewalld_status' in params: service_mgr = service.ServiceManager() if service_mgr.status('firewalld') != params['bk_firewalld_status']: if params['bk_firewalld_status']: service_mgr.start('firewalld') else: service_mgr.stop('firewalld') if checkpoint.startswith('host_no_space'): large_file = os.path.join(data_dir.get_tmp_dir(), 'file.large') if os.path.isfile(large_file): os.remove(large_file) # Cleanup constant files utils_v2v.cleanup_constant_files(params)
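# The run() above follows a prepare / convert / verify / cleanup shape. A
# stripped-down, self-contained sketch of that control flow; every helper
# below is a placeholder stub standing in for the avocado/virt-v2v utilities
# used above, not real API.
def build_v2v_params(params):
    return {'main_vm': params.get('main_vm'), 'target': params.get('target')}


def convert(v2v_params):
    # stands in for utils_v2v.v2v_cmd(v2v_params)
    return 0


def verify(exit_status, expect_failure):
    # stands in for check_result(v2v_result, status_error)
    assert (exit_status != 0) == expect_failure


def run_v2v_case(params):
    v2v_params = build_v2v_params(params)
    try:
        # checkpoint-specific setup (disk bus, fstab, graphics...) goes here
        verify(convert(v2v_params), params.get('status_error') == 'yes')
    finally:
        pass  # restore guest XML, clean pools/SASL users, unset env vars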
PERL_TAP_PARSER_SNIPPET = """#!/bin/env perl
use TAP::Parser;

my $parser = TAP::Parser->new( { exec => ['%s', 'run', 'passtest.py',
                                          'errortest.py', 'warntest.py',
                                          '--tap', '-', '--sysinfo', 'off',
                                          '--job-results-dir', '%%s'] } );

while ( my $result = $parser->next ) {
        $result->is_unknown && die "Unknown line \\"" . $result->as_string . "\\" in the TAP output!\n";
}
$parser->parse_errors == 0 || die "Parser errors!\n";
$parser->is_good_plan || die "Plan is not a good plan!\n";
$parser->plan eq '1..3' || die "Plan does not match what was expected!\n";
""" % AVOCADO

OUTPUT_TEST_CONTENT = """#!/bin/env python
import sys

from avocado import Test
from avocado.utils import process

print("top_print")
sys.stdout.write("top_stdout\\n")
sys.stderr.write("top_stderr\\n")
process.run("/bin/echo top_process")


class OutputTest(Test):

    def __init__(self, *args, **kwargs):
        super(OutputTest, self).__init__(*args, **kwargs)
        print("init_print")
        sys.stdout.write("init_stdout\\n")
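# A rough Python counterpart of the Perl TAP checks above, assuming the TAP
# text has already been captured in a string. It recognizes only the
# constructs the Perl snippet cares about (test lines, plan, diagnostics),
# so it is a simplified sketch, not a full TAP parser.
import re


def check_tap(tap_output, expected_plan="1..3"):
    plan = None
    known = re.compile(r"^(ok|not ok)\b|^\d+\.\.\d+$|^#")
    for line in tap_output.splitlines():
        if not line.strip():
            continue
        if re.match(r"^\d+\.\.\d+$", line):
            plan = line
        elif not known.match(line):
            raise ValueError("Unknown line %r in the TAP output!" % line)
    if plan != expected_plan:
        raise ValueError("Plan %r does not match %r!" % (plan, expected_plan))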
def test_archive_gpg_encrypted(self):
    self.assertOutputContains(r'/.*sosreport-.*tar.*\.gpg')
    _cmd = "file %s" % self.encrypted_path
    res = process.run(_cmd)
    self.assertTrue("GPG symmetrically encrypted data" in res.stdout.decode())
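# For context, a symmetrically encrypted tarball like the one this test
# expects can be produced with stock gpg. The passphrase handling below is
# illustrative (sos does its own); gpg >= 2.1 may additionally need
# "--pinentry-mode loopback" for --passphrase to take effect.
import subprocess

subprocess.run(
    ["gpg", "--batch", "--symmetric", "--passphrase", "secret",
     "--output", "sosreport-example.tar.xz.gpg", "sosreport-example.tar.xz"],
    check=True)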
class QEMUBaseInstaller(base_installer.BaseInstaller): """ Base class for KVM installations """ # # Name of acceptable QEMU binaries that may be built or installed. # We'll look for one of these binaries when linking the QEMU binary # to the test directory # qemu_system = 'qemu-system-' + process.run('uname -i').stdout_text ACCEPTABLE_QEMU_BIN_NAMES = ['qemu-kvm', 'qemu-system-ppc64', qemu_system] # # The default names for the binaries # QEMU_BIN = 'qemu' QEMU_IMG_BIN = 'qemu-img' QEMU_IO_BIN = 'qemu-io' QEMU_FS_PROXY_BIN = 'virtfs-proxy-helper' def _kill_qemu_processes(self): """ Kills all qemu processes and all processes holding /dev/kvm down :return: None """ logging.debug("Killing any qemu processes that might be left behind") process.system("pkill qemu", ignore_status=True) # Let's double check to see if some other process is holding /dev/kvm if os.path.isfile("/dev/kvm"): process.system("fuser -k /dev/kvm", ignore_status=True) def _cleanup_links_qemu(self): """ Removes previously created links, if they exist :return: None """ qemu_path = os.path.join(self.test_builddir, self.QEMU_BIN) qemu_img_path = os.path.join(self.test_builddir, self.QEMU_IMG_BIN) qemu_io_path = os.path.join(self.test_builddir, self.QEMU_IO_BIN) qemu_fs_proxy_path = os.path.join(self.test_builddir, self.QEMU_FS_PROXY_BIN) # clean up previous links, if they exist for path in (qemu_path, qemu_img_path, qemu_io_path, qemu_fs_proxy_path): if os.path.lexists(path): os.unlink(path) def _cleanup_link_unittest(self): """ Removes previously created links, if they exist :return: None """ qemu_unittest_path = os.path.join(self.test_builddir, "unittests") if os.path.lexists(qemu_unittest_path): os.unlink(qemu_unittest_path) def _create_symlink_unittest(self): """ Create symbolic links for qemu and qemu-img commands on test bindir :return: None """ unittest_src = os.path.join(self.install_prefix, 'share', 'qemu', 'tests') unittest_dst = os.path.join(self.test_builddir, "unittests") if os.path.lexists(unittest_dst): logging.debug("Unlinking unittest dir") os.unlink(unittest_dst) logging.debug("Linking unittest dir") os.symlink(unittest_src, unittest_dst) def _qemu_bin_exists_at_prefix(self): """ Attempts to find the QEMU binary at the installation prefix :return: full path of QEMU binary or None if not found """ result = None for name in self.ACCEPTABLE_QEMU_BIN_NAMES: qemu_bin_name = os.path.join(self.install_prefix, 'bin', name) if os.path.isfile(qemu_bin_name): result = qemu_bin_name break if result is not None: logging.debug('Found QEMU binary at %s', result) else: logging.debug('Could not find QEMU binary at prefix %s', self.install_prefix) return result def _qemu_img_bin_exists_at_prefix(self): """ Attempts to find the qemu-img binary at the installation prefix :return: full path of qemu-img binary or None if not found """ qemu_img_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_IMG_BIN) if os.path.isfile(qemu_img_bin_name): logging.debug('Found qemu-img binary at %s', qemu_img_bin_name) return qemu_img_bin_name else: logging.debug('Could not find qemu-img binary at prefix %s', self.install_prefix) return None def _qemu_io_bin_exists_at_prefix(self): """ Attempts to find the qemu-io binary at the installation prefix :return: full path of qemu-io binary or None if not found """ qemu_io_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_IO_BIN) if os.path.isfile(qemu_io_bin_name): logging.debug('Found qemu-io binary at %s', qemu_io_bin_name) return qemu_io_bin_name else: logging.debug('Could not 
find qemu-io binary at prefix %s', self.install_prefix) return None def _qemu_fs_proxy_bin_exists_at_prefix(self): """ Attempts to find the qemu fs proxy binary at the installation prefix :return: full path of qemu fs proxy binary or None if not found """ qemu_fs_proxy_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_FS_PROXY_BIN) if os.path.isfile(qemu_fs_proxy_bin_name): logging.debug('Found qemu fs proxy binary at %s', qemu_fs_proxy_bin_name) return qemu_fs_proxy_bin_name else: logging.debug('Could not find qemu fs proxy binary at prefix %s', self.install_prefix) return None def _create_symlink_qemu(self): """ Create symbolic links for qemu and qemu-img commands on test bindir :return: None """ logging.debug("Linking QEMU binaries") qemu_dst = os.path.join(self.test_builddir, self.QEMU_BIN) qemu_img_dst = os.path.join(self.test_builddir, self.QEMU_IMG_BIN) qemu_io_dst = os.path.join(self.test_builddir, self.QEMU_IO_BIN) qemu_fs_proxy_dst = os.path.join(self.test_builddir, self.QEMU_FS_PROXY_BIN) qemu_bin = self._qemu_bin_exists_at_prefix() if qemu_bin is not None: os.symlink(qemu_bin, qemu_dst) else: raise exceptions.TestError('Invalid qemu path') qemu_img_bin = self._qemu_img_bin_exists_at_prefix() if qemu_img_bin is not None: os.symlink(qemu_img_bin, qemu_img_dst) else: raise exceptions.TestError('Invalid qemu-img path') qemu_io_bin = self._qemu_io_bin_exists_at_prefix() if qemu_io_bin is not None: os.symlink(qemu_io_bin, qemu_io_dst) else: raise exceptions.TestError('Invalid qemu-io path') qemu_fs_proxy_bin = self._qemu_fs_proxy_bin_exists_at_prefix() if qemu_fs_proxy_bin is not None: os.symlink(qemu_fs_proxy_bin, qemu_fs_proxy_dst) else: logging.warning('Qemu fs proxy path %s not found on source dir') def _install_phase_init(self): """ Initializes the built and installed software This uses a simple mechanism of looking up the installer name for deciding what action to do. :return: None """ if 'unit' in self.name: self._cleanup_link_unittest() self._create_symlink_unittest() elif 'qemu' in self.name: self._cleanup_links_qemu() self._create_symlink_qemu() def uninstall(self): """ Performs the uninstallation of KVM userspace component :return: None """ self._kill_qemu_processes() self._cleanup_links() super(QEMUBaseInstaller, self).uninstall()
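# The link-management methods above repeat an unlink-then-symlink pattern; a
# compact sketch of that idempotent helper (a refactoring suggestion, not
# code taken from the installer itself):
import logging
import os


def relink(src, dst):
    """Point dst at src, replacing any stale link or leftover file first."""
    if os.path.lexists(dst):
        logging.debug("Unlinking stale %s", dst)
        os.unlink(dst)
    logging.debug("Linking %s -> %s", dst, src)
    os.symlink(src, dst)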
def run(test, params, env): """ Test virsh nwfilter-binding-list 1)Prepare parameters 2)Run nwfilter_binding_list command 3)check result 4)Clean env """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) new_filter_1 = params.get("newfilter_1") new_filter_2 = params.get("newfilter_2") vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) new_net0_xml = os.path.join(data_dir.get_tmp_dir(), "new_net0.xml") new_net1_xml = os.path.join(data_dir.get_tmp_dir(), "new_net1.xml") option = params.get("option") status_error = "yes" == params.get("status_error") alias_name = params.get("alias_name") new_filter_name = params.get("new_filter_name") source_network = params.get("source_network") # prepare vm filterrfer parameters dict list filter_param_list_1 = [] params_key_1 = [] filter_param_list_2 = [] params_key_2 = [] for i in params.keys(): if 'parameters_name_' in i: params_key_1.append(i) params_key_1.sort() for i in range(len(params_key_1)): params_dict = {} params_dict['name'] = params[params_key_1[i]] params_dict['value'] = params['parameters_value_%s' % i] filter_param_list_1.append(params_dict) filterref_dict_1 = {} filterref_dict_1['name'] = new_filter_1 filterref_dict_1['parameters'] = filter_param_list_1 for i in params.keys(): if 'parameters_dhcp_' in i: params_key_2.append(i) params_key_2.sort() for i in range(len(params_key_2)): params_dict = {} params_dict['name'] = params[params_key_2[i]] params_dict['value'] = params['dhcp_value_%s' % i] filter_param_list_2.append(params_dict) filterref_dict_2 = {} filterref_dict_2['name'] = new_filter_2 filterref_dict_2['parameters'] = filter_param_list_2 def set_env(): """ set two interface with different network filter and change interface type """ virsh.attach_interface(vm_name, option) vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) devices = vmxml.get_devices('interface') iface_xml = devices[0] iface_xml_2 = devices[1] vmxml.del_device(iface_xml) vmxml.del_device(iface_xml_2) new_iface_1 = interface.Interface('network') new_iface_2 = interface.Interface('network') new_iface_1.xml = iface_xml.xml new_iface_2.xml = iface_xml_2.xml new_iface_1.type_name = "network" new_iface_2.type_name = "network" new_iface_1.source = {'network': source_network} new_iface_2.source = {'network': source_network} new_iface_1.target = {'dev': 'new_net0'} new_iface_2.target = {'dev': 'new_net1'} new_filterref = new_iface_1.new_filterref(**filterref_dict_1) new_iface_1.filterref = new_filterref new_filterref = new_iface_2.new_filterref(**filterref_dict_2) new_iface_2.filterref = new_filterref logging.debug("new interface xml is: %s \n %s" % (new_iface_1, new_iface_2)) vmxml.add_device(new_iface_1) vmxml.add_device(new_iface_2) vmxml.sync() return new_iface_1, new_iface_2 try: new_iface_1, new_iface_2 = set_env() # start vm virsh.start(vm_name, debug=True) # list binding port dev ret = virsh.nwfilter_binding_list(debug=True) utlv.check_exit_status(ret, status_error) virsh.nwfilter_binding_dumpxml(new_iface_1.target['dev'], to_file=new_net0_xml, debug=True) virsh.nwfilter_binding_dumpxml(new_iface_2.target['dev'], to_file=new_net1_xml, debug=True) # check dump filterbinding can pass xml validate new_net0_cmd = "virt-xml-validate %s" % new_net0_xml new_net1_cmd = "virt-xml-validate %s" % new_net1_xml valid_0 = process.run(new_net0_cmd, ignore_status=True, shell=True).exit_status valid_1 = process.run(new_net1_cmd, ignore_status=True, shell=True).exit_status if valid_0 or valid_1: test.fail("the xml can not validate successfully") # create 
new xml and update device newnet_iface = interface.Interface('network') newnet_iface.xml = new_iface_1.xml filterref_list = [] filterref_dict = {} filterref_dict['name'] = new_filter_name filterref_dict['parameters'] = filterref_list newnet_iface.alias = {'name': alias_name} newnet_iface.filterref = newnet_iface.new_filterref(**filterref_dict) ret = virsh.update_device(domainarg=vm_name, filearg=newnet_iface.xml, debug=True) utlv.check_exit_status(ret, status_error) ret_list = virsh.nwfilter_binding_list(debug=True) utlv.check_result(ret_list, expected_match="new_net1") ret_dump = virsh.nwfilter_binding_dumpxml('new_net0', debug=True) utlv.check_result(ret_dump, expected_match=new_filter_name) finally: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync()
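# A minimal standalone sketch of the dump-then-validate step used above,
# assuming virt-xml-validate is on PATH; the file names are illustrative.
import subprocess

for xml_path in ("new_net0.xml", "new_net1.xml"):
    res = subprocess.run(["virt-xml-validate", xml_path])
    if res.returncode != 0:
        raise AssertionError("%s failed schema validation" % xml_path)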
def check_result(cmd, result, status_error): """ Check virt-v2v command result """ utils_v2v.check_exit_status(result, status_error, error_flag) output = to_text(result.stdout + result.stderr, errors=error_flag) output_stdout = to_text(result.stdout, errors=error_flag) if status_error: if checkpoint == 'length_of_error': log_lines = output.split('\n') v2v_start = False for line in log_lines: if line.startswith('virt-v2v:'): v2v_start = True if line.startswith('libvirt:'): v2v_start = False # 76 is the max length in v2v if v2v_start and len(line) > 76: test.fail('Error log longer than 76 characters: %s' % line) if checkpoint == 'disk_not_exist': vol_list = virsh.vol_list(pool_name) logging.info(vol_list) if vm_name in vol_list.stdout: test.fail('Disk exists for vm %s' % vm_name) else: if output_mode == "rhev" and checkpoint != 'quiet': ovf = get_ovf_content(output) logging.debug("ovf content: %s", ovf) check_ovf_snapshot_id(ovf) if '--vmtype' in cmd: expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0] check_vmtype(ovf, expected_vmtype) if '-oa' in cmd and '--no-copy' not in cmd: expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0] img_path = get_img_path(output) def check_alloc(): try: check_image(img_path, "allocation", expected_mode) return True except exceptions.TestFail: pass if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0): test.fail('Allocation check failed.') if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root: expected_format = re.findall(r"-of\s(\w+)", cmd)[0] img_path = get_img_path(output) check_image(img_path, "format", expected_format) if '-on' in cmd: expected_name = re.findall(r"-on\s(\w+)", cmd)[0] check_new_name(output, expected_name) if '--no-copy' in cmd: check_nocopy(output) if '-oc' in cmd: expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0] check_connection(output, expected_uri) if output_mode == "rhev": if not utils_v2v.import_vm_to_ovirt(params, address_cache): test.fail("Import VM failed") else: vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker params['vmcheck_flag'] = True if output_mode == "libvirt": if "qemu:///session" not in v2v_options and not no_root: virsh.start(vm_name, debug=True, ignore_status=False) if checkpoint in ['vmx', 'vmx_ssh']: vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker params['vmcheck_flag'] = True ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") if checkpoint == 'quiet': if len(output.strip().splitlines()) > 10: test.fail('Output is not empty in quiet mode') if checkpoint == 'dependency': if 'libguestfs-winsupport' not in output: test.fail('libguestfs-winsupport not in dependency') if all(pkg_pattern not in output for pkg_pattern in ['VMF', 'edk2-ovmf']): test.fail('OVMF/AAVMF not in dependency') if 'qemu-kvm-rhev' in output: test.fail('qemu-kvm-rhev is in dependency') if 'libX11' in output: test.fail('libX11 is in dependency') if 'kernel-rt' in output: test.fail('kernel-rt is in dependency') win_img = params.get('win_image') command = 'guestfish -a %s -i' if process.run(command % win_img, ignore_status=True).exit_status == 0: test.fail('Command "%s" success' % command % win_img) #check 'yum deplist virt-v2v' if checkpoint == 'deplist': if 'platform-python' not in output: test.fail('platform-python is not in dependency') if checkpoint == 'no_dcpath': if '--dcpath' in output: test.fail('"--dcpath" is not removed') if checkpoint == 'debug_overlays': search = re.search('Overlay 
saved as(.*)', output) if not search: test.fail('Not find log of saving overlays') overlay_path = search.group(1).strip() logging.debug('Overlay file location: %s' % overlay_path) if os.path.isfile(overlay_path): logging.info('Found overlay file: %s' % overlay_path) else: test.fail('Overlay file not saved') if checkpoint.startswith('empty_nic_source'): target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1]) logging.info('Expect log: %s', target_str) if target_str not in output_stdout.lower(): test.fail('Expect log not found: %s' % target_str) if checkpoint == 'print_source': check_source(output_stdout) if checkpoint == 'machine_readable': if os.path.exists(params.get('example_file', '')): # Checking items in example_file exist in latest # output regardless of the orders and new items. with open(params['example_file']) as f: for line in f: if line.strip() not in output_stdout.strip(): if utils_v2v.multiple_versions_compare( V2V_UNSUPPORT_GLANCE_VER ) and 'glance' in line: continue else: test.error('No content to compare with') if checkpoint == 'compress': img_path = get_img_path(output) logging.info('Image path: %s', img_path) qemu_img_cmd = 'qemu-img check %s' % img_path qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support( ) if qemu_img_locking_feature_support: qemu_img_cmd = 'qemu-img check %s -U' % img_path disk_check = process.run(qemu_img_cmd).stdout_text logging.info(disk_check) compress_info = disk_check.split(',')[-1].split('%')[0].strip() compress_rate = float(compress_info) logging.info('%s%% compressed', compress_rate) if compress_rate < 0.1: test.fail('Disk image NOT compressed') if checkpoint == 'tail_log': messages = params['tail'].get_output() logging.info('Content of /var/log/messages during conversion:') logging.info(messages) msg_content = params['msg_content'] if msg_content in messages: test.fail('Found "%s" in /var/log/messages' % msg_content) if checkpoint == 'print_estimate_tofile': check_print_estimate(estimate_file) log_check = utils_v2v.check_log(params, output) if log_check: test.fail(log_check) check_man_page(params.get('in_man'), params.get('not_in_man'))
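# The 'compress' branch above extracts the compression percentage by position
# ("split(',')[-1]"). A regex makes that extraction less sensitive to field
# order; a sketch only, since the exact qemu-img check summary wording varies
# across qemu versions.
import re


def compressed_rate(qemu_img_check_output):
    m = re.search(r"([\d.]+)%\s+compressed", qemu_img_check_output)
    return float(m.group(1)) if m else 0.0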
def run(test, params, env): """ Test guest numa setting """ def replace_qemu_cmdline(cmdline_list): """ Replace the expected qemu command line for new machine type :param cmdline_list: The list for expected qemu command lines :return: The list contains the updated qemu command lines if any """ os_xml = getattr(vmxml, "os") machine_ver = getattr(os_xml, 'machine') if (machine_ver.startswith("pc-q35-rhel") and machine_ver > 'pc-q35-rhel8.2.0' and libvirt_version.version_compare(6, 4, 0)): # Replace 'node,nodeid=0,cpus=0-1,mem=512' with # 'node,nodeid=0,cpus=0-1,memdev=ram-node0' # Replace 'node,nodeid=1,cpus=2-3,mem=512' with # 'node,nodeid=1,cpus=2-3,memdev=ram-node1' for cmd in cmdline_list: line = cmd['cmdline'] try: node = line.split(',')[1][-1] cmd['cmdline'] = line.replace( 'mem=512', 'memdev=ram-node{}'.format(node)) # We can skip replacing, when the cmdline parameter is empty. except IndexError: pass return cmdline_list host_numa_node = utils_misc.NumaInfo() node_list = host_numa_node.online_nodes arch = platform.machine() dynamic_node_replacement(params, host_numa_node, test) if 'ppc64' in arch: try: ppc_memory_nodeset = "" nodes = params['memory_nodeset'] if '-' in nodes: for n in range(int(nodes.split('-')[0]), int(nodes.split('-')[1])): ppc_memory_nodeset += str(node_list[n]) + ',' ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])]) else: node_lst = nodes.split(',') for n in range(len(node_lst) - 1): ppc_memory_nodeset += str(node_list[int( node_lst[n])]) + ',' ppc_memory_nodeset += str(node_list[int(node_lst[-1])]) params['memory_nodeset'] = ppc_memory_nodeset except IndexError: test.cancel("No of numas in config does not match with no of " "online numas in system") except utils_params.ParamNotFound: pass pkeys = ('memnode_nodeset', 'page_nodenum') for pkey in pkeys: for key in params.keys(): if pkey in key: params[key] = str(node_list[int(params[key])]) # Modify qemu command line try: if params['qemu_cmdline_mem_backend_1']: memory_nodeset = sorted(params['memory_nodeset'].split(',')) if len(memory_nodeset) > 1: if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1: qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \ ".*?host-nodes=%s-%s,policy=bind" % \ (memory_nodeset[0], memory_nodeset[1]) else: qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \ ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \ (memory_nodeset[0], memory_nodeset[1]) params['qemu_cmdline_mem_backend_1'] = qemu_cmdline except utils_params.ParamNotFound: pass try: if params['qemu_cmdline_mem_backend_0']: qemu_cmdline = params['qemu_cmdline_mem_backend_0'] params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace( ".*?host-nodes=1", ".*?host-nodes=%s" % params['memnode_nodeset_0']) except utils_params.ParamNotFound: pass vcpu_num = int(params.get("vcpu_num", 2)) max_mem = int(params.get("max_mem", 1048576)) max_mem_unit = params.get("max_mem_unit", 'KiB') vcpu_placement = params.get("vcpu_placement", 'static') bug_url = params.get("bug_url", "") expect_cpus = params.get('expect_cpus') status_error = "yes" == params.get("status_error", "no") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) mode_dict = { 'strict': 'bind', 'preferred': 'prefer', 'interleave': 'interleave' } cpu_num = cpu.get_cpu_info().get('CPU(s)') if vcpu_num > int(cpu_num): test.cancel('Number of vcpus(%s) is larger than number of ' 'cpus on host(%s).' 
% (vcpu_num, cpu_num)) # Prepare numatune memory parameter dict and list mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset') numa_memory = {} for mem_param in mem_tuple: value = params.get(mem_param) if value: numa_memory[mem_param.split('_')[1]] = value memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset') numa_memnode = handle_param(memnode_tuple, params) if numa_memnode: if not libvirt_version.version_compare(1, 2, 7): test.cancel("Setting hugepages more specifically per " "numa node not supported on current " "version") # Prepare cpu numa cell parameter topology = {} topo_tuple = ('sockets', 'cores', 'threads') for key in topo_tuple: if params.get(key): topology[key] = params.get(key) cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory') numa_cell = handle_param(cell_tuple, params) # Prepare qemu cmdline check parameter cmdline_tuple = ("qemu_cmdline", ) cmdline_list = handle_param(cmdline_tuple, params) # Prepare hugepages parameter backup_list = [] page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset') page_list = handle_param(page_tuple, params) nr_pagesize_total = params.get("nr_pagesize_total") deallocate = False if page_list: if not libvirt_version.version_compare(1, 2, 5): test.cancel("Setting hugepages more specifically per " "numa node not supported on current " "version") hp_cl = test_setup.HugePageConfig(params) supported_hp_size = hp_cl.get_multi_supported_hugepage_size() mount_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() qemu_conf_restore = False def _update_qemu_conf(): """ Mount hugepage path, update qemu conf then restart libvirtd """ size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'} for page in page_list: if page['size'] not in supported_hp_size: test.cancel("Hugepage size [%s] isn't supported, " "please verify kernel cmdline configuration." % page['size']) m_path = "/dev/hugepages%s" % size_dict[page['size']] hp_cl.hugepage_size = int(page['size']) hp_cl.hugepage_path = m_path hp_cl.mount_hugepage_fs() mount_path.append(m_path) if mount_path: qemu_conf.hugetlbfs_mount = mount_path libvirtd.restart() try: # Get host numa node list logging.debug("host node list is %s", node_list) used_node = [] if numa_memory.get('nodeset'): used_node += cpu.cpus_parser(numa_memory['nodeset']) if numa_memnode: for i in numa_memnode: used_node += cpu.cpus_parser(i['nodeset']) if page_list: host_page_tuple = ("hugepage_size", "page_num", "page_nodenum") h_list = handle_param(host_page_tuple, params) h_nodenum = [ h_list[p_size]['nodenum'] for p_size in range(len(h_list)) ] for i in h_nodenum: used_node += cpu.cpus_parser(i) if used_node and not status_error: logging.debug("set node list is %s", used_node) used_node = list(set(used_node)) for i in used_node: if i not in node_list: test.cancel("%s in nodeset out of range" % i) mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal') logging.debug("the memory total in the node %s is %s", i, mem_size) if not int(mem_size): test.cancel("node %s memory is empty" % i) # set hugepage with qemu.conf and mount path _update_qemu_conf() qemu_conf_restore = True # set hugepage with total number or per-node number if nr_pagesize_total: # Only set total 2M size huge page number as total 1G size runtime # update not supported now. 
deallocate = True hp_cl.target_hugepages = int(nr_pagesize_total) hp_cl.set_hugepages() if page_list: hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))] multi_hp_size = hp_cl.get_multi_supported_hugepage_size() for size in hp_size: if size not in multi_hp_size: test.cancel("The hugepage size %s not " "supported or not configured under" " current running kernel." % size) # backup node page setting and set new value for i in h_list: node_val = hp_cl.get_node_num_huge_pages( i['nodenum'], i['size']) # set hugpege per node if current value not satisfied # kernel 1G hugepage runtime number update is supported now if int(i['num']) > node_val: node_dict = i.copy() node_dict['num'] = node_val backup_list.append(node_dict) hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'], i['size']) node_val_after_set = hp_cl.get_node_num_huge_pages( i['nodenum'], i['size']) if node_val_after_set < int(i['num']): test.cancel("There is not enough memory to allocate.") vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) vmxml.vcpu = vcpu_num vmxml.max_mem = max_mem vmxml.max_mem_unit = max_mem_unit vmxml.current_mem = max_mem vmxml.current_mem_unit = max_mem_unit # numatune setting if numa_memnode: vmxml.numa_memory = numa_memory vmxml.numa_memnode = numa_memnode del vmxml.numa_memory if numa_memory: vmxml.numa_memory = numa_memory # vcpu placement setting vmxml.placement = vcpu_placement # guest numa cpu setting vmcpuxml = libvirt_xml.vm_xml.VMCPUXML() vmcpuxml.xml = "<cpu><numa/></cpu>" if topology: vmcpuxml.topology = topology logging.debug(vmcpuxml.numa_cell) vmcpuxml.numa_cell = vmcpuxml.dicts_to_cells(numa_cell) logging.debug(vmcpuxml.numa_cell) vmxml.cpu = vmcpuxml # hugepages setting if page_list: membacking = libvirt_xml.vm_xml.VMMemBackingXML() hugepages = libvirt_xml.vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(page_list)): pagexml = hugepages.PageXML() pagexml.update(page_list[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml is %s", vmxml) vmxml.sync() try: vm.start() session = vm.wait_for_login() vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("vm xml after start is %s", vmxml_new) except virt_vm.VMStartError as e: # Starting VM failed. 
if status_error: return else: test.fail("Test failed in positive case.\n error:" " %s\n%s" % (e, bug_url)) vm_pid = vm.get_pid() # numa hugepage check if page_list: with open("/proc/%s/numa_maps" % vm_pid) as numa_maps: numa_map_info = numa_maps.read() hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info) if not hugepage_info: test.fail("Can't find hugepages usage info in vm " "numa maps") else: logging.debug("The hugepage info in numa_maps is %s" % hugepage_info) map_dict = {} usage_dict = {} node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s" node_pattern += "N(\d+)=(\d+)" for map_info in hugepage_info: for (mem_mode, mem_num, cell_num, host_node_num, vm_page_num) in re.findall(node_pattern, map_info): usage_dict[mem_mode] = cpu.cpus_parser(mem_num) usage_dict[host_node_num] = vm_page_num map_dict[cell_num] = usage_dict.copy() logging.debug("huagepage info in vm numa maps is %s", map_dict) memnode_dict = {} usage_dict = {} if numa_memnode: for i in numa_memnode: node = cpu.cpus_parser(i['nodeset']) mode = mode_dict[i['mode']] usage_dict[mode] = node memnode_dict[i['cellid']] = usage_dict.copy() logging.debug("memnode setting dict is %s", memnode_dict) for k in list(memnode_dict.keys()): for mk in list(memnode_dict[k].keys()): if memnode_dict[k][mk] != map_dict[k][mk]: test.fail("vm pid numa map dict %s" " not expected" % map_dict) # qemu command line check with open("/proc/%s/cmdline" % vm_pid) as f_cmdline: q_cmdline_list = f_cmdline.read().split("\x00") logging.debug("vm qemu cmdline list is %s" % q_cmdline_list) cmdline_list = replace_qemu_cmdline(cmdline_list) for cmd in cmdline_list: logging.debug("checking '%s' in qemu cmdline", cmd['cmdline']) p_found = False for q_cmd in q_cmdline_list: if re.search(cmd['cmdline'], q_cmd): p_found = True break else: continue if not p_found: test.fail("%s not found in vm qemu cmdline" % cmd['cmdline']) # vm inside check vm_cpu_info = cpu.get_cpu_info(session) logging.debug("lscpu output dict in vm is %s", vm_cpu_info) session.close() node_num = int(vm_cpu_info["NUMA node(s)"]) if node_num != len(numa_cell): test.fail("node number %s in vm is not expected" % node_num) for i in range(len(numa_cell)): cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i] vm_cpu_list = cpu.cpus_parser(cpu_str) cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"]) if i == 0 and expect_cpus: cpu_list = cpu.cpus_parser(expect_cpus) if vm_cpu_list != cpu_list: test.fail("vm node %s cpu list %s not expected" % (i, vm_cpu_list)) if topology: vm_topo_tuple = ("Socket(s)", "Core(s) per socket", "Thread(s) per core") for i in range(len(topo_tuple)): topo_info = vm_cpu_info[vm_topo_tuple[i]] if topo_info != topology[topo_tuple[i]]: test.fail("%s in vm topology not expected." % topo_tuple[i]) finally: if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if page_list: for i in backup_list: hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'], i['size']) if deallocate: hp_cl.deallocate = deallocate hp_cl.cleanup() if qemu_conf_restore: qemu_conf.restore() libvirtd.restart() for mt_path in mount_path: try: process.run("umount %s" % mt_path, shell=True) except process.CmdError: logging.warning("umount %s failed" % mt_path)
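# A self-contained sketch of the numa_maps hugepage scan performed above,
# using the same "N<node>=<pages>" fields (see proc(5)); the target pid must
# belong to a running process, and hugetlbfs mappings carry a "hugepages"
# token in their file= path.
import re


def hugepage_nodes(pid):
    """Sum hugepage counts per NUMA node for one process."""
    per_node = {}
    with open("/proc/%s/numa_maps" % pid) as maps:
        for line in maps:
            if "hugepages" not in line:
                continue
            for node, pages in re.findall(r"\bN(\d+)=(\d+)", line):
                per_node[node] = per_node.get(node, 0) + int(pages)
    return per_node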