def backup_raw_device(src, dst):
    if os.path.exists(src):
        _dst = dst + '.part'
        process.system("dd if=%s of=%s bs=4k conv=sync" % (src, _dst))
        os.rename(_dst, dst)
    else:
        logging.info("No source %s, skipping dd...", src)
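# Hedged usage sketch: the device and destination paths below are
# illustrative only; they assume the same 'os', 'logging' and avocado
# 'process' imports used by the helper above.
backup_raw_device("/dev/vg_guests/lv_guest_disk", "/var/tmp/guest_disk.raw")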
def set_chap_auth_target(self):
    """
    set up authentication information for every single initiator,
    which provides the capability to define common login information
    for all Endpoints in a TPG
    """
    auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target
    attr_cmd = ("set attribute %s %s %s" % ("demo_mode_write_protect=0",
                                            "generate_node_acls=1",
                                            "cache_dynamic_acls=1"))
    process.system(auth_cmd + attr_cmd)
    # Set userid
    userid_cmd = "%s set auth userid=%s" % (auth_cmd, self.chap_user)
    output = process.system_output(userid_cmd)
    if self.chap_user not in output:
        raise exceptions.TestFail("Failed to set user. (%s)" % output)
    # Set password
    passwd_cmd = "%s set auth password=%s" % (auth_cmd, self.chap_passwd)
    output = process.system_output(passwd_cmd)
    if self.chap_passwd not in output:
        raise exceptions.TestFail("Failed to set password. (%s)" % output)
    # Save configuration
    process.system("targetcli / saveconfig")
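# For illustration only (values are assumptions, not taken from the original
# test): with a target IQN of iqn.2003-01.org.linux:target1, the method above
# ends up running targetcli commands along these lines:
#
#   targetcli /iscsi/iqn.2003-01.org.linux:target1/tpg1/ set attribute \
#       demo_mode_write_protect=0 generate_node_acls=1 cache_dynamic_acls=1
#   targetcli /iscsi/iqn.2003-01.org.linux:target1/tpg1/ set auth userid=<chap_user>
#   targetcli /iscsi/iqn.2003-01.org.linux:target1/tpg1/ set auth password=<chap_passwd>
#   targetcli / saveconfig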
def test_scpandssh(self):
    '''
    check scp and ssh
    '''
    cmd = "ssh %s@%s \"echo hi\"" % (self.user, self.peer)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("unable to ssh into peer machine")
    process.run("dd if=/dev/zero of=/tmp/tempfile bs=1024000000 count=1",
                shell=True)
    time.sleep(15)
    md_val1 = hashlib.md5(open('/tmp/tempfile', 'rb').read()).hexdigest()
    time.sleep(5)
    cmd = "timeout 600 scp /tmp/tempfile %s@%s:/tmp" %\
        (self.user, self.peer)
    ret = process.system(cmd, shell=True, verbose=True, ignore_status=True)
    time.sleep(15)
    if ret != 0:
        self.fail("unable to copy into peer machine")
    cmd = "timeout 600 scp %s@%s:/tmp/tempfile /tmp" %\
        (self.user, self.peer)
    ret = process.system(cmd, shell=True, verbose=True, ignore_status=True)
    time.sleep(15)
    if ret != 0:
        self.fail("unable to copy from peer machine")
    md_val2 = hashlib.md5(open('/tmp/tempfile', 'rb').read()).hexdigest()
    time.sleep(5)
    if md_val1 != md_val2:
        self.fail("md5 checksums before and after the scp round trip differ")
def tearDown(self):
    '''
    set the initial state
    '''
    self.bond_remove("local")
    for val1, val2, val3 in zip(self.host_interfaces,
                                self.host_ips, self.net_mask):
        cmd = "ip addr add %s/%s dev %s;ip link set %s up"\
              % (val2, val3, val1, val1)
        process.system(cmd, shell=True, ignore_status=True)
        for i in range(0, 600, 60):
            if 'state UP' in process.system_output("ip link show %s" % val1,
                                                   shell=True):
                self.log.info("Interface %s is up" % val1)
                break
            time.sleep(60)
        else:
            self.log.info("Interface %s is not up in the host machine"
                          % val1)
    if self.peer_bond_needed:
        self.bond_remove("peer")
        for val1, val2, val3 in zip(self.peer_interfaces,
                                    self.peer_ips, self.net_mask):
            msg = "ip addr add %s/%s dev %s;ip link set %s up;sleep %s"\
                  % (val2, val3, val1, val1, self.peer_wait_time)
            cmd = "ssh %s@%s \"%s\""\
                  % (self.user, self.peer_first_ipinterface, msg)
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.log.info("unable to bring the peer interfaces back "
                              "to their original state")
    time.sleep(self.sleep_time)
def set_num_huge_pages(num):
    """
    Set number of huge pages.

    :param num: Target number of huge pages.
    """
    process.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
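# Hedged companion sketch, not part of the original helper: after calling
# set_num_huge_pages(), the value the kernel actually applied can be read
# back from procfs. The helper name 'get_num_huge_pages' is an assumption
# for illustration only.
def get_num_huge_pages():
    # Read the current huge page count straight from procfs.
    with open('/proc/sys/vm/nr_hugepages') as proc_file:
        return int(proc_file.read().strip())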
def extensivetest(self):
    """
    Extensive software raid options are run viz create, delete,
    assemble, create spares, remove and add drives
    """
    cmd = "mdadm --fail /dev/md/mdsraid %s" % (self.remadd)
    self.check_pass(cmd, "Unable to fail a drive from MD device")
    cmd = "mdadm --detail /dev/md/mdsraid"
    self.check_pass(cmd, "Failed to display MD device details")
    cmd = "mdadm --manage /dev/md/mdsraid --remove %s" % (self.remadd)
    self.check_pass(cmd, "Failed to remove a drive from MD device")
    cmd = "mdadm --detail /dev/md/mdsraid"
    self.check_pass(cmd, "Failed to display MD device details")
    cmd = "mdadm --manage /dev/md/mdsraid --add %s" % (self.remadd)
    self.check_pass(cmd, "Failed to add back the drive to MD device")
    cmd = "mdadm --detail /dev/md/mdsraid"
    self.check_pass(cmd, "Failed to display MD device details")
    cmd = "mdadm --manage /dev/md/mdsraid --stop"
    self.check_pass(cmd, "Failed to stop/remove the MD device")
    cmd = "mdadm --assemble /dev/md/mdsraid %s %s" \
        % (self.disk, self.sparedisk)
    self.check_pass(cmd, "Failed to assemble back the MD device")
    # Poll until the array has finished recovering
    cmd = "mdadm --detail /dev/md/mdsraid | grep State | grep recovering"
    while process.system(cmd, ignore_status=True, shell=True) == 0:
        time.sleep(30)
        process.system(cmd, ignore_status=True, shell=True)
    cmd = "mdadm --detail /dev/md/mdsraid"
    self.check_pass(cmd, "Failed to display the MD device details")
def package_jeos(img):
    """
    Package JeOS and make it ready for upload.

    Steps:
    1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
    2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
    3) Compress the sparsified image with 7za

    :param img: Path to a qcow2 image
    """
    basedir = os.path.dirname(img)
    backup = img + '.backup'
    qemu_img = utils_misc.find_command('qemu-img')
    shutil.move(img, backup)
    logging.info("Backup %s saved", backup)
    process.system("%s convert -f qcow2 -O qcow2 %s %s" % (qemu_img, backup,
                                                           img))
    logging.info("Sparse file %s created successfully", img)
    archiver = utils_misc.find_command('7za')
    compressed_img = img + ".7z"
    process.system("%s a %s %s" % (archiver, compressed_img, img))
    logging.info("JeOS compressed file %s created successfully",
                 compressed_img)
def pingpong_exec(self, arg1, arg2, arg3):
    '''
    ping pong exec function
    '''
    test = arg2
    logs = "> /tmp/ib_log 2>&1 &"
    if test == "basic":
        test = ""
    msg = " \"timeout %s %s -d %s -g %d -i %d %s %s %s\" " \
        % (self.tmo, arg1, self.peer_ca, self.peer_gid, self.peer_port,
           test, arg3, logs)
    cmd = "ssh %s %s" % (self.peer_ip, msg)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("ssh failed to remote machine")
    time.sleep(2)
    self.log.info("client data for %s(%s)", arg1, arg2)
    self.log.info("%s -d %s -g %d %s -i %d %s %s", arg1, self.ca_name,
                  self.gid, self.peer_ip, self.port, test, arg3)
    tmp = "timeout %s %s -d %s -g %d -i %d %s %s %s" \
        % (self.tmo, arg1, self.ca_name, self.gid, self.port,
           self.peer_ip, test, arg3)
    if process.system(tmp, shell=True, ignore_status=True) != 0:
        self.fail("test failed")
    self.log.info("server data for %s(%s)", arg1, arg2)
    self.log.info("%s -d %s -g %d -i %d %s %s", arg1, self.peer_ca,
                  self.peer_gid, self.peer_port, test, arg3)
    msg = " \"timeout %s cat /tmp/ib_log && rm -rf /tmp/ib_log\" " \
        % self.tmo
    cmd = "ssh %s %s" % (self.peer_ip, msg)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("test failed")
def test_download_offset(test, vm, params):
    """
    Test command download-offset
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    gf.do_mount("/")
    string = "Hello World"
    gf.write("/src.txt", string)
    src_size = gf.filesize("/src.txt").stdout.strip()
    dest = "%s/dest.txt" % data_dir.get_tmp_dir()
    gf.download_offset("/src.txt", "%s" % dest, 0, len(string))
    gf.close_session()
    content = process.getoutput("cat %s" % dest)
    process.system("rm %s" % dest)
    if content != "Hello World":
        test.fail("Content or file size does not match")
def install_packages(self):
    '''
    Install necessary packages
    '''
    smm = SoftwareManager()
    detected_distro = distro.detect()
    self.log.info("Test is running on %s", detected_distro.name)
    if not smm.check_installed("ksh") and not smm.install("ksh"):
        self.cancel('ksh is needed for the test to be run')
    if detected_distro.name == "Ubuntu":
        if not smm.check_installed("python-paramiko") and not \
                smm.install("python-paramiko"):
            self.cancel('python-paramiko is needed for the test to be run')
        ubuntu_url = self.params.get('ubuntu_url', default=None)
        debs = self.params.get('debs', default=None)
        if not ubuntu_url or not debs:
            self.cancel("No url specified")
        for deb in debs:
            deb_url = os.path.join(ubuntu_url, deb)
            deb_install = self.fetch_asset(deb_url, expire='7d')
            shutil.copy(deb_install, self.workdir)
            process.system("dpkg -i %s/%s" % (self.workdir, deb),
                           ignore_status=True, sudo=True)
    else:
        url = self.params.get('url', default=None)
        if not url:
            self.cancel("No url specified")
        rpm_install = self.fetch_asset(url, expire='7d')
        shutil.copy(rpm_install, self.workdir)
        os.chdir(self.workdir)
        process.run('chmod +x ibmtools')
        process.run('./ibmtools --install --managed')
def setUp(self):
    """
    Get the number of cores and threads per core
    Set the SMT value to 4/8
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    self.nfail = 0
    self.CORES = process.system_output("lscpu | grep 'Core(s) per socket:'"
                                       "| awk '{print $4}'", shell=True)
    self.SOCKETS = process.system_output("lscpu | grep 'Socket(s):'"
                                         "| awk '{print $2}'", shell=True)
    self.THREADS = process.system_output("lscpu | grep 'Thread(s) per core"
                                         ":'| awk '{print $4}'", shell=True)
    self.T_CORES = int(self.CORES) * int(self.SOCKETS)
    self.log.info(" Cores = %s and threads = %s "
                  % (self.T_CORES, self.THREADS))
    process.system("echo 8 > /proc/sys/kernel/printk", shell=True,
                   ignore_status=True)
    self.max_smt = 4
    if cpu.get_cpu_arch().lower() == 'power8':
        self.max_smt = 8
    if cpu.get_cpu_arch().lower() == 'power6':
        self.max_smt = 2
    process.system_output("ppc64_cpu --smt=%s" % self.max_smt, shell=True)
    self.path = "/sys/devices/system/cpu"
def cleanup(self, ssh_auto_recover=True):
    """
    Cleanup NFS client.
    """
    ssh_cmd = "ssh %s@%s " % (self.ssh_user, self.nfs_client_ip)
    logging.debug("Umount %s from %s" % (self.mount_dir,
                                         self.nfs_server_ip))
    umount_cmd = ssh_cmd + "'umount -l %s'" % self.mount_dir
    try:
        process.system(umount_cmd, verbose=True)
    except process.CmdError:
        raise exceptions.TestFail("Failed to run: %s" % umount_cmd)
    if self.mkdir_mount_remote:
        rmdir_cmd = ssh_cmd + "'rm -rf %s'" % self.mount_dir
        try:
            process.system(rmdir_cmd, verbose=True)
        except process.CmdError:
            raise exceptions.TestFail("Failed to run: %s" % rmdir_cmd)
    if self.is_mounted():
        raise exceptions.TestFail("Failed to umount %s" % self.mount_dir)
    # Recover SSH connection
    self.ssh_obj.auto_recover = ssh_auto_recover
    del self.ssh_obj
def setup_remote(self):
    """
    Mount sharing directory to remote host.
    """
    ssh_cmd = "ssh %s@%s " % (self.ssh_user, self.nfs_client_ip)
    check_mount_dir_cmd = ssh_cmd + "'ls -d %s'" % self.mount_dir
    logging.debug("To check if the %s exists", self.mount_dir)
    output = commands.getoutput(check_mount_dir_cmd)
    if re.findall("No such file or directory", output, re.M):
        mkdir_cmd = ssh_cmd + "'mkdir -p %s'" % self.mount_dir
        logging.debug("Prepare to create %s", self.mount_dir)
        s, o = commands.getstatusoutput(mkdir_cmd)
        if s != 0:
            raise exceptions.TestFail("Failed to run %s: %s" %
                                      (mkdir_cmd, o))
        self.mkdir_mount_remote = True
    self.mount_src = "%s:%s" % (self.nfs_server_ip, self.mount_src)
    logging.debug("Mount %s to %s" % (self.mount_src, self.mount_dir))
    mount_cmd = ssh_cmd + "'mount -t nfs %s %s -o %s'" % (self.mount_src,
                                                          self.mount_dir,
                                                          self.mount_options)
    try:
        process.system(mount_cmd, verbose=True)
    except process.CmdError:
        raise exceptions.TestFail("Failed to run: %s" % mount_cmd)
    # Check if the sharing directory is mounted
    if not self.is_mounted():
        raise exceptions.TestFail("Failed to mount from %s to %s" %
                                  (self.mount_src, self.mount_dir))
def latencyperf_exec(self, arg1, arg2, arg3):
    '''
    latency performance exec function
    '''
    flag = 0
    logs = "> /tmp/ib_log 2>&1 &"
    cmd = "ssh %s \" timeout %s %s -d %s -i %s %s %s %s \" " \
        % (self.peer_ip, self.tmo, arg1, self.peer_ca, self.peer_port,
           arg2, arg3, logs)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("ssh failed to remote machine or fetching data from "
                  "remote machine failed")
    time.sleep(2)
    self.log.info("client data for %s(%s)", arg1, arg2)
    cmd = "timeout %s %s -d %s -i %s %s %s %s" \
        % (self.tmo, arg1, self.ca_name, self.port, self.peer_ip,
           arg2, arg3)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        flag = 1
    self.log.info("server data for %s(%s)", arg1, arg2)
    cmd = "ssh %s \" timeout %s cat /tmp/ib_log && rm -rf /tmp/ib_log\" " \
        % (self.peer_ip, self.tmo)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("ssh failed to remote machine or fetching data from "
                  "remote machine failed")
    return flag
def patch(self):
    """
    Patches the source dir with all patch files
    """
    os.chdir(self.source_dir)
    for patch in self.patches:
        process.system("patch -p1 < %s" % os.path.basename(patch),
                       shell=True)
def clear_dmesg_logs(cls):
    """
    Clears dmesg logs, so that functions which use dmesg get the
    latest logs
    """
    cmd = "dmesg -C"
    process.system(cmd, ignore_status=True, shell=True)
def test(self):
    """
    Test ping6
    """
    self.log.info(self.test_name)
    logs = "> /tmp/ib_log 2>&1 &"
    cmd = "ssh %s \" timeout %s %s %s %s\" " \
        % (self.peer_ip, self.timeout, self.test_name,
           self.option_list[0], logs)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("SSH connection (or) Server command failed")
    time.sleep(5)
    self.log.info("Client data - %s(%s)" % (self.test_name,
                                            self.option_list[0]))
    cmd = "timeout %s %s %s" \
        % (self.timeout, self.test_name, self.option_list[1])
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("Client command failed")
    time.sleep(5)
    self.log.info("Server data - %s(%s)" % (self.test_name,
                                            self.option_list[1]))
    cmd = "ssh %s \"timeout %s cat /tmp/ib_log && rm -rf /tmp/ib_log\" " \
        % (self.peer_ip, self.timeout)
    if process.system(cmd, shell=True, ignore_status=True) != 0:
        self.fail("Server output retrieval failed")
def check_installed(self, name, version=None, arch=None):
    """
    Check if package [name] is installed.

    :param name: Package name.
    :param version: Package version.
    :param arch: Package architecture.
    """
    if arch:
        cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name)
        inst_archs = process.system_output(cmd, ignore_status=True)
        inst_archs = inst_archs.split('\n')
        for inst_arch in inst_archs:
            if inst_arch == arch:
                return self._check_installed_version(name, version)
        return False
    elif version:
        return self._check_installed_version(name, version)
    else:
        cmd = 'rpm -q ' + name
        try:
            process.system(cmd)
            return True
        except process.CmdError:
            return False
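# Hedged usage sketch: 'RpmBackend' is assumed to be the class providing the
# check_installed() method above, and the package names are illustrative.
backend = RpmBackend()
if backend.check_installed('bash'):
    print('bash is installed')
if backend.check_installed('bash', arch='x86_64'):
    print('bash is installed for x86_64')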
def upgrade(self, name=None):
    """
    Upgrade all packages of the system with eventual new versions.

    Optionally, upgrade individual packages.

    :param name: optional parameter wildcard spec to upgrade
    :type name: str
    """
    ud_command = 'update'
    ud_cmd = self.base_command + ' ' + ud_command
    try:
        process.system(ud_cmd)
    except process.CmdError:
        log.error("Apt package update failed")
    if name:
        up_command = 'install --only-upgrade'
        up_cmd = self.base_command + ' ' + up_command + ' ' + name
    else:
        up_command = 'upgrade'
        up_cmd = self.base_command + ' ' + up_command
    try:
        process.system(up_cmd)
        return True
    except process.CmdError:
        return False
def test_get_diskspace(self):
    """
    Use scsi_debug device to check disk size
    """
    pre = glob.glob("/dev/sd*")
    try:
        process.system("modprobe scsi_debug", sudo=True)
        disks = set(glob.glob("/dev/sd*")).difference(pre)
        self.assertEqual(len(disks), 1, "pre: %s\npost: %s"
                         % (disks, glob.glob("/dev/sd*")))
        disk = disks.pop()
        self.assertEqual(lv_utils.get_diskspace(disk), "8388608")
    except BaseException:
        for _ in xrange(10):
            res = process.run("rmmod scsi_debug", ignore_status=True,
                              sudo=True)
            if not res.exit_status:
                print("scsi_debug removed")
                break
        else:
            print("Fail to remove scsi_debug: %s" % res)
    for _ in xrange(10):
        res = process.run("rmmod scsi_debug", ignore_status=True,
                          sudo=True)
        if not res.exit_status:
            break
    else:
        self.fail("Fail to remove scsi_debug after testing: %s" % res)
def setUp(self):
    """
    Download 'nvme-cli'.
    """
    self.device = self.params.get('device', default='/dev/nvme0')
    self.disk = self.params.get('disk', default='/dev/nvme0n1')
    cmd = 'ls %s' % self.device
    if process.system(cmd, ignore_status=True) != 0:
        self.skip("%s does not exist" % self.device)
    smm = SoftwareManager()
    if not smm.check_installed("nvme-cli") and not \
            smm.install("nvme-cli"):
        self.skip('nvme-cli is needed for the test to be run')
    python_packages = pip.get_installed_distributions()
    python_packages_list = [i.key for i in python_packages]
    python_pkgs = ['nose', 'nose2', 'pep8', 'flake8', 'pylint', 'epydoc']
    for py_pkg in python_pkgs:
        if py_pkg not in python_packages_list:
            self.skip("python package %s not installed" % py_pkg)
    url = 'https://codeload.github.com/linux-nvme/nvme-cli/zip/master'
    tarball = self.fetch_asset("nvme-cli-master.zip", locations=[url],
                               expire='7d')
    archive.extract(tarball, self.teststmpdir)
    self.nvme_dir = os.path.join(self.teststmpdir, "nvme-cli-master")
    print(os.listdir(self.nvme_dir))
    os.chdir(os.path.join(self.nvme_dir, 'tests'))
    msg = ['{']
    msg.append(' "controller": "%s",' % self.device)
    msg.append(' "ns1": "%s",' % self.disk)
    msg.append(' "log_dir": "%s"' % self.outputdir)
    msg.append('}')
    with open('config.json', 'w') as config_file:
        config_file.write("\n".join(msg))
    process.system("cat config.json")
def rbd_image_create(ceph_monitor, rbd_pool_name, rbd_image_name,
                     rbd_image_size, force_create=False):
    """
    Create a rbd image.

    :param ceph_monitor: The specified monitor to connect to
    :param rbd_pool_name: The name of rbd pool
    :param rbd_image_name: The name of rbd image
    :param rbd_image_size: The size of rbd image
    :param force_create: Force create the image or not
    """
    create_image = True
    if rbd_image_exist(ceph_monitor, rbd_pool_name, rbd_image_name):
        create_image = False
        image_info = rbd_image_info(ceph_monitor, rbd_pool_name,
                                    rbd_image_name)
        try:
            int(rbd_image_size)
            compare_str = rbd_image_size
        except ValueError:
            compare_str = utils_misc.normalize_data_size(rbd_image_size)
        if image_info['size'] != compare_str or force_create:
            rbd_image_rm(ceph_monitor, rbd_pool_name, rbd_image_name)
            create_image = True

    if create_image:
        cmd = "rbd create %s/%s -m %s" % (rbd_pool_name, rbd_image_name,
                                          ceph_monitor)
        process.system(cmd, verbose=True)
    else:
        logging.debug("Image already exists, skip creating it.")
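# Hedged usage sketch: the monitor address, pool and image names below are
# illustrative values only, not taken from the original module.
rbd_image_create("192.168.122.10:6789", "rbd_pool", "rbd_test_image", "10G")
# Re-create the image even if it already exists with a matching size.
rbd_image_create("192.168.122.10:6789", "rbd_pool", "rbd_test_image", "10G",
                 force_create=True)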
def setUp(self):
    '''
    Build Rmaptest
    Source:
    https://www.kernel.org/pub/linux/kernel/people/mbligh/tools/rmap-test.c
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.error('Gcc is needed for the test to be run')
    rmaptest = self.fetch_asset('https://www.kernel.org/pub/'
                                'linux/kernel/people/mbligh/'
                                'tools/rmap-test.c', expire='7d')
    shutil.copyfile(rmaptest, os.path.join(self.workdir, 'rmap-test.c'))
    os.chdir(self.workdir)
    if 'CC' in os.environ:
        cc = '$CC'
    else:
        cc = 'cc'
    process.system('%s -Wall -o rmaptest rmap-test.c' % cc,
                   ignore_status=True)
def setUp(self):
    """
    Build 'nvme-cli' and setup the device.
    """
    self.device = self.params.get('device', default='/dev/nvme0')
    cmd = 'ls %s' % self.device
    if process.system(cmd, ignore_status=True) != 0:
        self.skip("%s does not exist" % self.device)
    locations = ["https://github.com/linux-nvme/nvme-cli/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("nvme-cli.zip", locations=locations,
                               expire='15d')
    archive.extract(tarball, self.srcdir)
    os.chdir("%s/nvme-cli-master" % self.srcdir)
    process.system("./NVME-VERSION-GEN", ignore_status=True)
    if process.system_output("cat NVME-VERSION-FILE").strip("\n").\
            split()[-1] != process.system_output("nvme version").\
            strip("\n").split()[-1]:
        build.make(".")
        build.make(".", extra_args='install')
    self.id_ns = self.create_namespace()
    self.log.info(self.id_ns)
    cmd = ("nvme id-ns %s | grep 'in use' | awk '{print $5}' | "
           "awk -F':' '{print $NF}'" % self.id_ns)
    self.format_size = process.system_output(cmd, shell=True).strip('\n')
    self.format_size = pow(2, int(self.format_size))
    cmd = "nvme id-ns %s | grep 'in use' | awk '{print $2}'" % self.id_ns
    self.lba = process.system_output(cmd, shell=True).strip('\n')
def setUp(self):
    smm = SoftwareManager()
    self.minthreads = self.params.get(
        'minthrd', default=(500 + cpu.online_cpus_count()))
    self.maxthreads = self.params.get('maxthrd', default=None)
    self.iothreads = self.params.get('iothrd', default=self.minthreads/2)
    self.maxmem = self.params.get('maxmem', default=int(
        memory.meminfo.MemFree.m / self.minthreads))
    self.maxio = self.params.get('maxio', default=None)
    self.longthreads = self.params.get('longthrd', default=False)
    self.shrtthreads = self.params.get('shortthrd', default=False)
    self.time = self.params.get('time', default=100)
    self.iotime = self.params.get('iotime', default=50)
    if self.longthreads and self.shrtthreads:
        self.cancel('Please choose only one of long and short threads')
    dist = distro.detect()
    packages = ['gcc']
    if dist.name == 'Ubuntu':
        packages.extend(['g++'])
    elif dist.name in ['SuSE', 'fedora', 'rhel']:
        packages.extend(['gcc-c++'])
    for package in packages:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    for file_name in ['dwh.cpp', 'Makefile']:
        self.copyutil(file_name)
    os.chdir(self.teststmpdir)
    if dist.name in ['fedora', 'rhel']:
        process.system('patch -p0 < %s' % self.get_data('fofd.patch'),
                       shell=True)
    build.make(self.teststmpdir)
def setUp(self):
    '''
    Build Hackbench
    Source:
    http://people.redhat.com/~mingo/cfs-scheduler/tools/hackbench.c
    '''
    self._threshold_time = self.params.get('time_val', default=None)
    self._num_groups = self.params.get('num_groups', default=90)
    self._iterations = self.params.get('iterations', default=1)
    self.results = None
    sm = SoftwareManager()
    if not sm.check_installed("gcc") and not sm.install("gcc"):
        self.cancel("Gcc is needed for the test to be run")
    hackbench = self.fetch_asset('http://people.redhat.com'
                                 '/~mingo/cfs-scheduler/'
                                 'tools/hackbench.c')
    shutil.copyfile(hackbench, os.path.join(self.workdir, 'hackbench.c'))
    os.chdir(self.workdir)
    if 'CC' in os.environ:
        cc = '$CC'
    else:
        cc = 'cc'
    process.system('%s hackbench.c -o hackbench -lpthread' % cc)
def setup_remote(self):
    """
    Mount sharing directory to remote host.
    """
    check_mount_dir_cmd = self.ssh_cmd + "'ls -d %s'" % self.mount_dir
    logging.debug("To check if the %s exists", self.mount_dir)
    output = process.getoutput(check_mount_dir_cmd)
    if re.findall("No such file or directory", output, re.M):
        mkdir_cmd = self.ssh_cmd + "'mkdir -p %s'" % self.mount_dir
        logging.debug("Prepare to create %s", self.mount_dir)
        s, o = process.getstatusoutput(mkdir_cmd)
        if s != 0:
            raise exceptions.TestFail("Failed to run %s: %s" %
                                      (mkdir_cmd, o))
        self.mkdir_mount_remote = True
    if self.params.get("firewall_to_permit_nfs", "yes") == "yes":
        self.firewall_to_permit_nfs()
    self.mount_src = "%s:%s" % (self.nfs_server_ip, self.mount_src)
    logging.debug("Mount %s to %s" % (self.mount_src, self.mount_dir))
    mount_cmd = "mount -t nfs %s %s" % (self.mount_src, self.mount_dir)
    if self.mount_options:
        mount_cmd += " -o %s" % self.mount_options
    try:
        cmd = "%s '%s'" % (self.ssh_cmd, mount_cmd)
        process.system(cmd, verbose=True)
    except process.CmdError:
        raise exceptions.TestFail("Failed to run: %s" % cmd)
    # Check if the sharing directory is mounted
    if not self.is_mounted():
        raise exceptions.TestFail("Failed to mount from %s to %s" %
                                  (self.mount_src, self.mount_dir))
def plot_2d_graphs(self):
    """
    For each one of the throughput parameters, generate a set of gnuplot
    commands that will create a parametric surface with file size vs.
    record size vs. throughput.
    """
    datasource_2d = os.path.join(self.output_dir, '2d-datasource-file')
    for index, label in zip(range(2, 15), _LABELS[2:]):
        commands_path = os.path.join(self.output_dir, '2d-%s.do' % label)
        commands = ""
        commands += "set title 'Iozone performance: %s'\n" % label
        commands += "set logscale x\n"
        commands += "set xlabel 'File size (KB)'\n"
        commands += "set ylabel 'Throughput (MB/s)'\n"
        commands += "set terminal png small size 450 350\n"
        commands += "set output '%s'\n" % os.path.join(self.output_dir,
                                                       '2d-%s.png' % label)
        commands += ("plot '%s' using 1:%s title '%s' with lines \n" %
                     (datasource_2d, index, label))
        commands_file = open(commands_path, 'w')
        commands_file.write(commands)
        commands_file.close()
        try:
            process.system("gnuplot \"%s\"" % commands_path, shell=True)
        except process.CmdError:
            self.log.error("Problem plotting from commands file %s",
                           commands_path)
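# For illustration only: assuming a hypothetical output_dir of '/tmp/iozone'
# and 'write' as the first throughput label in _LABELS[2:], the loop above
# would write a '2d-write.do' command file roughly like:
#
#   set title 'Iozone performance: write'
#   set logscale x
#   set xlabel 'File size (KB)'
#   set ylabel 'Throughput (MB/s)'
#   set terminal png small size 450 350
#   set output '/tmp/iozone/2d-write.png'
#   plot '/tmp/iozone/2d-datasource-file' using 1:2 title 'write' with lines
#
# and then run: gnuplot "/tmp/iozone/2d-write.do"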
def run_ip_test(session, ip_ver):
    """
    Check iptables on host and ipv6 address on guest
    """
    if ip_ver == "ipv6":
        # Clean up iptables rules for guest to get ipv6 address
        session.cmd_status("ip6tables -F")

    # It may take some time to get the ip address
    def get_ip_func():
        return utils_net.get_guest_ip_addr(session, iface_mac,
                                           ip_version=ip_ver)

    utils_misc.wait_for(get_ip_func, 5)
    if not get_ip_func():
        utils_net.restart_guest_network(session, iface_mac,
                                        ip_version=ip_ver)
        utils_misc.wait_for(get_ip_func, 5)
    vm_ip = get_ip_func()
    logging.debug("Guest has ip: %s", vm_ip)
    if not vm_ip:
        test.fail("Can't find ip address on guest")
    ip_gateway = net_ip_address
    if ip_ver == "ipv6":
        ip_gateway = net_ipv6_address
        # Cleanup ip6tables on host for ping6 test
        process.system("ip6tables -F")
    if ip_gateway and not routes:
        ping_s, _ = ping(dest=ip_gateway, count=5,
                         timeout=10, session=session)
        if ping_s:
            test.fail("Failed to ping gateway address: %s" % ip_gateway)
def action(self):
    """
    Execute 'fio' with appropriate parameters.
    """
    os.chdir(self.srcdir)
    cmd = ('./fio %s' % self.get_data_path(self.params.fio_job))
    process.system(cmd)
def get_firmware_log(self):
    """
    Fetch the firmware log for the device using 'nvme fw-log'.
    """
    cmd = "%s fw-log %s" % (self.binary, self.device)
    process.system(cmd, shell=True, ignore_status=True)
        if test_ipv4_address:
            check_ipt_rules(check_ipv4=True)
            run_ip_test(session, "ipv4")
        if test_guest_libvirt:
            run_guest_libvirt(session)
        session.close()
    except virt_vm.VMStartError as details:
        logging.info(str(details))
        if not (start_error or restart_error):
            test.fail('VM failed to start:\n%s' % details)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        for vms in vms_list:
            virsh.remove_domain(vms.name, "--remove-all-storage")
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        vmxml_backup.sync()
        if test_ipv6_address and original_accept_ra != '2':
            process.system(sysctl_cmd + "=%s" % original_accept_ra)
def run(test, params, env): """ convert specific kvm guest to rhev """ for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) if utils_v2v.V2V_EXEC is None: test.error('Missing command: virt-v2v') # Guest name might be changed, we need a new variant to save the original # name vm_name = params['original_vm_name'] = params.get('main_vm', 'EXAMPLE') unprivileged_user = params.get('unprivileged_user') target = params.get('target') input_mode = params.get('input_mode') input_file = params.get('input_file') output_mode = params.get('output_mode') output_format = params.get('output_format') os_pool = output_storage = params.get('output_storage', 'default') bridge = params.get('bridge') network = params.get('network') address_cache = env.get('address_cache') v2v_timeout = int(params.get('v2v_timeout', 1200)) status_error = 'yes' == params.get('status_error', 'no') skip_vm_check = params.get('skip_vm_check', 'no') skip_virsh_pre_conn = params.get('skip_virsh_pre_conn', 'no') pool_name = params.get('pool_name', 'v2v_test') pool_type = params.get('pool_type', 'dir') pool_target = params.get('pool_target_path', 'v2v_pool') pvt = libvirt.PoolVolumeTest(test, params) checkpoint = params.get('checkpoint', '') datastore = params.get('datastore') esxi_host = params.get('esx_hostname') esxi_password = params.get('esxi_password') hypervisor = params.get("hypervisor") input_transport = params.get("input_transport") vmx_nfs_src = params.get("vmx_nfs_src") # for construct rhv-upload option in v2v cmd output_method = params.get("output_method") rhv_upload_opts = params.get("rhv_upload_opts") storage_name = params.get('storage_name') # for get ca.crt file from ovirt engine rhv_passwd = params.get("rhv_upload_passwd") rhv_passwd_file = params.get("rhv_upload_passwd_file") ovirt_engine_passwd = params.get("ovirt_engine_password") ovirt_hostname = params.get("ovirt_engine_url").split( '/')[2] if params.get("ovirt_engine_url") else None ovirt_ca_file_path = params.get("ovirt_ca_file_path") local_ca_file_path = params.get("local_ca_file_path") vpx_dc = params.get("vpx_dc") vpx_hostname = params.get("vpx_hostname") vpx_password = params.get("vpx_password") src_uri_type = params.get('src_uri_type') v2v_opts = '-v -x' if params.get('v2v_debug', 'on') in ['on', 'force_on' ] else '' v2v_sasl = '' if params.get('v2v_opts'): # Add a blank by force v2v_opts += ' ' + params.get("v2v_opts") error_list = [] # create different sasl_user name for different job if output_mode == 'rhev': params.update({ 'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3) }) logging.info('sals user name is %s' % params.get("sasl_user")) if output_method == 'rhv_upload': # Create password file for '-o rhv_upload' to connect to ovirt with open(rhv_passwd_file, 'w') as f: f.write(rhv_passwd) # Copy ca file from ovirt to local remote.scp_from_remote(ovirt_hostname, 22, 'root', ovirt_engine_passwd, ovirt_ca_file_path, local_ca_file_path) def log_fail(msg): """ Log error and update error list """ logging.error(msg) error_list.append(msg) def check_BSOD(): """ Check if boot up into BSOD """ bar = 0.999 match_img = params.get('image_to_match') screenshot = '%s/BSOD_screenshot.ppm' % data_dir.get_tmp_dir() if match_img is None: test.error('No BSOD screenshot to match!') cmd_man_page = 'man virt-v2v|grep -i "Boot failure: 0x0000007B"' if process.run(cmd_man_page, shell=True).exit_status != 0: log_fail('Man page not contain boot failure msg') for i in range(100): virsh.screenshot(vm_name, 
screenshot) similar = ppm_utils.image_histogram_compare(screenshot, match_img) if similar > bar: logging.info('Meet BSOD with similarity %s' % similar) return time.sleep(1) log_fail('No BSOD as expected') def check_result(result, status_error): """ Check virt-v2v command result """ def vm_check(): """ Checking the VM """ if output_mode == 'json' and not check_json_output(params): test.fail('check json output failed') if output_mode == 'local' and not check_local_output(params): test.fail('check local output failed') if output_mode in ['null', 'json', 'local']: return # Create vmchecker before virsh.start so that the vm can be undefined # if started failed. vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion if params.get('skip_vm_check') != 'yes': if checkpoint != 'win2008r2_ostk': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") if checkpoint == 'win2008r2_ostk': check_BSOD() # Merge 2 error lists error_list.extend(vmchecker.errors) libvirt.check_exit_status(result, status_error) output = result.stdout_text + result.stderr_text if not status_error: vm_check() log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list)) try: if checkpoint == 'regular_user_sudo': regular_sudo_config = '/etc/sudoers.d/v2v_test' with open(regular_sudo_config, 'w') as fd: fd.write('%s ALL=(ALL) NOPASSWD: ALL' % unprivileged_user) # create user try: pwd.getpwnam(unprivileged_user) except KeyError: process.system("useradd %s" % unprivileged_user) # generate ssh-key rsa_private_key_path = '/home/%s/.ssh/id_rsa' % unprivileged_user rsa_public_key_path = '/home/%s/.ssh/id_rsa.pub' % unprivileged_user process.system('su - %s -c \'ssh-keygen -t rsa -q -N "" -f %s\'' % (unprivileged_user, rsa_private_key_path)) with open(rsa_public_key_path) as fd: pub_key = fd.read() v2v_params = { 'main_vm': vm_name, 'target': target, 'v2v_opts': v2v_opts, 'os_storage': output_storage, 'network': network, 'bridge': bridge, 'input_mode': input_mode, 'input_file': input_file, 'new_name': 'ova_vm_' + utils_misc.generate_random_string(3), 'datastore': datastore, 'esxi_host': esxi_host, 'esxi_password': esxi_password, 'input_transport': input_transport, 'vmx_nfs_src': vmx_nfs_src, 'output_method': output_method, 'os_storage_name': storage_name, 'os_pool': os_pool, 'rhv_upload_opts': rhv_upload_opts, 'params': params } if input_mode == 'vmx': v2v_params.update({ 'new_name': vm_name + utils_misc.generate_random_string(3), 'hypervisor': hypervisor, 'vpx_dc': vpx_dc, 'password': vpx_password if src_uri_type != 'esx' else esxi_password, 'hostname': vpx_hostname, 'skip_virsh_pre_conn': skip_virsh_pre_conn }) if checkpoint == 'regular_user_sudo': v2v_params.update({'pub_key': pub_key}) # copy ova from nfs storage before v2v conversion if input_mode == 'ova': src_dir = params.get('ova_dir') dest_dir = params.get('ova_copy_dir') if os.path.isfile(src_dir) and not os.path.exists(dest_dir): os.makedirs(dest_dir, exist_ok=True) if os.path.isdir(src_dir) and os.path.exists(dest_dir): shutil.rmtree(dest_dir) if os.path.isdir(src_dir): 
shutil.copytree(src_dir, dest_dir) else: shutil.copy(src_dir, dest_dir) logging.info('Copy ova from %s to %s', src_dir, dest_dir) if output_format: v2v_params.update({'of_format': output_format}) # Create libvirt dir pool if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') # Build rhev related options if output_mode == 'rhev': # Create SASL user on the ovirt host user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) if output_mode == 'local': v2v_params['os_directory'] = data_dir.get_tmp_dir() if checkpoint == 'ova_relative_path': logging.debug('Current dir: %s', os.getcwd()) ova_dir = params.get('ova_dir') logging.info('Change to dir: %s', ova_dir) os.chdir(ova_dir) # Set libguestfs environment variable os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint == 'permission': os.environ['LIBGUESTFS_BACKEND'] = '' process.run('echo $LIBGUESTFS_BACKEND', shell=True) v2v_result = utils_v2v.v2v_cmd(v2v_params) if 'new_name' in v2v_params: vm_name = params['main_vm'] = v2v_params['new_name'] check_result(v2v_result, status_error) finally: # Cleanup constant files utils_v2v.cleanup_constant_files(params) if input_mode == 'ova' and os.path.exists(dest_dir): shutil.rmtree(dest_dir) if params.get('vmchecker'): params['vmchecker'].cleanup() if output_mode == 'rhev' and v2v_sasl: v2v_sasl.cleanup() v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') if checkpoint == 'regular_user_sudo' and os.path.exists( regular_sudo_config): os.remove(regular_sudo_config) if unprivileged_user: process.system("userdel -fr %s" % unprivileged_user) if input_mode == 'vmx' and input_transport == 'ssh': process.run("killall ssh-agent")
def setUp(self):
    """
    Set up.
    """
    self.iface = self.params.get("interface", default="")
    self.count = self.params.get("count", default="500")
    self.nping_count = self.params.get("nping_count", default="")
    self.peer_ip = self.params.get("peer_ip", default="")
    self.drop = self.params.get("drop_accepted", default="10")
    self.host_ip = self.params.get("host_ip", default="")
    self.option = self.params.get("option", default='')
    # Check if interface exists in the system
    interfaces = netifaces.interfaces()
    if self.iface not in interfaces:
        self.cancel("%s interface is not available" % self.iface)
    if not self.peer_ip:
        self.cancel("peer ip should be specified in input")
    self.ipaddr = self.params.get("host_ip", default="")
    self.netmask = self.params.get("netmask", default="")
    localhost = LocalHost()
    self.networkinterface = NetworkInterface(self.iface, localhost)
    try:
        self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
        self.networkinterface.save(self.ipaddr, self.netmask)
    except Exception:
        self.networkinterface.save(self.ipaddr, self.netmask)
    self.networkinterface.bring_up()
    if not wait.wait_for(self.networkinterface.is_link_up, timeout=120):
        self.cancel(
            "Link up of interface is taking longer than 120 seconds")
    self.peer_user = self.params.get("peer_user", default="root")
    self.peer_password = self.params.get("peer_password", '*',
                                         default="None")
    self.mtu = self.params.get("mtu", default=1500)
    remotehost = RemoteHost(self.peer_ip, self.peer_user,
                            password=self.peer_password)
    self.peer_interface = remotehost.get_interface_by_ipaddr(
        self.peer_ip).name
    self.peer_networkinterface = NetworkInterface(self.peer_interface,
                                                  remotehost)
    if self.peer_networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in peer")
    if self.networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in host")
    # Install needed packages
    smm = SoftwareManager()
    detected_distro = distro.detect()
    pkgs = ['tcpdump', 'flex', 'bison', 'gcc', 'gcc-c++', 'nmap']
    for pkg in pkgs:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("%s package can not be installed" % pkg)
    if detected_distro.name == "SuSE":
        self.nmap = os.path.join(self.teststmpdir, 'nmap')
        nmap_download = self.params.get("nmap_download", default="https:"
                                        "//nmap.org/dist/"
                                        "nmap-7.80.tar.bz2")
        tarball = self.fetch_asset(nmap_download)
        self.version = os.path.basename(tarball.split('.tar')[0])
        self.n_map = os.path.join(self.nmap, self.version)
        archive.extract(tarball, self.nmap)
        os.chdir(self.n_map)
        process.system('./configure ppc64le', shell=True)
        build.make(self.n_map)
        process.system('./nping/nping -h', shell=True)
def clear_qos_setting(iface):
    error_context.context("Clear qos setting for ovs port '%s'" % iface,
                          logging.info)
    clear_cmd = "ovs-vsctl clear Port %s qos" % iface
    process.system(clear_cmd)
    logging.info("Clear ovs command: %s", clear_cmd)
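# Hedged usage sketch: 'tap0' is an illustrative OVS port name, not a value
# taken from the original test.
clear_qos_setting("tap0")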
def run(test, params, env): """ Test various options of virt-v2v. """ if utils_v2v.V2V_EXEC is None: raise ValueError('Missing command: virt-v2v') for v in list(params.values()): if "V2V_EXAMPLE" in v: test.cancel("Please set real value for %s" % v) version_requried = params.get("version_requried") vm_name = params.get("main_vm", "EXAMPLE") new_vm_name = params.get("new_vm_name") input_mode = params.get("input_mode") input_file = params.get("input_file") v2v_options = params.get("v2v_options", "") hypervisor = params.get("hypervisor", "kvm") remote_host = params.get("remote_host", "EXAMPLE") vpx_dc = params.get("vpx_dc", "EXAMPLE") esx_ip = params.get("esx_ip", "EXAMPLE") source_user = params.get("username", "root") output_mode = params.get("output_mode") output_storage = params.get("output_storage", "default") disk_img = params.get("input_disk_image", "") nfs_storage = params.get("storage") no_root = 'yes' == params.get('no_root', 'no') mnt_point = params.get("mnt_point") export_domain_uuid = params.get("export_domain_uuid", "") fake_domain_uuid = params.get("fake_domain_uuid") vdsm_image_uuid = params.get("vdsm_image_uuid") vdsm_vol_uuid = params.get("vdsm_vol_uuid") vdsm_vm_uuid = params.get("vdsm_vm_uuid") vdsm_ovf_output = params.get("vdsm_ovf_output") v2v_user = params.get("unprivileged_user", "") v2v_timeout = int(params.get("v2v_timeout", 1200)) status_error = "yes" == params.get("status_error", "no") su_cmd = "su - %s -c " % v2v_user output_uri = params.get("oc_uri", "") pool_name = params.get("pool_name", "v2v_test") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "v2v_pool") emulated_img = params.get("emulated_image_path", "v2v-emulated-img") pvt = utlv.PoolVolumeTest(test, params) new_v2v_user = False address_cache = env.get('address_cache') params['vmcheck_flag'] = False checkpoint = params.get('checkpoint', '') error_flag = 'strict' estimate_file = '' def create_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Create libvirt pool as the output storage """ if output_uri == "qemu:///session" or user_pool: target_path = os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'mkdir -p %s'" % target_path process.system(cmd, verbose=True) # Sometimes pool_creat_as returns sucess, but the pool can # not be found in user session. virsh.pool_create_as(pool_name, 'dir', target_path, unprivileged_user=v2v_user, debug=True) res = virsh.pool_info(pool_name, unprivileged_user=v2v_user, debug=True) if res.exit_status != 0: return False else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img) return True def cleanup_pool(user_pool=False, pool_name=pool_name, pool_target=pool_target): """ Clean up libvirt pool """ if output_uri == "qemu:///session" or user_pool: virsh.pool_destroy(pool_name, unprivileged_user=v2v_user, debug=True) target_path = os.path.join("/home", v2v_user, pool_target) cmd = su_cmd + "'rm -rf %s'" % target_path process.system(cmd, verbose=True) else: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) def get_all_uuids(output): """ Get export domain uuid, image uuid and vol uuid from command output. """ tmp_target = re.findall(r"qemu-img\s'convert'\s.+\s'(\S+)'\n", output) if len(tmp_target) < 1: test.error("Fail to find tmp target file name when converting vm" " disk image") targets = tmp_target[0].split('/') return (targets[3], targets[5], targets[6]) def get_ovf_content(output): """ Find and read ovf file. 
""" export_domain_uuid, _, vol_uuid = get_all_uuids(output) export_vm_dir = os.path.join(mnt_point, export_domain_uuid, 'master/vms') ovf_content = "" if os.path.isdir(export_vm_dir): ovf_id = "ovf:id='%s'" % vol_uuid ret = to_text( process.system_output("grep -R \"%s\" %s" % (ovf_id, export_vm_dir))) ovf_file = ret.split(":")[0] if os.path.isfile(ovf_file): ovf_f = open(ovf_file, "r") ovf_content = ovf_f.read() ovf_f.close() else: logging.error("Can't find ovf file to read") return ovf_content def get_img_path(output): """ Get the full path of the converted image. """ img_name = vm_name + "-sda" if output_mode == "libvirt": img_path = virsh.vol_path(img_name, output_storage).stdout.strip() elif output_mode == "local": img_path = os.path.join(output_storage, img_name) elif output_mode in ["rhev", "vdsm"]: export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output) img_path = os.path.join(mnt_point, export_domain_uuid, 'images', image_uuid, vol_uuid) return img_path def check_vmtype(ovf, expected_vmtype): """ Verify vmtype in ovf file. """ if output_mode != "rhev": return if expected_vmtype == "server": vmtype_int = 1 elif expected_vmtype == "desktop": vmtype_int = 0 else: return if "<VmType>%s</VmType>" % vmtype_int in ovf: logging.info("Find VmType=%s in ovf file", expected_vmtype) else: test.fail("VmType check failed") def check_image(img_path, check_point, expected_value): """ Verify image file allocation mode and format """ if not img_path or not os.path.isfile(img_path): test.error("Image path: '%s' is invalid" % img_path) img_info = utils_misc.get_image_info(img_path) logging.debug("Image info: %s", img_info) if check_point == "allocation": if expected_value == "sparse": if img_info['vsize'] > img_info['dsize']: logging.info("%s is a sparse image", img_path) else: test.fail("%s is not a sparse image" % img_path) elif expected_value == "preallocated": if img_info['vsize'] <= img_info['dsize']: logging.info("%s is a preallocated image", img_path) else: test.fail("%s is not a preallocated image" % img_path) if check_point == "format": if expected_value == img_info['format']: logging.info("%s format is %s", img_path, expected_value) else: test.fail("%s format is not %s" % (img_path, expected_value)) def check_new_name(output, expected_name): """ Verify guest name changed to the new name. 
""" found = False if output_mode == "libvirt": found = virsh.domain_exists(expected_name) if output_mode == "local": found = os.path.isfile( os.path.join(output_storage, expected_name + "-sda")) if output_mode in ["rhev", "vdsm"]: ovf = get_ovf_content(output) found = "<Name>%s</Name>" % expected_name in ovf else: return if found: logging.info("Guest name renamed when converting it") else: test.fail("Rename guest failed") def check_nocopy(output): """ Verify no image created if convert command use --no-copy option """ img_path = get_img_path(output) if not os.path.isfile(img_path): logging.info("No image created with --no-copy option") else: test.fail("Find %s" % img_path) def check_connection(output, expected_uri): """ Check output connection uri used when converting guest """ init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri if init_msg in output: logging.info("Find message: %s", init_msg) else: test.fail("Not find message: %s" % init_msg) def check_ovf_snapshot_id(ovf_content): """ Check if snapshot id in ovf file consists of '0's """ search = re.search("ovf:vm_snapshot_id='(.*?)'", ovf_content) if search: snapshot_id = search.group(1) logging.debug('vm_snapshot_id = %s', snapshot_id) if snapshot_id.count('0') >= 32: test.fail('vm_snapshot_id consists with "0"') else: test.fail('Fail to find snapshot_id') def check_source(output): """ Check if --print-source option print the correct info """ # Parse source info source = output.split('\n')[2:] for i in range(len(source)): if source[i].startswith('\t'): source[i - 1] += source[i] source[i] = '' source_strip = [x.strip() for x in source if x.strip()] source_info = {} for line in source_strip: source_info[line.split(':')[0]] = line.split(':', 1)[1].strip() logging.debug('Source info to check: %s', source_info) checklist = [ 'nr vCPUs', 'hypervisor type', 'source name', 'memory', 'disks', 'NICs' ] if hypervisor in ['kvm', 'xen']: checklist.extend(['display', 'CPU features']) for key in checklist: if key not in source_info: test.fail('%s info missing' % key) v2v_virsh = None close_virsh = False if hypervisor == 'kvm': v2v_virsh = virsh else: virsh_dargs = { 'uri': ic_uri, 'remote_ip': remote_host, 'remote_user': source_user, 'remote_pwd': source_pwd, 'auto_close': True, 'debug': True } v2v_virsh = virsh.VirshPersistent(**virsh_dargs) logging.debug('a new virsh session %s was created', v2v_virsh) close_virsh = True # Check single values fail = [] try: xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=v2v_virsh) finally: if close_virsh: logging.debug('virsh session %s is closing', v2v_virsh) v2v_virsh.close_session() check_map = {} check_map['nr vCPUs'] = xml.vcpu check_map['hypervisor type'] = xml.hypervisor_type check_map['source name'] = xml.vm_name check_map['memory'] = str(int(xml.max_mem) * 1024) + ' (bytes)' if hypervisor in ['kvm', 'xen']: check_map['display'] = xml.get_graphics_devices()[0].type_name logging.info('KEY:\tSOURCE<-> XML') for key in check_map: logging.info('%-15s:%18s <-> %s', key, source_info[key], check_map[key]) if str(check_map[key]) not in source_info[key]: fail.append(key) # Check disk info disk = list(xml.get_disk_all().values())[0] def _get_disk_subelement_attr_value(obj, attr, subattr): if obj.find(attr) is not None: return obj.find(attr).get(subattr) bus = _get_disk_subelement_attr_value(disk, 'target', 'bus') driver_type = _get_disk_subelement_attr_value(disk, 'driver', 'type') path = _get_disk_subelement_attr_value(disk, 'source', 'file') # For esx, disk output is like 
"disks: json: { ... } (raw) [scsi]" # For xen, disk output is like "disks: json: { ... } [ide]" # For kvm, disk output is like "/rhel8.0-2.qcow2 (qcow2) [virtio-blk]" if hypervisor == 'kvm': disks_info_pattern = r"%s \(%s\) \[%s" % (path, driver_type, bus) elif hypervisor == 'esx': # replace '.vmdk' with '-flat.vmdk', this is done in v2v path_pattern1 = path.split()[1].replace('.vmdk', '-flat.vmdk') # In newer qemu version, '_' is replaced with '%5f'. path_pattern2 = path_pattern1.replace('_', '%5f') # nbd:unix:/tmp/v2vnbdkit.u44G6C/nbdkit1.sock:exportname=/ (raw) # [scsi] path_pattern_nbd = r'nbd:unix:/.*? \(raw\) \[%s\]' % bus # For esx, '(raw)' is fixed? Let's see if others will be met. disks_info_pattern = '|'.join([path_pattern_nbd] + [ r"https://%s/folder/%s\?dcPath=data&dsName=esx.*} \(raw\) \[%s" % (remote_host, i, bus) for i in [path_pattern1, path_pattern2] ]) elif hypervisor == 'xen': disks_info_pattern = "file\.path.*%s.*file\.host.*%s.* \[%s" % ( path, remote_host, bus) source_disks = source_info['disks'].split() logging.info('disks:%s<->%s', source_info['disks'], disks_info_pattern) if not re.search(disks_info_pattern, source_info['disks']): fail.append('disks') # Check nic info nic = list(xml.get_iface_all().values())[0] type = nic.get('type') mac = nic.find('mac').get('address') nic_source = nic.find('source') name = nic_source.get(type) nic_info = '%s "%s" mac: %s' % (type, name, mac) logging.info('NICs:%s<->%s', source_info['NICs'], nic_info) if nic_info.lower() not in source_info['NICs'].lower(): fail.append('NICs') # Check cpu features if hypervisor in ['kvm', 'xen']: feature_list = xml.features.get_feature_list() logging.info('CPU features:%s<->%s', source_info['CPU features'], feature_list) if sorted(source_info['CPU features'].split(',')) != sorted( feature_list): fail.append('CPU features') if fail: test.fail('Source info not correct for: %s' % fail) def check_man_page(in_man, not_in_man): """ Check if content of man page or help info meets expectation """ man_page = process.run('man virt-v2v', verbose=False).stdout_text.strip() if in_man: logging.info('Checking man page of virt-v2v for "%s"', in_man) if in_man not in man_page: test.fail('"%s" not in man page' % in_man) if not_in_man: logging.info('Checking man page of virt-v2v for "%s"', not_in_man) if not_in_man in man_page: test.fail('"%s" not removed from man page' % not_in_man) def check_print_estimate(estimate_file): """ Check disk size and total size in file of estimate created by v2v """ import json content = None buf = '' with open(estimate_file) as fp: all_content = fp.read() fp.seek(0) for line in fp: buf += line if '}' not in line: continue if 'disks' in buf and 'total' in buf: content = json.loads(buf) break buf = '' logging.debug('json file content:\n%s' % all_content) if not content or sum(content['disks']) != content['total']: test.fail("The disks' size doesn't same as total value") def check_result(cmd, result, status_error): """ Check virt-v2v command result """ utils_v2v.check_exit_status(result, status_error, error_flag) output = to_text(result.stdout + result.stderr, errors=error_flag) output_stdout = to_text(result.stdout, errors=error_flag) if status_error: if checkpoint == 'length_of_error': log_lines = output.split('\n') v2v_start = False for line in log_lines: if line.startswith('virt-v2v:'): v2v_start = True if line.startswith('libvirt:'): v2v_start = False # 76 is the max length in v2v if v2v_start and len(line) > 76: test.fail('Error log longer than 76 charactors: %s' % line) if 
checkpoint == 'disk_not_exist': vol_list = virsh.vol_list(pool_name) logging.info(vol_list) if vm_name in vol_list.stdout: test.fail('Disk exists for vm %s' % vm_name) else: if output_mode == "rhev" and checkpoint != 'quiet': ovf = get_ovf_content(output) logging.debug("ovf content: %s", ovf) check_ovf_snapshot_id(ovf) if '--vmtype' in cmd: expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0] check_vmtype(ovf, expected_vmtype) if '-oa' in cmd and '--no-copy' not in cmd: expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0] img_path = get_img_path(output) def check_alloc(): try: check_image(img_path, "allocation", expected_mode) return True except exceptions.TestFail: pass if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0): test.fail('Allocation check failed.') if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root: expected_format = re.findall(r"-of\s(\w+)", cmd)[0] img_path = get_img_path(output) check_image(img_path, "format", expected_format) if '-on' in cmd: expected_name = re.findall(r"-on\s(\w+)", cmd)[0] check_new_name(output, expected_name) if '--no-copy' in cmd: check_nocopy(output) if '-oc' in cmd: expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0] check_connection(output, expected_uri) if output_mode == "rhev": if not utils_v2v.import_vm_to_ovirt(params, address_cache): test.fail("Import VM failed") else: vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker params['vmcheck_flag'] = True if output_mode == "libvirt": if "qemu:///session" not in v2v_options and not no_root: virsh.start(vm_name, debug=True, ignore_status=False) if checkpoint in ['vmx', 'vmx_ssh']: vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker params['vmcheck_flag'] = True ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") if checkpoint == 'quiet': if len(output.strip().splitlines()) > 10: test.fail('Output is not empty in quiet mode') if checkpoint == 'dependency': if 'libguestfs-winsupport' not in output: test.fail('libguestfs-winsupport not in dependency') if all(pkg_pattern not in output for pkg_pattern in ['VMF', 'edk2-ovmf']): test.fail('OVMF/AAVMF not in dependency') if 'qemu-kvm-rhev' in output: test.fail('qemu-kvm-rhev is in dependency') if 'libX11' in output: test.fail('libX11 is in dependency') if 'kernel-rt' in output: test.fail('kernel-rt is in dependency') win_img = params.get('win_image') command = 'guestfish -a %s -i' if process.run(command % win_img, ignore_status=True).exit_status == 0: test.fail('Command "%s" success' % command % win_img) if checkpoint == 'no_dcpath': if '--dcpath' in output: test.fail('"--dcpath" is not removed') if checkpoint == 'debug_overlays': search = re.search('Overlay saved as(.*)', output) if not search: test.fail('Not find log of saving overlays') overlay_path = search.group(1).strip() logging.debug('Overlay file location: %s' % overlay_path) if os.path.isfile(overlay_path): logging.info('Found overlay file: %s' % overlay_path) else: test.fail('Overlay file not saved') if checkpoint.startswith('empty_nic_source'): target_str = '%s "eth0" mac: %s' % (params[checkpoint][0], params[checkpoint][1]) logging.info('Expect log: %s', target_str) if target_str not in output_stdout.lower(): test.fail('Expect log not found: %s' % target_str) if checkpoint == 'print_source': check_source(output_stdout) if checkpoint == 'machine_readable': if os.path.exists(params.get('example_file', '')): # Checking items in example_file exist in 
latest # output regardless of the orders and new items. with open(params['example_file']) as f: for line in f: if line.strip() not in output_stdout.strip(): test.fail( '%s not in --machine-readable output' % line.strip()) else: test.error('No content to compare with') if checkpoint == 'compress': img_path = get_img_path(output) logging.info('Image path: %s', img_path) qemu_img_cmd = 'qemu-img check %s' % img_path qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support( ) if qemu_img_locking_feature_support: qemu_img_cmd = 'qemu-img check %s -U' % img_path disk_check = process.run(qemu_img_cmd).stdout_text logging.info(disk_check) compress_info = disk_check.split(',')[-1].split('%')[0].strip() compress_rate = float(compress_info) logging.info('%s%% compressed', compress_rate) if compress_rate < 0.1: test.fail('Disk image NOT compressed') if checkpoint == 'tail_log': messages = params['tail'].get_output() logging.info('Content of /var/log/messages during conversion:') logging.info(messages) msg_content = params['msg_content'] if msg_content in messages: test.fail('Found "%s" in /var/log/messages' % msg_content) if checkpoint == 'print_estimate_tofile': check_print_estimate(estimate_file) log_check = utils_v2v.check_log(params, output) if log_check: test.fail(log_check) check_man_page(params.get('in_man'), params.get('not_in_man')) backup_xml = None vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "") try: if version_requried and not utils_v2v.multiple_versions_compare( version_requried): test.cancel("Testing requries version: %s" % version_requried) if hypervisor == "xen": # See man virt-v2v-input-xen(1) process.run('update-crypto-policies --set LEGACY', verbose=True, ignore_status=True, shell=True) if checkpoint.startswith('empty_nic_source'): xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface = xml.get_devices('interface')[0] disks = xml.get_devices('disk') del iface.source iface.type_name = checkpoint.split('_')[-1] iface.source = {iface.type_name: ''} params[checkpoint] = [iface.type_name, iface.mac_address] logging.debug(iface.source) devices = vm_xml.VMXMLDevices() devices.extend(disks) devices.append(iface) xml.set_devices(devices) logging.info(xml.xmltreefile) params['input_xml'] = xml.xmltreefile.name # Build input options input_option = "" if input_mode is None: pass elif input_mode == "libvirt": uri_obj = utils_v2v.Uri(hypervisor) ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip) # Remote libvirt connection is not offically supported by # v2v and may fail. Just use localhost to simulate a remote # connection to test the warnings. 
if checkpoint == 'remote_libvirt_conn': ic_uri = 'qemu+ssh://localhost/system' input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name) if checkpoint == 'with_ic': ic_uri = 'qemu:///session' input_option = "-i ova %s -ic %s -of qcow2" % (input_file, ic_uri) if checkpoint == 'without_ic': input_option = "-i ova %s -of raw" % input_file # Build network&bridge option to avoid network error v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) elif input_mode == "disk": input_option += "-i %s %s" % (input_mode, disk_img) elif input_mode == 'libvirtxml': input_xml = params.get('input_xml') input_option += '-i %s %s' % (input_mode, input_xml) elif input_mode in ['ova']: test.cancel("Unsupported input mode: %s" % input_mode) else: test.error("Unknown input mode %s" % input_mode) input_format = params.get("input_format", "") input_allo_mode = params.get("input_allo_mode") if input_format: input_option += " -if %s" % input_format if not status_error: logging.info("Check image before convert") check_image(disk_img, "format", input_format) if input_allo_mode: check_image(disk_img, "allocation", input_allo_mode) # Build output options output_option = "" if output_mode: output_option = "-o %s" % output_mode if output_mode != 'null': output_option += " -os %s" % output_storage if checkpoint == 'rhv': output_option = output_option.replace('rhev', 'rhv') if checkpoint in ['with_ic', 'without_ic']: output_option = output_option.replace('v2v_dir', 'src_pool') output_format = params.get("output_format") if output_format and output_format != input_format: output_option += " -of %s" % output_format output_allo_mode = params.get("output_allo_mode") if output_allo_mode: output_option += " -oa %s" % output_allo_mode # Build vdsm related options if output_mode in ['vdsm', 'rhev']: if not os.path.isdir(mnt_point): os.mkdir(mnt_point) if not utils_misc.mount(nfs_storage, mnt_point, "nfs"): test.error("Mount NFS Failed") if output_mode == 'vdsm': v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid) vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid, "images", vdsm_image_uuid) vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid, "master/vms", vdsm_vm_uuid) # For vdsm_domain_dir, just create a dir to test BZ#1176591 os.makedirs(vdsm_domain_dir) os.makedirs(vdsm_image_dir) os.makedirs(vdsm_vm_dir) if output_mode == 'rhev': # create different sasl_user name for different job params.update({ 'sasl_user': params.get("sasl_user") + utils_misc.generate_random_string(3) }) logging.info('sals user name is %s' % params.get("sasl_user")) user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"), params.get("sasl_pwd")) v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd) v2v_sasl.server_ip = params.get("remote_ip") v2v_sasl.server_user = params.get('remote_user') v2v_sasl.server_pwd = params.get('remote_pwd') v2v_sasl.setup(remote=True) logging.debug('A SASL session %s was created', v2v_sasl) # Output more messages except quiet mode if checkpoint == 'quiet': v2v_options += ' -q' elif checkpoint not in [ 'length_of_error', 'empty_nic_source_network', 'empty_nic_source_bridge', 'machine_readable' ]: v2v_options += " -v -x" # Prepare for libvirt unprivileged user session connection if "qemu:///session" in v2v_options or no_root: try: pwd.getpwnam(v2v_user) 
except KeyError: # create new user process.system("useradd %s" % v2v_user, ignore_status=True) new_v2v_user = True user_info = pwd.getpwnam(v2v_user) logging.info("Convert to qemu:///session by user '%s'", v2v_user) if input_mode == "disk": # Copy image from souce and change the image owner and group disk_path = os.path.join(data_dir.get_tmp_dir(), os.path.basename(disk_img)) logging.info('Copy image file %s to %s', disk_img, disk_path) shutil.copyfile(disk_img, disk_path) input_option = input_option.replace(disk_img, disk_path) os.chown(disk_path, user_info.pw_uid, user_info.pw_gid) elif not no_root: test.cancel("Only support convert local disk") # Setup ssh-agent access to xen hypervisor if hypervisor == 'xen': user = params.get("xen_host_user", "root") source_pwd = passwd = params.get("xen_host_passwd", "redhat") logging.info("set up ssh-agent access ") xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key( remote_host, user, passwd, auto_close=False) utils_misc.add_identities_into_ssh_agent() # Check if xen guest exists uri = utils_v2v.Uri(hypervisor).get_uri(remote_host) if not virsh.domain_exists(vm_name, uri=uri): logging.error('VM %s not exists', vm_name) # If the input format is not define, we need to either define # the original format in the source metadata(xml) or use '-of' # to force the output format, see BZ#1141723 for detail. if '-of' not in v2v_options and checkpoint != 'xen_no_output_format': v2v_options += ' -of %s' % params.get("default_output_format", "qcow2") # Create password file for access to ESX hypervisor if hypervisor == 'esx': source_pwd = vpx_passwd = params.get("vpx_password") vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "vpx_passwd") logging.info("Building ESX no password interactive verification.") pwd_f = open(vpx_passwd_file, 'w') pwd_f.write(vpx_passwd) pwd_f.close() output_option += " -ip %s" % vpx_passwd_file # rhel8 slow stream doesn't support option 'ip' temporarily # so use option 'password-file' instead. if not utils_v2v.v2v_supported_option("-ip <filename>"): output_option = output_option.replace('-ip', '--password-file', 1) # if don't specify any output option for virt-v2v, 'default' pool # will be used. if output_mode is None: # Cleanup first to avoid failure if 'default' pool exists. 
pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img) # Create libvirt dir pool if output_mode == "libvirt": utils_misc.wait_for(create_pool, timeout=30, step=3) # Work around till bug fixed os.environ['LIBGUESTFS_BACKEND'] = 'direct' if checkpoint in ['with_ic', 'without_ic']: new_v2v_user = True v2v_options += ' -on %s' % new_vm_name utils_misc.wait_for( lambda: create_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool'), timeout=30, step=3) if checkpoint == 'vmx': mount_point = params.get('mount_point') if not os.path.isdir(mount_point): os.mkdir(mount_point) nfs_vmx = params.get('nfs_vmx') if not utils_misc.mount(nfs_vmx, mount_point, 'nfs', verbose=True): test.error('Mount nfs for vmx failed') vmx = params.get('vmx') input_option = '-i vmx %s' % vmx v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) if checkpoint == 'vmx_ssh': esx_user = params.get("esx_host_user", "root") esx_pwd = params.get("esx_host_passwd") vmx = params.get('vmx') esx_pubkey, esx_session = utils_v2v.v2v_setup_ssh_key( esx_ip, esx_user, esx_pwd, server_type='esx', auto_close=False) utils_misc.add_identities_into_ssh_agent() input_option = '-i vmx -it ssh %s' % vmx v2v_options += " -b %s -n %s" % (params.get("output_bridge"), params.get("output_network")) if checkpoint == 'simulate_nfs': simulate_images = params.get("simu_images_path") simulate_vms = params.get("simu_vms_path") simulate_dom_md = params.get("simu_dom_md_path") os.makedirs(simulate_images) os.makedirs(simulate_vms) process.run('touch %s' % simulate_dom_md) process.run('chmod -R 777 /tmp/rhv/') if checkpoint == 'print_estimate_tofile': estimate_file = utils_misc.generate_tmp_file_name( 'v2v_print_estimate') v2v_options += " --machine-readable=file:%s" % estimate_file if checkpoint == 'remote_libvirt_conn': # Add localhost to known_hosts cmd = 'ssh-keyscan -t ecdsa localhost >> ~/.ssh/known_hosts' process.run(cmd, shell=True) # Setup remote login without password public_key = ssh_key.get_public_key().rstrip() cmd = 'echo "%s" >> ~/.ssh/authorized_keys' % public_key process.run(cmd, shell=True) # Running virt-v2v command cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option, v2v_options) if v2v_user: cmd_export_env = 'export LIBGUESTFS_BACKEND=direct' cmd = "%s '%s;%s'" % (su_cmd, cmd_export_env, cmd) if params.get('cmd_free') == 'yes': cmd = params.get('check_command') # only set error to 'ignore' to avoid exception for RHEL7-84978 if "guestfish" in cmd: error_flag = "replace" # Set timeout to kill v2v process before conversion succeed if checkpoint == 'disk_not_exist': v2v_timeout = 30 # Get tail content of /var/log/messages if checkpoint == 'tail_log': params['tail_log'] = os.path.join(data_dir.get_tmp_dir(), 'tail_log') params['tail'] = aexpect.Tail(command='tail -f /var/log/messages', output_func=utils_misc.log_line, output_params=(params['tail_log'], )) cmd_result = process.run(cmd, timeout=v2v_timeout, verbose=True, ignore_status=True) if new_vm_name: vm_name = new_vm_name params['main_vm'] = new_vm_name check_result(cmd, cmd_result, status_error) finally: if hypervisor == "esx": process.run("rm -rf %s" % vpx_passwd_file) for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]: if os.path.exists(vdsm_dir): shutil.rmtree(vdsm_dir) if os.path.exists(mnt_point): utils_misc.umount(nfs_storage, mnt_point, "nfs") os.rmdir(mnt_point) if output_mode == "local": image_name = vm_name + "-sda" img_file 
= os.path.join(output_storage, image_name) xml_file = img_file + ".xml" for local_file in [img_file, xml_file]: if os.path.exists(local_file): os.remove(local_file) if output_mode == "libvirt": if "qemu:///session" in v2v_options or no_root: cmd = su_cmd + "'virsh undefine %s'" % vm_name try: process.system(cmd) except Exception: logging.error('Undefine "%s" failed', vm_name) if no_root: cleanup_pool(user_pool=True, pool_name='src_pool', pool_target='v2v_src_pool') else: virsh.remove_domain(vm_name) cleanup_pool() if output_mode is None: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img) vmcheck_flag = params.get("vmcheck_flag") if vmcheck_flag and params.get('vmchecker'): params['vmchecker'].cleanup() if new_v2v_user: process.system("userdel -fr %s" % v2v_user) if backup_xml: backup_xml.sync() if output_mode == 'rhev' and v2v_sasl: v2v_sasl.cleanup() logging.debug('SASL session %s is closing', v2v_sasl) v2v_sasl.close_session() if checkpoint == 'vmx': utils_misc.umount(params['nfs_vmx'], params['mount_point'], 'nfs') os.rmdir(params['mount_point']) if checkpoint == 'vmx_ssh': utils_v2v.v2v_setup_ssh_key_cleanup(esx_session, esx_pubkey, 'esx') process.run("ssh-agent -k") if checkpoint == 'simulate_nfs': process.run('rm -rf /tmp/rhv/') if os.path.exists(estimate_file): os.remove(estimate_file) if hypervisor == "xen": # Restore crypto-policies to DEFAULT, the setting is impossible to be # other values by default in testing envrionment. process.run('update-crypto-policies --set DEFAULT', verbose=True, ignore_status=True, shell=True) utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey) process.run("ssh-agent -k") if checkpoint == 'remote_libvirt_conn': cmd = r"sed -i '/localhost/d' ~/.ssh/known_hosts" process.run(cmd, shell=True, ignore_status=True) if locals().get('public_key'): key = public_key.rstrip().split()[1].split('/')[0] cmd = r"sed -i '/%s/d' ~/.ssh/authorized_keys" % key process.run(cmd, shell=True, ignore_status=True)
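# Hedged standalone sketch (not part of the test above): how the '-of'/'-oa'
# style checks can be reproduced with only the standard library and
# 'qemu-img info'. The image path and helper names are placeholders.
import json
import re
import subprocess


def requested_output_format(v2v_cmd):
    """Return the format requested via '-of', or None if absent."""
    match = re.search(r"-of\s+(\w+)", v2v_cmd)
    return match.group(1) if match else None


def actual_image_format(img_path):
    """Query the converted image with 'qemu-img info --output=json'."""
    info = subprocess.check_output(
        ["qemu-img", "info", "--output=json", img_path])
    return json.loads(info)["format"]


# Example: assert that a converted guest image really is qcow2.
# cmd = "virt-v2v -i disk guest.img -o local -os /var/tmp -of qcow2"
# assert actual_image_format("/var/tmp/guest-sda") == requested_output_format(cmd)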
def test(self):
    os.chdir(self.srcdir)
    process.system('./run_tests THR')
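# A minimal alternative sketch, assuming the same './run_tests' script exists:
# running it through subprocess with cwd= avoids mutating the test process'
# working directory the way os.chdir() above does.
import subprocess


def run_suite(srcdir, mode="THR"):
    # check=True raises CalledProcessError on a non-zero exit status.
    return subprocess.run(["./run_tests", mode], cwd=srcdir, check=True)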
def trace_kvm_pio():
    """
    trace event kvm_pio
    """
    process.system(trace_record_cmd)
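# Hedged sketch of what a typical trace_record_cmd expands to: recording the
# kvm:kvm_pio tracepoint with trace-cmd in the background and stopping it
# later. Event name and output path are assumptions, not taken from the test.
import signal
import subprocess


def start_kvm_pio_trace(output="/tmp/trace.dat"):
    return subprocess.Popen(
        ["trace-cmd", "record", "-e", "kvm:kvm_pio", "-o", output])


def stop_trace(proc):
    proc.send_signal(signal.SIGINT)  # trace-cmd flushes and finalizes on SIGINT
    proc.wait()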
def commit_test(cmd): """ Subcommand 'qemu-img commit' test. 1) Create a overlay file of the qemu harddisk specified by image_name. 2) Start a VM using the overlay file as its harddisk. 3) Touch a file "commit_testfile" in the overlay file, and shutdown the VM. 4) Commit the change to the backing harddisk by executing "qemu-img commit" command. 5) Start the VM using the backing harddisk. 6) Check if the file "commit_testfile" exists. :param cmd: qemu-img base command. """ logging.info("Commit testing started!") base_image_name = storage.get_image_filename(params, data_dir.get_data_dir()) pre_name = '.'.join(image_name.split('.')[:-1]) base_image_format = params.get("image_format", "qcow2") overlay_file_name = "%s_overlay.qcow2" % pre_name file_create_cmd = params.get("file_create_cmd", "touch /commit_testfile") file_info_cmd = params.get("file_info_cmd", "ls / | grep commit_testfile") file_exist_chk_cmd = params.get("file_exist_chk_cmd", "[ -e /commit_testfile ] && echo $?") file_del_cmd = params.get("file_del_cmd", "rm -f /commit_testfile") try: # Remove the existing overlay file if os.path.isfile(overlay_file_name): remove(overlay_file_name) # Create the new overlay file create_cmd = "%s create -b %s -F %s -f qcow2 %s" % ( cmd, base_image_name, base_image_format, overlay_file_name) msg = "Create overlay file by command: %s" % create_cmd error_context.context(msg, logging.info) try: process.system(create_cmd, verbose=False) except process.CmdError: test.fail("Could not create a overlay file!") logging.info("overlay file (%s) created!", overlay_file_name) # Set the qemu harddisk to the overlay file logging.info("Original image_name is: %s", params.get('image_name')) params['image_name'] = '.'.join(overlay_file_name.split('.')[:-1]) logging.info("Param image_name changed to: %s", params.get('image_name')) msg = "Start a new VM, using overlay file as its harddisk" error_context.context(msg, logging.info) vm_name = params['main_vm'] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) # Do some changes to the overlay_file harddisk try: output = session.cmd(file_create_cmd) logging.info("Output of %s: %s", file_create_cmd, output) output = session.cmd(file_info_cmd) logging.info("Output of %s: %s", file_info_cmd, output) except Exception as err: test.fail("Could not create commit_testfile in the " "overlay file %s" % err) vm.destroy() # Execute the commit command cmitcmd = "%s commit -f %s %s" % (cmd, image_format, overlay_file_name) error_context.context("Committing image by command %s" % cmitcmd, logging.info) try: process.system(cmitcmd, verbose=False) except process.CmdError: test.fail("Could not commit the overlay file") logging.info("overlay file (%s) committed!", overlay_file_name) msg = "Start a new VM, using image_name as its harddisk" error_context.context(msg, logging.info) params['image_name'] = pre_name vm_name = params['main_vm'] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) try: output = session.cmd(file_exist_chk_cmd) logging.info("Output of %s: %s", file_exist_chk_cmd, output) session.cmd(file_del_cmd) except Exception: test.fail("Could not find commit_testfile after a commit") vm.destroy() finally: # Remove the overlay file if os.path.isfile(overlay_file_name): remove(overlay_file_name)
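# Standalone sketch of the qemu-img overlay/commit flow exercised by
# commit_test(), using subprocess directly; the image paths are placeholders.
import subprocess


def make_overlay(base, overlay, base_fmt="qcow2"):
    subprocess.run(["qemu-img", "create", "-f", "qcow2",
                    "-b", base, "-F", base_fmt, overlay], check=True)


def commit_overlay(overlay, fmt="qcow2"):
    # Merges changes recorded in the overlay back into its backing file.
    subprocess.run(["qemu-img", "commit", "-f", fmt, overlay], check=True)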
def run(test, params, env): """ 1) Boot up vm with one or more rng devices 2) Read from /dev/random in host (optional) 3) Run random read in guest 4) Switch rng device if there is more than on rng devices 5) Run random read in guest 6) Read from /dev/random in host (optional) 7) Clean random read in host (optional) """ def get_rng_list(vm): """ Get attached rng devices from device dictionary """ rng_list = [] for device in vm.devices: if isinstance(device, qdevices.QDevice): if device.get_param("driver") == "virtio-rng-pci": rng_list.append(device) return rng_list def get_available_rng(session): """ Get available rng devices from /sys/devices """ verify_cmd = params["driver_available_cmd"] try: output = session.cmd_output_safe(verify_cmd) rng_devices = re.findall(r"virtio_rng.\d+", output) except aexpect.ShellTimeoutError: err = "%s timeout, pls check if it's a product bug" % verify_cmd test.fail(err) return rng_devices login_timeout = int(params.get("login_timeout", 360)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) sub_test = params.get("sub_test") if params.get("pre_cmd"): error_context.context("Fetch data from host", logging.info) process.system(params.get("pre_cmd"), shell=True) error_context.context("Read rng device in guest", logging.info) utils_test.run_virt_sub_test(test, params, env, sub_test) if params.get("os_type") == "linux": error_context.context("Query virtio rng device in guest", logging.info) rng_devices = get_available_rng(session) rng_attached = get_rng_list(vm) if len(rng_devices) != len(rng_attached): test.fail("The devices get from rng_arriable" " don't match the rng devices attached") if len(rng_devices) > 1: for rng_device in rng_devices: error_context.context( "Change virtio rng device to %s" % rng_device, logging.info) session.cmd_status(params.get("switch_rng_cmd") % rng_device) error_context.context("Read from %s in guest" % rng_device, logging.info) utils_test.run_virt_sub_test(test, params, env, sub_test) if params.get("post_cmd"): end_time = time.time() + 20 while time.time() < end_time: s = process.system(params.get("post_cmd"), ignore_status=params.get( "ignore_status", "False"), shell=True) if s == 0: break
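# Sketch of the guest-side checks the rng test drives over the session: list
# the rng backends the kernel detected and read a few random bytes. These are
# the standard Linux hw_random sysfs/device nodes; run inside the guest.
def available_rng_backends():
    with open("/sys/class/misc/hw_random/rng_available") as f:
        return f.read().split()


def read_hwrng(nbytes=16):
    with open("/dev/hwrng", "rb") as f:
        return f.read(nbytes)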
def setUp(self): # Check for basic utilities smm = SoftwareManager() detected_distro = distro.detect() deps = ['gcc', 'make', 'patch'] if detected_distro.name == "Ubuntu": deps += ['libpthread-stubs0-dev', 'git'] elif detected_distro.name == "SuSE": deps += ['glibc-devel-static', 'git-core'] else: deps += ['glibc-static', 'git'] for package in deps: if not smm.check_installed(package) and not smm.install(package): self.cancel(' %s is needed for the test to be run' % package) kernel.check_version("2.6.16") if detected_distro.name == "Ubuntu": out = glob.glob("/usr/lib/*/libpthread.a") else: out = glob.glob("/usr/lib*/libpthread.a") if not out: self.cancel("libpthread.a is required!!!" "\nTry installing glibc-static") page_sizes = memory.get_supported_huge_pages_size() self.page_sizes = [str(each // 1024) for each in page_sizes] # Get arguments: pages_requested = self.params.get('pages_requested', default=20) # Check hugepages: pages_available = 0 if os.path.exists('/proc/sys/vm/nr_hugepages'): hugepages_support = genio.read_file("/proc/meminfo").rstrip("\n") if 'HugePages_' not in hugepages_support: self.cancel("No Hugepages Configured") else: self.cancel("Kernel does not support hugepages") self.configured_page_sizes = [] self.hugetlbfs_dir = {} for hp_size in self.page_sizes: try: genio.write_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(int(hp_size) * 1024), str(pages_requested)) except OSError: if (int(hp_size) * 1024) == 16777216: self.log.warn('Running 16GB hugepages') else: self.cancel('Writing to hugepage file failed') pages_available = int(genio.read_file( '/sys/kernel/mm/hugepages/huge' 'pages-%skB/nr_hugepages' % str(int(hp_size) * 1024).strip())) if pages_available < pages_requested: self.log.warn('%d pages available, < %d pages ' 'requested', pages_available, pages_requested) if pages_available: self.hugetlbfs_dir.update( {hp_size: tempfile.mkdtemp(dir=self.teststmpdir, prefix='avocado_' + __name__)}) if process.system('mount -t hugetlbfs -o pagesize=%sM none %s' % (hp_size, self.hugetlbfs_dir[hp_size]), sudo=True, ignore_status=True): self.cancel("hugetlbfs mount failed") self.configured_page_sizes.append(hp_size) if not self.configured_page_sizes: self.cancel("No hugepage size configured") git.get_repo('https://github.com/libhugetlbfs/libhugetlbfs.git', destination_dir=self.workdir) os.chdir(self.workdir) patch = self.params.get('patch', default='elflink.patch') process.run('patch -p1 < %s' % self.get_data(patch), shell=True) build.make(self.workdir, extra_args='BUILDTYPE=NATIVEONLY')
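# Minimal sketch of the hugepage preparation done in setUp(): request pages of
# one size through sysfs and mount a hugetlbfs instance for libhugetlbfs to
# use. Requires root; the mount point is a placeholder.
import os
import subprocess


def setup_hugepages(size_mb=2, count=20, mnt="/mnt/huge"):
    sysfs = ("/sys/kernel/mm/hugepages/hugepages-%dkB/nr_hugepages"
             % (size_mb * 1024))
    with open(sysfs, "w") as f:
        f.write(str(count))
    os.makedirs(mnt, exist_ok=True)
    subprocess.run(["mount", "-t", "hugetlbfs",
                    "-o", "pagesize=%dM" % size_mb, "none", mnt], check=True)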
def run(test, params, env): """ Verify the "-debugcon" parameter under the UEFI environment: 1) Boot up a guest. If params["ovmf_log"] is not None, append debugcon parameter to qemu command lines. 2) Remove the existing isa-log device. 3) Destroy the guest. 4) Start the trace command on host. 5) Re-create the guest and verify it is alive. 6) Destroy the guest. 7) Check pio_read counts and pio_write counts. 7.1) If disable debugcon: pio_read_counts > 0 pio_write_counts = 0 7.2) If enable debugcon: pio_read_counts > 0 pio_write_counts > 0 :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def check_trace_process(): """ check whether trace process is existing """ if process.system(params["grep_trace_cmd"], ignore_status=True, shell=True): return False else: return True def remove_isa_debugcon(vm): """ remove the existing isa-log device """ for device in vm.devices: if device.type == "isa-log": vm.devices.remove(device) break env.register_vm(vm.name, vm) def trace_kvm_pio(): """ trace event kvm_pio """ process.system(trace_record_cmd) # install trace-cmd in host utils_package.package_install("trace-cmd") if params.get("ovmf_log"): error_context.context( "Append debugcon parameter to " "qemu command lines.", logging.info) ovmf_log = utils_misc.get_path(test.debugdir, params["ovmf_log"]) params["extra_params"] %= ovmf_log params["start_vm"] = "yes" env_process.process(test, params, env, env_process.preprocess_image, env_process.preprocess_vm) trace_output_file = utils_misc.get_path(test.debugdir, params["trace_output"]) trace_record_cmd = params["trace_record_cmd"] % trace_output_file check_pio_read = params["check_pio_read"] % trace_output_file check_pio_write = params["check_pio_write"] % trace_output_file stop_trace_record = params["stop_trace_record"] timeout = int(params.get("timeout", 120)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() error_context.context("Remove the existing isa-log device.", logging.info) remove_isa_debugcon(vm) vm.destroy() error_context.context("Run trace record command on host.", logging.info) bg = utils_test.BackgroundTest(trace_kvm_pio, ()) bg.start() if not utils_misc.wait_for(lambda: bg.is_alive, timeout): test.fail("Failed to start command: '%s'" % trace_record_cmd) try: vm.create() vm.verify_alive() vm.destroy() process.system(stop_trace_record, ignore_status=True, shell=True) if not utils_misc.wait_for(lambda: not check_trace_process(), timeout, 30, 3): test.fail("Failed to stop command: '%s' after %s seconds." % (stop_trace_record, timeout)) pio_read_counts = int( process.run(check_pio_read, shell=True).stdout.decode().strip()) err_str = "pio_read counts should be greater than 0. " err_str += "But the actual counts are %s." % pio_read_counts test.assertGreater(pio_read_counts, 0, err_str) pio_write_counts = int( process.run(check_pio_write, shell=True).stdout.decode().strip()) if params.get("ovmf_log"): err_str = "pio_write counts should be greater than 0. " err_str += "But the actual counts are %s." % pio_write_counts test.assertGreater(pio_write_counts, 0, err_str) else: err_str = "pio_write counts should be equal to 0. " err_str += "But the actual counts are %s." % pio_write_counts test.assertEqual(pio_write_counts, 0, err_str) finally: if check_trace_process(): process.system(stop_trace_record, ignore_status=True, shell=True)
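# Hedged sketch of what the check_pio_read/check_pio_write parameters boil
# down to: counting read/write kvm_pio events for the 0x402 debugcon port in a
# 'trace-cmd report'. The report line format and port number are assumptions.
import re
import subprocess


def pio_counts(trace_file="/tmp/trace.dat", port="0x402"):
    report = subprocess.check_output(
        ["trace-cmd", "report", trace_file]).decode()
    reads = len(re.findall(r"kvm_pio:.*read.*%s" % port, report))
    writes = len(re.findall(r"kvm_pio:.*write.*%s" % port, report))
    return reads, writes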
def run(test, params, env): """ Test command: virsh net-define/net-undefine. 1) Collect parameters&environment info before test 2) Prepare options for command 3) Execute command for test 4) Check state of defined network 5) Recover environment 6) Check result """ uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri", "default")) net_name = params.get("net_define_undefine_net_name", "default") net_uuid = params.get("net_define_undefine_net_uuid", "") options_ref = params.get("net_define_undefine_options_ref", "default") trans_ref = params.get("net_define_undefine_trans_ref", "trans") extra_args = params.get("net_define_undefine_extra", "") remove_existing = params.get("net_define_undefine_remove_existing", "yes") status_error = "yes" == params.get("status_error", "no") check_states = "yes" == params.get("check_states", "no") net_persistent = "yes" == params.get("net_persistent") net_active = "yes" == params.get("net_active") expect_msg = params.get("net_define_undefine_err_msg") # define multi ip/dhcp sections in network multi_ip = "yes" == params.get("multi_ip", "no") netmask = params.get("netmask") prefix_v6 = params.get("prefix_v6") single_v6_range = "yes" == params.get("single_v6_range", "no") # Get 2nd ipv4 dhcp range dhcp_ranges_start = params.get("dhcp_ranges_start", None) dhcp_ranges_end = params.get("dhcp_ranges_end", None) # Get 2 groups of ipv6 ip address and dhcp section address_v6_1 = params.get("address_v6_1") dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None) dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None) address_v6_2 = params.get("address_v6_2") dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None) dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None) # Edit net xml forward/ip part then define/start to check invalid setting edit_xml = "yes" == params.get("edit_xml", "no") address_v4 = params.get("address_v4") nat_port_start = params.get("nat_port_start") nat_port_end = params.get("nat_port_end") test_port = "yes" == params.get("test_port", "no") loop = int(params.get("loop", 1)) # Get params about creating a bridge bridge = params.get('bridge', None) create_bridge = "yes" == params.get('create_bridge', 'no') ovs_bridge = "yes" == params.get('ovs_bridge', 'no') iface_name = utils_net.get_net_if(state="UP")[0] # Get params about creating a network create_netxml = "yes" == params.get("create_netxml", "no") domain = params.get('domain', None) forward = params.get("forward", None) net_dns_txt = params.get("net_dns_txt", None) net_bandwidth_inbound = params.get("net_bandwidth_inbound", None) net_bandwidth_outbound = params.get("net_bandwidth_outbound", None) mac = params.get("mac") # Edit the created network xml to get the xml to be tested del_mac = "yes" == params.get('del_mac', 'no') del_ip = "yes" == params.get('del_ip', 'no') add_dev = "yes" == params.get('add_dev', 'no') virtualport = 'yes' == params.get("virtualport", "no") virtualport_type = params.get("virtualport_type") virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True} virsh_instance = virsh.VirshPersistent(**virsh_dargs) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") virsh_uri = params.get("virsh_uri") if virsh_uri and not utils_split_daemons.is_modular_daemon(): virsh_uri = "qemu:///system" unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if 
unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Prepare environment and record current net_state_dict backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) backup_state = virsh_instance.net_state_dict() logging.debug("Backed up network(s): %s", backup_state) # Make some XML to use for testing, for now we just copy 'default' test_xml = xml_utils.TempXMLFile() # temporary file try: # LibvirtXMLBase.__str__ returns XML content test_xml.write(str(backup['default'])) test_xml.flush() except (KeyError, AttributeError): test.cancel("Test requires default network to exist") testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name, net_uuid, bridge=None) logging.debug("Get network xml as testnet_xml: %s" % testnet_xml) if remove_existing: for netxml in list(backup.values()): netxml.orbital_nuclear_strike() # Test both define and undefine, So collect info # both of them for result check. # When something wrong with network, set it to 1 fail_flag = 0 result_info = [] if options_ref == "correct_arg": define_options = testnet_xml.xml undefine_options = net_name elif options_ref == "no_option": define_options = "" undefine_options = "" elif options_ref == "not_exist_option": define_options = "/not/exist/file" undefine_options = "NOT_EXIST_NETWORK" define_extra = undefine_extra = extra_args if trans_ref != "define": define_extra = "" if params.get('setup_libvirt_polkit') == 'yes': virsh_dargs = { 'uri': virsh_uri, 'unprivileged_user': unprivileged_user, 'debug': False, 'ignore_status': True } cmd = "chmod 666 %s" % testnet_xml.xml process.run(cmd, shell=True) if params.get('net_define_undefine_readonly', 'no') == 'yes': virsh_dargs = { 'uri': uri, 'debug': False, 'ignore_status': True, 'readonly': True } try: if edit_xml: ipxml_v4 = network_xml.IPXML() ipxml_v4.address = address_v4 ipxml_v4.netmask = netmask range_4 = network_xml.RangeXML() range_4.attrs = { "start": dhcp_ranges_start, "end": dhcp_ranges_end } ipxml_v4.dhcp_ranges = range_4 testnet_xml.del_ip() testnet_xml.set_ip(ipxml_v4) if test_port: nat_port = {"start": nat_port_start, "end": nat_port_end} testnet_xml.nat_port = nat_port testnet_xml.debug_xml() if multi_ip: # Enabling IPv6 forwarding with RA routes without accept_ra set to 2 # is likely to cause routes loss sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra' original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text if original_accept_ra != '2': process.system(sysctl_cmd + '=2') # add another ipv4 address and dhcp range set_ip_section(testnet_xml, address_v4, ipv6=False, netmask=netmask, dhcp_ranges_start=dhcp_ranges_start, dhcp_ranges_end=dhcp_ranges_end) # add ipv6 address and dhcp range set_ip_section(testnet_xml, address_v6_1, ipv6=True, prefix_v6=prefix_v6, dhcp_ranges_start=dhcp_ranges_v6_start_1, dhcp_ranges_end=dhcp_ranges_v6_end_1) # 2nd ipv6 address and dhcp range set_ip_section(testnet_xml, address_v6_2, ipv6=True, prefix_v6=prefix_v6, dhcp_ranges_start=dhcp_ranges_v6_start_2, dhcp_ranges_end=dhcp_ranges_v6_end_2) if create_netxml: net_dict = { 'del_nat_attrs': True, 'del_ip': del_ip, 'dns_txt': net_dns_txt, 'domain': domain, 'bridge': bridge, 'forward': forward, 'interface_dev': iface_name, 'virtualport': virtualport, 'virtualport_type': virtualport_type, 'mac': mac, 'net_bandwidth_inbound': net_bandwidth_inbound, 'net_bandwidth_outbound': net_bandwidth_outbound } logging.debug("net_dict is %s" % net_dict) testnet_xml = libvirt_network.modify_network_xml( net_dict, testnet_xml) testnet_xml.debug_xml() if create_bridge: 
if ovs_bridge: utils_net.create_ovs_bridge(bridge, ignore_status=False) else: utils_net.create_linux_bridge_tmux(bridge, iface_name, ignore_status=False) # Run test case while loop: try: define_result = virsh.net_define(define_options, define_extra, **virsh_dargs) logging.debug(define_result) define_status = define_result.exit_status # Check network states after define if check_states and not define_status: net_state = virsh_instance.net_state_dict() if (net_state[net_name]['active'] or net_state[net_name]['autostart'] or not net_state[net_name]['persistent']): fail_flag = 1 result_info.append("Found wrong network states for " "defined network: %s" % str(net_state)) if define_status == 1 and status_error and expect_msg: logging.debug("check result is %s, expect_msg is %s" % (define_result, expect_msg)) libvirt.check_result(define_result, expect_msg.split(';')) # If defining network succeed, then trying to start it. if define_status == 0: start_result = virsh.net_start(net_name, extra="", **virsh_dargs) logging.debug(start_result) start_status = start_result.exit_status if trans_ref == "trans": if define_status: fail_flag = 1 result_info.append( "Define network with right command failed.") else: if start_status: fail_flag = 1 result_info.append( "Found wrong network states for " "defined network: %s" % str(net_state)) # Check network states after start if check_states and not status_error: net_state = virsh_instance.net_state_dict() if (not net_state[net_name]['active'] or net_state[net_name]['autostart'] or not net_state[net_name]['persistent']): fail_flag = 1 result_info.append("Found wrong network states for " "started network: %s" % str(net_state)) # Try to set autostart virsh.net_autostart(net_name, **virsh_dargs) net_state = virsh_instance.net_state_dict() if not net_state[net_name]['autostart']: fail_flag = 1 result_info.append( "Failed to set autostart for network %s" % net_name) # Restart libvirtd and check state # Close down persistent virsh session before libvirtd restart if hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() # Need to redefine virsh_instance after libvirtd restart virsh_instance = virsh.VirshPersistent(**virsh_dargs) net_state = virsh_instance.net_state_dict() if (not net_state[net_name]['active'] or not net_state[net_name]['autostart']): fail_flag = 1 result_info.append( "Found wrong network state after restarting" " libvirtd: %s" % str(net_state)) logging.debug("undefine network:") # prepare the network status if not net_persistent: virsh.net_undefine(net_name, ignore_status=False) if not net_active: virsh.net_destroy(net_name, ignore_status=False) undefine_status = virsh.net_undefine( undefine_options, undefine_extra, **virsh_dargs).exit_status net_state = virsh_instance.net_state_dict() if net_persistent: if undefine_status: fail_flag = 1 result_info.append( "undefine should succeed but failed") if net_active: if (not net_state[net_name]['active'] or net_state[net_name]['autostart'] or net_state[net_name]['persistent']): fail_flag = 1 result_info.append( "Found wrong network states for " "undefined network: %s" % str(net_state)) else: if net_name in net_state: fail_flag = 1 result_info.append( "Transient network should not exists " "after undefine : %s" % str(net_state)) else: if not undefine_status: fail_flag = 1 result_info.append( "undefine transient network should fail " "but succeed: %s" % str(net_state)) # Stop network for undefine test anyway destroy_result = 
virsh.net_destroy(net_name, extra="", **virsh_dargs) logging.debug(destroy_result) # Undefine network if not check_states: undefine_result = virsh.net_undefine( undefine_options, undefine_extra, **virsh_dargs) if trans_ref != "define": logging.debug(undefine_result) undefine_status = undefine_result.exit_status except Exception: logging.debug( "The define and undefine operation in loop %s failed. ", loop) finally: loop = loop - 1 finally: # Recover environment leftovers = network_xml.NetworkXML.new_all_networks_dict( virsh_instance) for netxml in list(leftovers.values()): netxml.orbital_nuclear_strike() # Recover from backup for netxml in list(backup.values()): netxml.sync(backup_state[netxml.name]) # Close down persistent virsh session (including for all netxml copies) if hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() # Done with file, cleanup del test_xml del testnet_xml if create_bridge: if ovs_bridge: utils_net.delete_ovs_bridge(bridge, ignore_status=False) else: utils_net.delete_linux_bridge_tmux(bridge, iface_name, ignore_status=False) # Check status_error # If fail_flag is set, it must be transaction test. if fail_flag: test.fail("Define network for transaction test " "failed:%s" % result_info) # The logic to check result: # status_error&only undefine:it is negative undefine test only # status_error&(no undefine):it is negative define test only # (not status_error)&(only undefine):it is positive transaction test. # (not status_error)&(no undefine):it is positive define test only if status_error: if trans_ref == "undefine": if undefine_status == 0: test.fail("Run successfully with wrong command.") else: if define_status == 0: if start_status == 0: test.fail("Define an unexpected network, " "and start it successfully.") else: test.fail("Define an unexpected network, " "but start it failed.") else: if trans_ref == "undefine": if undefine_status: test.fail("Define network for transaction " "successfully, but undefine failed.") else: if define_status != 0: test.fail("Run failed with right command") else: if start_status != 0: test.fail("Network is defined as expected, " "but start it failed.")
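# Condensed sketch of the define -> start -> autostart -> undefine transaction
# tested above, driven straight through the virsh CLI; the XML path and
# network name are placeholders.
import subprocess


def virsh(*args):
    return subprocess.run(["virsh", *args], check=True,
                          capture_output=True, text=True)


def define_and_enable(net_xml, net_name):
    virsh("net-define", net_xml)
    virsh("net-start", net_name)
    virsh("net-autostart", net_name)


def remove_network(net_name):
    virsh("net-destroy", net_name)   # stop the running instance
    virsh("net-undefine", net_name)  # drop the persistent definition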
def test_create_three(self):
    """
    Test issuing multiple pool create commands at once.
    """
    global urifile
    # Accumulate a list of pass/fail indicators representing what is expected
    # for each parameter, then "and" them to determine the expected result of
    # the test.
    expected_for_param = []
    modelist = self.params.get("mode", '/run/tests/modes/*')
    mode = modelist[0]
    expected_for_param.append(modelist[1])
    setidlist = self.params.get("setname", '/run/tests/setnames/*')
    setid = setidlist[0]
    expected_for_param.append(setidlist[1])
    uid = os.geteuid()
    gid = os.getegid()
    # If any parameter results in failure, then the test should FAIL.
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break
    try:
        cmd = (
            '../../install/bin/orterun -np 1 '
            '--ompi-server file:{0} ./pool/wrapper/SimplePoolTests {1} {2} {3} {4} {5}'
            .format(urifile, "create", mode, uid, gid, setid))
        uuid_str_1 = """{0}""".format(process.system_output(cmd))
        uuid_str_2 = """{0}""".format(process.system_output(cmd))
        uuid_str_3 = """{0}""".format(process.system_output(cmd))
        exists = CheckForPool.checkForPool('vm1', uuid_str_1)
        if exists != 0:
            self.fail("Pool {0} not found on host {1}.\n".format(
                uuid_str_1, 'vm1'))
        exists = CheckForPool.checkForPool('vm1', uuid_str_2)
        if exists != 0:
            self.fail("Pool {0} not found on host {1}.\n".format(
                uuid_str_2, 'vm1'))
        exists = CheckForPool.checkForPool('vm1', uuid_str_3)
        if exists != 0:
            self.fail("Pool {0} not found on host {1}.\n".format(
                uuid_str_3, 'vm1'))
        delete_cmd_1 = (
            '../../install/bin/orterun -np 1 '
            '--ompi-server file:{0} ./pool/wrapper/SimplePoolTests {1} {2} {3} {4}'
            .format(urifile, "destroy", uuid_str_1, setid, "1"))
        delete_cmd_2 = (
            '../../install/bin/orterun -np 1 '
            '--ompi-server file:{0} ./pool/wrapper/SimplePoolTests {1} {2} {3} {4}'
            .format(urifile, "destroy", uuid_str_2, setid, "1"))
        delete_cmd_3 = (
            '../../install/bin/orterun -np 1 '
            '--ompi-server file:{0} ./pool/wrapper/SimplePoolTests {1} {2} {3} {4}'
            .format(urifile, "destroy", uuid_str_3, setid, "1"))
        process.system(delete_cmd_1)
        process.system(delete_cmd_2)
        process.system(delete_cmd_3)
        exists = CheckForPool.checkForPool('vm1', uuid_str_1)
        if exists == 0:
            self.fail("Pool {0} found on host {1} after destroy.\n".format(
                uuid_str_1, 'vm1'))
        exists = CheckForPool.checkForPool('vm1', uuid_str_2)
        if exists == 0:
            self.fail("Pool {0} found on host {1} after destroy.\n".format(
                uuid_str_2, 'vm1'))
        exists = CheckForPool.checkForPool('vm1', uuid_str_3)
        if exists == 0:
            self.fail("Pool {0} found on host {1} after destroy.\n".format(
                uuid_str_3, 'vm1'))
        if expected_result == 'FAIL':
            self.fail("Expected to fail but passed.\n")
    except Exception as e:
        print(e)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Expecting to pass but test has failed.\n")
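# Hedged sketch showing how the three create/verify/destroy blocks above can
# be collapsed into loops; 'run_pool_cmd' stands in for the orterun invocation
# and is hypothetical, as is 'check_for_pool'.
def create_pools(run_pool_cmd, count=3):
    return [run_pool_cmd("create") for _ in range(count)]


def verify_and_destroy(run_pool_cmd, check_for_pool, uuids, host="vm1"):
    for uuid in uuids:
        if check_for_pool(host, uuid) != 0:
            raise AssertionError("Pool %s not found on %s" % (uuid, host))
    for uuid in uuids:
        run_pool_cmd("destroy", uuid)
    for uuid in uuids:
        if check_for_pool(host, uuid) == 0:
            raise AssertionError("Pool %s still present after destroy" % uuid)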
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) host_arch = platform.machine() virsh_dargs = {'debug': True, 'ignore_status': False} if not utils_package.package_install(["lsof"]): test.cancel("Failed to install dependency package lsof" " on host") def create_iface_xml(iface_mac, source): """ Create interface xml file :param iface_mac: mac of interface :param source: source of interface """ iface = Interface(type_name=iface_type) if source: iface.source = source iface.model = iface_model if iface_model else "virtio" iface.mac_address = iface_mac driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) if test_target: iface.target = {"dev": target_dev} logging.debug("Create new interface xml: %s", iface) return iface def modify_iface_xml(update, status_error=False): """ Modify interface xml options """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] if iface_model: iface.model = iface_model else: del iface.model if iface_type: iface.type_name = iface_type del iface.source source = iface_source if source: net_ifs = utils_net.get_net_if(state="UP") # Check source device is valid or not, # if it's not in host interface list, try to set # source device to first active interface of host if (iface.type_name == "direct" and 'dev' in source and source['dev'] not in net_ifs): logging.warn( "Source device %s is not a interface" " of host, reset to %s", source['dev'], net_ifs[0]) source['dev'] = net_ifs[0] iface.source = source backend = ast.literal_eval(iface_backend) if backend: iface.backend = backend driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) if test_target: logging.debug("iface.target is %s" % target_dev) iface.target = {"dev": target_dev} if iface.address: del iface.address if set_ip: iface.ips = [ast.literal_eval(x) for x in set_ips] logging.debug("New interface xml file: %s", iface) if unprivileged_user: # Create disk image for unprivileged user disk_index = xml_devices.index( xml_devices.by_device_tag("disk")[0]) disk_xml = xml_devices[disk_index] logging.debug("source: %s", disk_xml.source) disk_source = disk_xml.source.attrs["file"] cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}" "".format(disk_source, dst_disk, unprivileged_user)) process.run(cmd, shell=True) disk_xml.source = disk_xml.new_disk_source( attrs={"file": dst_disk}) vmxml.devices = xml_devices # Remove all channels to avoid of permission problem channels = vmxml.get_devices(device_type="channel") for channel in channels: vmxml.del_device(channel) logging.info("Unprivileged users can't use 'dac' security driver," " removing from domain 
xml if present...") vmxml.del_seclabel([('model', 'dac')]) # Set vm memory to 2G if it's larger than 2G if vmxml.memory > 2097152: vmxml.memory = vmxml.current_mem = 2097152 vmxml.xmltreefile.write() logging.debug("New VM xml: %s", vmxml) process.run("chmod a+rw %s" % vmxml.xml, shell=True) virsh.define(vmxml.xml, **virsh_dargs) # Try to modify interface xml by update-device or edit xml elif update: iface.xmltreefile.write() ret = virsh.update_device(vm_name, iface.xml, ignore_status=True) libvirt.check_exit_status(ret, status_error) else: vmxml.devices = xml_devices vmxml.xmltreefile.write() try: vmxml.sync() if define_error: test.fail("Define VM succeed, but it should fail") except xcepts.LibvirtXMLError as e: if not define_error: test.fail("Define VM fail: %s" % e) def check_offloads_option(if_name, driver_options, session=None): """ Check interface offloads by ethtool output """ offloads = { "csum": "tx-checksumming", "tso4": "tcp-segmentation-offload", "tso6": "tx-tcp6-segmentation", "ecn": "tx-tcp-ecn-segmentation", "ufo": "udp-fragmentation-offload" } if session: ret, output = session.cmd_status_output("ethtool -k %s | head" " -18" % if_name) else: out = process.run("ethtool -k %s | head -18" % if_name, shell=True) ret, output = out.exit_status, out.stdout_text if ret: test.fail("ethtool return error code") logging.debug("ethtool output: %s", output) for offload in list(driver_options.keys()): if offload in offloads: if (output.count(offloads[offload]) and not output.count( "%s: %s" % (offloads[offload], driver_options[offload]))): test.fail("offloads option %s: %s isn't" " correct in ethtool output" % (offloads[offload], driver_options[offload])) def run_xml_test(iface_mac): """ Test for interface options in vm xml """ # Get the interface object according the mac address vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) iface_devices = vmxml.get_devices(device_type="interface") iface = None for iface_dev in iface_devices: if iface_dev.mac_address == iface_mac: iface = iface_dev if not iface: test.fail("Can't find interface with mac" " '%s' in vm xml" % iface_mac) driver_dict = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) for driver_opt in list(driver_dict.keys()): if not driver_dict[driver_opt] == iface.driver.driver_attr[ driver_opt]: test.fail("Can't see driver option %s=%s in vm xml" % (driver_opt, driver_dict[driver_opt])) else: logging.info("Find %s=%s in vm xml" % (driver_opt, driver_dict[driver_opt])) if iface_target: if ("dev" not in iface.target or not iface.target["dev"].startswith(iface_target)): test.fail("Can't see device target dev in vm xml") # Check macvtap mode by ip link command if iface_target == "macvtap" and "mode" in iface.source: cmd = "ip -d link show %s" % iface.target["dev"] output = process.run(cmd, shell=True).stdout_text logging.debug("ip link output: %s", output) mode = iface.source["mode"] if mode == "passthrough": mode = "passthru" if not re.search(r"macvtap\s+mode %s" % mode, output): test.fail("Failed to verify macvtap mode") # Check if the "target dev" is set successfully # 1. Target dev name with prefix as "vnet" will always be override; # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct # type interface will be override; # 3. Other scenarios, the target dev should be set successfully. 
if test_target: if target_dev != iface.target["dev"]: if target_dev.startswith("vnet")\ or target_dev.startswith("macvtap")\ or target_dev.startswith("macvlan"): logging.debug("target dev %s is override" % target_dev) else: test.fail("Failed to set target dev to %s" % target_dev) else: logging.debug("target dev set successfully to %s", iface.target["dev"]) def run_cmdline_test(iface_mac, host_arch): """ Test qemu command line :param iface_mac: expected MAC :param host_arch: host architecture, e.g. x86_64 :raise avocado.core.exceptions.TestError: if preconditions are not met :raise avocado.core.exceptions.TestFail: if commandline doesn't match :return: None """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) ret = process.run(cmd, shell=True) logging.debug("Command line %s", ret.stdout_text) if test_vhost_net: if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver: test.fail("Can't see vhost options in" " qemu-kvm command line") if iface_model == "virtio": if host_arch == 's390x': model_option = "device virtio-net-ccw" else: model_option = "device virtio-net-pci" elif iface_model == 'rtl8139': model_option = "device rtl8139" else: test.error( "Don't know which device driver to expect on qemu cmdline" " for iface_model %s" % iface_model) iface_cmdline = re.findall( r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text) if not iface_cmdline: test.fail("Can't see %s with mac %s in command" " line" % (model_option, iface_mac)) driver_dict = {} # Test <driver> xml options. if iface_driver: iface_driver_dict = ast.literal_eval(iface_driver) for driver_opt in list(iface_driver_dict.keys()): if driver_opt == "name": continue elif driver_opt == "txmode": if iface_driver_dict["txmode"] == "iothread": driver_dict["tx"] = "bh" else: driver_dict["tx"] = iface_driver_dict["txmode"] elif driver_opt == "queues": driver_dict["mq"] = "on" if "pci" in model_option: driver_dict["vectors"] = str( int(iface_driver_dict["queues"]) * 2 + 2) else: driver_dict[driver_opt] = iface_driver_dict[driver_opt] # Test <driver><host/><driver> xml options. if iface_driver_host: driver_dict.update(ast.literal_eval(iface_driver_host)) # Test <driver><guest/><driver> xml options. 
if iface_driver_guest: driver_dict.update(ast.literal_eval(iface_driver_guest)) # Only check packed attribute in qemu command line if iface_driver and "packed" in ast.literal_eval(iface_driver): iface_cmdline = re.findall( r"%s=%s" % (driver_opt, driver_dict[driver_opt]), ret.stdout_text) cmd_opt = {} for opt in iface_cmdline[0].split(','): tmp = opt.rsplit("=") cmd_opt[tmp[0]] = tmp[1] logging.debug("Command line options %s", cmd_opt) for driver_opt in list(driver_dict.keys()): if (driver_opt not in cmd_opt or not cmd_opt[driver_opt] == driver_dict[driver_opt]): test.fail("Can't see option '%s=%s' in qemu-kvm " " command line" % (driver_opt, driver_dict[driver_opt])) logging.info("Find %s=%s in qemu-kvm command line" % (driver_opt, driver_dict[driver_opt])) if test_backend: guest_pid = ret.stdout_text.rsplit()[1] cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["tap"]) cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["vhost"]) def get_guest_ip(session, mac): """ Wrapper function to get guest ip address """ utils_net.restart_guest_network(session, mac) # Wait for IP address is ready utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac), 10) return utils_net.get_guest_ip_addr(session, mac) def check_user_network(session): """ Check user network ip address on guest """ vm_ips = [] vm_ips.append(get_guest_ip(session, iface_mac_old)) if attach_device: vm_ips.append(get_guest_ip(session, iface_mac)) logging.debug("IP address on guest: %s", vm_ips) if len(vm_ips) != len(set(vm_ips)): logging.debug( "Duplicated IP address on guest. 
Check bug: " "https://bugzilla.redhat.com/show_bug.cgi?id=1147238") for vm_ip in vm_ips: if not vm_ip or vm_ip != expect_ip: logging.debug("vm_ip is %s, expect_ip is %s", vm_ip, expect_ip) test.fail("Found wrong IP address" " on guest") # Check gateway address gateway = str(utils_net.get_default_gateway(False, session)) if expect_gw not in gateway: test.fail("The gateway on guest is %s, while expect is %s" % (gateway, expect_gw)) # Check dns server address ns_list = utils_net.get_guest_nameserver(session) if expect_ns not in ns_list: test.fail("The dns found is %s, which expect is %s" % (ns_list, expect_ns)) def check_mcast_network(session, add_session): """ Check multicast ip address on guests :param session: vm session :param add_session: additional vm session """ src_addr = iface_source['address'] vms_sess_dict = {vm_name: session, additional_vm.name: add_session} # Check mcast address on host cmd = "netstat -g | grep %s" % src_addr if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't find multicast ip address" " on host") vms_ip_dict = {} # Get ip address on each guest for vms in list(vms_sess_dict.keys()): vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms) vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac) if not vm_ip: test.fail("Can't get multicast ip" " address on guest") vms_ip_dict.update({vms: vm_ip}) if len(set(vms_ip_dict.values())) != len(vms_sess_dict): test.fail("Got duplicated multicast ip address") logging.debug("Found ips on guest: %s", vms_ip_dict) # Run omping server on host if not utils_package.package_install(["omping"]): test.error("Failed to install omping" " on host") cmd = ("iptables -F;omping -m %s %s" % (src_addr, "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values())))) # Run a backgroup job waiting for connection of client bgjob = utils_misc.AsyncJob(cmd) # Run omping client on guests for vms in list(vms_sess_dict.keys()): # omping should be installed first if not utils_package.package_install(["omping"], vms_sess_dict[vms]): test.error("Failed to install omping" " on guest") cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" % (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms])) ret, output = vms_sess_dict[vms].cmd_status_output(cmd) logging.debug("omping ret: %s, output: %s", ret, output) if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')): test.fail("omping failed on guest") # Kill the backgroup job bgjob.kill_func() def get_iface_model(iface_model, host_arch): """ Get iface_model. On s390x use default model 'virtio' if non-virtio given :param iface_model: value as by test configuration or default :param host_arch: host architecture, e.g. 
x86_64 :return: iface_model """ if 's390x' == host_arch and 'virtio' not in iface_model: return "virtio" else: return iface_model def check_vhostuser_guests(session1, session2): """ Check the vhostuser interface in guests param session1: Session of original guest param session2: Session of additional guest """ logging.debug("iface details is %s" % libvirt.get_interface_details(vm_name)) vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac']) vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac']) utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip) utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip) ping_status, ping_output = utils_net.ping(dest=guest2_ip, count='3', timeout=5, session=session1) logging.info("output:%s" % ping_output) if ping_status != 0: if ping_expect_fail: logging.info("Can not ping guest2 as expected") else: test.fail("Can not ping guest2 from guest1") else: if ping_expect_fail: test.fail("Ping guest2 successfully not expected") else: logging.info("Can ping guest2 from guest1") def get_ovs_statis(ovs): """ Get ovs-vsctl interface statistics and format in dict param ovs: openvswitch instance """ ovs_statis_dict = {} ovs_iface_info = ovs.ovs_vsctl(["list", "interface"]).stdout_text.strip() ovs_iface_list = re.findall( 'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info, re.S) logging.info("ovs iface list is %s", ovs_iface_list) # Dict of iface name and statistics for iface_name in vhostuser_names.split(): for ovs_iface in ovs_iface_list: if iface_name == eval(ovs_iface[0]): format_statis = dict( re.findall(r'(\S*?)=(\d*?),', ovs_iface[1])) ovs_statis_dict[iface_name] = format_statis break return ovs_statis_dict def check_libvirt_log(libvirtd_log_path, log_pattern_list): """ Check if the log patterns exist in libvirtd :param libvirtd_log_path: path of libvirtd log :param log_pattern_list: checking log pattern list """ with open(libvirtd_log_path) as f: lines = "".join(f.readlines()) for log_pattern in log_pattern_list: if re.search(log_pattern, lines): logging.info("Finding msg<%s> in libvirtd log", log_pattern) else: test.fail("Can not find msg:<%s> in libvirtd.log" % log_pattern) def get_domstats_runtime(): """ get domstats runtime dict :return: runtime_dict """ runtime_str = process.run("time virsh domstats", shell=True).stderr_text it = iter(runtime_str.split()) runtime_dict = dict(zip(it, it)) for key in runtime_dict.keys(): value_list = re.findall(r"\d+\.?\d*", runtime_dict[key]) if value_list is not None: runtime_dict[key] = float(value_list[0]) * 60 + float( value_list[1]) else: test.error("Can not get correct time for command execution") logging.info("The runtime dict is %s", runtime_dict) return runtime_dict status_error = "yes" == params.get("status_error", "no") start_error = "yes" == params.get("start_error", "no") define_error = "yes" == params.get("define_error", "no") unprivileged_user = params.get("unprivileged_user") # Interface specific attributes. 
iface_type = params.get("iface_type", "network") iface_source = ast.literal_eval(params.get("iface_source", "{}")) iface_driver = params.get("iface_driver") iface_model = get_iface_model(params.get("iface_model", "virtio"), host_arch) iface_target = params.get("iface_target") iface_backend = params.get("iface_backend", "{}") iface_driver_host = params.get("iface_driver_host") iface_driver_guest = params.get("iface_driver_guest") ovs_br_name = params.get("ovs_br_name") vhostuser_names = params.get("vhostuser_names") attach_device = params.get("attach_iface_device") expect_tx_size = params.get("expect_tx_size") guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1") guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2") test_type = params.get("test_type") change_option = "yes" == params.get("change_iface_options", "no") update_device = "yes" == params.get("update_iface_device", "no") additional_guest = "yes" == params.get("additional_guest", "no") serial_login = "******" == params.get("serial_login", "no") rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no") test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no") test_option_xml = "yes" == params.get("test_iface_option_xml", "no") test_vhost_net = "yes" == params.get("test_vhost_net", "no") test_option_offloads = "yes" == params.get("test_option_offloads", "no") test_iface_user = "******" == params.get("test_iface_user", "no") test_iface_mcast = "yes" == params.get("test_iface_mcast", "no") test_libvirtd = "yes" == params.get("test_libvirtd", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") restart_vm = "yes" == params.get("restart_vm", "no") test_guest_ip = "yes" == params.get("test_guest_ip", "no") test_backend = "yes" == params.get("test_backend", "no") check_guest_trans = "yes" == params.get("check_guest_trans", "no") set_ip = "yes" == params.get("set_user_ip", "no") set_ips = params.get("set_ips", "").split() expect_ip = params.get("expect_ip") expect_gw = params.get("expect_gw") expect_ns = params.get("expect_ns") test_target = "yes" == params.get("test_target", "no") target_dev = params.get("target_dev", None) # test params for vhostuser test huge_page = ast.literal_eval(params.get("huge_page", "{}")) numa_cell = ast.literal_eval(params.get("numa_cell", "{}")) additional_iface_source = ast.literal_eval( params.get("additional_iface_source", "{}")) vcpu_num = params.get("vcpu_num") cpu_mode = params.get("cpu_mode") hugepage_num = params.get("hugepage_num") log_pattern_list = ast.literal_eval(params.get("log_pattern_list", "[]")) log_level = params.get("log_level") limit_nofile = params.get("limit_nofile") # judgement params for vhostuer test need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no") ping_expect_fail = "yes" == params.get("ping_expect_fail", "no") check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no") check_statistics = "yes" == params.get("check_statistics", "no") enable_multiqueue = "yes" == params.get("enable_multiqueue", "no") queue_size = None if iface_driver: driver_dict = ast.literal_eval(iface_driver) if "queues" in driver_dict: queue_size = int(driver_dict.get("queues")) if iface_driver_host or iface_driver_guest or test_backend: if not libvirt_version.version_compare(1, 2, 8): test.cancel("Offloading/backend options not " "supported in this libvirt version") if iface_driver and "queues" in ast.literal_eval(iface_driver): if not libvirt_version.version_compare(1, 0, 6): test.cancel("Queues options not supported" " in this 
libvirt version") if iface_driver and "packed" in ast.literal_eval(iface_driver): if not libvirt_version.version_compare(6, 3, 0): test.cancel("Packed options not supported" " in this libvirt version") if unprivileged_user: if not libvirt_version.version_compare(1, 1, 1): test.cancel("qemu-bridge-helper not supported" " on this host") virsh_dargs["unprivileged_user"] = unprivileged_user # Create unprivileged user if needed cmd = ("grep {0} /etc/passwd || " "useradd {0}".format(unprivileged_user)) process.run(cmd, shell=True) # Need another disk image for unprivileged user to access dst_disk = "/tmp/%s.img" % unprivileged_user # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name) # iface_mac will update if attach a new interface iface_mac = iface_mac_old # Additional vm for test additional_vm = None libvirtd = utils_libvirtd.Libvirtd() libvirtd_log_path = None libvirtd_conf = None if check_libvirtd_log: libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log") logging.debug("libvirtd_log_path is %s", libvirtd_log_path) libvirtd_conf = utils_config.LibvirtdConfig() if log_level: libvirtd_conf["log_level"] = log_level libvirtd_conf[ "log_filters"] = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"' libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path libvirtd.restart() # Prepare vhostuser ovs = None if need_vhostuser_env: # Reserve selinux status selinux_mode = utils_selinux.get_status() # Reserve orig page size orig_size = utils_memory.get_num_huge_pages() ovs_dir = data_dir.get_tmp_dir() ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name, vhostuser_names, queue_size) try: # Build the xml and run test. try: # Prepare interface backend files if test_backend: if not os.path.exists("/dev/vhost-net"): process.run("modprobe vhost-net", shell=True) backend = ast.literal_eval(iface_backend) backend_tap = "/dev/net/tun" backend_vhost = "/dev/vhost-net" if not backend: backend["tap"] = backend_tap backend["vhost"] = backend_vhost if not start_error: # Create backend files for normal test if not os.path.exists(backend["tap"]): os.rename(backend_tap, backend["tap"]) if not os.path.exists(backend["vhost"]): os.rename(backend_vhost, backend["vhost"]) # Edit the interface xml. 
if change_option: modify_iface_xml(update=False) if define_error: return if test_target: logging.debug("Setting target device name to %s", target_dev) modify_iface_xml(update=False) if rm_vhost_driver: # remove vhost driver on host and # the character file /dev/vhost-net cmd = ("modprobe -r {0}; " "rm -f /dev/vhost-net".format("vhost_net")) if process.system(cmd, ignore_status=True, shell=True): test.error("Failed to remove vhost_net driver") else: # Load vhost_net driver by default cmd = "modprobe vhost_net" process.system(cmd, shell=True) # Attach a interface when vm is shutoff if attach_device == 'config': iface_mac = utils_net.generate_mac_address_simple() att_source = additional_iface_source if test_type == "check_performance" else iface_source iface_xml_obj = create_iface_xml(iface_mac, att_source) iface_xml_obj.xmltreefile.write() ret = virsh.attach_device(vm_name, iface_xml_obj.xml, flagstr="--config", ignore_status=True) libvirt.check_exit_status(ret) # Add hugepage and update cpu for vhostuser testing if huge_page: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) membacking = vm_xml.VMMemBackingXML() hugepages = vm_xml.VMHugepagesXML() pagexml = hugepages.PageXML() pagexml.update(huge_page) hugepages.pages = [pagexml] membacking.hugepages = hugepages vmxml.mb = membacking vmxml.vcpu = int(vcpu_num) cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell]) cpu_xml.mode = cpu_mode if cpu_mode == "custom": vm_capability = capability_xml.CapabilityXML() cpu_xml.model = vm_capability.model vmxml.cpu = cpu_xml vmxml.sync() logging.debug("xmltreefile:%s", vmxml.xmltreefile) # Clone additional vm if additional_guest: add_vm_name = "%s_%s" % (vm_name, '1') # Clone additional guest timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, add_vm_name, True, timeout=timeout) additional_vm = vm.clone(add_vm_name) # Update iface source if needed if additional_iface_source: add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name) add_xml_devices = add_vmxml.devices add_iface_index = add_xml_devices.index( add_xml_devices.by_device_tag("interface")[0]) add_iface = add_xml_devices[add_iface_index] add_iface.source = additional_iface_source add_vmxml.devices = add_xml_devices add_vmxml.xmltreefile.write() add_vmxml.sync() logging.debug("add vm xmltreefile:%s", add_vmxml.xmltreefile) additional_vm.start() # additional_vm.wait_for_login() username = params.get("username") password = params.get("password") add_session = additional_vm.wait_for_serial_login( username=username, password=password) # Start the VM. 
if unprivileged_user: virsh.start(vm_name, **virsh_dargs) cmd = ("su - %s -c 'virsh console %s'" % (unprivileged_user, vm_name)) session = aexpect.ShellSession(cmd) session.sendline() remote.handle_prompts(session, params.get("username"), params.get("password"), r"[\#\$]\s*$", 60) # Get ip address on guest if not get_guest_ip(session, iface_mac): test.error("Can't get ip address on guest") else: # Will raise VMStartError exception if start fails vm.start() if serial_login: session = vm.wait_for_serial_login() else: session = vm.wait_for_login() if start_error: test.fail("VM started unexpectedly") # Attach a interface when vm is running if attach_device == 'live': iface_mac = utils_net.generate_mac_address_simple() iface_xml_obj = create_iface_xml(iface_mac, iface_source) iface_xml_obj.xmltreefile.write() ret = virsh.attach_device(vm_name, iface_xml_obj.xml, flagstr="--live", ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) # Need sleep here for attachment take effect time.sleep(5) # Update a interface options if update_device: modify_iface_xml(update=True, status_error=status_error) # Run tests for qemu-kvm command line options if test_option_cmd: run_cmdline_test(iface_mac, host_arch) # Run tests for vm xml if test_option_xml: run_xml_test(iface_mac) # Run tests for offloads options if test_option_offloads: if iface_driver_host: ifname_guest = utils_net.get_linux_ifname( session, iface_mac) check_offloads_option(ifname_guest, ast.literal_eval(iface_driver_host), session) if iface_driver_guest: ifname_host = libvirt.get_ifname_host(vm_name, iface_mac) check_offloads_option(ifname_host, ast.literal_eval(iface_driver_guest)) if test_iface_user: # Test user type network check_user_network(session) if test_iface_mcast: # Test mcast type network check_mcast_network(session, add_session) # Check guest ip address if test_guest_ip: if not get_guest_ip(session, iface_mac): test.fail("Guest can't get a" " valid ip address") # Check guest RX/TX ring if check_guest_trans: ifname_guest = utils_net.get_linux_ifname(session, iface_mac) ret, outp = session.cmd_status_output("ethtool -g %s" % ifname_guest) if ret: test.fail("ethtool return error code") logging.info("ethtool output is %s", outp) driver_dict = ast.literal_eval(iface_driver) if expect_tx_size: driver_dict['tx_queue_size'] = expect_tx_size for outp_p in outp.split("Current hardware"): if 'rx_queue_size' in driver_dict: if re.search( r"RX:\s*%s" % driver_dict['rx_queue_size'], outp_p): logging.info("Find RX setting RX:%s by ethtool", driver_dict['rx_queue_size']) else: test.fail("Cannot find matching rx setting") if 'tx_queue_size' in driver_dict: if re.search( r"TX:\s*%s" % driver_dict['tx_queue_size'], outp_p): logging.info("Find TX settint TX:%s by ethtool", driver_dict['tx_queue_size']) else: test.fail("Cannot find matching tx setting") if test_target: logging.debug("Check if the target dev is set") run_xml_test(iface_mac) # Check vhostuser guest if test_type == "multi_guests": check_vhostuser_guests(session, add_session) if test_type == "check_performance": # get runtime of domstats runtime_dict1 = get_domstats_runtime() # Check libvirtd log if check_libvirtd_log: check_libvirt_log(libvirtd_log_path, log_pattern_list) # change libvirtd.service if test_type == "check_performance": # Get the NOFILE limit value process.run("prlimit -p `pidof libvirtd` |grep NOFILE", shell=True) # Let libvirtd run with a big NOFILE limit, check runtime libvirtd_service_file = "/usr/lib/systemd/system/libvirtd.service" backup_file = 
os.path.join(data_dir.get_tmp_dir(), "libvirtd.service-bak") shutil.copy(libvirtd_service_file, backup_file) ori_value = process.getoutput("cat %s |grep LimitNOFILE" % libvirtd_service_file, shell=True) with open(libvirtd_service_file, 'r') as fr: alllines = fr.readlines() with open(libvirtd_service_file, 'w+') as fw: for line in alllines: newline = re.sub(ori_value, limit_nofile, line) fw.writelines(newline) process.run("systemctl daemon-reload") libvirtd.restart() process.run("prlimit -p `pidof libvirtd` |grep NOFILE", shell=True) # get runtime of domstats again and compare runtime_dict2 = get_domstats_runtime() for key in runtime_dict1.keys(): if abs(runtime_dict1[key] - runtime_dict2[key]) > 0.1: test.fail( "The difference of 2 runtime is larger than 0.1s") # check the libvirtd log again with big NOFILE limit check_libvirt_log(libvirtd_log_path, log_pattern_list) # Check statistics if check_statistics: session.sendline("ping %s" % guest2_ip) add_session.sendline("ping %s" % guest1_ip) time.sleep(5) vhost_name = vhostuser_names.split()[0] ovs_statis_dict = get_ovs_statis(ovs)[vhost_name] domif_info = {} domif_info = libvirt.get_interface_details(vm_name) virsh.domiflist(vm_name, debug=True) domif_stat_result = virsh.domifstat(vm_name, vhost_name) if domif_stat_result.exit_status != 0: test.fail("domifstat cmd fail with msg:%s" % domif_stat_result.stderr) else: domif_stat = domif_stat_result.stdout.strip() logging.debug("vhost_name is %s, domif_stat is %s", vhost_name, domif_stat) domif_stat_dict = dict( re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat)) logging.debug("ovs_statis is %s, domif_stat is %s", ovs_statis_dict, domif_stat_dict) ovs_cmp_dict = { 'tx_bytes': ovs_statis_dict['rx_bytes'], 'tx_drop': ovs_statis_dict['rx_dropped'], 'tx_errs': ovs_statis_dict['rx_errors'], 'tx_packets': ovs_statis_dict['rx_packets'], 'rx_bytes': ovs_statis_dict['tx_bytes'], 'rx_drop': ovs_statis_dict['tx_dropped'] } logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict) for dict_key in ovs_cmp_dict.keys(): if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]: test.fail( "Find ovs %s result (%s) different with domifstate result (%s)" % (dict_key, ovs_cmp_dict[dict_key], domif_stat_dict[dict_key])) else: logging.info("ovs %s value %s is same with domifstate", dict_key, domif_stat_dict[dict_key]) # Check multi_queue if enable_multiqueue: ifname_guest = utils_net.get_linux_ifname(session, iface_mac) for comb_size in (queue_size, queue_size - 1): logging.info("Setting multiqueue size to %s" % comb_size) session.cmd_status("ethtool -L %s combined %s" % (ifname_guest, comb_size)) ret, outp = session.cmd_status_output("ethtool -l %s" % ifname_guest) logging.debug("ethtool cmd output:%s" % outp) if not ret: pre_comb = re.search( "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)", outp).group(1) cur_comb = re.search( "Current hardware settings:[\s\S]*?Combined:.*?(\d+)", outp).group(1) if int(pre_comb) != queue_size or int(cur_comb) != int( comb_size): test.fail( "Fail to check the combined size: setting: %s," "Pre-set: %s, Current-set: %s, queue_size: %s" % (comb_size, pre_comb, cur_comb, queue_size)) else: logging.info( "Getting correct Pre-set and Current set value" ) else: test.error("ethtool list fail: %s" % outp) session.close() if additional_guest: add_session.close() # Restart libvirtd and guest, then test again if restart_libvirtd: libvirtd.restart() if restart_vm: vm.destroy(gracefully=True) vm.start() if test_option_xml: run_xml_test(iface_mac) # Detach hot/cold-plugged interface at last if attach_device 
and not status_error: ret = virsh.detach_device(vm_name, iface_xml_obj.xml, flagstr="", ignore_status=True, debug=True) libvirt.check_exit_status(ret) except virt_vm.VMStartError as e: logging.info(str(e)) if not start_error: test.fail('VM failed to start\n%s' % e) finally: # Recover VM. logging.info("Restoring vm...") # Restore interface backend files if test_backend: if not os.path.exists(backend_tap): os.rename(backend["tap"], backend_tap) if not os.path.exists(backend_vhost): os.rename(backend["vhost"], backend_vhost) if rm_vhost_driver: # Restore vhost_net driver process.system("modprobe vhost_net", shell=True) if unprivileged_user: virsh.remove_domain(vm_name, **virsh_dargs) process.run('rm -f %s' % dst_disk, shell=True) if additional_vm: virsh.remove_domain(additional_vm.name, "--remove-all-storage") # Kill all omping server process on host process.system("pidof omping && killall omping", ignore_status=True, shell=True) if vm.is_alive(): vm.destroy(gracefully=True) vmxml_backup.sync() if need_vhostuser_env: utils_net.clean_ovs_env(selinux_mode=selinux_mode, page_size=orig_size, clean_ovs=True) if libvirtd_conf: libvirtd_conf.restore() libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path) if test_type == "check_performance": shutil.copy(backup_file, libvirtd_service_file) process.run("systemctl daemon-reload") libvirtd.restart()
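# Illustrative sketch (not part of the test above): get_domstats_runtime()
# parses the stderr of "time virsh domstats", which the shell's time builtin
# prints as lines like "real 0m1.234s". A minimal standalone version of the
# same conversion, assuming that XmY.Zs format:
import re


def time_output_to_seconds(time_stderr):
    """Convert `time` stderr such as 'real 0m1.234s' into {'real': 1.234, ...}."""
    runtime = {}
    for label, value in re.findall(r"(real|user|sys)\s+(\S+)", time_stderr):
        minutes, seconds = re.match(r"(\d+)m([\d.]+)s", value).groups()
        runtime[label] = float(minutes) * 60 + float(seconds)
    return runtime


# Example: time_output_to_seconds("real\t0m1.234s\nuser\t0m0.100s\nsys\t0m0.050s")
# returns {'real': 1.234, 'user': 0.1, 'sys': 0.05}.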
def tearDown(self):
    for hp_size in self.configured_page_sizes:
        if process.system('umount %s' % self.hugetlbfs_dir[hp_size],
                          ignore_status=True):
            self.log.warn("umount of hugetlbfs dir failed")
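# Illustrative aside (assumes the standard Linux sysfs layout under
# /sys/kernel/mm/hugepages): the tearDown above unmounts one hugetlbfs mount
# per configured page size; the supported sizes themselves can be discovered
# like this.
import os


def supported_hugepage_sizes_kb():
    """Return the huge page sizes (in kB) supported by the running kernel."""
    base = "/sys/kernel/mm/hugepages"
    sizes = []
    for entry in os.listdir(base):
        if entry.startswith("hugepages-") and entry.endswith("kB"):
            sizes.append(int(entry[len("hugepages-"):-len("kB")]))
    return sorted(sizes)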
def setUp(self): ''' To check and install dependencies for the test ''' sm = SoftwareManager() for pkg in ["openssh-clients", "gcc"]: if not sm.check_installed(pkg) and not sm.install(pkg): self.skip("%s package is need to test" % pkg) interfaces = netifaces.interfaces() self.IF = self.params.get("interface", default="") self.PEER_IP = self.params.get("peer_ip", default="") if self.IF not in interfaces: self.skip("%s interface is not available" % self.IF) if self.PEER_IP == "": self.skip("%s peer machine is not available" % self.PEER_IP) self.to = self.params.get("timeout", default="600") self.IPERF_RUN = self.params.get("IPERF_RUN", default="0") self.NETSERVER_RUN = self.params.get("NETSERVER_RUN", default="0") self.iper = os.path.join(self.teststmpdir, 'iperf') self.netperf = os.path.join(self.teststmpdir, 'netperf') detected_distro = distro.detect() if detected_distro.name == "Ubuntu": cmd = "service ufw stop" # FIXME: "redhat" as the distro name for RHEL is deprecated # on Avocado versions >= 50.0. This is a temporary compatibility # enabler for older runners, but should be removed soon elif detected_distro.name in ['rhel', 'fedora', 'redhat']: cmd = "systemctl stop firewalld" elif detected_distro.name == "SuSE": cmd = "rcSuSEfirewall2 stop" elif detected_distro.name == "centos": cmd = "service iptables stop" else: self.skip("Distro not supported") if process.system("%s && ssh %s %s" % (cmd, self.PEER_IP, cmd), ignore_status=True, shell=True) != 0: self.skip("Unable to disable firewall") tarball = self.fetch_asset( 'ftp://ftp.netperf.org/netperf/' 'netperf-2.7.0.tar.bz2', expire='7d') archive.extract(tarball, self.netperf) version = os.path.basename(tarball.split('.tar.')[0]) self.neperf = os.path.join(self.netperf, version) tmp = "scp -r %s root@%s:" % (self.neperf, self.PEER_IP) if process.system(tmp, shell=True, ignore_status=True) != 0: self.skip("unable to copy the netperf into peer machine") tmp = "cd /root/netperf-2.7.0;./configure ppc64le;make" cmd = "ssh %s \"%s\"" % (self.PEER_IP, tmp) if process.system(cmd, shell=True, ignore_status=True) != 0: self.fail("test failed because command failed in peer machine") time.sleep(5) os.chdir(self.neperf) process.system('./configure ppc64le', shell=True) build.make(self.neperf) self.perf = os.path.join(self.neperf, 'src') time.sleep(5) tarball = self.fetch_asset( 'iperf.zip', locations=['https://github.com/esnet/' 'iperf/archive/master.zip'], expire='7d') archive.extract(tarball, self.iper) self.ipe = os.path.join(self.iper, 'iperf-master') tmp = "scp -r %s root@%s:" % (self.ipe, self.PEER_IP) if process.system(tmp, shell=True, ignore_status=True) != 0: self.skip("unable to copy the iperf into peer machine") tmp = "cd /root/iperf-master;./configure;make" cmd = "ssh %s \"%s\"" % (self.PEER_IP, tmp) if process.system(cmd, shell=True, ignore_status=True) != 0: self.fail("test failed because command failed in peer machine") time.sleep(5) os.chdir(self.ipe) process.system('./configure', shell=True) build.make(self.ipe) self.iperf = os.path.join(self.ipe, 'src')
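# Illustrative sketch (helper name and use of the standard library are
# assumptions, not part of the test above): the setUp repeats the
# ssh <peer> "<cmd>" pattern many times; wrapping it keeps the status and
# output handling in one place. Assumes passwordless ssh to the peer, as the
# test itself does.
import subprocess


def run_on_peer(peer, command, timeout=600):
    """Run a command on the peer over ssh and return (exit_status, output)."""
    proc = subprocess.run(["ssh", peer, command],
                          capture_output=True, text=True, timeout=timeout)
    return proc.returncode, proc.stdout + proc.stderr


# Example: status, out = run_on_peer("root@192.168.0.2", "uname -r")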
def vg_cleanup(disk_filename=None, vg_disk_dir=None, vg_name=None, loop_device=None, use_tmpfs=True): """ Clean up any stage of the VG disk setup in case of test error. This detects whether the components were initialized and if so tries to remove them. In case of failure it raises summary exception. :param str disk_filename: name of the disk sparse file :param str vg_disk_dir: location of the disk file :param str vg_name: name of the volume group :param str loop_device: name of the disk or loop device :param bool use_tmpfs: whether to use RAM or slower storage :returns: disk_filename, vg_disk_dir, vg_name, loop_device :rtype: (str, str, str, str) :raises: :py:class:`lv_utils.LVException` on intolerable failure at any stage """ errs = [] if vg_name is not None: loop_device = re.search(r"([/\w-]+) +%s +lvm2" % vg_name, process.run("pvs", sudo=True).stdout_text) if loop_device is not None: loop_device = loop_device.group(1) process.run("vgremove -f %s" % vg_name, ignore_status=True, sudo=True) if loop_device is not None: result = process.run("pvremove %s" % loop_device, ignore_status=True, sudo=True) if result.exit_status != 0: errs.append("wipe pv") logging.error("Failed to wipe pv from %s: %s", loop_device, result) losetup_all = process.run("losetup --all", sudo=True).stdout_text if loop_device in losetup_all: disk_filename = re.search( r"%s: \[\d+\]:\d+ \(([/\w]+)\)" % loop_device, losetup_all) if disk_filename is not None: disk_filename = disk_filename.group(1) for _ in range(10): result = process.run("losetup -d %s" % loop_device, ignore_status=True, sudo=True) if b"resource busy" not in result.stderr: if result.exit_status != 0: errs.append("remove loop device") logging.error( "Unexpected failure when removing loop" "device %s, check the log", loop_device) break time.sleep(0.1) if disk_filename is not None: if os.path.exists(disk_filename): os.unlink(disk_filename) logging.debug("Disk filename %s deleted", disk_filename) vg_disk_dir = os.path.dirname(disk_filename) if vg_disk_dir is not None: if use_tmpfs and not process.system("mountpoint %s" % vg_disk_dir, ignore_status=True): for _ in range(10): result = process.run("umount %s" % vg_disk_dir, ignore_status=True, sudo=True) time.sleep(0.1) if result.exit_status == 0: break else: errs.append("umount") logging.error( "Unexpected failure unmounting %s, check the " "log", vg_disk_dir) if os.path.exists(vg_disk_dir): try: shutil.rmtree(vg_disk_dir) logging.debug("Disk directory %s deleted", vg_disk_dir) except OSError as details: errs.append("rm-disk-dir") logging.error("Failed to remove disk_dir: %s", details) if errs: raise lv_utils.LVException("vg_cleanup failed: %s" % ", ".join(errs))
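# Illustrative sketch (standalone, standard library only): vg_cleanup()
# retries "losetup -d" because the kernel may still hold the loop device
# briefly after vgremove/pvremove. The same retry-until-not-busy pattern in
# isolation, with illustrative names:
import subprocess
import time


def retry_cmd(cmd, attempts=10, delay=0.1, busy_marker="resource busy"):
    """Run cmd repeatedly until it stops reporting 'busy' or attempts run out."""
    for _ in range(attempts):
        result = subprocess.run(cmd, shell=True,
                                capture_output=True, text=True)
        if busy_marker not in result.stderr:
            return result.returncode == 0
        time.sleep(delay)
    return False


# Example: retry_cmd("losetup -d /dev/loop0")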
def is_dir_unmounted():
    cmd = 'umount %s' % self.dir
    process.system(cmd, shell=True, ignore_status=True)
    if disk.is_dir_mounted(self.dir):
        return False
    return True
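# Predicates such as is_dir_unmounted() above are normally polled by a
# wait_for-style helper rather than called once. A minimal standalone version
# of that polling loop (names are illustrative):
import time


def wait_until(predicate, timeout=30, step=1.0):
    """Poll predicate() until it returns truthy or timeout seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(step)
    return False


# Example: wait_until(is_dir_unmounted, timeout=60, step=5)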
def run_cmdline_test(iface_mac, host_arch): """ Test qemu command line :param iface_mac: expected MAC :param host_arch: host architecture, e.g. x86_64 :raise avocado.core.exceptions.TestError: if preconditions are not met :raise avocado.core.exceptions.TestFail: if commandline doesn't match :return: None """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) ret = process.run(cmd, shell=True) logging.debug("Command line %s", ret.stdout_text) if test_vhost_net: if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver: test.fail("Can't see vhost options in" " qemu-kvm command line") if iface_model == "virtio": if host_arch == 's390x': model_option = "device virtio-net-ccw" else: model_option = "device virtio-net-pci" elif iface_model == 'rtl8139': model_option = "device rtl8139" else: test.error( "Don't know which device driver to expect on qemu cmdline" " for iface_model %s" % iface_model) iface_cmdline = re.findall( r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text) if not iface_cmdline: test.fail("Can't see %s with mac %s in command" " line" % (model_option, iface_mac)) driver_dict = {} # Test <driver> xml options. if iface_driver: iface_driver_dict = ast.literal_eval(iface_driver) for driver_opt in list(iface_driver_dict.keys()): if driver_opt == "name": continue elif driver_opt == "txmode": if iface_driver_dict["txmode"] == "iothread": driver_dict["tx"] = "bh" else: driver_dict["tx"] = iface_driver_dict["txmode"] elif driver_opt == "queues": driver_dict["mq"] = "on" if "pci" in model_option: driver_dict["vectors"] = str( int(iface_driver_dict["queues"]) * 2 + 2) else: driver_dict[driver_opt] = iface_driver_dict[driver_opt] # Test <driver><host/><driver> xml options. if iface_driver_host: driver_dict.update(ast.literal_eval(iface_driver_host)) # Test <driver><guest/><driver> xml options. if iface_driver_guest: driver_dict.update(ast.literal_eval(iface_driver_guest)) # Only check packed attribute in qemu command line if iface_driver and "packed" in ast.literal_eval(iface_driver): iface_cmdline = re.findall( r"%s=%s" % (driver_opt, driver_dict[driver_opt]), ret.stdout_text) cmd_opt = {} for opt in iface_cmdline[0].split(','): tmp = opt.rsplit("=") cmd_opt[tmp[0]] = tmp[1] logging.debug("Command line options %s", cmd_opt) for driver_opt in list(driver_dict.keys()): if (driver_opt not in cmd_opt or not cmd_opt[driver_opt] == driver_dict[driver_opt]): test.fail("Can't see option '%s=%s' in qemu-kvm " " command line" % (driver_opt, driver_dict[driver_opt])) logging.info("Find %s=%s in qemu-kvm command line" % (driver_opt, driver_dict[driver_opt])) if test_backend: guest_pid = ret.stdout_text.rsplit()[1] cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["tap"]) cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["vhost"])
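# Illustrative sketch (standalone, not the test's own helper): run_cmdline_test()
# splits a qemu "-device virtio-net-pci,...,mac=..." argument on commas and, for
# multiqueue virtio-net, expects vectors = 2 * queues + 2. The same parsing and
# arithmetic in isolation:
def parse_device_options(option_str):
    """Turn 'virtio-net-pci,mq=on,vectors=6,mac=52:54:00:11:22:33' into a dict."""
    opts = {}
    for field in option_str.split(',')[1:]:   # skip the device model itself
        key, _, value = field.partition('=')
        opts[key] = value
    return opts


def expected_vectors(queues):
    """One vector per RX and TX queue, plus config and control vectors."""
    return 2 * int(queues) + 2


# Example: parse_device_options("virtio-net-pci,mq=on,vectors=6,mac=52:54:00:11:22:33")
# -> {'mq': 'on', 'vectors': '6', 'mac': '52:54:00:11:22:33'}; expected_vectors(2) -> 6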
def run(test, params, env): """ Test pool command:virsh pool_autostart 1) Define a given type pool 2) Mark pool as autostart 3) Restart libvirtd and check pool 4) Destroy the pool 5) Unmark pool as autostart 6) Repeate step(3) """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") ip_protocal = params.get("ip_protocal", "ipv4") pool_ref = params.get("pool_ref", "name") pool_uuid = params.get("pool_uuid", "") invalid_source_path = params.get("invalid_source_path", "") status_error = "yes" == params.get("status_error", "no") readonly_mode = "yes" == params.get("readonly_mode", "no") pre_def_pool = "yes" == params.get("pre_def_pool", "yes") disk_type = params.get("disk_type", "") vg_name = params.get("vg_name", "") lv_name = params.get("lv_name", "") update_policy = params.get("update_policy") # Readonly mode ro_flag = False if readonly_mode: logging.debug("Readonly mode test") ro_flag = True if pool_target is "": pool_target = os.path.join(test.tmpdir, pool_target) # The file for dumped pool xml p_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): test.fail("Pool %s already exist" % pool_name) def check_pool(pool_name, pool_type, checkpoint, expect_value="", expect_error=False): """ Check the pool after autostart it :param pool_name: Name of the pool. :param pool_type: Type of the pool. :param checkpoint: Which part for checking. :param expect_value: Expected value. :param expect_error: Boolen value, expect command success or fail """ libvirt_pool = libvirt_storage.StoragePool() virsh.pool_list(option="--all", debug=True) if checkpoint == 'State': actual_value = libvirt_pool.pool_state(pool_name) if checkpoint == 'Autostart': actual_value = libvirt_pool.pool_autostart(pool_name) if actual_value != expect_value: if not expect_error: if checkpoint == 'State' and pool_type in ("dir", "scsi"): debug_msg = "Dir pool should be always active when libvirtd restart. 
" debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610" logging.debug(debug_msg) else: test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value)) else: logging.debug("Pool %s is %s as expected", checkpoint, actual_value) def change_source_path(new_path, update_policy="set"): n_poolxml = pool_xml.PoolXML() n_poolxml = n_poolxml.new_from_dumpxml(pool_name) s_xml = n_poolxml.get_source() s_xml.device_path = new_path if update_policy == "set": n_poolxml.set_source(s_xml) elif update_policy == "add": n_poolxml.add_source("device", {"path": new_path}) else: test.error("Unsupported policy type") logging.debug("After change_source_path:\n%s" % open(n_poolxml.xml).read()) return n_poolxml # Run Testcase pvt = utlv.PoolVolumeTest(test, params) kwargs = { 'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal, 'emulated_image': "emulated-image", 'pool_target': pool_target } params.update(kwargs) pool = pool_name clean_mount = False new_device = None try: if pre_def_pool: # Step(1) # Pool define pvt.pre_pool(**params) # Remove the partition for disk pool # For sometimes the partition will cause pool start failed if pool_type == "disk": virsh.pool_build(pool_name, "--overwrite", debug=True) # Get pool uuid: if pool_ref == "uuid" and not pool_uuid: pool = pool_ins.get_pool_uuid(pool_name) # Setup logical block device # Change pool source path # Undefine pool # Define pool with new xml # Start pool if update_policy: new_device = utlv.setup_or_cleanup_iscsi(True) lv_utils.vg_create(vg_name, new_device) new_device = utlv.create_local_disk(disk_type, size="0.5", vgname=vg_name, lvname=lv_name) new_path = new_device if invalid_source_path: new_path = invalid_source_path if pool_type == "fs": utlv.mkfs(new_device, source_format) n_poolxml = change_source_path(new_path, update_policy) p_xml = n_poolxml.xml if not virsh.pool_undefine(pool_name): test.fail("Undefine pool %s failed" % pool_name) if not virsh.pool_define(p_xml): test.fail("Define pool %s from %s failed" % (pool_name, p_xml)) logging.debug("Start pool %s" % pool_name) result = virsh.pool_start(pool_name, ignore_status=True, debug=True) utlv.check_exit_status(result, status_error) # Mount a valid fs to pool target if pool_type == "fs": source_list = [] mnt_cmd = "" pool_target = n_poolxml.target_path if invalid_source_path: source_list.append(new_device) else: s_devices = n_poolxml.xmltreefile.findall( "//source/device") for dev in s_devices: source_list.append(dev.get('path')) try: for src in source_list: mnt_cmd = "mount %s %s" % (src, pool_target) if not process.system(mnt_cmd, shell=True): clean_mount = True except process.CmdError: test.error("Failed to run %s" % mnt_cmd) # Step(2) # Pool autostart logging.debug("Try to mark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, readonly=ro_flag, ignore_status=True, debug=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="yes", expect_error=status_error) # Step(3) # Restart libvirtd and check pool status logging.info("Try to restart libvirtd") # Remove the autostart management file cmd = ("rm -rf /var/run/libvirt/storage/autostarted") process.run(cmd, ignore_status=True, shell=True) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint="State", 
expect_value="active", expect_error=status_error) # Step(4) # Pool destroy if pool_ins.is_pool_active(pool_name): virsh.pool_destroy(pool_name) logging.debug("Pool %s destroyed" % pool_name) # Step(5) # Pool autostart disable logging.debug("Try to unmark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, extra="--disable", debug=True, ignore_status=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="no", expect_error=status_error) # Repeat step (3) logging.debug("Try to restart libvirtd") libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint='State', expect_value="inactive", expect_error=status_error) finally: # Clean up logging.debug("Try to clean up env") try: if clean_mount is True: for src in source_list: process.system("umount %s" % pool_target) if pre_def_pool: pvt.cleanup_pool(**params) if new_device: utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name) lv_utils.vg_remove(vg_name) utlv.setup_or_cleanup_iscsi(False) if os.path.exists(p_xml): os.remove(p_xml) except exceptions.TestFail as details: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() logging.error(str(details))
def setUp(self): """ Use distro provided bonnie++ bin if not available Build bonnie++ from below Source: http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz """ self.fstype = self.params.get('fs', default='') self.fs_create = False lv_needed = self.params.get('lv', default=False) self.lv_create = False raid_needed = self.params.get('raid', default=False) self.raid_create = False self.disk = self.params.get('disk', default=None) self.dir = self.params.get('dir', default='/mnt') self.uid_to_use = self.params.get('uid-to-use', default=getpass.getuser()) self.number_to_stat = self.params.get('number-to-stat', default=2048) self.data_size = self.params.get('data_size_to_pass', default=0) smm = SoftwareManager() # Install the package from web deps = ['gcc', 'make'] if distro.detect().name == 'Ubuntu': deps.extend(['g++']) else: deps.extend(['gcc-c++']) if self.fstype == 'btrfs': ver = int(distro.detect().version) rel = int(distro.detect().release) if distro.detect().name == 'rhel': if (ver == 7 and rel >= 4) or ver > 7: self.cancel("btrfs not supported with RHEL 7.4 onwards") elif distro.detect().name == 'Ubuntu': deps.extend(['btrfs-tools']) if raid_needed: deps.append('mdadm') for package in deps: if not smm.check_installed(package) and not smm.install(package): self.cancel("%s package required for this test" % package) if process.system("which bonnie++", ignore_status=True): tarball = self.fetch_asset('http://www.coker.com.au/bonnie++/' 'bonnie++-1.03e.tgz', expire='7d') archive.extract(tarball, self.teststmpdir) self.source = os.path.join(self.teststmpdir, os.path.basename( tarball.split('.tgz')[0])) os.chdir(self.source) process.run('./configure') build.make(self.source) build.make(self.source, extra_args='install') if not os.path.exists(self.dir): os.mkdir(self.dir) self.raid_name = '/dev/md/sraid' self.vgname = 'avocado_vg' self.lvname = 'avocado_lv' self.err_mesg = [] self.target = self.disk self.lv_disk = self.disk self.part_obj = Partition(self.disk, mountpoint=self.dir) self.sw_raid = softwareraid.SoftwareRaid(self.raid_name, '0', self.disk.split(), '1.2') dmesg.clear_dmesg() if self.disk is not None: self.pre_cleanup() if self.disk in disk.get_disks(): if raid_needed: self.create_raid(self.disk, self.raid_name) self.raid_create = True self.target = self.raid_name if lv_needed: self.lv_disk = self.target self.target = self.create_lv(self.target) self.lv_create = True if self.fstype: self.create_fs(self.target, self.dir, self.fstype) self.fs_create = True else: self.cancel("Missing disk %s in OS" % self.disk) else: self.cancel("please provide valid disk")
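# Illustrative aside (not a change to the test above): the setUp shells out to
# `which bonnie++` to decide whether to build from source; the standard
# library offers the same check without spawning a process.
import shutil


def have_binary(name):
    """Return True when `name` resolves on PATH (e.g. a distro-provided bonnie++)."""
    return shutil.which(name) is not None


# Example: if not have_binary("bonnie++"): build it from the fetched tarball.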
def test(self):
    args = self.params.get('arg', default='')
    args += ' c'
    process.system("%s ' run ' %s" % (os.path.join(
        self.srcdir, 'interbench'), args), sudo=True)
def is_fs_deleted():
    cmd = "wipefs -af %s" % l_disk
    process.system(cmd, shell=True, ignore_status=True)
    if disk.fs_exists(l_disk):
        return False
    return True
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) def prepare_pxe_boot(): """ Prepare tftp server and pxe boot files """ pkg_list = ["syslinux", "tftp-server", "tftp", "ipxe-roms-qemu", "wget"] # Try to install required packages if not utils_package.package_install(pkg_list): test.error("Failed ot install required packages") boot_initrd = params.get("boot_initrd", "EXAMPLE_INITRD") boot_vmlinuz = params.get("boot_vmlinuz", "EXAMPLE_VMLINUZ") if boot_initrd.count("EXAMPLE") or boot_vmlinuz.count("EXAMPLE"): test.cancel("Please provide initrd/vmlinuz URL") # Download pxe boot images process.system("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root)) process.system("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root)) process.system("cp -f /usr/share/syslinux/pxelinux.0 {0};" " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root), shell=True) pxe_file = "%s/pxelinux.cfg/default" % tftp_root boot_txt = """ DISPLAY boot.txt DEFAULT rhel LABEL rhel kernel vmlinuz append initrd=initrd.img PROMPT 1 TIMEOUT 3""" with open(pxe_file, 'w') as p_file: p_file.write(boot_txt) def modify_iface_xml(sync=True): """ Modify interface xml options :param sync: whether or not sync vmxml after the iface xml modified, default to be True """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) if pxe_boot: # Config boot console for pxe boot osxml = vm_xml.VMOSXML() osxml.type = vmxml.os.type osxml.arch = vmxml.os.arch osxml.machine = vmxml.os.machine osxml.loader = "/usr/share/seabios/bios.bin" osxml.bios_useserial = "yes" osxml.bios_reboot_timeout = "-1" osxml.boots = ['network'] del vmxml.os vmxml.os = osxml xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] if not sync: params.setdefault('original_iface', vmxml.devices[iface_index]) iface_bandwidth = {} iface_inbound = ast.literal_eval(iface_bandwidth_inbound) iface_outbound = ast.literal_eval(iface_bandwidth_outbound) if iface_inbound: iface_bandwidth["inbound"] = iface_inbound if iface_outbound: iface_bandwidth["outbound"] = iface_outbound if iface_bandwidth: bandwidth = iface.new_bandwidth(**iface_bandwidth) iface.bandwidth = bandwidth iface_type = params.get("iface_type", "network") iface.type_name = iface_type source = ast.literal_eval(iface_source) if not source: source = {"network": "default"} net_ifs = utils_net.get_net_if(state="UP") # Check source device is valid or not, # if it's not in host interface list, try to set # source device to first active interface of host if (iface.type_name == "direct" and 'dev' in source and source['dev'] not in net_ifs): logging.warn("Source device %s is not a interface" " of host, reset to %s", source['dev'], net_ifs[0]) source['dev'] = net_ifs[0] del iface.source iface.source = source if iface_model: iface.model = iface_model if iface_rom: iface.rom = eval(iface_rom) if iface_boot: vmxml.remove_all_boots() iface.boot = iface_boot logging.debug("New interface xml file: %s", iface) if sync: vmxml.devices = xml_devices vmxml.xmltreefile.write() vmxml.sync() else: return iface def run_dnsmasq_default_test(key, value=None, exists=True, name="default"): """ Test dnsmasq configuration. 
:param key: key in conf file to check :param value: value in conf file to check :param exists: check the key:value exist or not :param name: The name of conf file """ conf_file = "/var/lib/libvirt/dnsmasq/%s.conf" % name if not os.path.exists(conf_file): test.cancel("Can't find %s.conf file" % name) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if value: config = "%s=%s" % (key, value) else: config = key if not configs.count(config): if exists: test.fail("Can't find %s=%s in configuration file" % (key, value)) else: if not exists: test.fail("Found %s=%s in configuration file" % (key, value)) def run_dnsmasq_addnhosts_test(hostip, hostnames): """ Test host ip and names configuration """ conf_file = "/var/lib/libvirt/dnsmasq/default.addnhosts" hosts_re = ".*".join(hostnames) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not re.search(r"%s.*%s" % (hostip, hosts_re), configs, re.M): test.fail("Can't find '%s' in configuration file" % hostip) def run_dnsmasq_host_test(iface_mac, guest_ip, guest_name): """ Test host name and ip configuration for dnsmasq """ conf_file = "/var/lib/libvirt/dnsmasq/default.hostsfile" config = "%s,%s,%s" % (iface_mac, guest_ip, guest_name) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not configs.count(config): test.fail("Can't find host configuration in file %s" % conf_file) def check_class_rules(ifname, rule_id, bandwidth): """ Check bandwidth settings via 'tc class' output """ cmd = "tc class show dev %s" % ifname class_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth class output: %s", class_output) class_pattern = (r"class htb %s.*rate (\d+)(K?M?)bit ceil" " (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" % rule_id) se = re.search(class_pattern, class_output, re.M) if not se: test.fail("Can't find outbound setting for htb %s" % rule_id) logging.debug("bandwidth from tc output:%s" % str(se.groups())) rate = None if "floor" in bandwidth: rate = int(bandwidth["floor"]) * 8 elif "average" in bandwidth: rate = int(bandwidth["average"]) * 8 if rate: if se.group(2) == 'M': rate_check = int(se.group(1)) * 1000 else: rate_check = int(se.group(1)) assert rate_check == rate if "peak" in bandwidth: if se.group(4) == 'M': ceil_check = int(se.group(3)) * 1000 else: ceil_check = int(se.group(3)) assert ceil_check == int(bandwidth["peak"]) * 8 if "burst" in bandwidth: if se.group(6) == 'M': tc_burst = int(se.group(5)) * 1024 else: tc_burst = int(se.group(5)) assert tc_burst == int(bandwidth["burst"]) def check_filter_rules(ifname, bandwidth, expect_none=False): """ Check bandwidth settings via 'tc filter' output :param ifname: name of iface to be checked :param bandwidth: bandwidth to be match with :param expect_none: whether or not expect nothing in output, default to be False :return: if expect nothing from the output, return True if the output is empty, else return False """ cmd = "tc -d filter show dev %s parent ffff:" % ifname filter_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth filter output: %s", filter_output) if expect_none: return not filter_output.strip() if not filter_output.count("filter protocol all pref"): test.fail("Can't find 'protocol all' settings in filter rules") filter_pattern = ".*police.*rate (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" se = re.search(r"%s" % filter_pattern, filter_output, 
re.M) if not se: test.fail("Can't find any filter policy") logging.debug("bandwidth from tc output:%s" % str(se.groups())) logging.debug("bandwidth from setting:%s" % str(bandwidth)) if "average" in bandwidth: if se.group(2) == 'M': tc_average = int(se.group(1)) * 1000 else: tc_average = int(se.group(1)) assert tc_average == int(bandwidth["average"]) * 8 if "burst" in bandwidth: if se.group(4) == 'M': tc_burst = int(se.group(3)) * 1024 else: tc_burst = int(se.group(3)) assert tc_burst == int(bandwidth["burst"]) def check_host_routes(): """ Check network routes on host """ for rt in routes: try: route = ast.literal_eval(rt) addr = "%s/%s" % (route["address"], route["prefix"]) cmd = "ip route list %s" % addr if "family" in route and route["family"] == "ipv6": cmd = "ip -6 route list %s" % addr output = to_text(process.system_output(cmd)) match_obj = re.search(r"via (\S+).*metric (\d+)", output) if match_obj: via_addr = match_obj.group(1) metric = match_obj.group(2) logging.debug("via address %s for %s, matric is %s" % (via_addr, addr, metric)) assert via_addr == route["gateway"] if "metric" in route: assert metric == route["metric"] except KeyError: pass def run_bandwidth_test(check_net=False, check_iface=False): """ Test bandwidth option for network or interface by tc command. """ iface_inbound = ast.literal_eval(iface_bandwidth_inbound) iface_outbound = ast.literal_eval(iface_bandwidth_outbound) net_inbound = ast.literal_eval(net_bandwidth_inbound) net_outbound = ast.literal_eval(net_bandwidth_outbound) net_bridge_name = ast.literal_eval(net_bridge)["name"] iface_name = libvirt.get_ifname_host(vm_name, iface_mac) try: if check_net and net_inbound: # Check qdisc rules cmd = "tc -d qdisc show dev %s" % net_bridge_name qdisc_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth qdisc output: %s", qdisc_output) if not qdisc_output.count("qdisc ingress ffff:"): test.fail("Can't find ingress setting") check_class_rules(net_bridge_name, "1:1", {"average": net_inbound["average"], "peak": net_inbound["peak"]}) check_class_rules(net_bridge_name, "1:2", net_inbound) # Check filter rules on bridge interface if check_net and net_outbound: check_filter_rules(net_bridge_name, net_outbound) # Check class rules on interface inbound settings if check_iface and iface_inbound: check_class_rules(iface_name, "1:1", {'average': iface_inbound['average'], 'peak': iface_inbound['peak'], 'burst': iface_inbound['burst']}) if "floor" in iface_inbound: if not libvirt_version.version_compare(1, 0, 1): test.cancel("Not supported Qos options 'floor'") check_class_rules(net_bridge_name, "1:3", {'floor': iface_inbound["floor"]}) # Check filter rules on interface outbound settings if check_iface and iface_outbound: check_filter_rules(iface_name, iface_outbound) except AssertionError: stacktrace.log_exc_info(sys.exc_info()) test.fail("Failed to check network bandwidth") def check_name_ip(session): """ Check dns resolving on guest """ # Check if bind-utils is installed if "ubuntu" in vm.get_distro().lower(): pkg = "bind9" else: pkg = "bind-utils" if not utils_package.package_install(pkg, session): test.error("Failed to install bind-utils on guest") # Run host command to check if hostname can be resolved if not guest_ipv4 and not guest_ipv6: test.fail("No ip address found from parameters") guest_ip = guest_ipv4 if guest_ipv4 else guest_ipv6 cmd = "host %s | grep %s" % (guest_name, guest_ip) if session.cmd_status(cmd): test.fail("Can't resolve name %s on guest" % guest_name) def check_ipt_rules(check_ipv4=True, 
check_ipv6=False): """ Check iptables for network/interface """ br_name = ast.literal_eval(net_bridge)["name"] net_forward = ast.literal_eval(params.get("net_forward", "{}")) net_ipv4 = params.get("net_ipv4") net_ipv6 = params.get("net_ipv6") net_dev_in = "" net_dev_out = "" if "dev" in net_forward: net_dev_in = " -i %s" % net_forward["dev"] net_dev_out = " -o %s" % net_forward["dev"] if libvirt_version.version_compare(5, 1, 0): input_chain = "LIBVIRT_INP" output_chain = "LIBVIRT_OUT" postrouting_chain = "LIBVIRT_PRT" forward_filter = "LIBVIRT_FWX" forward_in = "LIBVIRT_FWI" forward_out = "LIBVIRT_FWO" else: input_chain = "INPUT" output_chain = "OUTPUT" postrouting_chain = "POSTROUTING" forward_filter = "FORWARD" forward_in = "FORWARD" forward_out = "FORWARD" ipt_rules = ( "%s -i %s -p udp -m udp --dport 53 -j ACCEPT" % (input_chain, br_name), "%s -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % (input_chain, br_name), "{0} -i {1} -o {1} -j ACCEPT".format(forward_filter, br_name), "%s -o %s -j REJECT --reject-with icmp" % (forward_in, br_name), "%s -i %s -j REJECT --reject-with icmp" % (forward_out, br_name)) if check_ipv4: ipv4_rules = list(ipt_rules) ipv4_rules.extend( ["%s -i %s -p udp -m udp --dport 67 -j ACCEPT" % (input_chain, br_name), "%s -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % (input_chain, br_name), "%s -o %s -p udp -m udp --dport 68 -j ACCEPT" % (output_chain, br_name), "%s -o %s -p udp -m udp --dport 68 " "-j CHECKSUM --checksum-fill" % (postrouting_chain, br_name)]) ctr_rule = "" nat_rules = [] if "mode" in net_forward and net_forward["mode"] == "nat": nat_port = ast.literal_eval(params.get("nat_port")) p_start = nat_port["start"] p_end = nat_port["end"] ctr_rule = " -m .* RELATED,ESTABLISHED" nat_rules = [("{0} -s {1} ! -d {1} -p tcp -j MASQUERADE" " --to-ports {2}-{3}".format(postrouting_chain, net_ipv4, p_start, p_end)), ("{0} -s {1} ! -d {1} -p udp -j MASQUERADE" " --to-ports {2}-{3}".format(postrouting_chain, net_ipv4, p_start, p_end)), ("{0} -s {1} ! 
-d {1}" " -j MASQUERADE".format(postrouting_chain, net_ipv4))] if nat_rules: ipv4_rules.extend(nat_rules) if (net_ipv4 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("%s -d %s%s -o %s%s -j ACCEPT" % (forward_in, net_ipv4, net_dev_in, br_name, ctr_rule)), ("%s -s %s -i %s%s -j ACCEPT" % (forward_out, net_ipv4, br_name, net_dev_out))] ipv4_rules.extend(rules) output = to_text(process.system_output('iptables-save')) logging.debug("iptables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M): test.fail("Find iptable rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('iptables-save')) if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M): test.fail("Find iptable rule for open mode after restart " "libvirtd") else: logging.info("Can't find iptable rule for open mode as expected") else: for ipt in ipv4_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find iptable rule:\n%s" % ipt) return ipv4_rules if check_ipv6: ipv6_rules = list(ipt_rules) ipt6_rules.extend([ ("INPUT -i %s -p udp -m udp --dport 547 -j ACCEPT" % br_name)]) if (net_ipv6 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("%s -d %s%s -o %s -j ACCEPT" % (forward_in, net_ipv6, net_dev_in, br_name)), ("%s -s %s -i %s%s -j ACCEPT" % (forward_out, net_ipv6, br_name, net_dev_out))] ipv6_rules.extend(rules) output = to_text(process.system_output("ip6tables-save")) logging.debug("ip6tables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv6, br_name), output, re.M): test.fail("Find ip6table rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('ip6tables-save')) if re.search(r"%s|%s" % (net_ipv6, br_name), output_again, re.M): test.fail("Find ip6table rule for open mode after restart " "libvirtd") else: for ipt in ipv6_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find ip6table rule:\n%s" % ipt) return ipv6_rules def run_ip_test(session, ip_ver): """ Check iptables on host and ipv6 address on guest """ if ip_ver == "ipv6": # Clean up iptables rules for guest to get ipv6 address session.cmd_status("ip6tables -F") # It may take some time to get the ip address def get_ip_func(): return utils_net.get_guest_ip_addr(session, iface_mac, ip_version=ip_ver) utils_misc.wait_for(get_ip_func, 5) if not get_ip_func(): utils_net.restart_guest_network(session, iface_mac, ip_version=ip_ver) utils_misc.wait_for(get_ip_func, 5) vm_ip = get_ip_func() logging.debug("Guest has ip: %s", vm_ip) if not vm_ip: test.fail("Can't find ip address on guest") ip_gateway = net_ip_address if ip_ver == "ipv6": ip_gateway = net_ipv6_address # Cleanup ip6talbes on host for ping6 test process.system("ip6tables -F") if ip_gateway and not routes: ping_s, _ = ping(dest=ip_gateway, count=5, timeout=10, session=session) if ping_s: test.fail("Failed to ping gateway address: %s" % ip_gateway) def run_guest_libvirt(session): """ Check guest libvirt network """ # Try to install required packages if "ubuntu" in vm.get_distro().lower(): pkg = "libvirt-bin" else: pkg = "libvirt" if not utils_package.package_install(pkg, session): test.error("Failed to install libvirt package on guest") # Try to load tun module first session.cmd("lsmod | grep tun || modprobe tun") # Check network state on guest cmd = ("service libvirtd restart; virsh net-info 
default" " | grep 'Active:.*yes'") if session.cmd_status(cmd): test.fail("'default' network isn't in active state") # Try to destroy&start default network on guest for opt in ['net-destroy', 'net-start']: cmd = "virsh %s default" % opt status, output = session.cmd_status_output(cmd) logging.debug("Run %s on guest exit %s, output %s" % (cmd, status, output)) if status: test.fail(output) if not utils_package.package_remove("libvirt*", session): test.error("Failed to remove libvirt packages on guest") start_error = "yes" == params.get("start_error", "no") define_error = "yes" == params.get("define_error", "no") restart_error = "yes" == params.get("restart_error", "no") # network specific attributes. net_name = params.get("net_name", "default") net_bridge = params.get("net_bridge", "{'name':'virbr0'}") net_domain = params.get("net_domain") net_ip_address = params.get("net_ip_address") net_ipv6_address = params.get("net_ipv6_address") net_dns_forward = params.get("net_dns_forward") net_dns_txt = params.get("net_dns_txt") net_dns_srv = params.get("net_dns_srv") net_dns_hostip = params.get("net_dns_hostip") net_dns_hostnames = params.get("net_dns_hostnames", "").split() dhcp_start_ipv4 = params.get("dhcp_start_ipv4") dhcp_end_ipv4 = params.get("dhcp_end_ipv4") dhcp_start_ipv6 = params.get("dhcp_start_ipv6") dhcp_end_ipv6 = params.get("dhcp_end_ipv6") guest_name = params.get("guest_name") guest_ipv4 = params.get("guest_ipv4") guest_ipv6 = params.get("guest_ipv6") tftp_root = params.get("tftp_root") pxe_boot = "yes" == params.get("pxe_boot", "no") routes = params.get("routes", "").split() net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}") net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}") iface_bandwidth_inbound = params.get("iface_bandwidth_inbound", "{}") iface_bandwidth_outbound = params.get("iface_bandwidth_outbound", "{}") iface_num = params.get("iface_num", "1") iface_source = params.get("iface_source", "{}") iface_rom = params.get("iface_rom") iface_boot = params.get("iface_boot") iface_model = params.get("iface_model") multiple_guests = params.get("multiple_guests") create_network = "yes" == params.get("create_network", "no") attach_iface = "yes" == params.get("attach_iface", "no") serial_login = "******" == params.get("serial_login", "no") change_iface_option = "yes" == params.get("change_iface_option", "no") test_bridge = "yes" == params.get("test_bridge", "no") test_dnsmasq = "yes" == params.get("test_dnsmasq", "no") test_dhcp_range = "yes" == params.get("test_dhcp_range", "no") test_dns_host = "yes" == params.get("test_dns_host", "no") test_qos_bandwidth = "yes" == params.get("test_qos_bandwidth", "no") test_pg_bandwidth = "yes" == params.get("test_portgroup_bandwidth", "no") test_qos_remove = "yes" == params.get("test_qos_remove", "no") test_ipv4_address = "yes" == params.get("test_ipv4_address", "no") test_ipv6_address = "yes" == params.get("test_ipv6_address", "no") test_guest_libvirt = "yes" == params.get("test_guest_libvirt", "no") net_no_bridge = "yes" == params.get("no_bridge", "no") net_no_mac = "yes" == params.get("no_mac", "no") net_no_ip = "yes" == params.get("no_ip", "no") net_with_dev = "yes" == params.get("with_dev", "no") update_device = 'yes' == params.get('update_device', 'no') remove_bandwidth = 'yes' == params.get('remove_bandwidth', 'no') loop = int(params.get('loop', 0)) username = params.get("username") password = params.get("password") forward = ast.literal_eval(params.get("net_forward", "{}")) boot_failure = "yes" == 
params.get("boot_failure", "no") ipt_rules = [] ipt6_rules = [] # Destroy VM first if vm.is_alive() and not update_device: vm.destroy(gracefully=False) # Back up xml file. netxml_backup = NetworkXML.new_from_net_dumpxml("default") iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name) params["guest_mac"] = iface_mac vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vms_list = [] if "floor" in ast.literal_eval(iface_bandwidth_inbound): if not libvirt_version.version_compare(1, 0, 1): test.cancel("Not supported Qos options 'floor'") # Enabling IPv6 forwarding with RA routes without accept_ra set to 2 # is likely to cause routes loss sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra' original_accept_ra = to_text(process.system_output(sysctl_cmd + ' -n')) if test_ipv6_address and original_accept_ra != '2': process.system(sysctl_cmd + '=2') # Build the xml and run test. try: if test_dnsmasq: # Check the settings before modifying network xml if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed", exists=False) run_dnsmasq_default_test("local", "//", exists=False) if net_domain: run_dnsmasq_default_test("domain", net_domain, exists=False) run_dnsmasq_default_test("expand-hosts", exists=False) # Prepare pxe boot directory if pxe_boot: prepare_pxe_boot() # Edit the network xml or create a new one. if create_network: net_ifs = utils_net.get_net_if(state="UP") # Check forward device is valid or not, # if it's not in host interface list, try to set # forward device to first active interface of host if ('mode' in forward and forward['mode'] in ['passthrough', 'private', 'bridge', 'macvtap'] and 'dev' in forward and forward['dev'] not in net_ifs): logging.warn("Forward device %s is not a interface" " of host, reset to %s", forward['dev'], net_ifs[0]) forward['dev'] = net_ifs[0] params["net_forward"] = str(forward) forward_iface = params.get("forward_iface") if forward_iface: interface = [x for x in forward_iface.split()] # The guest will use first interface of the list, # check if it's valid or not, if it's not in host # interface list, try to set forward interface to # first active interface of host. if interface[0] not in net_ifs: logging.warn("Forward interface %s is not a " " interface of host, reset to %s", interface[0], net_ifs[0]) interface[0] = net_ifs[0] params["forward_iface"] = " ".join(interface) netxml = libvirt.create_net_xml(net_name, params) if "mode" in forward and forward["mode"] == "open": netxml.mac = utils_net.generate_mac_address_simple() try: if net_no_bridge: netxml.del_bridge() if net_no_ip: netxml.del_ip() netxml.del_ip() if net_no_mac: netxml.del_mac() except xcepts.LibvirtXMLNotFoundError: pass if net_with_dev: net_forward = netxml.forward net_forward.update({"dev": net_ifs[0]}) netxml.forward = net_forward logging.info("netxml before define is %s", netxml) try: netxml.sync() except xcepts.LibvirtXMLError as details: logging.info(str(details)) if define_error: return else: test.fail("Failed to define network") # Check open mode network xml if "mode" in forward and forward["mode"] == "open": netxml_new = NetworkXML.new_from_net_dumpxml(net_name) logging.info("netxml after define is %s", netxml_new) try: if net_no_bridge: net_bridge = str(netxml_new.bridge) if net_no_mac: netxml_new.mac except xcepts.LibvirtXMLNotFoundError as details: test.fail("Failed to check %s xml: %s" % (net_name, details)) logging.info("mac/bridge still exist even if removed before define") # Edit the interface xml. 
if change_iface_option: try: if update_device: updated_iface = modify_iface_xml(sync=False) virsh.update_device(vm_name, updated_iface.xml, ignore_status=False, debug=True) else: modify_iface_xml() except xcepts.LibvirtXMLError as details: logging.info(str(details)) if define_error: if not str(details).count("Failed to define"): test.fail("VM sync failed msg not expected") return else: test.fail("Failed to sync VM") # Attach interface if needed if attach_iface: iface_type = params.get("iface_type", "network") for i in range(int(iface_num)): logging.info("Try to attach interface loop %s" % i) options = ("%s %s --model %s --config" % (iface_type, net_name, iface_model)) ret = virsh.attach_interface(vm_name, options, ignore_status=True) if ret.exit_status: logging.error("Command output %s" % ret.stdout.strip()) test.fail("Failed to attach-interface") if multiple_guests: # Clone more vms for testing for i in range(int(multiple_guests)): guest_name = "%s_%s" % (vm_name, i) timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout) vms_list.append(vm.clone(guest_name)) if test_bridge: bridge = ast.literal_eval(net_bridge) br_if = utils_net.Interface(bridge['name']) if not br_if.is_up(): test.fail("Bridge interface isn't up") if test_dnsmasq: # Check dnsmasq process dnsmasq_cmd = to_text(process.system_output("ps -aux|grep dnsmasq", shell=True)) logging.debug(dnsmasq_cmd) if not re.search("dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/%s.conf" % net_name, dnsmasq_cmd): test.fail("Can not find dnsmasq process or the process is not correct") # Check the settings in dnsmasq config file if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed") run_dnsmasq_default_test("local", "//") if net_domain: run_dnsmasq_default_test("domain", net_domain) run_dnsmasq_default_test("expand-hosts") if net_bridge: bridge = ast.literal_eval(net_bridge) run_dnsmasq_default_test("interface", bridge['name'], name=net_name) if 'stp' in bridge and bridge['stp'] == 'on': if 'delay' in bridge and bridge['delay'] != '0': # network xml forward delay value in seconds, while on # host, check by ip command, the value is in second*100 br_delay = int(bridge['delay'])*100 logging.debug("Expect forward_delay is %s ms" % br_delay) cmd = ("ip -d link sh %s | grep 'bridge forward_delay'" % bridge['name']) out = to_text(process.system_output( cmd, shell=True, ignore_status=False)) logging.debug("bridge statistics output: %s", out) pattern = (r"\s*bridge forward_delay\s+(\d+)") match_obj = re.search(pattern, out, re.M) if not match_obj: test.fail("Can't see forward delay messages from command") elif int(match_obj.group(1)) != br_delay: test.fail("Foward delay setting can't take effect") else: logging.debug("Foward delay set successfully!") if dhcp_start_ipv4 and dhcp_end_ipv4: run_dnsmasq_default_test("dhcp-range", "%s,%s" % (dhcp_start_ipv4, dhcp_end_ipv4), name=net_name) if dhcp_start_ipv6 and dhcp_end_ipv6: run_dnsmasq_default_test("dhcp-range", "%s,%s,64" % (dhcp_start_ipv6, dhcp_end_ipv6), name=net_name) if guest_name and guest_ipv4: run_dnsmasq_host_test(iface_mac, guest_ipv4, guest_name) # check the left part in dnsmasq conf run_dnsmasq_default_test("strict-order", name=net_name) run_dnsmasq_default_test("pid-file", "/var/run/libvirt/network/%s.pid" % net_name, name=net_name) run_dnsmasq_default_test("except-interface", "lo", name=net_name) run_dnsmasq_default_test("bind-dynamic", name=net_name) run_dnsmasq_default_test("dhcp-no-override", name=net_name) if 
dhcp_start_ipv6 and dhcp_start_ipv4: run_dnsmasq_default_test("dhcp-lease-max", "493", name=net_name) else: range_num = int(params.get("dhcp_range", "252")) run_dnsmasq_default_test("dhcp-lease-max", str(range_num+1), name=net_name) run_dnsmasq_default_test("dhcp-hostsfile", "/var/lib/libvirt/dnsmasq/%s.hostsfile" % net_name, name=net_name) run_dnsmasq_default_test("addn-hosts", "/var/lib/libvirt/dnsmasq/%s.addnhosts" % net_name, name=net_name) if dhcp_start_ipv6: run_dnsmasq_default_test("enable-ra", name=net_name) if test_dns_host: if net_dns_txt: dns_txt = ast.literal_eval(net_dns_txt) run_dnsmasq_default_test("txt-record", "%s,%s" % (dns_txt["name"], dns_txt["value"])) if net_dns_srv: dns_srv = ast.literal_eval(net_dns_srv) run_dnsmasq_default_test("srv-host", "_%s._%s.%s,%s,%s,%s,%s" % (dns_srv["service"], dns_srv["protocol"], dns_srv["domain"], dns_srv["target"], dns_srv["port"], dns_srv["priority"], dns_srv["weight"])) if net_dns_hostip and net_dns_hostnames: run_dnsmasq_addnhosts_test(net_dns_hostip, net_dns_hostnames) # Run bandwidth test for network if test_qos_bandwidth and not update_device: run_bandwidth_test(check_net=True) # If to remove bandwidth from iface, # update iface xml to the original one if remove_bandwidth: ori_iface = params['original_iface'] logging.debug(ori_iface) virsh.update_device(vm_name, ori_iface.xml, ignore_status=False, debug=True) # Check routes if needed if routes: check_host_routes() try: # Start the VM. if not update_device: vm.start() if start_error: test.fail("VM started unexpectedly") if pxe_boot: # Just check network boot messages here try: vm.serial_console.read_until_output_matches( ["Loading vmlinuz", "Loading initrd.img"], utils_misc.strip_console_codes) output = vm.serial_console.get_stripped_output() logging.debug("Boot messages: %s", output) except ExpectTimeoutError as details: if boot_failure: logging.info("Fail to boot from pxe as expected") else: test.fail("Fail to boot from pxe") else: if serial_login: session = vm.wait_for_serial_login(username=username, password=password) else: session = vm.wait_for_login() if test_dhcp_range: dhcp_range = int(params.get("dhcp_range", "252")) utils_net.restart_guest_network(session, iface_mac) vm_ip = utils_net.get_guest_ip_addr(session, iface_mac) logging.debug("Guest has ip: %s", vm_ip) if not vm_ip and dhcp_range: test.fail("Guest has invalid ip address") elif vm_ip and not dhcp_range: test.fail("Guest has ip address: %s" % vm_ip) dhcp_range = dhcp_range - 1 for vms in vms_list: # Start other VMs. 
vms.start() sess = vms.wait_for_serial_login() vms_mac = vms.get_virsh_mac_address() # restart guest network to get ip addr utils_net.restart_guest_network(sess, vms_mac) vms_ip = utils_net.get_guest_ip_addr(sess, vms_mac) if not vms_ip and dhcp_range: test.fail("Guest has invalid ip address") elif vms_ip and not dhcp_range: # Get IP address on guest should return Null # if it exceeds the dhcp range test.fail("Guest has ip address: %s" % vms_ip) dhcp_range = dhcp_range - 1 if vms_ip: ping_s, _ = ping(dest=vm_ip, count=5, timeout=10, session=sess) if ping_s: test.fail("Failed to ping, src: %s, " "dst: %s" % (vms_ip, vm_ip)) sess.close() # Check dnsmasq settings if take affect in guest if guest_ipv4: check_name_ip(session) # Run bandwidth test for interface if test_qos_bandwidth: run_bandwidth_test(check_iface=True) # Run bandwidth test for portgroup if test_pg_bandwidth: pg_bandwidth_inbound = params.get( "portgroup_bandwidth_inbound", "").split() pg_bandwidth_outbound = params.get( "portgroup_bandwidth_outbound", "").split() pg_name = params.get("portgroup_name", "").split() pg_default = params.get("portgroup_default", "").split() iface_inbound = ast.literal_eval(iface_bandwidth_inbound) iface_outbound = ast.literal_eval(iface_bandwidth_outbound) iface_name = libvirt.get_ifname_host(vm_name, iface_mac) if_source = ast.literal_eval(iface_source) if "portgroup" in if_source: pg = if_source["portgroup"] else: pg = "default" for (name, df, bw_ib, bw_ob) in zip(pg_name, pg_default, pg_bandwidth_inbound, pg_bandwidth_outbound): if pg == name: inbound = ast.literal_eval(bw_ib) outbound = ast.literal_eval(bw_ob) elif pg == "default" and df == "yes": inbound = ast.literal_eval(bw_ib) outbound = ast.literal_eval(bw_ob) else: continue # Interface bandwidth settings will # overwriting portgroup settings if iface_inbound: inbound = iface_inbound if iface_outbound: outbound = iface_outbound check_class_rules(iface_name, "1:1", inbound) check_filter_rules(iface_name, outbound) if test_qos_remove: # Remove the bandwidth settings in network xml logging.debug("Removing network bandwidth settings...") netxml_backup.sync() vm.destroy(gracefully=False) # Should fail to start vm vm.start() if restart_error: test.fail("VM started unexpectedly") if test_ipv6_address: ipt6_rules = check_ipt_rules(check_ipv4=False, check_ipv6=True) if not ("mode" in forward and forward["mode"] == "open"): run_ip_test(session, "ipv6") if test_ipv4_address: ipt_rules = check_ipt_rules(check_ipv4=True) if not ("mode" in forward and forward["mode"] == "open"): run_ip_test(session, "ipv4") if test_guest_libvirt: run_guest_libvirt(session) session.close() except virt_vm.VMStartError as details: logging.info(str(details)) if not (start_error or restart_error): test.fail('VM failed to start:\n%s' % details) # Destroy created network and check iptable rules if net_name != "default": virsh.net_destroy(net_name) if ipt_rules: output_des = to_text(process.system_output('iptables-save')) for ipt in ipt_rules: if re.search(r"%s" % ipt, output_des, re.M): test.fail("Find iptable rule %s after net destroyed" % ipt) if ipt6_rules: output_des = to_text(process.system_output('ip6tables-save')) for ipt in ipt6_rules: if re.search(r"%s" % ipt, output_des, re.M): test.fail("Find ip6table rule %s after net destroyed" % ipt) if remove_bandwidth: iface_name = libvirt.get_ifname_host(vm_name, iface_mac) cur_xml = virsh.dumpxml(vm_name).stdout_text logging.debug(cur_xml) if 'bandwidth' in cur_xml: test.fail('bandwidth still in xml') if not 
check_filter_rules(iface_name, 0, expect_none=True): test.fail('There should be nothing in output') if update_device and loop: loop -= 1 if loop: # Rerun this procedure again with updated params # Reset params of the corresponding loop loop_prefix = 'loop' + str(loop) + '_' for k in {k: v for k, v in params.items() if k.startswith(loop_prefix)}: params[k[len(loop_prefix):]] = params[k] params['loop'] = str(loop) run(test, params, env) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) for vms in vms_list: virsh.remove_domain(vms.name, "--remove-all-storage") logging.info("Restoring network...") if net_name == "default": netxml_backup.sync() else: # Destroy and undefine newly created network virsh.net_destroy(net_name) virsh.net_undefine(net_name) vmxml_backup.sync() if test_ipv6_address and original_accept_ra != '2': process.system(sysctl_cmd + "=%s" % original_accept_ra)
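# A minimal, self-contained sketch of the per-loop parameter promotion used
# above, assuming params behaves like a plain dict here: keys such as
# 'loop1_iface_model' are copied over their unprefixed counterparts before the
# test re-runs itself with a decremented loop counter.  The key names are
# illustrative only.
def promote_loop_params(params, loop):
    prefix = 'loop%d_' % loop
    for key in [k for k in params if k.startswith(prefix)]:
        # Slice off the literal prefix rather than using lstrip(), which
        # strips a character set and can eat into the remaining key name
        params[key[len(prefix):]] = params[key]
    params['loop'] = str(loop)
    return params

# Example: promote_loop_params({'loop1_iface_model': 'e1000',
#                                'iface_model': 'virtio'}, 1)
# leaves params['iface_model'] == 'e1000' and params['loop'] == '1'.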
def run_cmd(self, cmd):
    """Run a command; record it on non-zero exit instead of failing at once."""
    if process.system(cmd, ignore_status=True, sudo=True, shell=True):
        self.is_fail += 1
        self.fail_cmd.append(cmd)
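# A short usage sketch of the failure-collecting pattern above, assuming an
# avocado Test subclass that initialises is_fail/fail_cmd in setUp(); the
# command list is illustrative.  Collecting failures lets every command run
# before the test is failed once with the full list.
from avocado import Test
from avocado.utils import process


class ExampleCmds(Test):

    def setUp(self):
        self.is_fail = 0
        self.fail_cmd = []

    def run_cmd(self, cmd):
        if process.system(cmd, ignore_status=True, sudo=True, shell=True):
            self.is_fail += 1
            self.fail_cmd.append(cmd)

    def test(self):
        for cmd in ("true", "false"):
            self.run_cmd(cmd)
        if self.is_fail:
            self.fail("%d commands failed: %s" % (self.is_fail, self.fail_cmd))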
def export_target(self): """ Export target in localhost for emulated iscsi """ selinux_mode = None if not os.path.isfile(self.emulated_image): process.system(self.create_cmd) else: emulated_image_size = os.path.getsize(self.emulated_image) / 1024 if emulated_image_size != self.emulated_expect_size: # No need to remvoe, rebuild is fine process.system(self.create_cmd) cmd = "tgtadm --lld iscsi --mode target --op show" try: output = process.system_output(cmd) except process.CmdError: restart_tgtd() output = process.system_output(cmd) if not re.findall("%s$" % self.target, output, re.M): logging.debug("Need to export target in host") # Set selinux to permissive mode to make sure iscsi target # export successfully if utils_selinux.is_enforcing(): selinux_mode = utils_selinux.get_status() utils_selinux.set_status("permissive") output = process.system_output(cmd) used_id = re.findall("Target\s+(\d+)", output) emulated_id = 1 while str(emulated_id) in used_id: emulated_id += 1 self.emulated_id = str(emulated_id) cmd = "tgtadm --mode target --op new --tid %s" % self.emulated_id cmd += " --lld iscsi --targetname %s" % self.target process.system(cmd) cmd = "tgtadm --lld iscsi --op bind --mode target " cmd += "--tid %s -I ALL" % self.emulated_id process.system(cmd) else: target_strs = re.findall("Target\s+(\d+):\s+%s$" % self.target, output, re.M) self.emulated_id = target_strs[0].split(':')[0].split()[-1] cmd = "tgtadm --lld iscsi --mode target --op show" try: output = process.system_output(cmd) except process.CmdError: # In case service stopped restart_tgtd() output = process.system_output(cmd) # Create a LUN with emulated image if re.findall(self.emulated_image, output, re.M): # Exist already logging.debug("Exported image already exists.") self.export_flag = True else: tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target, output, re.DOTALL) if tgt_str: luns = len(re.findall("\s+LUN:\s(\d+)", tgt_str.group(1), re.M)) else: luns = len(re.findall("\s+LUN:\s(\d+)", output, re.M)) cmd = "tgtadm --mode logicalunit --op new " cmd += "--tid %s --lld iscsi " % self.emulated_id cmd += "--lun %s " % luns cmd += "--backing-store %s" % self.emulated_image process.system(cmd) self.export_flag = True self.luns = luns # Restore selinux if selinux_mode is not None: utils_selinux.set_status(selinux_mode) if self.chap_flag: # Set CHAP authentication on the exported target self.set_chap_auth_target() # Set CHAP authentication for initiator to login target if self.portal_visible(): self.set_chap_auth_initiator()
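# A minimal sketch of how the free target id is picked above: parse the
# "Target <n>:" lines from 'tgtadm --lld iscsi --mode target --op show' and
# take the lowest id not yet in use.  The sample output below is illustrative.
import re


def next_free_tid(show_output):
    used = re.findall(r"Target\s+(\d+)", show_output)
    tid = 1
    while str(tid) in used:
        tid += 1
    return tid

sample = ("Target 1: iqn.2019-09.com.example:one\n"
          "Target 2: iqn.2019-09.com.example:two\n")
assert next_free_tid(sample) == 3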
def run(test, params, env): """ Test different hmi injections with guest :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def set_condn(action, recover=False): """ Set/reset guest state/action :param action: Guest state change/action :param recover: whether to recover given state default: False """ if not recover: if action == "pin_vcpu": for i in range(cur_vcpu): virsh.vcpupin(vm_name, i, hmi_cpu, "--live", ignore_status=False, debug=True) virsh.emulatorpin(vm_name, hmi_cpu, "live", ignore_status=False, debug=True) elif action == "filetrans": utils_test.run_file_transfer(test, params, env) elif action == "save": save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") result = virsh.save(vm_name, save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) time.sleep(10) if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) os.remove(save_file) elif action == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) time.sleep(10) result = virsh.resume(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) return host_version = params.get("host_version") guest_version = params.get("guest_version", "") max_vcpu = int(params.get("ppchmi_vcpu_max", '1')) cur_vcpu = int(params.get("ppchmi_vcpu_cur", "1")) cores = int(params.get("ppchmi_cores", '1')) sockets = int(params.get("ppchmi_sockets", '1')) threads = int(params.get("ppchmi_threads", '1')) status_error = "yes" == params.get("status_error", "no") condition = params.get("condn", "") inject_code = params.get("inject_code", "") scom_base = params.get("scom_base", "") hmi_name = params.get("hmi_name", "") hmi_iterations = int(params.get("hmi_iterations", 1)) if host_version not in cpu.get_cpu_arch(): test.cancel("Unsupported Host cpu version") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sm = SoftwareManager() if not sm.check_installed("opal-utils") and not sm.install("opal-utils"): test.cancel("opal-utils package install failed") cpus_list = cpu.cpu_online_list() cpu_idle_state = cpu.get_cpuidle_state() cpu.set_cpuidle_state() # Lets use second available host cpu hmi_cpu = cpus_list[1] pir = int( open('/sys/devices/system/cpu/cpu%s/pir' % hmi_cpu).read().strip(), 16) if host_version == 'power9': coreid = (((pir) >> 2) & 0x3f) nodeid = (((pir) >> 8) & 0x7f) & 0xf hmi_scom_addr = hex(((coreid & 0x1f + 0x20) << 24) | int(scom_base, 16)) if host_version == 'power8': coreid = (((pir) >> 3) & 0xf) nodeid = (((pir) >> 7) & 0x3f) hmi_scom_addr = hex(((coreid & 0xf) << 24) | int(scom_base, 16)) hmi_cmd = "putscom -c %s %s %s" % (nodeid, hmi_scom_addr, inject_code) vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) org_xml = vmxml.copy() # Destroy the vm vm.destroy() try: session = None bgt = None libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, cur_vcpu, sockets=sockets, cores=cores, threads=threads, add_topology=True) if guest_version: libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version) vm.start() # Lets clear host and guest dmesg process.system("dmesg -C", verbose=False) session = vm.wait_for_login() session.cmd("dmesg -C") # Set condn if "vcpupin" in condition: set_condn("pin_vcpu") if "stress" in condition: utils_test.load_stress("stress_in_vms", params=params, vms=[vm]) if "save" in condition: set_condn("save") if 
"suspend" in condition: set_condn("suspend") # hmi inject logging.debug("Injecting %s HMI on cpu %s", hmi_name, hmi_cpu) logging.debug("HMI Command: %s", hmi_cmd) process.run(hmi_cmd) # Check host and guest dmesg host_dmesg = process.system_output("dmesg -c", verbose=False) guest_dmesg = session.cmd_output("dmesg") if "Unrecovered" in host_dmesg: test.fail("Unrecovered host hmi\n%s", host_dmesg) else: logging.debug("Host dmesg: %s", host_dmesg) logging.debug("Guest dmesg: %s", guest_dmesg) if "save" in condition: set_condn("save") if "suspend" in condition: set_condn("suspend") finally: if "stress" in condition: utils_test.unload_stress("stress_in_vms", params=params, vms=[vm]) if session: session.close() org_xml.sync() cpu.set_cpuidle_state(setstate=cpu_idle_state)
def _remove_temp_user(self):
    """Remove the temporary user added for testing."""
    process.system('userdel -r test_pmu', sudo=True, ignore_status=True)
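# The cleanup above assumes a matching setup step created the throwaway
# account earlier in the test; a minimal sketch of that pairing is shown
# below.  The 'test_pmu' name mirrors the cleanup, and -m creates the home
# directory that 'userdel -r' later removes.
from avocado.utils import process


def _add_temp_user():
    process.system('useradd -m test_pmu', sudo=True, ignore_status=True)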
def export_target(self): """ Export target in localhost for emulated iscsi """ selinux_mode = None # create image disk if not os.path.isfile(self.emulated_image): process.system(self.create_cmd) else: emulated_image_size = os.path.getsize(self.emulated_image) / 1024 if emulated_image_size != self.emulated_expect_size: # No need to remvoe, rebuild is fine process.system(self.create_cmd) # confirm if the target exists and create iSCSI target cmd = "targetcli ls /iscsi 1" output = process.system_output(cmd) if not re.findall("%s$" % self.target, output, re.M): logging.debug("Need to export target in host") # Set selinux to permissive mode to make sure # iscsi target export successfully if utils_selinux.is_enforcing(): selinux_mode = utils_selinux.get_status() utils_selinux.set_status("permissive") # In fact, We've got two options here # # 1) Create a block backstore that usually provides the best # performance. We can use a block device like /dev/sdb or # a logical volume previously created, # (lvcreate -name lv_iscsi -size 1G vg) # 2) Create a fileio backstore, # which enables the local file system cache. # # This class Only works for emulated iscsi device, # So fileio backstore is enough and safe. # Create a fileio backstore device_cmd = ("targetcli /backstores/fileio/ create %s %s" % (self.device, self.emulated_image)) output = process.system_output(device_cmd) if "Created fileio" not in output: raise exceptions.TestFail("Failed to create fileio %s. (%s)" % (self.device, output)) # Create an IQN with a target named target_name target_cmd = "targetcli /iscsi/ create %s" % self.target output = process.system_output(target_cmd) if "Created target" not in output: raise exceptions.TestFail("Failed to create target %s. (%s)" % (self.target, output)) check_portal = "targetcli /iscsi/%s/tpg1/portals ls" % self.target portal_info = process.system_output(check_portal) if "0.0.0.0:3260" not in portal_info: # Create portal # 0.0.0.0 means binding to INADDR_ANY # and using default IP port 3260 portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" % (self.target, "0.0.0.0")) output = process.system_output(portal_cmd) if "Created network portal" not in output: raise exceptions.TestFail("Failed to create portal. (%s)" % output) if ("ipv6" == utils_net.IPAddress(self.portal_ip).version and self.portal_ip not in portal_info): # Ipv6 portal address can't be created by default, # create ipv6 portal if needed. portal_cmd = ("targetcli /iscsi/%s/tpg1/portals/ create %s" % (self.target, self.portal_ip)) output = process.system_output(portal_cmd) if "Created network portal" not in output: raise exceptions.TestFail("Failed to create portal. (%s)" % output) # Create lun lun_cmd = "targetcli /iscsi/%s/tpg1/luns/ " % self.target dev_cmd = "create /backstores/fileio/%s" % self.device output = process.system_output(lun_cmd + dev_cmd) luns = re.findall(r"Created LUN (\d+).", output) if not luns: raise exceptions.TestFail("Failed to create lun. (%s)" % output) self.luns = luns[0] # Set firewall if it's enabled output = process.system_output("firewall-cmd --state", ignore_status=True) if re.findall("^running", output, re.M): # firewall is running process.system("firewall-cmd --permanent --add-port=3260/tcp") process.system("firewall-cmd --reload") # Restore selinux if selinux_mode is not None: utils_selinux.set_status(selinux_mode) self.export_flag = True else: logging.info("Target %s has already existed!" 
% self.target) if self.chap_flag: # Set CHAP authentication on the exported target self.set_chap_auth_target() # Set CHAP authentication for initiator to login target if self.portal_visible(): self.set_chap_auth_initiator() else: # To enable that so-called "demo mode" TPG operation, # disable all authentication for the corresponding Endpoint. # which means grant access to all initiators, # so that they can access all LUNs in the TPG # without further authentication. auth_cmd = "targetcli /iscsi/%s/tpg1/ " % self.target attr_cmd = ("set attribute %s %s %s %s" % ("authentication=0", "demo_mode_write_protect=0", "generate_node_acls=1", "cache_dynamic_acls=1")) output = process.system_output(auth_cmd + attr_cmd) logging.info("Define access rights: %s" % output) # Discovery the target self.portal_visible() # Save configuration process.system("targetcli / saveconfig") # Restart iSCSI service restart_iscsid()
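# A minimal verification sketch, assuming open-iscsi's iscsiadm is available
# on the host: after 'targetcli / saveconfig' a sendtargets discovery against
# the portal should list the exported IQN.  The portal and IQN values below
# are illustrative.
from avocado.utils import process


def target_discoverable(portal, iqn):
    cmd = "iscsiadm -m discovery -t sendtargets -p %s" % portal
    output = process.system_output(cmd, ignore_status=True, shell=True)
    if isinstance(output, bytes):
        output = output.decode()
    return iqn in output

# e.g. target_discoverable("127.0.0.1:3260", "iqn.2019-09.com.example:emulated")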