def check_installed(self, name, version=None, arch=None):
    """
    Check if package [name] is installed.

    @param name: Package name.
    @param version: Package version.
    @param arch: Package architecture.
    """
    if arch:
        cmd = (self.lowlevel_base_cmd + " -q --qf %{ARCH} " + name +
               " 2> /dev/null")
        inst_archs = utils.system_output(cmd, ignore_status=True)
        inst_archs = inst_archs.split("\n")

        for inst_arch in inst_archs:
            if inst_arch == arch:
                return self._check_installed_version(name, version)
        return False

    elif version:
        return self._check_installed_version(name, version)
    else:
        cmd = "rpm -q " + name + " 2> /dev/null"
        try:
            utils.system(cmd)
            return True
        except error.CmdError:
            return False
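# Illustrative usage sketch (added for clarity, not part of the original
# module): `pm` is assumed to be an instance of the RPM backend class that
# defines check_installed() above; package names and versions are examples.
def _example_query_package(pm):
    # Constrain the query by version and architecture; with no constraints
    # the method falls back to a plain "rpm -q" installation check.
    if pm.check_installed('kernel', version='3.10.0', arch='x86_64'):
        logging.info('Matching kernel package is installed')
    return pm.check_installed('bash')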
def patch(self):
    '''
    Patches the source dir with all patch files
    '''
    os.chdir(self.source_dir)
    for patch in self.patches:
        utils.system('patch -p1 < %s' % os.path.basename(patch))
def plot_2d_graphs(self):
    """
    For each one of the throughput parameters, generate a set of gnuplot
    commands that will create a 2D graph of file size vs. throughput.
    """
    datasource_2d = os.path.join(self.output_dir, '2d-datasource-file')
    for index, label in zip(range(2, 15), _LABELS[2:]):
        commands_path = os.path.join(self.output_dir, '2d-%s.do' % label)
        commands = ""
        commands += "set title 'Iozone performance: %s'\n" % label
        commands += "set logscale x\n"
        commands += "set xlabel 'File size (KB)'\n"
        commands += "set ylabel 'Throughput (MB/s)'\n"
        commands += "set terminal png small size 450 350\n"
        commands += "set output '%s'\n" % os.path.join(self.output_dir,
                                                       '2d-%s.png' % label)
        commands += ("plot '%s' using 1:%s title '%s' with lines \n" %
                     (datasource_2d, index, label))
        commands_file = open(commands_path, 'w')
        commands_file.write(commands)
        commands_file.close()

        try:
            utils.system("%s %s" % (self.gnuplot, commands_path))
        except error.CmdError:
            logging.error("Problem plotting from commands file %s",
                          commands_path)
def unmount_force(self): """ Kill all other jobs accessing this partition. Use fuser and ps to find all mounts on this mountpoint and unmount them. :return: true for success or false for any errors """ logging.debug("Standard umount failed, will try forcing. Users:") try: cmd = 'fuser ' + self.get_mountpoint() logging.debug(cmd) fuser = utils.system_output(cmd) logging.debug(fuser) users = re.sub('.*:', '', fuser).split() for user in users: m = re.match('(\d+)(.*)', user) (pid, usage) = (m.group(1), m.group(2)) try: ps = utils.system_output('ps -p %s | sed 1d' % pid) logging.debug('%s %s %s' % (usage, pid, ps)) except Exception: pass utils.system('ls -l ' + self.device) umount_cmd = "umount -f " + self.device utils.system(umount_cmd) return True except error.CmdError: logging.debug('Umount_force failed for %s' % self.device) return False
def setup(self, tarball='oprofile-0.9.4.tar.bz2', local=None, *args, **dargs): if local is True: return try: self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir) utils.extract_tarball_to_dir(self.tarball, self.srcdir) os.chdir(self.srcdir) patch = os.path.join(self.bindir, "oprofile-69455.patch") utils.system('patch -p1 < %s' % patch) utils.configure('--with-kernel-support --prefix=' + self.srcdir) utils.make('-j %d' % utils.count_cpus()) utils.make('install') except Exception: # Build from source failed. # But maybe can still use the local copy local_opcontrol = os.path.exists('/usr/bin/opcontrol') local_opreport = os.path.exists('/usr/bin/opreport') if local is False or not local_opcontrol or not local_opreport: raise error.AutotestError('No oprofile available') else: # if we managed to build, try again to pick binaries self._pick_binaries(True)
def _boot_kernel(self, args, ident_check, expected_ident, subdir, notes):
    """
    Boot a kernel, with post-boot kernel id check

    :param args: kernel cmdline arguments
    :param ident_check: check kernel id after boot
    :param expected_ident: kernel identity expected by the post-boot check
    :param subdir: job-step qualifier in status log
    :param notes: additional comment in status log
    """
    # If we can check the kernel identity do so.
    if ident_check:
        when = int(time.time())
        args += " IDENT=%d" % when
        self.job.next_step_prepend(["job.end_reboot_and_verify", when,
                                    expected_ident, subdir, notes])
    else:
        self.job.next_step_prepend(["job.end_reboot", subdir,
                                    expected_ident, notes])

    self.add_to_bootloader(args)

    # defer fsck for next reboot, to avoid reboots back to default kernel
    utils.system('touch /fastboot')  # this file is removed automatically

    # Boot it.
    self.job.start_reboot()
    self.job.reboot(tag=self.installed_as)
def run_once(self, testdir=None, extra_args='', nproc='1000', nops='1000'):
    if not testdir:
        testdir = self.tmpdir

    args = '-d %s -p %s -n %s %s' % (testdir, nproc, nops, extra_args)
    cmd = self.srcdir + '/fsstress ' + args
    utils.system(cmd)
def setup(self, tarball='ext3-tools.tar.gz'):
    self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(self.tarball, self.srcdir)

    os.chdir(self.srcdir)
    utils.system('patch -p1 < %s/makefile.patch' % self.bindir)
    utils.make('bash-shared-mapping usemem')
def execute(self, eth='eth0', count=50000, clone_skb=1,
            dst_ip='192.168.210.210', dst_mac='01:02:03:04:05:07'):
    if not os.path.exists('/proc/net/pktgen'):
        utils.system('/sbin/modprobe pktgen')
    if not os.path.exists('/proc/net/pktgen'):
        raise error.TestError('pktgen not loaded')

    logging.info('Adding devices to run')
    self.pgdev = '/proc/net/pktgen/kpktgend_0'

    self.pgset('rem_device_all')
    self.pgset('add_device ' + eth)
    self.pgset('max_before_softirq 10000')

    # Configure the individual devices
    logging.info('Configuring devices')
    self.ethdev = '/proc/net/pktgen/' + eth
    self.pgdev = self.ethdev

    if clone_skb:
        self.pgset('clone_skb %d' % (count))
    self.pgset('min_pkt_size 60')
    self.pgset('max_pkt_size 60')
    self.pgset('dst ' + dst_ip)
    self.pgset('dst_mac ' + dst_mac)
    self.pgset('count %d' % (count))

    # Time to run
    self.pgdev = '/proc/net/pktgen/pgctrl'
    self.pgset('start')

    output = os.path.join(self.resultsdir, eth)
    utils.system('cp %s %s' % (self.ethdev, output))
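# The pgset() helper used above is not shown in this excerpt. A minimal sketch
# of how it is assumed to work, in the style of the pktgen sample scripts:
# write one command into the currently selected /proc/net/pktgen control file
# (self.pgdev) and look for "Result: OK" in that file.
def pgset(self, command):
    # Write the command to the active pktgen control file.
    pgfile = open(self.pgdev, 'w')
    try:
        pgfile.write(command + '\n')
    finally:
        pgfile.close()
    # pktgen reports status through the same file.
    if 'Result: OK' not in open(self.pgdev).read():
        logging.error('pgset command failed: %s', command)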
def initialize(self, dir = None, pages_requested = 20): self.dir = None self.job.require_gcc() utils.check_kernel_ver("2.6.16") # Check huge page number pages_available = 0 if os.path.exists('/proc/sys/vm/nr_hugepages'): utils.write_one_line('/proc/sys/vm/nr_hugepages', str(pages_requested)) nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages') pages_available = int(nr_hugepages) else: raise error.TestNAError('Kernel does not support hugepages') if pages_available < pages_requested: raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested)) # Check if hugetlbfs has been mounted if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'): if not dir: dir = os.path.join(self.tmpdir, 'hugetlbfs') os.makedirs(dir) utils.system('mount -t hugetlbfs none %s' % dir) self.dir = dir
def run_once(self, item=''): if not item: raise error.TestError('No test item provided') logging.info('Testing item %s', item) cfg_files = glob.glob(os.path.join(self.bindir, '*.cfg')) for src in cfg_files: basename = os.path.basename(src) dst = os.path.join(self.srcdir, basename) shutil.copyfile(src, dst) config_files_cfg = os.path.join(self.bindir, 'config_files.cfg') test_items = self.get_tests_from_cfg(config_files_cfg, item) if not test_items: raise error.TestError('No test available for item %s in ' 'config_files.cfg' % item) os.chdir(self.srcdir) failed_tests = [] for test_item in test_items: try: cfg_test = os.path.join('cases', test_item) utils.system('python libvirt-test-api -c %s' % cfg_test) except error.CmdError: logs = glob.glob(os.path.join('log', '*')) for log in logs: shutil.rmtree(log) failed_tests.append(os.path.basename(test_item).split('.')[0]) if failed_tests: raise error.TestFail('Tests failed for item %s: %s' % (item, failed_tests))
def package_jeos(img):
    """
    Package JeOS and make it ready for upload.

    Steps:
    1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
    2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
    3) Compress the sparsified image with 7za

    :param img: Path to a qcow2 image
    """
    basedir = os.path.dirname(img)
    backup = img + '.backup'
    qemu_img = utils_misc.find_command('qemu-img')
    shutil.move(img, backup)
    logging.info("Backup %s saved", backup)

    utils.system("%s convert -f qcow2 -O qcow2 %s %s" % (qemu_img, backup, img))
    logging.info("Sparse file %s created successfully", img)

    archiver = utils_misc.find_command('7za')
    compressed_img = img + ".7z"
    utils.system("%s a %s %s" % (archiver, compressed_img, img))
    logging.info("JeOS compressed file %s created successfully",
                 compressed_img)
def mount(src, mount_point, fstype, perm="rw"):
    """
    Mount the src into mount_point of the host.

    @param src: mount source
    @param mount_point: mount point
    @param fstype: file system type
    @param perm: mount permission
    """
    umount(src, mount_point, fstype)
    mount_string = "%s %s %s %s" % (src, mount_point, fstype, perm)

    if mount_string in file("/etc/mtab").read():
        logging.debug("%s is already mounted in %s with %s",
                      src, mount_point, perm)
        return True

    mount_cmd = "mount -t %s %s %s -o %s" % (fstype, src, mount_point, perm)
    try:
        utils.system(mount_cmd)
    except error.CmdError:
        return False

    logging.debug("Verify the mount through /etc/mtab")
    if mount_string in file("/etc/mtab").read():
        logging.debug("%s is successfully mounted", src)
        return True
    else:
        logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s",
                      file("/etc/mtab").read())
        return False
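# Illustrative usage sketch (not part of the original module): the NFS export
# address and mount point below are assumptions for this example only.
def _example_mount_nfs_share():
    if not mount("10.0.0.1:/export", "/mnt/nfs", "nfs", perm="rw"):
        logging.error("Could not mount the NFS share read-write")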
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.45.tar.gz',
                       tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    #
    # MySQL doesn't create this directory on its own.
    # This is where database logs and files are created.
    #
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass
    #
    # Initialize the database.
    #
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
def initialize(self, test, log): '''Does the init part of the test 1.Finds initial count of entry in log 2.Creates a file 'cron' under cron.d 3.Backs up /etc/crontab 4.Modifies /etc/crontab ''' self.log = log self.initial_count = self.count_log('Cron automation') f = open('/etc/cron.d/cron', 'w') f.write('''#!/bin/bash touch %s echo 'Cron automation' >> %s ''' % (self.log, self.log)) f.close() utils.system('chmod +x /etc/cron.d/cron') shutil.copyfile('/etc/crontab', '/tmp/backup') f = open('/etc/crontab', 'w') f.write('* * * * * root run-parts /etc/cron.d/\n') f.close() if test == 'deny_cron': if os.path.exists('/etc/cron.d/jobs.deny'): shutil.move('/etc/cron.d/jobs.deny', '/tmp/jobs.deny') f = open('/etc/cron.d/jobs.deny', 'w') f.write('cron') f.close() elif test == 'allow_cron' : os.remove('/etc/cron.d/jobs.deny') if os.path.exists('/etc/cron.d/jobs.allow'): shutil.move('/etc/cron.d/jobs.allow', '/tmp/jobs.allow') f = open('/etc/cron.d/jobs.allow', 'w') f.write('cron') f.close()
def server_start(self, cpu_affinity):
    utils.system('killall netserver', ignore_status=True)
    cmd = self.server_prog
    if cpu_affinity:
        cmd = 'taskset %s %s' % (cpu_affinity, cmd)

    self.results.append(utils.system_output(cmd, retain_output=True))
def cleanup_cdrom(path): """ Removes created iso image """ if path: error.context("Cleaning up temp iso image '%s'" % path, logging.info) if "gluster" in path: g_mount_point = tempfile.mkdtemp("gluster") g_server, v_name, f_name = path.split("/")[-3:] if ":" in g_server: g_server = g_server.split(":")[0] g_mount_link = "%s:/%s" % (g_server, v_name) mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, g_mount_point) utils.system(mount_cmd, timeout=60) path = os.path.join(g_mount_point, f_name) try: logging.debug("Remove the file with os.remove().") os.remove("%s" % path) except OSError, err: logging.warn("Fail to delete %s" % path) if "gluster" in path: try: umount_cmd = "umount %s" % g_mount_point utils.system(umount_cmd, timeout=60) os.rmdir(g_mount_point) except Exception, err: msg = "Fail to clean up %s" % g_mount_point msg += "Error message %s" % err logging.warn(msg)
def start(self, test):
    result = utils.system("mount | grep '%s'" % self.mountpoint,
                          ignore_status=True)
    if result:
        utils.system('mount -t debugfs debugfs /sys/kernel/debug')

    device = self.get_device(test)
    self.blktrace_job = utils.BgJob('%s /dev/%s' % (self.blktrace, device))
def upgrade(self, name=None):
    """
    Upgrade all packages of the system with eventual new versions.

    Optionally, upgrade individual packages.

    :param name: optional parameter wildcard spec to upgrade
    :type name: str
    """
    ud_command = 'update'
    ud_cmd = self.base_command + ' ' + ud_command
    try:
        utils.system(ud_cmd)
    except error.CmdError:
        logging.error("Apt package update failed")

    if name:
        up_command = 'install --only-upgrade'
        up_cmd = self.base_command + ' ' + up_command + ' ' + name
    else:
        up_command = 'upgrade'
        up_cmd = self.base_command + ' ' + up_command

    try:
        utils.system(up_cmd)
        return True
    except error.CmdError:
        return False
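# Illustrative usage sketch (the class name holding upgrade() is not shown in
# this excerpt; `pm` is assumed to be an instance of that apt backend class).
def _example_upgrade(pm):
    # Refresh the package index and upgrade a single package first,
    # then the whole system.
    if not pm.upgrade('openssl'):
        logging.error('openssl upgrade failed')
    return pm.upgrade()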
def setup(self):
    os.chdir(self.srcdir)
    if 'CC' in os.environ:
        cc = '$CC'
    else:
        cc = 'cc'
    utils.system('%s -lpthread hackbench.c -o hackbench' % cc)
def start(self, test): """ Start ftrace profiler :param test: Autotest test in which the profiler will operate on. """ # Make sure debugfs is mounted and tracing disabled. utils.system('%s reset' % self.trace_cmd) output_dir = os.path.join(test.profdir, 'ftrace') if not os.path.isdir(output_dir): os.makedirs(output_dir) self.output = os.path.join(output_dir, 'trace.dat') cmd = [self.trace_cmd, 'record', '-o', self.output] cmd += self.trace_cmd_args self.record_job = utils.BgJob(self.join_command(cmd), stderr_tee=utils.TEE_TO_LOGS) # Wait for tracing to be enabled. If trace-cmd dies before enabling # tracing, then there was a problem. tracing_on = os.path.join(self.tracing_dir, 'tracing_on') while (self.record_job.sp.poll() is None and utils.read_file(tracing_on).strip() != '1'): time.sleep(0.1) if self.record_job.sp.poll() is not None: utils.join_bg_jobs([self.record_job]) raise error.CmdError(self.record_job.command, self.record_job.sp.returncode, 'trace-cmd exited early.')
def download_test_provider(provider, update=False): """ Download a test provider defined on a .ini file inside test-providers.d. This function will only download test providers that are in git repos. Local filesystems don't need this functionality. :param provider: Test provider name, such as 'io-github-autotest-qemu'. """ provider_info = get_test_provider_info(provider) uri = provider_info.get('uri') if not uri.startswith('file://'): uri = provider_info.get('uri') branch = provider_info.get('branch') ref = provider_info.get('ref') pubkey = provider_info.get('pubkey') download_dst = data_dir.get_test_provider_dir(provider) repo_downloaded = os.path.isdir(os.path.join(download_dst, '.git')) if not repo_downloaded or update: download_dst = git.get_repo(uri=uri, branch=branch, commit=ref, destination_dir=download_dst) os.chdir(download_dst) try: utils.run('git remote add origin %s' % uri) except error.CmdError: pass utils.run('git pull origin %s' % branch) os.chdir(download_dst) utils.system('git log -1')
def copy(self):
    os.chdir(self.archive)
    utils.system('pax -rw . %s-test' % (self.archive))
    expected_output = '''test
test/d_one
test/d_one/f_one
test/d_one/f_four
test/d_one/f_two
test/d_one/f_three
test/d_three
test/d_two
test/d_two/d_four'''
    expected_output = utils.re.sub('test', self.archive,
                                   expected_output).split('\n')
    expected_output.sort()

    os.chdir(self.archive + '-test')
    ls = []
    ls.append(self.archive)
    for subdir, subdirs, filename in os.walk(self.archive):
        for i in subdirs:
            ls.append(utils.re.sub('-test', '', subdir + '/' + i + '\n'))
        for i in filename:
            ls.append(utils.re.sub('-test', '', subdir + '/' + i + '\n'))
    ls.sort()

    z = zip(ls, expected_output)
    for i, j in z:
        if i.strip() != j.strip():
            raise error.TestError("Directory listing doesn't match")
    logging.info('Directory copy success.')
def setup(self, env): if self.ksmtuned_process != 0 and self.disable_ksmtuned: kill_cmd = "kill -1 %s" % self.ksmtuned_process utils.system(kill_cmd) env.data["KSM_default_config"] = self.default_status ksm_cmd = "" if self.interface == "sysfs": if self.run != self.default_status[0]: ksm_cmd += " echo %s > KSM_PATH/run;" % self.run if (self.pages_to_scan and self.pages_to_scan != self.default_status[1]): ksm_cmd += " echo %s > KSM_PATH" % self.pages_to_scan ksm_cmd += "/pages_to_scan;" if (self.sleep_ms and self.sleep_ms != self.default_status[2]): ksm_cmd += " echo %s > KSM_PATH" % self.sleep_ms ksm_cmd += "/sleep_millisecs" ksm_cmd = re.sub("KSM_PATH", self.ksm_path, ksm_cmd) elif self.interface == "ksmctl": if self.run == "1": ksm_cmd += "ksmctl start %s %s" % (self.pages_to_scan, self.sleep_ms) else: ksm_cmd += "ksmctl stop" utils.system(ksm_cmd)
def create_interface(self): """ Create virtual interface Create interface veth0, veth1, veth2, veth3 Verify if interface is created using ip link Create teamd conf file """ utils.system("ip link add veth0 type veth peer name veth1") utils.system("ip link add veth2 type veth peer name veth3") interface_out = utils.system_output("ip link") if not all(x in interface_out for x in ["veth0", "veth1", "veth2", "veth3"]): self.nfail += 1 raise error.TestError("\nSetup failed to create virtual interface") else: self.cleanup_iface = True f = open("%s/teamd_conf" % self.tmpdir, "w") f.writelines( """ { "device": "team2", "runner": {"name": "roundrobin"}, "ports": {"veth1": {}, "veth0": {}} } """ ) f.close()
def install(self, tag='', prefix='/', extraversion='autotest'): """make install in the kernel tree""" self.log('Installing ...') os.chdir(self.build_dir) if not os.path.isdir(prefix): os.mkdir(prefix) self.boot_dir = os.path.join(prefix, 'boot') if not os.path.isdir(self.boot_dir): os.mkdir(self.boot_dir) # remember what we are going to install xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion) self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz' self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version self.log('Installing Xen ...') os.environ['XEN_EXTRAVERSION'] = '-unstable-%s' % extraversion # install xen utils.system('make DESTDIR=%s -C xen install' % prefix) # install tools utils.system('make DESTDIR=%s -C tools install' % prefix) # install kernel ktag = self.kjob.get_kernel_build_ver() kprefix = prefix self.kjob.install(tag=ktag, prefix=kprefix)
def sr_iov_cleanup(self): """ Clean up the sriov setup Check if the PCI hardware device drive is loaded with the appropriate, parameters (none of VFs), and if it's not, perform cleanup. @return: True, if the setup was completed successfuly, False otherwise. """ # Check if the host support interrupt remapping error.context("Clean up host env after PCI assign test", logging.info) kvm_re_probe = False if self.kvm_params is not None: if (self.auai_path and open(self.auai_path, "r").read().strip() == "Y"): if self.kvm_params and self.kvm_params[self.auai_path] == "N": kvm_re_probe = True else: kvm_re_probe = True # Try to re probe kvm module with interrupt remapping support if kvm_re_probe: kvm_arch = kvm_control.get_kvm_arch() utils.system("modprobe -r %s" % kvm_arch) utils.system("modprobe -r kvm") cmd = "modprobe kvm" if self.kvm_params: for i in self.kvm_params: if self.kvm_params[i] == "Y": params_name = os.path.split(i)[1] cmd += " %s=1" % params_name logging.info("Loading kvm with command: %s" % cmd) try: utils.system(cmd) except Exception: logging.debug("Failed to reload kvm") cmd = "modprobe %s" % kvm_arch logging.info("Loading %s with command: %s" % (kvm_arch, cmd)) utils.system(cmd) re_probe = False s = commands.getstatusoutput('lsmod | grep %s' % self.driver)[0] if s: cmd = "modprobe -r %s" % self.driver logging.info("Running host command: %s" % cmd) os.system(cmd) re_probe = True else: return True # Re-probe driver with proper number of VFs if re_probe: cmd = "modprobe %s" % self.driver msg = "Loading the driver '%s' without option" % self.driver error.context(msg, logging.info) s = commands.getstatusoutput(cmd)[0] utils.system("/etc/init.d/network restart", ignore_status=True) if s: return False return True
def setup(self, tarball='ebizzy-0.3.tar.gz'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)

    utils.system('[ -x configure ] && ./configure')
    utils.make()
def run_usb_host(test, params, env):
    """
    Test usb host device passthrough

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    device = params.get("usb_host_device")
    if device is None:
        raise error.TestNAError("not configured (use 'usb_host_device = "
                                "<vendor>:<product>')")
    (vendorid, productid) = device.split(":")

    # compose strings
    lsusb_cmd = "lsusb -v -d %s" % device
    monitor_add = "device_add usb-host,bus=usbtest.0,id=usbhostdev"
    monitor_add += ",vendorid=%s" % vendorid
    monitor_add += ",productid=%s" % productid
    monitor_del = "device_del usbhostdev"
    match_add = "New USB device found, "
    match_add += "idVendor=%s, idProduct=%s" % (vendorid, productid)
    match_del = "USB disconnect"

    error.context("Check usb device %s on host" % device, logging.info)
    try:
        utils.system(lsusb_cmd)
    except:
        raise error.TestNAError("Device %s not present on host" % device)

    error.context("Log into guest", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login()
    session.cmd_status("dmesg -c")

    error.context("Plugin usb device", logging.info)
    vm.monitor.cmd(monitor_add)
    session.cmd_status("sleep 1")
    session.cmd_status("udevadm settle")
    messages_add = session.cmd("dmesg -c")
    for line in messages_add.splitlines():
        logging.debug("[dmesg add] %s" % line)
    if messages_add.find(match_add) == -1:
        raise error.TestFail("kernel didn't detect plugin")

    error.context("Check usb device %s in guest" % device, logging.info)
    session.cmd(lsusb_cmd)

    error.context("Unplug usb device", logging.info)
    vm.monitor.cmd(monitor_del)
    session.cmd_status("sleep 1")
    messages_del = session.cmd("dmesg -c")
    for line in messages_del.splitlines():
        logging.debug("[dmesg del] %s" % line)
    if messages_del.find(match_del) == -1:
        raise error.TestFail("kernel didn't detect unplug")

    session.close()
def setup(self, tarball='NPB3.3.tar.gz'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    # Prepare the makefile and benchmarks to generate.
    utils.system('patch -p1 < %s/enable-all-tests.patch' % self.bindir)
    utils.system('cd NPB3.3-OMP && make suite')
def plot_3d_graphs(self):
    """
    For each one of the throughput parameters, generate a set of gnuplot
    commands that will create a parametric surface with file size vs. record
    size vs. throughput.
    """
    for index, label in zip(range(1, 14), _LABELS[2:]):
        commands_path = os.path.join(self.output_dir, '%s.do' % label)
        commands = ""
        commands += "set title 'Iozone performance: %s'\n" % label
        commands += "set grid lt 2 lw 1\n"
        commands += "set surface\n"
        commands += "set parametric\n"
        commands += "set xtics\n"
        commands += "set ytics\n"
        commands += "set logscale x 2\n"
        commands += "set logscale y 2\n"
        commands += "set logscale z\n"
        commands += "set xrange [2.**5:2.**24]\n"
        commands += "set xlabel 'File size (KB)'\n"
        commands += "set ylabel 'Record size (KB)'\n"
        commands += "set zlabel 'Throughput (KB/s)'\n"
        commands += "set style data lines\n"
        commands += "set dgrid3d 80,80, 3\n"
        commands += "set terminal png small size 900 700\n"
        commands += "set output '%s'\n" % os.path.join(self.output_dir,
                                                       '%s.png' % label)
        commands += ("splot '%s' using 1:2:%s title '%s'\n" %
                     (self.datasource, index, label))
        commands_file = open(commands_path, 'w')
        commands_file.write(commands)
        commands_file.close()

        try:
            utils.system("%s %s" % (self.gnuplot, commands_path))
        except error.CmdError:
            logging.error("Problem plotting from commands file %s",
                          commands_path)
def setup(self, tarball='isic-0.06.tar.bz2'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)

    utils.system('patch -p1 < %s/build-fixes.patch' % self.bindir)
    utils.system('PREFIX=%s/deps/libnet/libnet/ ./configure' % self.autodir)
    utils.system('make')
def rebase(self, params):
    """
    Rebase image

    @param params: dictionary containing the test parameters
    @note: params should contain:
        cmd -- qemu-img cmd
        snapshot_img -- the snapshot name
        base_img -- base image name
        base_fmt -- base image format
        snapshot_fmt -- the snapshot format
        mode -- either "safe" or "unsafe"; the default is "safe"
    """
    self.check_option("base_image_filename")
    self.check_option("base_format")

    rebase_mode = params.get("rebase_mode")
    cmd = self.image_cmd
    cmd += " rebase"
    if self.image_format:
        cmd += " -f %s" % self.image_format
    if rebase_mode == "unsafe":
        cmd += " -u"
    if self.base_tag:
        cmd += " -b %s -F %s %s" % (self.base_image_filename,
                                    self.base_format, self.image_filename)
    else:
        raise error.TestError("Can not find the image parameters needed"
                              " for rebase.")

    logging.info("Rebase snapshot %s to %s..." % (self.image_filename,
                                                  self.base_image_filename))
    utils.system(cmd)

    return self.base_tag
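# For reference, the command line assembled above corresponds to an invocation
# like the following (file names are illustrative, "unsafe" mode shown):
#
#   qemu-img rebase -f qcow2 -u -b base.qcow2 -F qcow2 snapshot.qcow2
#
# In "safe" mode the -u flag is omitted and qemu-img rewrites the clusters that
# differ between the old and the new backing file, so the guest-visible data
# stays unchanged.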
def setup(self, tarball='unixbench-4.1.0.tar.bz2'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)

    utils.system('patch -p1 < ../unixbench.patch')
    utils.system('patch -p1 < ../Makefile.patch')
    utils.make()
    utils.system('rm pgms/select')
def provides(self, file): """ Return a list of packages that provide [file]. @param file: File path. """ if not self.check_installed('apt-file'): self.install('apt-file') command = os_dep.command('apt-file') cache_update_cmd = command + ' update' try: utils.system(cache_update_cmd, ignore_status=True) except Exception: logging.error("Apt file cache update failed") fu_cmd = command + ' search ' + file try: provides = utils.system_output(fu_cmd).split('\n') list_provides = [] for line in provides: if line: try: line = line.split(':') package = line[0].strip() path = line[1].strip() if path == file and package not in list_provides: list_provides.append(package) except IndexError: pass if len(list_provides) > 1: logging.warning('More than one package found, ' 'opting by the first queue result') if list_provides: logging.info("Package %s provides %s", list_provides[0], file) return list_provides[0] return None except Exception: return None
def run_once(self, opts=None, job=None, user='******'): log = os.path.join(self.resultsdir, 'fio-mixed.log') _opts = '--output %s ' % (log) if opts: _opts += opts arch = platform.machine() if "ppc" in arch: job_cfg_file = 'fio-mixed-ppc.job' else: job_cfg_file = 'fio-mixed.job' if job is None: job = os.path.join(self.bindir, job_cfg_file) else: if not os.path.isabs(job): job = os.path.join(self.bindir, job) _opts += ' %s' % (job) os.chdir(self.srcdir) ##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir) env_vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"' ##opts = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example' utils.system(env_vars + ' ./fio ' + _opts)
def __init__(self, job, device, loop_size=0, mountpoint=None): """ @param job: A L{client.job} instance. @param device: The device in question (e.g."/dev/hda2"). If device is a file it will be mounted as loopback. If you have job config 'partition.partitions', e.g., job.config_set('partition.partitions', ["/dev/sda2", "/dev/sda3"]) you may specify a partition in the form of "partN" e.g. "part0", "part1" to refer to elements of the partition list. This is specially useful if you run a test in various machines and you don't want to hardcode device names as those may vary. @param loop_size: Size of loopback device (in MB). Defaults to 0. """ # NOTE: This code is used by IBM / ABAT. Do not remove. part = re.compile(r'^part(\d+)$') m = part.match(device) if m: number = int(m.groups()[0]) partitions = job.config_get('partition.partitions') try: device = partitions[number] except Exception: raise NameError("Partition '" + device + "' not available") self.device = device self.name = os.path.basename(device) self.job = job self.loop = loop_size self.fstype = None self.mountpoint = mountpoint self.mkfs_flags = None self.mount_options = None self.fs_tag = None if self.loop: cmd = 'dd if=/dev/zero of=%s bs=1M count=%d' % (device, loop_size) utils.system(cmd)
def run_perf_kvm(test, params, env): """ run perf tool to get kvm events info :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) transfer_timeout = int(params.get("transfer_timeout", 240)) perf_record_timeout = int(params.get("perf_record_timeout", 240)) vm_kallsyms_path = "/tmp/guest_kallsyms" vm_modules_path = "/tmp/guest_modules" # Prepare test environment in guest session = vm.wait_for_login(timeout=login_timeout) session.cmd("cat /proc/kallsyms > %s" % vm_kallsyms_path) session.cmd("cat /proc/modules > %s" % vm_modules_path) vm.copy_files_from("/tmp/guest_kallsyms", "/tmp", timeout=transfer_timeout) vm.copy_files_from("/tmp/guest_modules", "/tmp", timeout=transfer_timeout) perf_record_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path perf_record_cmd += " --guestmodules=%s record -a -o /tmp/perf.data sleep %s " % ( vm_modules_path, perf_record_timeout) perf_report_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path perf_report_cmd += " --guestmodules=%s report -i /tmp/perf.data --force " % vm_modules_path utils.system(perf_record_cmd) utils.system(perf_report_cmd) session.close()
def avahi_syslog(self):
    """
    Clear /var/log/messages
    Start avahi-autoipd using -s
    Verify /var/log/messages is populated with the data
    """
    utils.system("echo > %s" % self.log_messages)
    utils.system("avahi-autoipd -s veth0 &")
    time.sleep(15)
    self.ip_addr = None
    result = open(self.log_messages, "r").readlines()
    for i in result:
        if "Successfully claimed IP address" in i:
            self.ip_addr = i.split()[-1]
    if self.ip_addr is None:
        self.nfail += 1
        raise error.TestError('\n avahi-autoipd -s failed')
    else:
        logging.info('\n ip address using avahi-autoipd -s is %s'
                     % self.ip_addr)
    self.stop_avahi()
def run(test, params, env): """ live_snapshot_base test: 1). Boot up guest 2). Create a file on host and record md5 3). Copy the file to guest 3). Create live snapshot 4). Copy the file from guest,then check md5 :param test: Kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 3600)) session = vm.wait_for_login(timeout=timeout) dd_timeout = params.get("dd_timeoout", 600) copy_timeout = params.get("copy_timeoout", 600) base_file = storage.get_image_filename(params, data_dir.get_data_dir()) device = vm.get_block({"file": base_file}) snapshot_file = "images/%s" % params.get("snapshot_name") snapshot_file = utils_misc.get_path(data_dir.get_data_dir(), snapshot_file) snapshot_format = params.get("snapshot_format", "qcow2") tmp_name = utils_misc.generate_random_string(5) src = dst = "/tmp/%s" % tmp_name if params.get("os_type") != "linux": dst = "c:\\users\\public\\%s" % tmp_name try: error.context("create file on host, copy it to guest", logging.info) cmd = params.get("dd_cmd") % src utils.system(cmd, timeout=dd_timeout) md5 = utils.hash_file(src, method="md5") vm.copy_files_to(src, dst, timeout=copy_timeout) utils.system("rm -f %s" % src) error.context("create live snapshot", logging.info) if vm.live_snapshot(base_file, snapshot_file, snapshot_format) != device: raise error.TestFail("Fail to create snapshot") backing_file = vm.monitor.get_backingfile(device) if backing_file != base_file: logging.error("backing file: %s, base file: %s", backing_file, base_file) raise error.TestFail("Got incorrect backing file") error.context("copy file to host, check content not changed", logging.info) vm.copy_files_from(dst, src, timeout=copy_timeout) if md5 and (md5 != utils.hash_file(src, method="md5")): raise error.TestFail("diff md5 before/after create snapshot") session.cmd(params.get("alive_check_cmd", "dir")) finally: if session: session.close() utils.system("rm -f %s %s" % (snapshot_file, src))
def run_once(self, args='', stepsecs=0): vars = ('TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir)) if stepsecs: # change time per subtest from unixbench's defaults of # 10 secs for small tests, 30 secs for bigger tests vars += ' systime=%i looper=%i seconds=%i'\ ' dhrytime=%i arithtime=%i' \ % ((stepsecs,) * 5) os.chdir(self.srcdir) try: utils.system(vars + ' ./Run ' + args) finally: times_path = os.path.join(self.resultsdir, 'times') # The 'times' file can be needlessly huge as it contains warnings # and error messages printed out by small benchmarks that are # run in a loop. It can easily compress 100x in such cases. if os.path.exists(times_path): utils.system("gzip -9 '%s'" % (times_path, ), ignore_status=True) report_path = os.path.join(self.resultsdir, 'report') self.report_data = open(report_path).readlines()[9:]
def run_once(self):
    utils.system("cp " + self.srcdir + "/../../site_tests/successfulupdate/reboottesting.service /media/state/units/")
    utils.system("sudo cgpt add -i 3 -P 5 /dev/sda")
    # TODO change to prod update service
    utils.system("update_engine_client -update -omaha_url http://192.168.122.1:8080/update")
def setup(self, tarball='ext3-tools.tar.gz'): self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir) utils.extract_tarball_to_dir(self.tarball, self.srcdir) self.job.setup_dep(['libaio']) ldflags = '-L' + self.autodir + '/deps/libaio/lib' cflags = '-I' + self.autodir + '/deps/libaio/include' var_ldflags = 'LDFLAGS="' + ldflags + '"' var_cflags = 'CFLAGS="' + cflags + '"' self.make_flags = var_ldflags + ' ' + var_cflags os.chdir(self.srcdir) p1 = '0001-Minor-fixes-to-PAGE_SIZE-handling.patch' p2 = '0002-Enable-cross-compiling-for-fsx.patch' p3 = '0003-ext3-tools.patch' utils.system('patch -p1 < %s/%s' % (self.bindir, p1)) utils.system('patch -p1 < %s/%s' % (self.bindir, p2)) utils.system('patch -p1 < %s/%s' % (self.bindir, p3)) utils.system(self.make_flags + ' make fsx-linux')
def setup(self): utils.system_output('rm /etc/*/S99autotest || true', retain_output=True) utils.system_output('useradd zfs-tests || true', retain_output=True) utils.system_output('echo \"zfs-tests ALL=(ALL)NOPASSWD: ALL\" >> /etc/sudoers', retain_output=True) print "Extracting zfs-test tarball.." tarball = utils.unmap_url(self.bindir, 'zfs-tests.tar.bz2', self.tmpdir) utils.extract_tarball_to_dir(tarball, self.srcdir) utils.system_output('rsync -arv %s/tests %s/' % (self.bindir, self.srcdir)) os.chdir(self.srcdir) print "Patching zfs-test tarball.." utils.system('patch -p1 < %s/zfs-tweaks.patch' % self.bindir) print "Building zfs-test tarball.." utils.system('./autogen.sh') utils.configure('') utils.system('SRCDIR=%s make' % self.srcdir) utils.system('modprobe zfs')
def run_once(self):
    tool_node = self.check_tool_result_node()

    lptlog.info("----------- Fetching test parameters")
    self.parallels = self.get_config_value(tool_node, "parallel", 1,
                                           valueType=int)
    if self.parallels > 999:
        lptlog.info("Parallelism is limited to less than 999, using 1 instead")
        self.parallels = 1
    lptlog.info("Test parallelism: %d" % self.parallels)

    self.times = self.get_config_value(tool_node, "times", 5, valueType=int)
    lptlog.info("Number of test iterations: %d" % self.times)

    jobs_sched = self.get_config_value(tool_node, "lmbench_sched", "DEFAULT",
                                       valueType=str)
    if jobs_sched not in ("DEFAULT", "BALANCED", "BALANCED_SPREAD",
                          "UNIQUE", "UNIQUE_SPREAD"):
        jobs_sched = "DEFAULT"
    lptlog.info("Job scheduling mode: %s" % jobs_sched)

    testMem = self.get_config_value(tool_node, "testmemory", 1000,
                                    valueType=int)
    lptlog.info("Requested test memory: %sM; if it exceeds the available "
                "system memory, lmbench will compute the usable amount "
                "automatically" % testMem)

    output = self.get_config_value(tool_node, "output", "/dev/tty",
                                   valueType=str)
    lptlog.info("lmbench will write its output to: %s" % output)

    self.mainParameters["parameters"] = "Mem:%dM jobSched:%s" % (testMem,
                                                                 jobs_sched)

    lptlog.info("---------- Running the test scripts")
    # run the test program
    os.chdir(self.srcdir)
    # Now, configure the test parameters
    utils.system("./config.sh -p %d -j %s -m %d -o %s"
                 % (self.parallels, jobs_sched, testMem, output))

    # clean data left over from previous runs
    for rootdir, subdirs, files in os.walk("./results"):
        for dir in subdirs:
            subabsdir = os.path.join(rootdir, dir)
            if os.path.exists(subabsdir):
                shutil.rmtree(os.path.join(rootdir, dir))

    # run
    for iter in range(self.times):
        utils.system("./run.sh")
        time.sleep(60)

    # gen results
    utils.system("./genResults.sh %s" % os.path.join(self.resultsdir,
                                                     "lmbench.out"))

    # process the results
    self.create_result()
    self.save_results_to_xml()

    # create txt report
    self.txt_report()
def report(self, test): # Output kernel per-symbol profile report reportfile = test.profdir + '/oprofile.kernel' if self.vmlinux: report = self.opreport + ' -l ' + self.vmlinux if os.path.exists(utils.get_modules_dir()): report += ' -p ' + utils.get_modules_dir() logging.info('Starting oprofile: %s' % self.start_time) utils.system(report + ' > ' + reportfile) logging.info('Ending oprofile: %s' % self.stop_time) else: utils.system("echo 'no vmlinux found.' > %s" % reportfile) # output profile summary report reportfile = test.profdir + '/oprofile.user' logging.info('Starting oprofile: %s' % self.start_time) utils.system(self.opreport + ' --long-filenames ' + ' >> ' + reportfile) logging.info('Ending oprofile: %s' % self.stop_time) utils.system(self.opcontrol + ' --shutdown')
def _create_partition(self):
    """
    Create and initialize the sparse file.
    """
    # Recreate sparse_file.
    utils.system('dd if=/dev/zero of=%s bs=1M seek=1024 count=1' %
                 self.sparse_file)

    # Format sparse_file.
    utils.system('echo "y" | mkfs -t %s %s' %
                 (self.file_system, self.sparse_file))

    # Mount sparse_file.
    utils.system('mount -o loop -t %s %s %s' %
                 (self.file_system, self.sparse_file, self.mount_point))
def mount(src, dst, fstype=None, options=None, verbose=False): """ Mount src under dst if it's really mounted, then remout with options. :param src: source device or directory, if None will skip to check :param dst: mountpoint, if None will skip to check :param fstype: filesystem type need to mount :return: if mounted return True else return False """ options = (options and [options] or [''])[0] if is_mount(src, dst): if 'remount' not in options: options = 'remount,%s' % options cmd = ['mount'] if fstype: cmd.extend(['-t', fstype]) if options: cmd.extend(['-o', options]) cmd.extend([src, dst]) cmd = ' '.join(cmd) return utils.system(cmd, verbose=verbose) == 0
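# Illustrative usage sketch for the helper above (device and mount point are
# assumptions for this example only): remount an already mounted filesystem
# read-only.
def _example_remount_readonly():
    # Because /dev/sdb1 is already mounted on /mnt/data, is_mount() succeeds
    # and the helper runs "mount -t ext4 -o remount,ro /dev/sdb1 /mnt/data".
    return mount('/dev/sdb1', '/mnt/data', fstype='ext4', options='ro')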
def stop(self, test): utils.system(self.lttctl + ' -n test -R') time.sleep(10) if self.outputsize != -1: # truncate lttng output file to the specified limit for filename in os.listdir(self.output): file_path = os.path.join(self.output, filename) if os.path.isdir(file_path): continue size = os.stat(file_path)[6] # grab file size if size > self.outputsize: f = open(file_path, 'r+') f.truncate(self.outputsize) f.close() tarball = os.path.join(test.profdir, 'lttng.tar.bz2') utils.system("tar -cvjf %s -C %s %s" % (tarball, test.profdir, 'lttng')) utils.system('rm -rf ' + self.output)
def force_bind(self):
    """
    Force bind
    Assign a routable address to the interface.
    Run avahi-autoipd, which should not assign an IP because an address
    is already configured on the interface; with --force-bind it will
    assign the IP. Verify using the ip command.
    """
    utils.system("ip addr add 192.168.122.2 dev veth0")
    utils.system("echo > %s" % self.log_messages)
    utils.system("avahi-autoipd -s veth0 &")
    time.sleep(10)
    force_bind_fail = None
    result = open(self.log_messages, "r").readlines()
    for i in result:
        if "Routable address already assigned, sleeping" in i:
            force_bind_fail = 1
    if force_bind_fail is None:
        self.nfail += 1
        raise error.TestError(
            '\n avahi-autoipd assigned an ip even if a routable address'
            ' was configured'
        )
    self.stop_avahi()

    utils.system_output(
        "avahi-autoipd -S 169.254.8.224 --force-bind veth0 &")
    time.sleep(10)
    status, ip_out = commands.getstatusoutput(
        "ip addr show veth0 | grep -w inet")
    if "169.254.8.224" not in ip_out:
        self.nfail += 1
        raise error.TestError('\n avahi-autoipd --force-bind failed')
    else:
        logging.info('\n avahi-autoipd --force-bind passed')
    self.stop_avahi()
def get_driver_hardware_id(driver_path, mount_point="/tmp/mnt-virtio",
                           storage_path="/tmp/prewhql.iso",
                           re_hw_id="(PCI.{14,50})", run_cmd=True):
    """
    Get windows driver's hardware id from inf files.

    :param driver_path: Path of the driver's inf file inside the storage
    :param mount_point: Mount point for the driver storage
    :param storage_path: The path of the virtio driver storage
    :param re_hw_id: the pattern for getting hardware id from inf files
    :param run_cmd: Use hardware id in windows cmd command or not

    :return: Windows driver's hardware id
    """
    if not os.path.exists(mount_point):
        os.mkdir(mount_point)

    if not os.path.ismount(mount_point):
        utils.system("mount %s %s -o loop" % (storage_path, mount_point),
                     timeout=60)
    driver_link = os.path.join(mount_point, driver_path)
    txt_file = ""
    try:
        txt_file = open(driver_link, "r")
        txt = txt_file.read()
        hwid = re.findall(re_hw_id, txt)[-1].rstrip()
        if run_cmd:
            hwid = '^&'.join(hwid.split('&'))
        txt_file.close()
        utils.system("umount %s" % mount_point)
        return hwid
    except Exception, e:
        logging.error("Fail to get hardware id with exception: %s" % e)
        if txt_file:
            txt_file.close()
        utils.system("umount %s" % mount_point, ignore_status=True)
        return ""
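# Illustrative usage sketch (the .inf path inside the driver ISO is an
# assumption for this example; adjust it to the layout of the actual storage):
def _example_lookup_hwid():
    hwid = get_driver_hardware_id('viostor/w7/amd64/viostor.inf',
                                  storage_path='/tmp/prewhql.iso')
    if hwid:
        logging.info('Driver hardware id: %s', hwid)
    return hwid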
def stop(self, test): """ Stop ftrace profiler. :param test: Autotest test in which the profiler will operate on. """ os.kill(self.record_job.sp.pid, signal.SIGINT) utils.join_bg_jobs([self.record_job]) # shrink the buffer to free memory. utils.system('%s reset -b 1' % self.trace_cmd) # compress output utils.system('bzip2 %s' % self.output) compressed_output = self.output + '.bz2' # if the compressed trace file is large (10MB), just delete it. compressed_output_size = os.path.getsize(compressed_output) if compressed_output_size > 10 * 1024 * 1024: logging.warn('Deleting large trace file %s (%d bytes)', compressed_output, compressed_output_size) os.remove(compressed_output) # remove per-cpu files in case trace-cmd died. utils.system('rm -f %s.cpu*' % self.output)
def setup(self): utils.system_output('rm /etc/*/S99autotest || true', retain_output=True) utils.system_output('useradd fsgqa || true', retain_output=True) utils.system_output( 'echo \"fsgqa ALL=(ALL)NOPASSWD: ALL\" >> /etc/sudoers', retain_output=True) print "Extracting xfstests.tar.bz2 tarball.." tarball = utils.unmap_url(self.bindir, 'xfstests-bld.tar.bz2', self.tmpdir) utils.extract_tarball_to_dir(tarball, self.srcdir) os.chdir(os.path.join(self.srcdir, 'xfstests-dev')) print "Patching xfstests tarball.." utils.system( 'patch -p1 < %s/0001-xfstests-add-minimal-support-for-zfs.patch' % self.bindir) os.chdir(self.srcdir) print "Building xfstests" utils.system('make') utils.system('modprobe zfs')
def reboot(self, tag=LAST_BOOT_TAG): if tag == LAST_BOOT_TAG: tag = self.last_boot_tag else: self.last_boot_tag = tag self.reboot_setup() self.harness.run_reboot() default = self.config_get('boot.set_default') if default: self.bootloader.set_default(tag) else: self.bootloader.boot_once(tag) # HACK: using this as a module sometimes hangs shutdown, so if it's # installed unload it first utils.system("modprobe -r netconsole", ignore_status=True) # sync first, so that a sync during shutdown doesn't time out utils.system("sync; sync", ignore_status=True) utils.system("(sleep 5; reboot) </dev/null >/dev/null 2>&1 &") self.quit()
vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) client_ip = vm.get_address(0) try: error.context("Test Env setup") iperf_url = linux_iperf_url utils.get_file(iperf_url, host_path) error.context("install iperf in host", logging.info) default_install_cmd = "tar zxvf %s; cd iperf-%s;" default_install_cmd += " ./configure; make; make install" install_cmd = params.get("linux_install_cmd", default_install_cmd) utils.system(install_cmd % (host_path, iperf_version)) error.context("install iperf in guest", logging.info) if os_type == "linux": guest_path = (tmp_dir + "iperf.tgz") clean_cmd = "rm -rf %s iperf-%s" % (guest_path, iperf_version) else: guest_path = (tmp_dir + "iperf.exe") iperf_url = win_iperf_url utils.get_file(iperf_url, host_path) clean_cmd = "del %s" % guest_path vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout) if os_type == "linux": session.cmd(install_cmd % (guest_path, iperf_version))
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def create_iface_xml(iface_mac): """ Create interface xml file """ iface = Interface(type_name=iface_type) source = ast.literal_eval(iface_source) if source: iface.source = source iface.model = iface_model if iface_model else "virtio" iface.mac_address = iface_mac driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) logging.debug("Create new interface xml: %s", iface) return iface def modify_iface_xml(update, status_error=False): """ Modify interface xml options """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] if iface_model: iface.model = iface_model else: del iface.model if iface_type: iface.type_name = iface_type del iface.source source = ast.literal_eval(iface_source) if source: net_ifs = utils_net.get_net_if(state="UP") # Check source device is valid or not, # if it's not in host interface list, try to set # source device to first active interface of host if (iface.type_name == "direct" and source.has_key('dev') and source['dev'] not in net_ifs): logging.warn( "Source device %s is not a interface" " of host, reset to %s", source['dev'], net_ifs[0]) source['dev'] = net_ifs[0] iface.source = source backend = ast.literal_eval(iface_backend) if backend: iface.backend = backend driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) if iface.address: del iface.address logging.debug("New interface xml file: %s", iface) if unprivileged_user: # Create disk image for unprivileged user disk_index = xml_devices.index( xml_devices.by_device_tag("disk")[0]) disk_xml = xml_devices[disk_index] logging.debug("source: %s", disk_xml.source) disk_source = disk_xml.source.attrs["file"] cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}" "".format(disk_source, dst_disk, unprivileged_user)) utils.run(cmd) disk_xml.source = disk_xml.new_disk_source( attrs={"file": dst_disk}) vmxml.devices = xml_devices # Remove all channels to avoid of permission problem channels = vmxml.get_devices(device_type="channel") for channel in channels: vmxml.del_device(channel) vmxml.xmltreefile.write() logging.debug("New VM xml: %s", vmxml) utils.run("chmod a+rw %s" % vmxml.xml) virsh.define(vmxml.xml, **virsh_dargs) # Try to modify interface xml by update-device or edit xml elif update: iface.xmltreefile.write() ret = virsh.update_device(vm_name, iface.xml, ignore_status=True) libvirt.check_exit_status(ret, status_error) else: vmxml.devices = xml_devices vmxml.xmltreefile.write() vmxml.sync() def check_offloads_option(if_name, driver_options, 
session=None): """ Check interface offloads by ethtool output """ offloads = { "csum": "tx-checksumming", "gso": "generic-segmentation-offload", "tso4": "tcp-segmentation-offload", "tso6": "tx-tcp6-segmentation", "ecn": "tx-tcp-ecn-segmentation", "ufo": "udp-fragmentation-offload" } if session: ret, output = session.cmd_status_output("ethtool -k %s | head" " -18" % if_name) else: out = utils.run("ethtool -k %s | head -18" % if_name) ret, output = out.exit_status, out.stdout if ret: raise error.TestFail("ethtool return error code") logging.debug("ethtool output: %s", output) for offload in driver_options.keys(): if offloads.has_key(offload): if (output.count(offloads[offload]) and not output.count( "%s: %s" % (offloads[offload], driver_options[offload]))): raise error.TestFail( "offloads option %s: %s isn't" " correct in ethtool output" % (offloads[offload], driver_options[offload])) def run_xml_test(iface_mac): """ Test for interface options in vm xml """ # Get the interface object according the mac address vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) iface_devices = vmxml.get_devices(device_type="interface") iface = None for iface_dev in iface_devices: if iface_dev.mac_address == iface_mac: iface = iface_dev if not iface: raise error.TestFail("Can't find interface with mac" " '%s' in vm xml" % iface_mac) driver_dict = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) for driver_opt in driver_dict.keys(): if not driver_dict[driver_opt] == iface.driver.driver_attr[ driver_opt]: raise error.TestFail( "Can't see driver option %s=%s in vm xml" % (driver_opt, driver_dict[driver_opt])) if iface_target: if (not iface.target.has_key("dev") or not iface.target["dev"].startswith(iface_target)): raise error.TestFail("Can't see device target dev in vm xml") # Check macvtap mode by ip link command if iface_target == "macvtap" and iface.source.has_key("mode"): cmd = "ip -d link show %s" % iface.target["dev"] output = utils.run(cmd).stdout logging.debug("ip link output: %s", output) mode = iface.source["mode"] if mode == "passthrough": mode = "passthru" if not output.count("macvtap mode %s" % mode): raise error.TestFail("Failed to verify macvtap mode") def run_cmdline_test(iface_mac): """ Test for qemu-kvm command line options """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) ret = utils.run(cmd) logging.debug("Command line %s", ret.stdout) if test_vhost_net: if not ret.stdout.count("vhost=on") and not rm_vhost_driver: raise error.TestFail("Can't see vhost options in" " qemu-kvm command line") if iface_model == "virtio": model_option = "device virtio-net-pci" else: model_option = "device rtl8139" iface_cmdline = re.findall( r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout) if not iface_cmdline: raise error.TestFail("Can't see %s with mac %s in command" " line" % (model_option, iface_mac)) cmd_opt = {} for opt in iface_cmdline[0].split(','): tmp = opt.rsplit("=") cmd_opt[tmp[0]] = tmp[1] logging.debug("Command line options %s", cmd_opt) driver_dict = {} # Test <driver> xml options. 
if iface_driver: iface_driver_dict = ast.literal_eval(iface_driver) for driver_opt in iface_driver_dict.keys(): if driver_opt == "name": continue elif driver_opt == "txmode": if iface_driver_dict["txmode"] == "iothread": driver_dict["tx"] = "bh" else: driver_dict["tx"] = iface_driver_dict["txmode"] elif driver_opt == "queues": driver_dict["mq"] = "on" driver_dict["vectors"] = str( int(iface_driver_dict["queues"]) * 2 + 2) else: driver_dict[driver_opt] = iface_driver_dict[driver_opt] # Test <driver><host/><driver> xml options. if iface_driver_host: driver_dict.update(ast.literal_eval(iface_driver_host)) # Test <driver><guest/><driver> xml options. if iface_driver_guest: driver_dict.update(ast.literal_eval(iface_driver_guest)) for driver_opt in driver_dict.keys(): if (not cmd_opt.has_key(driver_opt) or not cmd_opt[driver_opt] == driver_dict[driver_opt]): raise error.TestFail("Can't see option '%s=%s' in qemu-kvm " " command line" % (driver_opt, driver_dict[driver_opt])) if test_backend: guest_pid = ret.stdout.rsplit()[1] cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid) if utils.system(cmd, ignore_status=True): raise error.TestFail("Guest process didn't open backend file" " %s" % backend["tap"]) cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid) if utils.system(cmd, ignore_status=True): raise error.TestFail("Guest process didn't open backend file" " %s" % backend["tap"]) def get_guest_ip(session, mac): """ Wrapper function to get guest ip address """ utils_net.restart_guest_network(session, mac) # Wait for IP address is ready utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac), 10) return utils_net.get_guest_ip_addr(session, mac) def check_user_network(session): """ Check user network ip address on guest """ vm_ips = [] vm_ips.append(get_guest_ip(session, iface_mac_old)) if attach_device: vm_ips.append(get_guest_ip(session, iface_mac)) logging.debug("IP address on guest: %s", vm_ips) if len(vm_ips) != len(set(vm_ips)): raise error.TestFail("Duplicated IP address on guest. " "Check bug: https://bugzilla.redhat." 
"com/show_bug.cgi?id=1147238") for vm_ip in vm_ips: if vm_ip is None or not vm_ip.startswith("10.0.2."): raise error.TestFail("Found wrong IP address" " on guest") # Check gateway address gateway = utils_net.get_net_gateway(session.cmd_output) if gateway != "10.0.2.2": raise error.TestFail("The gateway on guest is not" " right") # Check dns server address ns_list = utils_net.get_net_nameserver(session.cmd_output) if "10.0.2.3" not in ns_list: raise error.TestFail("The dns server can't be found" " on guest") def check_mcast_network(session): """ Check multicast ip address on guests """ username = params.get("username") password = params.get("password") src_addr = ast.literal_eval(iface_source)['address'] add_session = additional_vm.wait_for_serial_login(username=username, password=password) vms_sess_dict = {vm_name: session, additional_vm.name: add_session} # Check mcast address on host cmd = "netstat -g | grep %s" % src_addr if utils.run(cmd, ignore_status=True).exit_status: raise error.TestFail("Can't find multicast ip address" " on host") vms_ip_dict = {} # Get ip address on each guest for vms in vms_sess_dict.keys(): vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms) vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac) if not vm_ip: raise error.TestFail("Can't get multicast ip" " address on guest") vms_ip_dict.update({vms: vm_ip}) if len(set(vms_ip_dict.values())) != len(vms_sess_dict): raise error.TestFail("Got duplicated multicast ip address") logging.debug("Found ips on guest: %s", vms_ip_dict) # Run omping server on host if not utils_package.package_install(["omping"]): raise error.TestError("Failed to install omping" " on host") cmd = ("iptables -F;omping -m %s %s" % (src_addr, "192.168.122.1 %s" % ' '.join(vms_ip_dict.values()))) # Run a backgroup job waiting for connection of client bgjob = utils.AsyncJob(cmd) # Run omping client on guests for vms in vms_sess_dict.keys(): # omping should be installed first if not utils_package.package_install(["omping"], vms_sess_dict[vms]): raise error.TestError("Failed to install omping" " on guest") cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" % (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms])) ret, output = vms_sess_dict[vms].cmd_status_output(cmd) logging.debug("omping ret: %s, output: %s", ret, output) if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')): raise error.TestFail("omping failed on guest") # Kill the backgroup job bgjob.kill_func() status_error = "yes" == params.get("status_error", "no") start_error = "yes" == params.get("start_error", "no") unprivileged_user = params.get("unprivileged_user") # Interface specific attributes. 
iface_type = params.get("iface_type", "network") iface_source = params.get("iface_source", "{}") iface_driver = params.get("iface_driver") iface_model = params.get("iface_model", "virtio") iface_target = params.get("iface_target") iface_backend = params.get("iface_backend", "{}") iface_driver_host = params.get("iface_driver_host") iface_driver_guest = params.get("iface_driver_guest") attach_device = params.get("attach_iface_device") change_option = "yes" == params.get("change_iface_options", "no") update_device = "yes" == params.get("update_iface_device", "no") additional_guest = "yes" == params.get("additional_guest", "no") serial_login = "******" == params.get("serial_login", "no") rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no") test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no") test_option_xml = "yes" == params.get("test_iface_option_xml", "no") test_vhost_net = "yes" == params.get("test_vhost_net", "no") test_option_offloads = "yes" == params.get("test_option_offloads", "no") test_iface_user = "******" == params.get("test_iface_user", "no") test_iface_mcast = "yes" == params.get("test_iface_mcast", "no") test_libvirtd = "yes" == params.get("test_libvirtd", "no") test_guest_ip = "yes" == params.get("test_guest_ip", "no") test_backend = "yes" == params.get("test_backend", "no") if iface_driver_host or iface_driver_guest or test_backend: if not libvirt_version.version_compare(1, 2, 8): raise error.TestNAError("Offloading/backend options not " "supported in this libvirt version") if iface_driver and "queues" in ast.literal_eval(iface_driver): if not libvirt_version.version_compare(1, 0, 6): raise error.TestNAError("Queues options not supported" " in this libvirt version") if unprivileged_user: if not libvirt_version.version_compare(1, 1, 1): raise error.TestNAError("qemu-bridge-helper not supported" " on this host") virsh_dargs["unprivileged_user"] = unprivileged_user # Create unprivileged user if needed cmd = ("grep {0} /etc/passwd || " "useradd {0}".format(unprivileged_user)) utils.run(cmd) # Need another disk image for unprivileged user to access dst_disk = "/tmp/%s.img" % unprivileged_user # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name) # iface_mac will update if attach a new interface iface_mac = iface_mac_old # Additional vm for test additional_vm = None libvirtd = utils_libvirtd.Libvirtd() try: # Build the xml and run test. try: # Prepare interface backend files if test_backend: if not os.path.exists("/dev/vhost-net"): utils.run("modprobe vhost-net") backend = ast.literal_eval(iface_backend) backend_tap = "/dev/net/tun" backend_vhost = "/dev/vhost-net" if not backend: backend["tap"] = backend_tap backend["vhost"] = backend_vhost if not start_error: # Create backend files for normal test if not os.path.exists(backend["tap"]): os.rename(backend_tap, backend["tap"]) if not os.path.exists(backend["vhost"]): os.rename(backend_vhost, backend["vhost"]) # Edit the interface xml. if change_option: modify_iface_xml(update=False) if rm_vhost_driver: # Check vhost driver. 
            kvm_version = os.uname()[2]
            driver_path = ("/lib/modules/%s/kernel/drivers/vhost/"
                           "vhost_net.ko" % kvm_version)
            driver_backup = driver_path + ".bak"
            cmd = ("modprobe -r {0}; lsmod | "
                   "grep {0}".format("vhost_net"))
            if not utils.system(cmd, ignore_status=True):
                raise error.TestError("Failed to remove vhost_net driver")
            # Move the vhost_net driver
            if os.path.exists(driver_path):
                os.rename(driver_path, driver_backup)
        else:
            # Load vhost_net driver by default
            cmd = "modprobe vhost_net"
            utils.system(cmd)

        # Attach an interface while the vm is shut off
        if attach_device == 'config':
            iface_mac = utils_net.generate_mac_address_simple()
            iface_xml_obj = create_iface_xml(iface_mac)
            iface_xml_obj.xmltreefile.write()
            ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                      flagstr="--config",
                                      ignore_status=True)
            libvirt.check_exit_status(ret)

        # Clone additional vm
        if additional_guest:
            guest_name = "%s_%s" % (vm_name, '1')
            # Clone additional guest
            timeout = params.get("clone_timeout", 360)
            utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                            True, timeout=timeout)
            additional_vm = vm.clone(guest_name)
            additional_vm.start()
            # additional_vm.wait_for_login()

        # Start the VM.
        if unprivileged_user:
            virsh.start(vm_name, **virsh_dargs)
            cmd = ("su - %s -c 'virsh console %s'"
                   % (unprivileged_user, vm_name))
            session = aexpect.ShellSession(cmd)
            session.sendline()
            remote.handle_prompts(session, params.get("username"),
                                  params.get("password"), r"[\#\$]", 30)
            # Get ip address on guest
            if not get_guest_ip(session, iface_mac):
                raise error.TestError("Can't get ip address on guest")
        else:
            # Will raise VMStartError exception if start fails
            vm.start()
            if serial_login:
                session = vm.wait_for_serial_login()
            else:
                session = vm.wait_for_login()

        if start_error:
            raise error.TestFail("VM started unexpectedly")

        # Attach an interface while the vm is running
        if attach_device == 'live':
            iface_mac = utils_net.generate_mac_address_simple()
            iface_xml_obj = create_iface_xml(iface_mac)
            iface_xml_obj.xmltreefile.write()
            ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                      flagstr="--live",
                                      ignore_status=True)
            libvirt.check_exit_status(ret)
            # Need to sleep here for the attachment to take effect
            time.sleep(5)

        # Update the interface options
        if update_device:
            modify_iface_xml(update=True, status_error=status_error)

        # Run tests for qemu-kvm command line options
        if test_option_cmd:
            run_cmdline_test(iface_mac)
        # Run tests for vm xml
        if test_option_xml:
            run_xml_test(iface_mac)
        # Run tests for offloads options
        if test_option_offloads:
            if iface_driver_host:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                check_offloads_option(ifname_guest,
                                      ast.literal_eval(iface_driver_host),
                                      session)
            if iface_driver_guest:
                ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                check_offloads_option(ifname_host,
                                      ast.literal_eval(iface_driver_guest))

        if test_iface_user:
            # Test user type network
            check_user_network(session)
        if test_iface_mcast:
            # Test mcast type network
            check_mcast_network(session)
        # Check guest ip address
        if test_guest_ip:
            if not get_guest_ip(session, iface_mac):
                raise error.TestFail("Guest can't get a valid ip address")

        session.close()
        # Restart libvirtd and guest, then test again
        if test_libvirtd:
            libvirtd.restart()
            vm.destroy()
            vm.start()
            if test_option_xml:
                run_xml_test(iface_mac)

        # Detach hot/cold-plugged interface at last
        if attach_device:
            ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                      flagstr="", ignore_status=True)
            libvirt.check_exit_status(ret)

    except virt_vm.VMStartError as e:
        logging.info(str(e))
        if not start_error:
            raise error.TestFail('VM failed to start\n%s' % e)

finally:
    # Recover VM.
    logging.info("Restoring vm...")
    # Restore interface backend files
    if test_backend:
        if not os.path.exists(backend_tap):
            os.rename(backend["tap"], backend_tap)
        if not os.path.exists(backend_vhost):
            os.rename(backend["vhost"], backend_vhost)
    if rm_vhost_driver:
        # Restore vhost_net driver
        if os.path.exists(driver_backup):
            os.rename(driver_backup, driver_path)
    if unprivileged_user:
        virsh.remove_domain(vm_name, "--remove-all-storage", **virsh_dargs)
    if additional_vm:
        virsh.remove_domain(additional_vm.name, "--remove-all-storage")
        # Kill all omping server processes on host
        utils.system("pidof omping && killall omping", ignore_status=True)
    if vm.is_alive():
        vm.destroy(gracefully=False)
    vmxml_backup.sync()
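

# NOTE: Sketch, not part of the original test.  check_user_network() above
# relies on qemu's default SLIRP (user-mode) subnet: guest addresses in
# 10.0.2.0/24, gateway 10.0.2.2, DNS 10.0.2.3.  The helper below shows the
# same checks with plain shell commands over a guest session; it only
# assumes the session object offers cmd_output(), as the aexpect sessions
# used above do.
import re


def sketch_check_slirp_defaults(session, ifname="eth0"):
    """Return True if the guest looks like it sits behind SLIRP."""
    # Guest address should be inside 10.0.2.0/24
    out = session.cmd_output("ip -4 addr show %s" % ifname)
    addrs = re.findall(r"inet (\d+\.\d+\.\d+\.\d+)", out)
    if not addrs or not all(a.startswith("10.0.2.") for a in addrs):
        return False
    # The default gateway should be the SLIRP router, 10.0.2.2
    route = session.cmd_output("ip route show default")
    if "10.0.2.2" not in route:
        return False
    # The built-in DNS forwarder lives on 10.0.2.3
    resolv = session.cmd_output("cat /etc/resolv.conf")
    return "10.0.2.3" in resolv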
def run_cmdline_test(iface_mac):
    """
    Test for qemu-kvm command line options
    """
    cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
    ret = utils.run(cmd)
    logging.debug("Command line %s", ret.stdout)
    if test_vhost_net:
        if not ret.stdout.count("vhost=on") and not rm_vhost_driver:
            raise error.TestFail("Can't see vhost options in"
                                 " qemu-kvm command line")
    if iface_model == "virtio":
        model_option = "device virtio-net-pci"
    else:
        model_option = "device rtl8139"
    iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
                               (model_option, iface_mac), ret.stdout)
    if not iface_cmdline:
        raise error.TestFail("Can't see %s with mac %s in command"
                             " line" % (model_option, iface_mac))

    cmd_opt = {}
    for opt in iface_cmdline[0].split(','):
        tmp = opt.rsplit("=")
        cmd_opt[tmp[0]] = tmp[1]
    logging.debug("Command line options %s", cmd_opt)

    driver_dict = {}
    # Test <driver> xml options.
    if iface_driver:
        iface_driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in iface_driver_dict.keys():
            if driver_opt == "name":
                continue
            elif driver_opt == "txmode":
                if iface_driver_dict["txmode"] == "iothread":
                    driver_dict["tx"] = "bh"
                else:
                    driver_dict["tx"] = iface_driver_dict["txmode"]
            elif driver_opt == "queues":
                driver_dict["mq"] = "on"
                driver_dict["vectors"] = str(
                    int(iface_driver_dict["queues"]) * 2 + 2)
            else:
                driver_dict[driver_opt] = iface_driver_dict[driver_opt]
    # Test <driver><host/></driver> xml options.
    if iface_driver_host:
        driver_dict.update(ast.literal_eval(iface_driver_host))
    # Test <driver><guest/></driver> xml options.
    if iface_driver_guest:
        driver_dict.update(ast.literal_eval(iface_driver_guest))

    for driver_opt in driver_dict.keys():
        if (driver_opt not in cmd_opt or
                not cmd_opt[driver_opt] == driver_dict[driver_opt]):
            raise error.TestFail("Can't see option '%s=%s' in qemu-kvm"
                                 " command line" %
                                 (driver_opt, driver_dict[driver_opt]))

    if test_backend:
        guest_pid = ret.stdout.rsplit()[1]
        cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
        if utils.system(cmd, ignore_status=True):
            raise error.TestFail("Guest process didn't open backend file"
                                 " %s" % backend["tap"])
        cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
        if utils.system(cmd, ignore_status=True):
            raise error.TestFail("Guest process didn't open backend file"
                                 " %s" % backend["vhost"])
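

# NOTE: Sketch, not part of the original test.  It isolates the <driver> ->
# qemu "-device" property mapping that run_cmdline_test() verifies.  The
# multiqueue rule is the interesting one: N queue pairs need 2*N+2 MSI-X
# vectors (one per RX and one per TX queue, plus one for the control
# virtqueue and one for configuration changes), and libvirt's
# txmode='iothread' shows up as tx=bh on the command line.
def sketch_expected_device_opts(driver_attrs):
    """Map libvirt <driver> attributes to expected qemu -device options."""
    expected = {}
    for key, value in driver_attrs.items():
        if key == "name":
            # 'qemu'/'vhost' picks the backend; it is not a -device property
            continue
        elif key == "txmode":
            expected["tx"] = "bh" if value == "iothread" else value
        elif key == "queues":
            expected["mq"] = "on"
            expected["vectors"] = str(int(value) * 2 + 2)
        else:
            # e.g. event_idx and ioeventfd are passed straight through
            expected[key] = value
    return expected


# Example: {'name': 'vhost', 'queues': '4'} -> {'mq': 'on', 'vectors': '10'}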
def run(test, params, env):
    """
    Test steps:

    1. Boot up two virtual machines.
    2. Transfer data: host <--> guest1 <--> guest2 <--> host via ipv6.
    3. After the data transfer, check that the data has not changed.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    timeout = int(params.get("login_timeout", '360'))
    client = params.get("file_transfer_client")
    port = params.get("file_transfer_port")
    password = params.get("password")
    username = params.get("username")
    tmp_dir = params.get("tmp_dir", "/tmp/")
    filesize = int(params.get("filesize", '4096'))
    dd_cmd = params.get("dd_cmd")
    file_trans_timeout = int(params.get("file_trans_timeout", '1200'))
    file_md5_check_timeout = int(params.get("file_md5_check_timeout", '600'))

    def get_linux_ipv6_linklocal_address(ifname, session=None):
        """
        Get host/guest ipv6 linklocal address via ifname
        """
        if session is not None:
            o = session.cmd_output("ifconfig %s" % ifname)
        else:
            o = utils.system_output("ifconfig %s" % ifname)
        ipv6_address_reg = re.compile(r"(fe80::[^\s|/]*)")
        if o:
            ipv6_linklocal_address = ipv6_address_reg.findall(o)
            if not ipv6_linklocal_address:
                raise error.TestError("Can't get %s linklocal address"
                                      % ifname)
            return ipv6_linklocal_address[0]
        else:
            return None

    def get_file_md5sum(file_name, session, timeout):
        """
        Get file md5sum from guest.
        """
        logging.info("Get md5sum of the file:'%s'" % file_name)
        try:
            o = session.cmd_output("md5sum %s" % file_name, timeout=timeout)
            file_md5sum = re.findall(r"\w+", o)[0]
        except IndexError:
            raise error.TestError("Could not get file md5sum in guest")
        return file_md5sum

    sessions = {}
    addresses = {}
    inet_name = {}
    vms = []
    error.context("Boot vms for test", logging.info)
    for vm_name in params.get("vms", "vm1 vm2").split():
        vms.append(env.get_vm(vm_name))

    # Configure the ipv6 addresses of host and guests.
host_ifname = params.get("netdst") for vm in vms: vm.verify_alive() sessions[vm] = vm.wait_for_login(timeout=timeout) inet_name[vm] = utils_net.get_linux_ifname(sessions[vm], vm.get_mac_address()) addresses[vm] = get_linux_ipv6_linklocal_address( inet_name[vm], sessions[vm]) # prepare test data guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) dest_path = (tmp_dir + "dst-%s" % utils_misc.generate_random_string(8)) host_path = os.path.join(test.tmpdir, "tmp-%s" % utils_misc.generate_random_string(8)) logging.info("Test setup: Creating %dMB file on host", filesize) utils.run(dd_cmd % (host_path, filesize)) try: src_md5 = (utils.hash_file(host_path, method="md5")) # transfer data for vm in vms: error.context("Transfer data from host to %s" % vm.name, logging.info) remote.copy_files_to(addresses[vm], client, username, password, port, host_path, guest_path, timeout=file_trans_timeout, interface=host_ifname) dst_md5 = get_file_md5sum(guest_path, sessions[vm], timeout=file_md5_check_timeout) if dst_md5 != src_md5: raise error.TestFail("File changed after transfer host -> %s" % vm.name) for vm_src in addresses: for vm_dst in addresses: if vm_src != vm_dst: error.context( "Transferring data from %s to %s" % (vm_src.name, vm_dst.name), logging.info) remote.scp_between_remotes(addresses[vm_src], addresses[vm_dst], port, password, password, username, username, guest_path, dest_path, timeout=file_trans_timeout, src_inter=host_ifname, dst_inter=inet_name[vm_src]) dst_md5 = get_file_md5sum(dest_path, sessions[vm_dst], timeout=file_md5_check_timeout) if dst_md5 != src_md5: raise error.TestFail("File changed transfer %s -> %s" % (vm_src.name, vm_dst.name)) for vm in vms: error.context("Transfer data from %s to host" % vm.name, logging.info) remote.copy_files_from(addresses[vm], client, username, password, port, dest_path, host_path, timeout=file_trans_timeout, interface=host_ifname) error.context("Check whether the file changed after trans", logging.info) dst_md5 = (utils.hash_file(host_path, method="md5")) if dst_md5 != src_md5: raise error.TestFail("File changed after transfer", "Files md5sum mismatch!") utils.system_output("rm -rf %s" % host_path, timeout=timeout) finally: utils.system("rm -rf %s" % host_path, timeout=timeout, ignore_status=True) for vm in vms: sessions[vm].cmd("rm -rf %s %s || true" % (guest_path, dest_path), timeout=timeout, ignore_all_errors=True) sessions[vm].close()
def run_whql_env_setup(test, params, env):
    """
    KVM whql env setup test:
    1) Log into a guest.
    2) Update the Windows kernel to the newest version.
    3) Un-check "Automatically restart" on system failure.
    4) Disable UAC.
    5) Get the symbol files.
    6) Set the VM pagefile to physical memory + 100M.
    7) Update the nic configuration.
    8) Install debug view and make it auto run.

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    log_path = "%s/../debug" % test.resultsdir
    # Prepare the tools iso
    error.context("Prepare the tools iso", logging.info)
    src_list = params.get("src_list")
    src_path = params.get("src_path", "%s/whql_src" % test.tmpdir)
    if not os.path.exists(src_path):
        os.makedirs(src_path)
    if src_list is not None:
        for i in re.split(",", src_list):
            utils.unmap_url(src_path, i, src_path)

    # Make iso for src
    cdrom_whql = params.get("cdrom_whql")
    cdrom_whql = utils_misc.get_path(data_dir.get_data_dir(), cdrom_whql)
    cdrom_whql_dir = os.path.split(cdrom_whql)[0]
    if not os.path.exists(cdrom_whql_dir):
        os.makedirs(cdrom_whql_dir)
    cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path)
    utils.system(cmd)
    params["cdroms"] += " whql"

    vm = "vm1"
    vm_params = params.object_params(vm)
    env_process.preprocess_vm(test, vm_params, env, vm)
    vm = env.get_vm(vm)

    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)

    error_log = utils_misc.get_path(log_path, "whql_setup_error_log")
    run_guest_log = params.get("run_guest_log",
                               "%s/whql_qemu_comman" % test.tmpdir)

    # Record the qemu command line in a log file
    error.context("Record qemu command line", logging.info)
    if os.path.isfile(run_guest_log):
        fd = open(run_guest_log, "r+")
        fd.read()
    else:
        fd = open(run_guest_log, "w")
    fd.write("%s\n" % vm.qemu_command)
    fd.close()

    # Get set up commands
    update_cmd = params.get("update_cmd", "")
    timezone_cmd = params.get("timezone_cmd", "")
    auto_restart = params.get("auto_restart", "")
    qxl_install = params.get("qxl_install", "")
    debuggers_install = params.get("debuggers_install", "")
    disable_uas = params.get("disable_uas", "")
    symbol_files = params.get("symbol_files", "")
    vm_size = int(params.get("mem")) + 100
    nic_cmd = params.get("nic_config_cmd", "")
    dbgview_cmd = params.get("dbgview_cmd", "")
    format_cmd = params.get("format_cmd", "")
    disable_firewall = params.get("disable_firewall", "")
    disable_update = params.get("disable_update", "")
    setup_timeout = int(params.get("setup_timeout", "7200"))
    disk_init_cmd = params.get("disk_init_cmd", "")
    disk_driver_install = params.get("disk_driver_install", "")

    vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False"
    vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set "
    vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size)
    # The pagefile commands are currently disabled by clearing them here,
    # so they are skipped when cmd_list is executed below.
    vm_ma_cmd = ""
    vm_cmd = ""
    if symbol_files:
        symbol_cmd = "del C:\\\\symbols &&"
        symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \
            symbol_files
    else:
        symbol_cmd = ""
    wmic_prepare_cmd = "echo exit > cmd && cmd /s wmic"

    error.context("Configure guest system", logging.info)
    cmd_list = [wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd,
                vm_ma_cmd, vm_cmd, dbgview_cmd, qxl_install,
                disable_firewall, timezone_cmd]
    if nic_cmd:
        for index, nic in enumerate(re.split(r"\s+", params.get("nics"))):
            setup_params = params.get("nic_setup_params_%s" % nic, "")
            if params.get("platform", "") == "x86_64":
                nic_cmd = re.sub("set", "set_64", nic_cmd)
            cmd_list.append("%s %s %s" % (nic_cmd, str(index + 1),
                                          setup_params))
    if disk_init_cmd:
        disk_num = len(re.split(r"\s+", params.get("images")))
        if disk_driver_install:
            cmd_list.append(disk_driver_install + str(disk_num - 1))
        labels = "IJKLMNOPQRSTUVWXYZ"
        for index, images in enumerate(re.split(r"\s+",
                                                params.get("images"))):
            if index > 0:
                cmd_list.append(disk_init_cmd % (str(index),
                                                 labels[index - 1]))
                format_cmd_image = format_cmd % (
                    labels[index - 1],
                    params.get("win_format_%s" % images))
                if params.get("win_extra_%s" % images):
                    format_cmd_image += " %s" % params.get(
                        "win_extra_%s" % images)
                cmd_list.append(format_cmd_image)
    cmd_list += [update_cmd, disable_update]

    failed_flag = 0
    # Check symbol files in guest
    if symbol_files:
        error.context("Update symbol files", logging.info)
        install_check_tool = False
        check_tool_chk = params.get("check_tool_chk",
                                    r"C:\debuggers\symchk.exe")
        output = session.cmd_output(check_tool_chk)
        if "cannot find" in output:
            install_check_tool = True
        if install_check_tool:
            output = session.cmd_output(debuggers_install)
        symbol_file_check = params.get("symbol_file_check")
        symbol_file_download = params.get("symbol_file_download")

        symbol_check_pattern = params.get("symbol_check_pattern")
        symbol_pid_pattern = params.get("symbol_pid_pattern")
        download = utils_test.BackgroundTest(session.cmd,
                                             (symbol_file_download,
                                              setup_timeout))

        sessioncheck = vm.wait_for_login(timeout=timeout)
        download.start()
        while download.is_alive():
            o = sessioncheck.cmd_output(symbol_file_check, setup_timeout)
            if symbol_check_pattern in o:
                # Check is done; kill the download process
                cmd = "tasklist /FO list"
                s, o = sessioncheck.cmd_status_output(cmd)
                pid = re.findall(symbol_pid_pattern, o, re.S)
                if pid:
                    cmd = "taskkill /PID %s /F" % pid[0]
                    try:
                        sessioncheck.cmd(cmd)
                    except Exception:
                        pass
                break
            time.sleep(5)
        sessioncheck.close()
        download.join()

    for cmd in cmd_list:
        if len(cmd) > 0:
            s = 0
            try:
                s, o = session.cmd_status_output(cmd, timeout=setup_timeout)
            except Exception as err:
                failed_flag += 1
                utils_misc.log_line(error_log,
                                    "Unexpected exception: %s" % err)
            if s != 0:
                failed_flag += 1
                utils_misc.log_line(error_log, o)
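

# NOTE: Sketch, not part of the original test.  It isolates the drive-letter
# rule used by the disk loop above: image 0 is the system disk, and every
# following image gets a letter starting at I:, with an init command and a
# format command queued for it.  The two template strings here are made-up
# stand-ins for the real disk_init_cmd/format_cmd params.
def sketch_disk_setup_cmds(images, disk_init_tpl="disk_init %s %s:",
                           format_tpl="format %s: /FS:%s /Y"):
    """Build init+format commands for every non-system disk image."""
    labels = "IJKLMNOPQRSTUVWXYZ"
    cmds = []
    for index, image in enumerate(images):
        if index == 0:
            continue  # leave the system disk alone
        letter = labels[index - 1]
        cmds.append(disk_init_tpl % (index, letter))
        cmds.append(format_tpl % (letter, "NTFS"))
    return cmds


# Example: ["sys", "data1", "data2"] queues commands for drives I: and J: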
def run_nicdriver_unload(test, params, env):
    """
    Test nic driver load/unload.

    1) Boot a VM.
    2) Get the NIC driver name.
    3) Multi-session TCP transfer on test interface.
    4) Repeatedly unload/load NIC driver during file transfer.
    5) Check that the test interface still works.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def all_threads_done(threads):
        for thread in threads:
            if thread.isAlive():
                return False
        return True

    def all_threads_alive(threads):
        for thread in threads:
            if not thread.isAlive():
                return False
        return True

    timeout = int(params.get("login_timeout", 360))
    transfer_timeout = int(params.get("transfer_timeout", 1000))
    filesize = int(params.get("filesize", 512))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)

    error.base_context("Test env prepare")
    error.context("Get NIC interface name in guest.", logging.info)
    ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))
    # Get the ethernet driver from the '/sys' directory.
    # ethtool can do the same thing and doesn't care about os type.
    # If we make sure all guests have ethtool, we can make a change here.
    sys_path = params.get("sys_path") % (ethname)
    # readlink in RHEL4.8 doesn't have '-e' param, should use '-f' in RHEL4.8.
    readlink_cmd = params.get("readlink_command", "readlink -e")
    driver = os.path.basename(session.cmd("%s %s" % (readlink_cmd,
                                                     sys_path)).strip())
    logging.info("The guest interface %s is using driver %s"
                 % (ethname, driver))

    error.context("Host test file prepare, create %dMB file on host"
                  % filesize, logging.info)
    tmp_dir = data_dir.get_tmp_dir()
    host_path = os.path.join(tmp_dir, "host_file_%s" %
                             utils_misc.generate_random_string(8))
    guest_path = os.path.join("/home", "guest_file_%s" %
                              utils_misc.generate_random_string(8))

    cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, filesize)
    utils.run(cmd)
    file_checksum = utils.hash_file(host_path, "md5")

    error.context("Guest test file prepare, copy file %s from host to guest"
                  % host_path, logging.info)
    vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout)
    if session.cmd_status("md5sum %s | grep %s" %
                          (guest_path, file_checksum)):
        raise error.TestNAError("File MD5SUMs changed after copy to guest")
    logging.info("Test env prepared successfully")

    error.base_context("Nic driver load/unload testing", logging.info)
    session_serial = vm.wait_for_serial_login(timeout=timeout)
    try:
        error.context("Transfer file between host and guest", logging.info)
        threads = []
        file_paths = []
        host_file_paths = []
        for sess_index in range(int(params.get("sessions_num", "10"))):
            sess_path = os.path.join("/home", "dst-%s" % sess_index)
            host_sess_path = os.path.join(tmp_dir, "dst-%s" % sess_index)

            thread1 = utils.InterruptedThread(vm.copy_files_to,
                                              (host_path, sess_path),
                                              {"timeout": transfer_timeout})
            thread2 = utils.InterruptedThread(vm.copy_files_from,
                                              (guest_path, host_sess_path),
                                              {"timeout": transfer_timeout})
            thread1.start()
            threads.append(thread1)
            thread2.start()
            threads.append(thread2)
            file_paths.append(sess_path)
            host_file_paths.append(host_sess_path)

        utils_misc.wait_for(lambda: all_threads_alive(threads), 60, 10, 1)

        time.sleep(5)
        error.context("Repeatedly unload/load NIC driver during file "
                      "transfer", logging.info)
        while not all_threads_done(threads):
            error.context("Shutdown the driver for NIC interface.",
                          logging.info)
            session_serial.cmd_output_safe("ifconfig %s down" % ethname)
            error.context("Unload NIC driver.", logging.info)
            session_serial.cmd_output_safe("modprobe -r %s" % driver)
            error.context("Load NIC driver.", logging.info)
            session_serial.cmd_output_safe("modprobe %s" % driver)
            error.context("Activate NIC driver.", logging.info)
            session_serial.cmd_output_safe("ifconfig %s up" % ethname)
            session_serial.cmd_output_safe("sleep %s" %
                                           random.randint(10, 60))

        # Check the md5sums of the transferred files
        error.context("File transfer finished, checking files md5sums",
                      logging.info)
        err_info = []
        for copied_file in file_paths:
            if session_serial.cmd_status("md5sum %s | grep %s" %
                                         (copied_file, file_checksum)):
                err_msg = "Guest file %s md5sum changed"
                err_info.append(err_msg % copied_file)
        for copied_file in host_file_paths:
            if utils.system("md5sum %s | grep %s" %
                            (copied_file, file_checksum)):
                err_msg = "Host file %s md5sum changed"
                err_info.append(err_msg % copied_file)
        if err_info:
            raise error.TestError("Files MD5SUMs changed after copying: %s"
                                  % err_info)
    except Exception:
        for thread in threads:
            thread.join(suppress_exception=True)
        raise
    else:
        for thread in threads:
            thread.join()
        for copied_file in file_paths:
            session_serial.cmd("rm -rf %s" % copied_file)
        for copied_file in host_file_paths:
            utils.system("rm -rf %s" % copied_file)
        session_serial.cmd("%s %s" % ("rm -rf", guest_path))
        os.remove(host_path)
        session.close()
        session_serial.close()