def run_once(self, test_name):
    self.job.require_gcc()

    stress_ng = os.path.join(self.srcdir, 'stress-ng', 'stress-ng')

    #
    # device to use for btrfs
    #
    dev = self.dev
    #
    # mount point for btrfs
    #
    mnt = '/tmp/mnt-btrfs'
    #
    # temp logfile
    #
    log = '/tmp/btrfs-failure.log'
    #
    # stress-ng "quick fire" short life tests
    #
    dur = '5s'
    cmd = 'DEV=%s MNT=%s LOG=%s STRESS_NG=%s DURATION=%s %s/ubuntu_stress_btrfs.sh 2>&1' % (dev, mnt, log, stress_ng, dur, self.bindir)
    self.results = utils.system_output(cmd, retain_output=True)

    #
    # stress-ng "long soak" tests
    #
    dur = '1m'
    cmd = 'DEV=%s MNT=%s LOG=%s STRESS_NG=%s DURATION=%s %s/ubuntu_stress_btrfs.sh 2>&1' % (dev, mnt, log, stress_ng, dur, self.bindir)
    self.results = utils.system_output(cmd, retain_output=True)
def execute(self, iterations=1, workfile='workfile.short',
            start=1, end=10, increment=2, extra_args='', tmpdir=None):
    if not tmpdir:
        tmpdir = self.tmpdir

    # -f workfile
    # -s <number of users to start with>
    # -e <number of users to end with>
    # -i <number of users to increment>
    workfile = os.path.join('data', workfile)
    args = "-f %s -s %d -e %d -i %d" % (workfile, start, end, increment)
    config = os.path.join(self.srcdir, 'reaim.config')
    utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
    args += ' -c ./reaim.config'
    open(config, 'a+').write("DISKDIR %s\n" % tmpdir)
    os.chdir(self.srcdir)

    cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args

    results = []

    profilers = self.job.profilers
    if not profilers.only():
        for i in range(iterations):
            results.append(utils.system_output(cmd, retain_output=True))

    # Do a profiling run if necessary
    if profilers.present():
        profilers.start(self)
        results.append(utils.system_output(cmd, retain_output=True))
        profilers.stop(self)
        profilers.report(self)

    self.__format_results("\n".join(results))
def install_required_pkgs(self):
    arch = platform.processor()
    series = platform.dist()[2]

    pkgs = [
        'perl', 'build-essential', 'gdb', 'git', 'ksh', 'autoconf',
        'acl', 'dump', 'kpartx', 'pax', 'nfs-kernel-server', 'xfsprogs',
        'libattr1-dev',
    ]
    gcc = 'gcc' if arch in ['ppc64le', 'aarch64', 's390x'] else 'gcc-multilib'
    pkgs.append(gcc)

    if series in ['precise', 'trusty']:
        utils.system_output('add-apt-repository ppa:zfs-native/stable -y', retain_output=True)
        utils.system_output('apt-get update || true', retain_output=True)
        pkgs.append('ubuntu-zfs')
    elif series == 'wily':
        pkgs.append('zfs-dkms')
        pkgs.append('zfsutils-linux')
    else:
        pkgs.append('zfsutils-linux')

    cmd = 'apt-get install --yes --force-yes ' + ' '.join(pkgs)
    self.results = utils.system_output(cmd, retain_output=True)
def run_once(self, testname=['compiler'], do='run'):
    cmd = 'phoronix-test-suite'
    utils.system_output(cmd + ' list-available-suites', retain_output=True)
    for test in testname:
        res_dir = test + '-result'
        des_conf = test + '-conf'
        self.results_path = os.path.join(self.resultsdir,
                                         'raw_output_%s' % test)
        if test == 'desktop-graphics':
            base_cmd = cmd + ' ' + do + ' ' + test
            todo = ''' <<TODO
y
''' + res_dir + \
                '''
''' + des_conf + '''
n
TODO'''
            self.results = utils.system_output('export DISPLAY=:0.0;' + base_cmd + todo,
                                               retain_output=True)
            utils.open_write_close(self.results_path, self.results)
        else:
            fout = open(self.results_path, 'w')
            runpts = pexpect.spawn('%s %s %s' % (cmd, do, test))
            runpts.logfile = fout
            while 1:
                index = runpts.expect(['\(Y/n\):', pexpect.EOF, pexpect.TIMEOUT])
                print index
                logging.info("now start first expect.........")
                if index == 0:
                    break
                elif index == 1:
                    pass
                elif index == 2:
                    pass
            runpts.sendline('y')
            logging.debug(runpts.before)
            runpts.expect('under:')
            runpts.sendline(res_dir)
            logging.debug(runpts.before)
            runpts.expect('configuration:')
            runpts.sendline(des_conf)
            logging.debug(runpts.before)
            runpts.expect('Description:')
            runpts.sendline('')
            logging.debug(runpts.before)
            logging.info("now start run test..............")
            while 1:
                index = runpts.expect(['OpenBenchmarking.org', pexpect.EOF, pexpect.TIMEOUT])
                index = runpts.expect(['\(Y/n\):', pexpect.EOF, pexpect.TIMEOUT])
                if index == 0:
                    break
                elif index == 1:
                    pass
                elif index == 2:
                    pass
            runpts.sendline('n')
            logging.debug(runpts.before)
            runpts.expect(pexpect.EOF)
            fout.close()
def unmount_force(self):
    """
    Kill all other jobs accessing this partition. Use fuser and ps to find
    all mounts on this mountpoint and unmount them.

    :return: true for success or false for any errors
    """
    logging.debug("Standard umount failed, will try forcing. Users:")
    try:
        cmd = 'fuser ' + self.get_mountpoint()
        logging.debug(cmd)
        fuser = utils.system_output(cmd)
        logging.debug(fuser)
        users = re.sub('.*:', '', fuser).split()
        for user in users:
            m = re.match('(\d+)(.*)', user)
            (pid, usage) = (m.group(1), m.group(2))
            try:
                ps = utils.system_output('ps -p %s | sed 1d' % pid)
                logging.debug('%s %s %s' % (usage, pid, ps))
            except Exception:
                pass
        utils.system('ls -l ' + self.device)
        umount_cmd = "umount -f " + self.device
        utils.system(umount_cmd)
        return True
    except error.CmdError:
        logging.debug('Umount_force failed for %s' % self.device)
        return False
def run_once(self, dev="", devices="", extra_args="", tmpdir=None):
    # @dev: The device against which the trace will be replayed.
    #       e.g. "sdb" or "md_d1"
    # @devices: A space-separated list of the underlying devices
    #       which make up dev, e.g. "sdb sdc". You only need to set
    #       devices if dev is an MD, LVM, or similar device;
    #       otherwise leave it as an empty string.
    if not tmpdir:
        tmpdir = self.tmpdir

    os.chdir(self.srcdir)

    alldevs = "-d /dev/" + dev
    alldnames = dev
    for d in devices.split():
        alldevs += " -d /dev/" + d
        alldnames += " " + d

    # convert the trace (assumed to be in this test's base
    # directory) into btreplay's required format
    #
    # TODO: The test currently halts here as there is no trace in the
    # test's base directory.
    cmd = "./btreplay/btrecord -d .. -D %s %s" % (tmpdir, dev)
    self.results.append(utils.system_output(cmd, retain_output=True))

    # time a replay that omits "thinktime" between requests
    # (by use of the -N flag)
    cmd = (self.ldlib + " /usr/bin/time ./btreplay/btreplay -d " +
           tmpdir + " -N -W " + dev + " " + extra_args + " 2>&1")
    self.results.append(utils.system_output(cmd, retain_output=True))

    # trace a replay that reproduces inter-request delays, and
    # analyse the trace with btt to determine the average request
    # completion latency
    utils.system("./blktrace -D %s %s >/dev/null &" % (tmpdir, alldevs))
    cmd = self.ldlib + " ./btreplay/btreplay -d %s -W %s %s" % (tmpdir, dev, extra_args)
    self.results.append(utils.system_output(cmd, retain_output=True))
    utils.system("killall -INT blktrace")

    # wait until blktrace is really done
    slept = 0.0
    while utils.system("ps -C blktrace > /dev/null",
                       ignore_status=True) == 0:
        time.sleep(0.1)
        slept += 0.1
        if slept > 30.0:
            utils.system("killall -9 blktrace")
            raise error.TestError("blktrace failed to exit in 30 seconds")

    utils.system("./blkparse -q -D %s -d %s/trace.bin -O %s >/dev/null" %
                 (tmpdir, tmpdir, alldnames))
    cmd = "./btt/btt -i %s/trace.bin" % tmpdir
    self.results.append(utils.system_output(cmd, retain_output=True))
def convert(package, destination_format):
    """\
    Convert packages with the 'alien' utility. If alien is not installed,
    it throws a NotImplementedError exception.

    returns: filename of the package generated.
    """
    try:
        os_dep.command('alien')
    except Exception:
        e_msg = 'Cannot convert to %s, alien not installed' % destination_format
        raise error.TestError(e_msg)

    # alien supports converting to many formats, but it's interesting to map
    # conversions only for the implemented package types.
    if destination_format == 'dpkg':
        deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
        conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
        converted_package = re.findall(deb_pattern, conv_output)[0]
    elif destination_format == 'rpm':
        rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
        conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
        converted_package = re.findall(rpm_pattern, conv_output)[0]
    else:
        e_msg = 'Conversion to format %s not implemented' % destination_format
        raise NotImplementedError(e_msg)

    print 'Package %s successfully converted to %s' % \
        (os.path.basename(package), os.path.basename(converted_package))
    return os.path.abspath(converted_package)
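# Hedged usage sketch (illustrative, not part of the original module; the
# package path below is hypothetical): on a Debian-based host with 'alien'
# installed, a foreign .rpm can be converted and then installed, e.g.:
#
#     deb_path = convert('/tmp/example-package.rpm', 'dpkg')
#     utils.system('dpkg -i %s' % deb_path)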
def start_teamd_with_conf(self):
    """
    Specify configuration file for teamd
    Start teamd with -f option
    Verify team2 is created using ip command
    Verify the same using teamdctl
    Not using teamnl here as it's covered in teamnl test
    """
    self.teamd_conf_p = subprocess.Popen(["teamd", "-f",
                                          "%s/teamd_conf" % self.tmpdir])
    time.sleep(5)
    self.pid_conf = self.teamd_conf_p.pid
    if self.teamd_conf_p.poll() is not None:
        self.nfail += 1
        raise error.TestError("\nFailed to start teamd using -f option")
    teamd_out = utils.system_output("ip link")
    if "team2:" not in teamd_out:
        self.nfail += 1
        raise error.TestError("\nFailed to start teamd with specified device")

    # verify using teamdctl
    out_state = utils.system_output("teamdctl team2 state")
    expected_out = """ports:
  veth0
"""
    if expected_out not in out_state or "veth1" not in out_state:
        self.nfail += 1
        raise error.TestError("\nteamdctl failed to display veth0 and veth1 in state output")
def setup(self):
    cmd = 'apt-get install zlib1g-dev libbsd-dev libattr1-dev libkeyutils-dev libapparmor-dev apparmor libaio-dev --assume-yes --allow-downgrades --allow-change-held-packages'
    utils.system_output(cmd, retain_output=True)

    os.chdir(self.srcdir)
    cmd = 'git clone git://kernel.ubuntu.com/cking/stress-ng'
    self.results = utils.system_output(cmd, retain_output=True)

    os.chdir(os.path.join(self.srcdir, 'stress-ng'))
    self.results = utils.system_output('make', retain_output=True)
def setup(self):
    os.chdir(self.srcdir)
    cmd = 'git clone https://github.com/redpig/seccomp.git'
    self.results = utils.system_output(cmd, retain_output=True)

    os.chdir(os.path.join(self.srcdir, 'seccomp', 'tests'))
    cmd = 'make'
    self.results = utils.system_output(cmd, retain_output=True)
def initialize(self):
    # Yes, the following is a horrible hack.
    #
    utils.system_output('apt-get update', retain_output=True)
    time.sleep(60)
    utils.system_output('apt-get update', retain_output=True)
    self.install_required_pkgs()
    self.job.require_gcc()
def run_once(self, test_name):
    os.chdir(os.path.join(self.srcdir, 'unionmount-testsuite'))

    if test_name == 'unionmount':
        cmd = './run --um'
        self.results = utils.system_output(cmd, retain_output=True)
    elif test_name == 'overlayfs':
        cmd = './run --ov'
        self.results = utils.system_output(cmd, retain_output=True)
def setup(self, source_type, source_location, disk_addr, patches, **kwargs):
    if source_type == "tar":
        tarball = utils.unmap_url(self.bindir, source_location, self.tmpdir)
        self.repodir = os.path.join(self.tmpdir, "scsi_testsuite")
        utils.extract_tarball_to_dir(tarball, self.repodir)
    elif source_type == "git":
        self.repodir = git.get_repo(source_location)
    else:
        raise UnknownSourceType(source_type)

    sm = software_manager.SoftwareManager()
    for utility in ['/usr/bin/sg_raw', '/usr/bin/lsscsi']:
        if not os.access(utility, os.X_OK):
            logging.debug("%s missing - trying to install", utility)
            pkg = sm.provides(utility)
            if pkg is None:
                raise SCSIUtilNotAvailable(utility)
            else:
                sm.install(pkg)

    self.devname = ""
    if disk_addr[0] == "scsi":
        addr = (disk_addr[1]["host"],
                disk_addr[1]["channel"],
                disk_addr[1]["target"],
                disk_addr[1]["lun"])
        self.devname = utils.system_output(
            "lsscsi %d %d %d %d | sed -n 's,.*/dev,/dev,p' " % addr)
    elif disk_addr[0] == "serial":
        disklist = os.listdir("/dev/disk/by-id/")
        for diskfile in disklist:
            if re.match("scsi-.*%s$" % disk_addr[1], diskfile) is not None:
                self.devname = os.path.join("/dev/disk/by-id", diskfile)
                break
    elif disk_addr[0] == "file":
        if os.access(disk_addr[1], os.F_OK):
            self.devname = disk_addr[1]

    if self.devname == "":
        output = utils.system_output("lsscsi")
        logging.debug(output)
        raise error.TestFail("Disk not found, cannot execute tests")

    try:
        cf = open(self.scsi_testsuite_config, "w")
        cf.write("export TEST_DEV=%s" % self.devname)
        cf.close()
    except IOError:
        logging.warning("Can't write configuration file. Using defaults")

    for patch in patches:
        utils.system("cd %s; patch -p1 < %s/%s" % (self.repodir, self.bindir, patch))
def list_mount_devices():
    devices = []
    # list mounted filesystems
    for line in utils.system_output('mount').splitlines():
        devices.append(line.split()[0])
    # list mounted swap devices
    for line in utils.system_output('swapon -s').splitlines():
        if line.startswith('/'):    # skip header line
            devices.append(line.split()[0])
    return devices
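# Hedged usage sketch (illustrative; the device path is hypothetical): callers
# such as the mkfs() helper in this corpus use this function to refuse to
# format a device that is currently mounted, e.g.:
#
#     if list_mount_devices().count('/dev/sdb1'):
#         raise NameError('Attempted to format mounted device /dev/sdb1')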
def setup(self, tarball='ubuntu_qrt_kernel.tar.bz2'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    print(utils.system_output('head %s/scripts/bzr.log' % self.srcdir, retain_output=True))

    # For these tests we need to run as a non-root user. Part of the test is
    # compiling some test code and so the non-root user needs to be able to
    # create files in the tmp directory space.
    #
    self.results = utils.system_output('find %s -type d | xargs chmod 777' % self.srcdir, retain_output=True)
def get_macvtap_device_on_ifname(ifname):
    macvtaps = []
    ip_link_out = utils.system_output("ip -d link show")
    re_str = "(\S*)@%s" % ifname
    devices = re.findall(re_str, ip_link_out)
    for device in devices:
        out = utils.system_output("ip -d link show %s" % device)
        if "macvtap mode" in out:
            macvtaps.append(device)
    return macvtaps
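# Hedged usage sketch (illustrative, not part of the original module; the
# interface name "eth0" is hypothetical): listing any macvtap devices stacked
# on a host interface, e.g.:
#
#     for tap in get_macvtap_device_on_ifname("eth0"):
#         logging.debug("macvtap %s is attached to eth0", tap)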
def install(self, tag='autotest', install_vmlinux=True):
    self.installed_as = tag

    self.image = None
    self.initrd = ''
    for rpm_pack in self.rpm_package:
        rpm_name = utils.system_output('rpm -qp ' + rpm_pack)

        # install without dependencies (e.g., kernel-firmware)
        utils.system('rpm -i --force --nodeps ' + rpm_pack)

        # get file list
        files = utils.system_output('rpm -ql ' + rpm_name).splitlines()

        # search for vmlinuz
        for file in files:
            if file.startswith(self.kernel_string):
                self.full_version = file[len(self.kernel_string + '-'):]
                self.image = file
                self.rpm_flavour = rpm_name.split('-')[1]

                # get version and release number
                r_cmd = ('rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" '
                         '-q ' + rpm_name)
                (self.version, self.release) = utils.system_output(
                    r_cmd).splitlines()[0:2]

                # prefer /boot/kernel-version before /boot/kernel
                if self.full_version:
                    break

        # search for initrd
        for file in files:
            if file.startswith('/boot/init'):
                self.initrd = file
                # prefer /boot/initrd-version before /boot/initrd
                if len(file) > len('/boot/initrd'):
                    break

    if self.image is None:
        errmsg = "specified rpm file(s) don't contain /boot/vmlinuz"
        raise error.TestError(errmsg)

    # install vmlinux
    if install_vmlinux:
        for rpm_pack in self.rpm_package:
            vmlinux = utils.system_output(
                'rpm -q -l -p %s | grep /boot/vmlinux' % rpm_pack)
            utils.system('cd /; rpm2cpio %s | cpio -imuv .%s 2>&1' %
                         (rpm_pack, vmlinux))
            if not os.path.exists(vmlinux):
                raise error.TestError('%s does not exist after installing %s'
                                      % (vmlinux, rpm_pack))
def check_installed(self, name):
    if os.path.isfile(name):
        n_cmd = self.lowlevel_base_cmd + " -f " + name + " Package 2>/dev/null"
        name = utils.system_output(n_cmd)
    i_cmd = self.lowlevel_base_cmd + "--show -f='${Status}' " + name + " 2>/dev/null"
    # Checking if package is installed
    package_status = utils.system_output(i_cmd, ignore_status=True)
    dpkg_not_installed = package_status != self.INSTALLED_OUTPUT
    if dpkg_not_installed:
        return False
    return True
def setup(self):
    os.chdir(self.srcdir)
    cmd = 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/dvhart/futextest.git'
    self.results = utils.system_output(cmd, retain_output=True)

    os.chdir(os.path.join(self.srcdir, 'futextest', 'functional'))
    cmd = 'sed -i s/lpthread/pthread/ Makefile'
    self.results = utils.system_output(cmd, retain_output=True)
    cmd = 'make'
    self.results = utils.system_output(cmd, retain_output=True)
def _dpkg_info(dpkg_package):
    """\
    Private function that returns a dictionary with information about a
    dpkg package file

    - type: Package management program that handles the file
    - system_support: If the package management program is installed on the
      system or not
    - source: If it is a source (True) or binary (False) package
    - version: The package version (or name), that is used to check against
      the package manager if the package is installed
    - arch: The architecture for which a binary package was built
    - installed: Whether the package is installed (True) on the system or
      not (False)
    """
    # We will make good use of what the file command has to tell us about the
    # package :)
    file_result = utils.system_output('file ' + dpkg_package)
    package_info = {}
    package_info['type'] = 'dpkg'
    # There's no single debian source package as is the case
    # with RPM
    package_info['source'] = False
    try:
        os_dep.command('dpkg')
        # Build the command strings that will be used to get package info
        # a_cmd - Command to determine package architecture
        # v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
        a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
        v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
        i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>&1'

        package_info['system_support'] = True
        package_info['version'] = utils.system_output(v_cmd)
        package_info['arch'] = utils.system_output(a_cmd)
        # Checking if package is installed
        package_status = utils.system_output(i_cmd, ignore_status=True)
        not_inst_pattern = re.compile('not[ -]installed', re.IGNORECASE)
        dpkg_not_installed = re.search(not_inst_pattern, package_status)
        if dpkg_not_installed:
            package_info['installed'] = False
        else:
            package_info['installed'] = True

    except Exception:
        package_info['system_support'] = False
        package_info['installed'] = False
        # The output of file is not as generous for dpkg files as
        # it is with rpm files
        package_info['arch'] = 'Not Available'
        package_info['version'] = 'Not Available'

    return package_info
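# Hedged example (illustrative only; the concrete values are hypothetical):
# for an installed binary package the returned dictionary looks roughly like
#
#     {'type': 'dpkg', 'source': False, 'system_support': True,
#      'version': 'openssh-client', 'arch': 'amd64', 'installed': True}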
def teamnl_test(self):
    """
    Test teamnl
    Switches tested:
    ports
    getoption
    item get
    setoption : setoption should fail if ports present
                remove ports and try setoption
    monitor: change an option using "teamnl setoption" and verify
             if monitor will report this.
    """
    teamnl_out = utils.system_output("teamnl team2 ports")
    if "veth0:" not in teamnl_out or "veth1:" not in teamnl_out:
        self.nfail += 1
        raise error.TestError("\n teamnl failed to display ports veth0 and veth1")
    teamnl_getopt = utils.system_output("teamnl team2 getoption mode")
    if "roundrobin" not in teamnl_getopt:
        self.nfail += 1
        raise error.TestError("\n teamnl getoption failed")
    # Verify this using teamdctl
    team_mode = utils.system_output("teamdctl team2 state item get setup.kernel_team_mode_name")
    if "roundrobin" not in team_mode:
        self.nfail += 1
        raise error.TestError("\n teamdctl state item get failed")
    # Setoption using teamnl
    status, result = commands.getstatusoutput("teamnl team2 setoption mode activebackup")
    if status == 0:
        self.nfail += 1
        raise error.TestError("\n teamnl succeeded to setoption. No ports can be present during mode change")
    else:
        logging.info("\n teamnl succeeded to identify no ports can be present during mode change.")
    # Remove ports and try
    utils.system("teamdctl team2 port remove veth0")
    utils.system("teamdctl team2 port remove veth1")
    # Testing monitor
    p = aexpect.Spawn("teamnl team2 monitor all")
    os.system("teamnl team2 setoption mode activebackup")
    teamnl_getopt = utils.system_output("teamnl team2 getoption mode")
    if "activebackup" not in teamnl_getopt:
        self.nfail += 1
        raise error.TestError("\n teamnl setoption failed")
    output_aexpect = p.get_output()
    p.close()
    if "mode activebackup changed" not in output_aexpect:
        self.nfail += 1
        raise error.TestError("\n teamnl monitor failed to detect mode change")
def check_installed(self, name):
    if os.path.isfile(name):
        n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
                 ' Package 2>/dev/null')
        name = utils.system_output(n_cmd)
    i_cmd = self.lowlevel_base_cmd + ' -s ' + name + ' 2>/dev/null'
    # Checking if package is installed
    package_status = utils.system_output(i_cmd, ignore_status=True)
    not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
    dpkg_not_installed = re.search(not_inst_pattern, package_status)
    if dpkg_not_installed:
        return False
    return True
def mkfs(self, fstype=None, args='', record=True):
    """
    Format a partition to filesystem type

    :param fstype: the filesystem type, e.g. "ext3", "ext2"
    :param args: arguments to be passed to mkfs command.
    :param record: if set, output result of mkfs operation to autotest output
    """
    if list_mount_devices().count(self.device):
        raise NameError('Attempted to format mounted device %s' % self.device)

    if not fstype:
        if self.fstype:
            fstype = self.fstype
        else:
            fstype = 'ext2'

    if self.mkfs_flags:
        args += ' ' + self.mkfs_flags
    if fstype == 'xfs':
        args += ' -f'

    if self.loop:
        # BAH. Inconsistent mkfs syntax SUCKS.
        if fstype.startswith('ext'):
            args += ' -F'
        elif fstype == 'reiserfs':
            args += ' -f'

    # If there isn't already a '-t <type>' argument, add one.
    if "-t" not in args:
        args = "-t %s %s" % (fstype, args)

    args = args.strip()

    mkfs_cmd = "%s %s %s" % (self.mkfs_exec(fstype), args, self.device)

    sys.stdout.flush()
    try:
        # We throw away the output here - we only need it on error, in
        # which case it's in the exception
        utils.system_output("yes | %s" % mkfs_cmd)
    except error.CmdError, e:
        logging.error(e.result_obj)
        if record:
            self.job.record('FAIL', None, mkfs_cmd, error.format_error())
        raise
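# Hedged usage sketch (illustrative; 'part' stands for an already constructed
# partition object and the filesystem type and label are hypothetical):
#
#     part.mkfs('ext4', args='-L scratch')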
def setup(self):
    utils.system('cp %s/ubuntu_stress_btrfs.sh %s' % (self.bindir, self.srcdir))
    os.chdir(self.srcdir)
    cmd = 'git clone git://kernel.ubuntu.com/cking/stress-ng 2>&1'
    self.results = utils.system_output(cmd, retain_output=True)

    os.chdir(os.path.join(self.srcdir, 'stress-ng'))
    cmd = 'make -j 4'
    self.results = utils.system_output(cmd, retain_output=True)

    cmd = 'ls -al ' + self.bindir
    self.results = utils.system_output(cmd, retain_output=True)

    os.chdir(self.srcdir)
def install(self, tag="autotest", install_vmlinux=True):
    self.installed_as = tag

    self.image = None
    self.initrd = ""
    for rpm_pack in self.rpm_package:
        rpm_name = utils.system_output("rpm -qp " + rpm_pack)

        # install
        utils.system("rpm -i --force " + rpm_pack)

        # get file list
        files = utils.system_output("rpm -ql " + rpm_name).splitlines()

        # search for vmlinuz
        for file in files:
            if file.startswith("/boot/vmlinuz"):
                self.full_version = file[len("/boot/vmlinuz-"):]
                self.image = file
                self.rpm_flavour = rpm_name.split("-")[1]

                # get version and release number
                self.version, self.release = utils.system_output(
                    'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + rpm_name
                ).splitlines()[0:2]

                # prefer /boot/kernel-version before /boot/kernel
                if self.full_version:
                    break

        # search for initrd
        for file in files:
            if file.startswith("/boot/init"):
                self.initrd = file
                # prefer /boot/initrd-version before /boot/initrd
                if len(file) > len("/boot/initrd"):
                    break

    if self.image is None:
        errmsg = "specified rpm file(s) don't contain /boot/vmlinuz"
        raise error.TestError(errmsg)

    # install vmlinux
    if install_vmlinux:
        for rpm_pack in self.rpm_package:
            vmlinux = utils.system_output("rpm -q -l -p %s | grep /boot/vmlinux" % rpm_pack)
            utils.system("cd /; rpm2cpio %s | cpio -imuv .%s 2>&1" % (rpm_pack, vmlinux))
            if not os.path.exists(vmlinux):
                raise error.TestError("%s does not exist after installing %s" % (vmlinux, rpm_pack))
def setup(self):
    utils.system_output('rm /etc/*/S99autotest || true', retain_output=True)

    pkgs = ['btrfs-tools', 'xfsprogs', 'jfsutils']
    for pkg in pkgs:
        print "Installing package " + pkg
        utils.system_output('apt-get install ' + pkg + ' --yes --force-yes ', retain_output=True)

    print "Extracting fstest tarball.."
    tarball = utils.unmap_url(self.bindir, 'fstest.tar.bz2', self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)

    print "Building fstest.."
    utils.system('make')
def run_once(self, test_name):
    if test_name == 'setup':
        return

    #
    # We need to be sure we run this on the right target machines
    # as this is really quite destructive!
    #
    if not os.uname()[1] in self.valid_clients:
        return

    date_start = time.strftime("%Y-%m-%d")
    time_start = time.strftime("%H%M")

    output = ''
    #
    # Test 3 different I/O schedulers:
    #
    for iosched in ['cfq', 'deadline', 'noop']:
        #
        # Test 5 different file systems, across 20+ tests..
        #
        os.chdir(self.fio_tests_dir)
        cmd = './test.sh'
        cmd += ' -d ' + self.dev + '1 -m 8G -S -s ' + iosched + ' -f ext2,ext3,ext4,xfs,btrfs'
        cmd += ' -D ' + date_start + ' -T ' + time_start
        output += utils.system_output(cmd, retain_output=True)

    #
    # Move the results from the src tree into the autotest results tree where it will automatically
    # get picked up and copied over to the jenkins server.
    #
    os.rename(os.path.join(self.srcdir, 'fs-test-proto'),
              os.path.join(self.resultsdir, 'fs-test-proto'))
def check_installed(self, name, version=None, arch=None):
    """
    Check if package [name] is installed.

    @param name: Package name.
    @param version: Package version.
    @param arch: Package architecture.
    """
    if arch:
        cmd = self.lowlevel_base_cmd + " -q --qf %{ARCH} " + name + " 2> /dev/null"
        inst_archs = utils.system_output(cmd, ignore_status=True)
        inst_archs = inst_archs.split("\n")
        for inst_arch in inst_archs:
            if inst_arch == arch:
                return self._check_installed_version(name, version)
        return False
    elif version:
        return self._check_installed_version(name, version)
    else:
        cmd = "rpm -q " + name + " 2> /dev/null"
        try:
            utils.system(cmd)
            return True
        except error.CmdError:
            return False
def provides(self, name):
    """
    Searches for what provides a given file.

    @param name: File path.
    """
    p_cmd = self.base_command + " what-provides " + name
    list_provides = []
    try:
        p_output = utils.system_output(p_cmd).split("\n")[4:]
        for line in p_output:
            line = [a.strip() for a in line.split("|")]
            try:
                # state, pname, type, version, arch, repository = line
                pname = line[1]
                if pname not in list_provides:
                    list_provides.append(pname)
            except IndexError:
                pass
        if len(list_provides) > 1:
            logging.warning("More than one package found, "
                            "using the first result")
        if list_provides:
            logging.info("Package %s provides %s", list_provides[0], name)
            return list_provides[0]
        return None
    except error.CmdError:
        return None
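# Hedged usage sketch (illustrative; 'sm' stands for a SoftwareManager-style
# instance as used in the scsi_testsuite setup elsewhere in this corpus):
# resolving which package ships a missing utility and installing it:
#
#     pkg = sm.provides('/usr/bin/lsscsi')
#     if pkg is not None:
#         sm.install(pkg)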
def get_version(session, result_file, kvm_ver_chk_cmd, guest_ver_cmd,
                type, driver_format, timeout):
    """
    Collect qemu, kernel and driver version info and write them into the
    results file

    :param session: VM session
    :param result_file: save fio results, host info and other info
    :param guest_ver_cmd: command of getting guest kernel or virtio_win
                          driver version
    :param type: guest type
    :param driver_format: driver format
    :param timeout: Timeout in seconds
    """
    kvm_ver = utils.system_output(kvm_ver_chk_cmd)
    host_ver = os.uname()[2]

    result_file.write("### kvm-userspace-ver : %s\n" % kvm_ver)
    result_file.write("### kvm_version : %s\n" % host_ver)
    if driver_format != "ide":
        result = session.cmd_output(guest_ver_cmd, timeout)
        if type == "windows":
            guest_ver = re.findall(".*?(\d{2}\.\d{2}\.\d{3}\.\d{4}).*?", result)
            result_file.write("### guest-kernel-ver :Microsoft Windows [Version %s]\n" % guest_ver[0])
        else:
            result_file.write("### guest-kernel-ver :%s" % result)
    else:
        result_file.write("### guest-kernel-ver : Microsoft Windows [Version ide driver format]\n")
def run_virsh_find_storage_pool_sources_as(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """
    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "localhost")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise error.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    if source_host == "localhost":
        if source_type == "netfs":
            # Set up nfs
            utils_test.libvirt.setup_or_cleanup_nfs(True)
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            cleanup_iscsi = True
            if source_type == "logical":
                # Create VG by using iscsi device
                lv_utils.vg_create(vg_name, iscsi_device)
                cleanup_logical = True

    # Run virsh cmd
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(
            source_type, options, ignore_status=True,
            debug=True, readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command output:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(False)
def get_huge_page_size():
    output = utils.system_output('grep Hugepagesize /proc/meminfo')
    return int(output.split()[1])  # Assumes units always in kB. :(
def get_num_huge_pages():
    raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
    return int(raw_hugepages.split()[2])
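# Hedged usage sketch (illustrative, not part of the original module): the two
# helpers above can be combined to report how much memory is currently
# reserved for huge pages, e.g.:
#
#     pool_kb = get_num_huge_pages() * get_huge_page_size()
#     logging.info("Huge page pool size: %d kB", pool_kb)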
def run_qemu_io_blkdebug(test, params, env): """ Run qemu-io blkdebug tests: 1. Create image with given parameters 2. Write the blkdebug config file 3. Try to do operate in image with qemu-io and get the error message 4. Get the error message from perror by error number set in config file 5. Compare the error message @param test: kvm test object @param params: Dictionary with the test parameters @param env: Dictionary with test environment. """ tmp_dir = params.get("tmp_dir", "/tmp") blkdebug_cfg = utils_misc.get_path( tmp_dir, params.get("blkdebug_cfg", "blkdebug.cfg")) err_command = params["err_command"] err_event = params["err_event"] errn_list = re.split("\s+", params["errn_list"].strip()) re_std_msg = params["re_std_msg"] test_timeout = int(params.get("test_timeout", "60")) pre_err_commands = params.get("pre_err_commands") image = params.get("images") blkdebug_default = params.get("blkdebug_default") error.context("Create image", logging.info) image_io = QemuImg(params.object_params(image), test.bindir, image) image_name = image_io.create(params.object_params(image)) template_name = utils_misc.get_path(test.virtdir, blkdebug_default) template = ConfigParser.ConfigParser() template.read(template_name) for errn in errn_list: log_filename = utils_misc.get_path(test.outputdir, "qemu-io-log-%s" % errn) error.context("Write the blkdebug config file", logging.info) template.set("inject-error", "event", '"%s"' % err_event) template.set("inject-error", "errno", '"%s"' % errn) error.context("Write blkdebug config file", logging.info) blkdebug = None try: blkdebug = open(blkdebug_cfg, 'w') template.write(blkdebug) finally: if blkdebug is not None: blkdebug.close() error.context("Operate in qemu-io to trigger the error", logging.info) session = qemu_io.QemuIOShellSession(test, params, image_name, blkdebug_cfg=blkdebug_cfg, log_filename=log_filename) if pre_err_commands: for cmd in re.split(",", pre_err_commands.strip()): session.cmd_output(cmd, timeout=test_timeout) output = session.cmd_output(err_command, timeout=test_timeout) error.context("Get error message from command perror", logging.info) perror_cmd = "perror %s" % errn std_msg = utils.system_output(perror_cmd) std_msg = re.findall(re_std_msg, std_msg) if std_msg: std_msg = std_msg[0] else: std_msg = "" logging.warning("Can not find error message from perror") session.close() error.context("Compare the error message", logging.info) if std_msg: if std_msg in output: logging.info("Error message is correct in qemu-io") else: fail_log = "The error message is mismatch:" fail_log += "qemu-io reports: '%s'," % output fail_log += "perror reports: '%s'" % std_msg raise error.TestFail(fail_log) else: logging.warning("Can not find error message from perror." " The output from qemu-io is %s" % output)
        try:
            sess.cmd_output(dd_cmd, timeout=360)
        except aexpect.ShellCmdError, e:
            return failure
    else:
        tcpdump_cmd += " and dst %s" % guest_ip
        copy_files_func = vm.copy_files_to
        try:
            utils.system(dd_cmd)
        except error.CmdError, e:
            return failure

    # only capture the new tcp port after offload setup
    original_tcp_ports = re.findall(
        "tcp.*:(\d+).*%s" % guest_ip,
        utils.system_output("/bin/netstat -nap"))
    for i in original_tcp_ports:
        tcpdump_cmd += " and not port %s" % i

    txt = "Listening traffic using command: %s" % tcpdump_cmd
    error.context(txt, logging.info)
    sess.sendline(tcpdump_cmd)
    if not utils_misc.wait_for(
            lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
        return (False, "Tcpdump process wasn't launched")

    txt = "Transferring file %s from %s" % (filename, src)
    error.context(txt, logging.info)
    try:
        copy_files_func(filename, filename)
def run_transfer_file_over_ipv6(test, params, env): """ Test Step 1. boot up two virtual machine 2. Transfer data: host <--> guest1 <--> guest2 <-->host via ipv6 3. after data transfer, check data have no change Params: @param test: QEMU test object @param params: Dictionary with the test parameters @param env: Dictionary with test environment. """ timeout = int(params.get("login_timeout", '360')) client = params.get("file_transfer_client") port = params.get("file_transfer_port") password = params.get("password") username = params.get("username") tmp_dir = params.get("tmp_dir", "/tmp/") filesize = int(params.get("filesize", '4096')) dd_cmd = params.get("dd_cmd") file_trans_timeout = int(params.get("file_trans_timeout", '1200')) file_md5_check_timeout = int(params.get("file_md5_check_timeout", '600')) def get_linux_ipv6_linklocal_address(ifname, session=None): """ Get host/guest ipv6 linklocal address via ifname """ if session is not None: o = session.cmd_output("ifconfig %s" % ifname) else: o = utils.system_output("ifconfig %s" % ifname) ipv6_address_reg = re.compile(r"(fe80::[^\s|/]*)") if o: ipv6_linklocal_address = ipv6_address_reg.findall(o) if not ipv6_linklocal_address: raise error.TestError("Can't get %s linklocal address" % ifname) return ipv6_linklocal_address[0] else: return None def get_file_md5sum(file_name, session, timeout): """ Get file md5sum from guest. """ logging.info("Get md5sum of the file:'%s'" % file_name) try: o = session.cmd_output("md5sum %s" % file_name, timeout=timeout) file_md5sum = re.findall("\w+", o)[0] except IndexError: raise error.TestError("Could not get file md5sum in guest") return file_md5sum sessions = {} addresses = {} inet_name = {} vms = [] error.context("Boot vms for test", logging.info) for vm_name in params.get("vms", "vm1 vm2").split(): vms.append(env.get_vm(vm_name)) #config ipv6 address host and guest. 
host_ifname = params.get("netdst") for vm in vms: vm.verify_alive() sessions[vm] = vm.wait_for_login(timeout=timeout) inet_name[vm] = utils_net.get_linux_ifname(sessions[vm], vm.get_mac_address()) addresses[vm] = get_linux_ipv6_linklocal_address( inet_name[vm], sessions[vm]) #prepare test data guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) dest_path = (tmp_dir + "dst-%s" % utils_misc.generate_random_string(8)) host_path = os.path.join(test.tmpdir, "tmp-%s" % utils_misc.generate_random_string(8)) logging.info("Test setup: Creating %dMB file on host", filesize) utils.run(dd_cmd % (host_path, filesize)) try: src_md5 = (utils.hash_file(host_path, method="md5")) #transfer data for vm in vms: error.context("Transfer date from host to %s" % vm.name, logging.info) remote.copy_files_to("%s%%%s" % (addresses[vm], host_ifname), client, username, password, port, host_path, guest_path, timeout=file_trans_timeout) dst_md5 = get_file_md5sum(guest_path, sessions[vm], timeout=file_md5_check_timeout) if dst_md5 != src_md5: raise error.TestFail("File changed after transfer host -> %s" % vm.name) for vm_src in addresses: for vm_dst in addresses: if vm_src != vm_dst: error.context( "Transfering data from %s to %s" % (vm_src.name, vm_dst.name), logging.info) remote.scp_between_remotes( "%s%%%s" % (addresses[vm_src], host_ifname), "%s%%%s" % (addresses[vm_dst], inet_name[vm_src]), port, password, password, username, username, guest_path, dest_path, timeout=file_trans_timeout) dst_md5 = get_file_md5sum(dest_path, sessions[vm_dst], timeout=file_md5_check_timeout) if dst_md5 != src_md5: raise error.TestFail("File changed transfer %s -> %s" % (vm_src.name, vm_dst.name)) for vm in vms: error.context("Transfer date from %s to host" % vm.name, logging.info) remote.copy_files_from("%s%%%s" % (addresses[vm], host_ifname), client, username, password, port, dest_path, host_path, timeout=file_trans_timeout) error.context("Check whether the file changed after trans", logging.info) dst_md5 = (utils.hash_file(host_path, method="md5")) if dst_md5 != src_md5: raise error.TestFail("File changed after transfer", "Files md5sum mismatch!") utils.system_output("rm -rf %s" % host_path, timeout=timeout) finally: utils.system_output("rm -rf %s" % host_path, timeout=timeout) for vm in vms: sessions[vm].cmd("rm -rf %s %s" % (guest_path, dest_path), timeout=timeout) sessions[vm].close()
def run_ping(test, params, env): """ Ping the guest with different size of packets. 1) Login to guest 2) Ping test on nic(s) from host 2.1) Ping with packet size from 0 to 65507 2.2) Flood ping test 2.3) Ping test after flood ping, Check if the network is still alive 3) Ping test from guest side, packet size is from 0 to 65507 (win guest is up to 65500) (Optional) :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def _get_loss_ratio(output): if params.get("strict_check", "no") == "yes": ratio = utils_test.get_loss_ratio(output) if ratio != 0: raise error.TestFail("Loss ratio is %s" % ratio) timeout = int(params.get("login_timeout", 360)) ping_ext_host = params.get("ping_ext_host", "no") == "yes" vm = env.get_vm(params["main_vm"]) vm.verify_alive() error.context("Login to guest", logging.info) session = vm.wait_for_login(timeout=timeout) if ping_ext_host: default_host = "www.redhat.com" ext_host_get_cmd = params.get("ext_host_get_cmd", "") try: ext_host = utils.system_output(ext_host_get_cmd) except error.CmdError: logging.warn( "Can't get specified host with cmd '%s'," " Fallback to default host '%s'", ext_host_get_cmd, default_host) ext_host = default_host if not ext_host: # Fallback to a hardcode host, eg: ext_host = default_host counts = params.get("ping_counts", 100) flood_minutes = float(params.get("flood_minutes", 10)) packet_sizes = [ 0, 1, 4, 48, 512, 1440, 1500, 1505, 4054, 4055, 4096, 4192, 8878, 9000, 32767, 65507 ] for i, nic in enumerate(vm.virtnet): ip = vm.get_address(i) nic_name = nic.get("nic_name") if not ip: logging.error("Could not get the ip of nic index %d: %s", i, nic_name) continue error.base_context( "Ping test on nic %s (index %d) from host" " side" % (nic_name, i), logging.info) for size in packet_sizes: error.context("Ping with packet size %s" % size, logging.info) status, output = utils_test.ping(ip, 10, packetsize=size, timeout=20) _get_loss_ratio(output) if status != 0: raise error.TestFail("Ping failed, status: %s," " output: %s" % (status, output)) error.context("Flood ping test", logging.info) utils_test.ping(ip, None, flood=True, output_func=None, timeout=flood_minutes * 60) error.context( "Ping test after flood ping, Check if the network is" " still alive", logging.info) status, output = utils_test.ping(ip, counts, timeout=float(counts) * 1.5) _get_loss_ratio(output) if status != 0: raise error.TestFail("Ping returns non-zero value %s" % output) if ping_ext_host: error.base_context( "Ping test from guest side," " dest: '%s'" % ext_host, logging.info) pkt_sizes = packet_sizes # There is no ping program for guest, so let's hardcode... cmd = ['ping'] cmd.append(ext_host) # external host if params.get("os_type") == "windows": cmd.append("-n 10") cmd.append("-l %s") # Windows doesn't support ping with packet # larger than '65500' pkt_sizes = [p for p in packet_sizes if p < 65500] # Add a packet size just equal '65500' for windows pkt_sizes.append(65500) else: cmd.append("-c 10") # ping 10 times cmd.append("-s %s") # packet size cmd = " ".join(cmd) for size in pkt_sizes: error.context("Ping with packet size %s" % size, logging.info) status, output = session.cmd_status_output(cmd % size, timeout=60) _get_loss_ratio(output) if status != 0: raise error.TestFail( ("Ping external host failed," " status: %s, output: %s" % (status, output)))
def sr_iov_setup(self):
    """
    Ensure the PCI device is working in sr_iov mode.

    Check if the PCI hardware device driver is loaded with the appropriate
    parameters (number of VFs), and if it's not, perform setup.

    @return: True, if the setup was completed successfully, False otherwise.
    """
    # Check if the host supports interrupt remapping
    error.context("Set up host env for PCI assign test", logging.info)
    kvm_re_probe = False
    o = utils.system_output("cat /var/log/dmesg")
    ecap = re.findall("ecap\s+(.\w+)", o)
    if ecap and int(ecap[0], 16) & 8 == 0:
        if self.kvm_params is not None:
            if self.auai_path and self.kvm_params[self.auai_path] == "N":
                kvm_re_probe = True
        else:
            kvm_re_probe = True
    # Try to re probe kvm module with interrupt remapping support
    if kvm_re_probe:
        kvm_arch = kvm_control.get_kvm_arch()
        utils.system("modprobe -r %s" % kvm_arch)
        utils.system("modprobe -r kvm")
        cmd = "modprobe kvm allow_unsafe_assigned_interrupts=1"
        if self.kvm_params is not None:
            for i in self.kvm_params:
                if "allow_unsafe_assigned_interrupts" not in i:
                    if self.kvm_params[i] == "Y":
                        params_name = os.path.split(i)[1]
                        cmd += " %s=1" % params_name
        error.context("Loading kvm with: %s" % cmd, logging.info)

        try:
            utils.system(cmd)
        except Exception:
            logging.debug("Can not enable the interrupt remapping support")
        utils.system("modprobe %s" % kvm_arch)

    re_probe = False
    s, o = commands.getstatusoutput('lsmod | grep %s' % self.driver)
    if s:
        re_probe = True
    elif not self.check_vfs_count():
        os.system("modprobe -r %s" % self.driver)
        re_probe = True
    else:
        return True

    # Re-probe driver with proper number of VFs
    if re_probe:
        cmd = "modprobe %s %s" % (self.driver, self.driver_option)
        error.context("Loading the driver '%s' with command '%s'" %
                      (self.driver, cmd), logging.info)
        s, o = commands.getstatusoutput(cmd)
        utils.system("/etc/init.d/network restart", ignore_status=True)
        if s:
            return False
        return True
def run_timerdevice_tscsync_longtime(test, params, env):
    """
    Timer device test: check TSC synchronization over a long period:

    1) Check for an appropriate clocksource on host.
    2) Check host has more than one cpu socket.
    3) Boot the guest with specified cpu socket.
    4) Copy time-warp-test.c to guest.
    5) Compile the time-warp-test.c.
    6) Run time-warp-test for minimum 4 hours.

    @param test: QEMU test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    error.context("Check for an appropriate clocksource on host",
                  logging.info)
    host_cmd = "cat /sys/devices/system/clocksource/"
    host_cmd += "clocksource0/current_clocksource"
    if "tsc" not in utils.system_output(host_cmd):
        raise error.TestNAError("Host must use 'tsc' clocksource")

    error.context("Check host has more than one cpu socket", logging.info)
    host_socket_cnt_cmd = params["host_socket_cnt_cmd"]
    if utils.system_output(host_socket_cnt_cmd).strip() == "1":
        raise error.TestNAError("Host must have more than 1 socket")

    error.context("Boot the guest with one cpu socket", logging.info)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    error.context("Copy time-warp-test.c to guest", logging.info)
    src_file_name = os.path.join(data_dir.get_root_dir(), "shared", "deps",
                                 "time-warp-test.c")
    vm.copy_files_to(src_file_name, "/tmp")

    error.context("Compile the time-warp-test.c", logging.info)
    cmd = "cd /tmp/;"
    cmd += " yum install -y popt-devel;"
    cmd += " rm -f time-warp-test;"
    cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt"
    session.cmd(cmd)

    error.context("Run time-warp-test for minimum 4 hours", logging.info)
    test_run_timeout = int(params.get("test_run_timeout", 14400))
    session.sendline("$(sleep %d; pkill time-warp-test) &" % test_run_timeout)
    cmd = "/tmp/time-warp-test"
    _, output = session.cmd_status_output(cmd,
                                          timeout=(test_run_timeout + 60))

    re_str = "fail:(\d+).*?fail:(\d+).*fail:(\d+)"
    fail_cnt = re.findall(re_str, output)
    if not fail_cnt:
        raise error.TestError("Could not get correct test output."
                              " Output: '%s'" % output)

    tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]]
    if tsc_cnt or tod_cnt or clk_cnt:
        msg = output.splitlines()[-5:]
        raise error.TestFail("Get error when running time-warp-test."
                             " Output (last 5 lines): '%s'" % msg)
def run(test, params, env): """ Run Pktgen test between host/guest 1) Boot the main vm, or just grab it if it's already booted. 2) Configure pktgen server(only linux) 3) Run pktgen test, finish when timeout or env["pktgen_run"] != True :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ login_timeout = float(params.get("login_timeout", 360)) error.context("Init the VM, and try to login", logging.info) external_host = params.get("external_host") if not external_host: get_host_cmd = "ip route | awk '/default/ {print $3}'" external_host = utils.system_output(get_host_cmd) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) error.context("Pktgen server environment prepare", logging.info) # pktgen server only support linux, since pktgen is a linux kernel module pktgen_server = params.get("pktgen_server", "localhost") params_server = params.object_params("pktgen_server") s_shell_client = params_server.get("shell_client", "ssh") s_shell_port = params_server.get("shell_port", "22") s_username = params_server.get("username", "root") s_passwd = params_server.get("password", "123456") s_shell_prompt = params_server.get("shell_prompt") server_session = "" # pktgen server is autotest virtual guest(only linux) if pktgen_server in params.get("vms", "vm1 vm2"): vm_pktgen = env.get_vm(pktgen_server) vm_pktgen.verify_alive() server_session = vm_pktgen.wait_for_login(timeout=login_timeout) runner = server_session.cmd_output_safe pktgen_ip = vm_pktgen.get_address() pktgen_mac = vm_pktgen.get_mac_address() server_interface = utils_net.get_linux_ifname(server_session, pktgen_mac) # pktgen server is a external host assigned elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server): pktgen_ip = pktgen_server server_session = remote.wait_for_login(s_shell_client, pktgen_ip, s_shell_port, s_username, s_passwd, s_shell_prompt) runner = server_session.cmd_output_safe server_interface = params.get("server_interface") if not server_interface: raise error.TestNAError("Must config server interface before test") else: # using host as a pktgen server server_interface = params.get("netdst", "switch") host_nic = utils_net.Interface(server_interface) pktgen_ip = host_nic.get_ip() pktgen_mac = host_nic.get_mac() runner = utils.system # copy pktgen_test scipt to the test server. local_path = os.path.join(data_dir.get_root_dir(), "shared/scripts/pktgen.sh") remote_path = "/tmp/pktgen.sh" remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd, local_path, remote_path) error.context("Run pktgen test", logging.info) run_threads = params.get("pktgen_threads", 1) pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600)) exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(), vm.get_mac_address(), server_interface, run_threads) try: env["pktgen_run"] = True try: # Set a run flag in env, when other case call this case as a sub # backgroud process, can set run flag to False to stop this case. 
start_time = time.time() stop_time = start_time + pktgen_stress_timeout while (env["pktgen_run"] and time.time() < stop_time): runner(exec_cmd, timeout=pktgen_stress_timeout) # using ping to kill the pktgen stress except aexpect.ShellTimeoutError: session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True) finally: env["pktgen_run"] = False error.context("Verify Host and guest kernel no error and call trace", logging.info) vm.verify_kernel_crash() utils_misc.verify_host_dmesg() error.context("Ping external host after pktgen test", logging.info) session_ping = vm.wait_for_login(timeout=login_timeout) status, output = utils_test.ping(dest=external_host, session=session_ping, timeout=240, count=20) loss_ratio = utils_test.get_loss_ratio(output) if (loss_ratio > int(params.get("packet_lost_ratio", 5)) or loss_ratio == -1): logging.debug("Ping %s output: %s" % (external_host, output)) raise error.TestFail("Guest network connction unusable," + "packet lost ratio is '%d%%'" % loss_ratio) if server_session: server_session.close() if session: session.close() if session_ping: session_ping.close()
def __make_libvirt_command(self, name=None, params=None, root_dir=None): """ Generate a libvirt command line. All parameters are optional. If a parameter is not supplied, the corresponding value stored in the class attributes is used. @param name: The name of the object @param params: A dict containing VM params @param root_dir: Base directory for relative filenames @note: The params dict should contain: mem -- memory size in MBs cdrom -- ISO filename to use with the qemu -cdrom parameter extra_params -- a string to append to the qemu command shell_port -- port of the remote shell daemon on the guest (SSH, Telnet or the home-made Remote Shell Server) shell_client -- client program to use for connecting to the remote shell daemon on the guest (ssh, telnet or nc) x11_display -- if specified, the DISPLAY environment variable will be be set to this value for the qemu process (useful for SDL rendering) images -- a list of image object names, separated by spaces nics -- a list of NIC object names, separated by spaces For each image in images: drive_format -- string to pass as 'if' parameter for this image (e.g. ide, scsi) image_snapshot -- if yes, pass 'snapshot=on' to qemu for this image image_boot -- if yes, pass 'boot=on' to qemu for this image In addition, all parameters required by get_image_filename. For each NIC in nics: nic_model -- string to pass as 'model' parameter for this NIC (e.g. e1000) """ # helper function for command line option wrappers def has_option(help, option): return bool(re.search(r"--%s" % option, help, re.MULTILINE)) # Wrappers for all supported libvirt command line parameters. # This is meant to allow support for multiple libvirt versions. # Each of these functions receives the output of 'libvirt --help' as a # parameter, and should add the requested command line option # accordingly. 
def add_name(help, name): return " --name '%s'" % name def add_machine_type(help, machine_type): if has_option(help, "machine"): return " --machine %s" % machine_type else: return "" def add_hvm_or_pv(help, hvm_or_pv): if hvm_or_pv == "hvm": return " --hvm --accelerate" elif hvm_or_pv == "pv": return " --paravirt" else: logging.warning("Unknown virt type hvm_or_pv, using default.") return "" def add_mem(help, mem): return " --ram=%s" % mem def add_check_cpu(help): if has_option(help, "check-cpu"): return " --check-cpu" else: return "" def add_smp(help, smp): return " --vcpu=%s" % smp def add_location(help, location): if has_option(help, "location"): return " --location %s" % location else: return "" def add_cdrom(help, filename, index=None): if has_option(help, "cdrom"): return " --cdrom %s" % filename else: return "" def add_pxe(help): if has_option(help, "pxe"): return " --pxe" else: return "" def add_import(help): if has_option(help, "import"): return " --import" else: return "" def add_drive(help, filename, pool=None, vol=None, device=None, bus=None, perms=None, size=None, sparse=False, cache=None, format=None): cmd = " --disk" if filename: cmd += " path=%s" % filename elif pool: if vol: cmd += " vol=%s/%s" % (pool, vol) else: cmd += " pool=%s" % pool if device: cmd += ",device=%s" % device if bus: cmd += ",bus=%s" % bus if perms: cmd += ",%s" % perms if size: cmd += ",size=%s" % size.rstrip("Gg") if sparse: cmd += ",sparse=false" if format: cmd += ",format=%s" % format if cache: cmd += ",cache=%s" % cache return cmd def add_floppy(help, filename): return " --disk path=%s,device=floppy,ro" % filename def add_vnc(help, vnc_port=None): if vnc_port: return " --vnc --vncport=%d" % (vnc_port) else: return " --vnc" def add_vnclisten(help, vnclisten): if has_option(help, "vnclisten"): return " --vnclisten=%s" % (vnclisten) else: return "" def add_sdl(help): if has_option(help, "sdl"): return " --sdl" else: return "" def add_nographic(help): return " --nographics" def add_video(help, video_device): if has_option(help, "video"): return " --video=%s" % (video_device) else: return "" def add_uuid(help, uuid): if has_option(help, "uuid"): return " --uuid %s" % uuid else: return "" def add_os_type(help, os_type): if has_option(help, "os-type"): return " --os-type %s" % os_type else: return "" def add_os_variant(help, os_variant): if has_option(help, "os-variant"): return " --os-variant %s" % os_variant else: return "" def add_pcidevice(help, pci_device): if has_option(help, "host-device"): return " --host-device %s" % pci_device else: return "" def add_soundhw(help, sound_device): if has_option(help, "soundhw"): return " --soundhw %s" % sound_device else: return "" def add_serial(help, filename): if has_option(help, "serial"): return " --serial file,path=%s --serial pty" % filename else: self.only_pty = True return "" def add_kernel_cmdline(help, cmdline): return " -append %s" % cmdline def add_connect_uri(help, uri): if uri and has_option(help, "connect"): return " --connect=%s" % uri else: return "" def add_nic(help, nic_params): """ Return additional command line params based on dict-like nic_params """ mac = nic_params.get('mac') nettype = nic_params.get('nettype') netdst = nic_params.get('netdst') nic_model = nic_params.get('nic_model') if nettype: result = " --network=%s" % nettype else: result = "" if has_option(help, "bridge"): # older libvirt (--network=NATdev --bridge=bridgename --mac=mac) if nettype != 'user': result += ':%s' % netdst if mac: # possible to specify --mac w/o --network 
result += " --mac=%s" % mac else: # newer libvirt (--network=mynet,model=virtio,mac=00:11) if nettype != 'user': result += '=%s' % netdst if nettype and nic_model: # only supported along with nettype result += ",model=%s" % nic_model if nettype and mac: result += ',mac=%s' % mac elif mac: # possible to specify --mac w/o --network result += " --mac=%s" % mac logging.debug("vm.__make_libvirt_command.add_nic returning: %s" % result) return result # End of command line option wrappers if name is None: name = self.name if params is None: params = self.params if root_dir is None: root_dir = self.root_dir # Clone this VM using the new params vm = self.clone(name, params, root_dir, copy_state=True) virt_install_binary = utils_misc.get_path( root_dir, params.get("virt_install_binary", "virt-install")) help = utils.system_output("%s --help" % virt_install_binary) # Start constructing the qemu command virt_install_cmd = "" # Set the X11 display parameter if requested if params.get("x11_display"): virt_install_cmd += "DISPLAY=%s " % params.get("x11_display") # Add the qemu binary virt_install_cmd += virt_install_binary # set connect uri virt_install_cmd += add_connect_uri(help, self.connect_uri) # hvm or pv specificed by libvirt switch (pv used by Xen only) hvm_or_pv = params.get("hvm_or_pv", "hvm") if hvm_or_pv: virt_install_cmd += add_hvm_or_pv(help, hvm_or_pv) # Add the VM's name virt_install_cmd += add_name(help, name) machine_type = params.get("machine_type") if machine_type: virt_install_cmd += add_machine_type(help, machine_type) mem = params.get("mem") if mem: virt_install_cmd += add_mem(help, mem) # TODO: should we do the check before we call ? negative case ? check_cpu = params.get("use_check_cpu") if check_cpu: virt_install_cmd += add_check_cpu(help) smp = params.get("smp") if smp: virt_install_cmd += add_smp(help, smp) # TODO: directory location for vmlinuz/kernel for cdrom install ? 
location = None if params.get("medium") == 'url': location = params.get('url') elif params.get("medium") == 'kernel_initrd': # directory location of kernel/initrd pair (directory layout must # be in format libvirt will recognize) location = params.get("image_dir") elif params.get("medium") == 'nfs': location = "nfs:%s:%s" % (params.get("nfs_server"), params.get("nfs_dir")) elif params.get("medium") == 'cdrom': if params.get("use_libvirt_cdrom_switch") == 'yes': virt_install_cmd += add_cdrom(help, params.get("cdrom_cd1")) elif params.get("unattended_delivery_method") == "integrated": virt_install_cmd += add_cdrom(help, params.get("cdrom_unattended")) else: location = params.get("image_dir") kernel_dir = os.path.dirname(params.get("kernel")) kernel_parent_dir = os.path.dirname(kernel_dir) pxeboot_link = os.path.join(kernel_parent_dir, "pxeboot") if os.path.islink(pxeboot_link): os.unlink(pxeboot_link) if os.path.isdir(pxeboot_link): logging.info("Removed old %s leftover directory", pxeboot_link) shutil.rmtree(pxeboot_link) os.symlink(kernel_dir, pxeboot_link) elif params.get("medium") == "import": virt_install_cmd += add_import(help) if location: virt_install_cmd += add_location(help, location) if params.get("display") == "vnc": if params.get("vnc_autoport") == "yes": vm.vnc_autoport = True else: vm.vnc_autoport = False if not vm.vnc_autoport and params.get("vnc_port"): vm.vnc_port = int(params.get("vnc_port")) virt_install_cmd += add_vnc(help, vm.vnc_port) if params.get("vnclisten"): vm.vnclisten = params.get("vnclisten") virt_install_cmd += add_vnclisten(help, vm.vnclisten) elif params.get("display") == "sdl": virt_install_cmd += add_sdl(help) elif params.get("display") == "nographic": virt_install_cmd += add_nographic(help) video_device = params.get("video_device") if video_device: virt_install_cmd += add_video(help, video_device) sound_device = params.get("sound_device") if sound_device: virt_install_cmd += add_soundhw(help, sound_device) # if none is given a random UUID will be generated by libvirt if params.get("uuid"): virt_install_cmd += add_uuid(help, params.get("uuid")) # selectable OS type if params.get("use_os_type") == "yes": virt_install_cmd += add_os_type(help, params.get("os_type")) # selectable OS variant if params.get("use_os_variant") == "yes": virt_install_cmd += add_os_variant(help, params.get("os_variant")) # Add serial console virt_install_cmd += add_serial(help, self.get_serial_console_filename()) # If the PCI assignment step went OK, add each one of the PCI assigned # devices to the command line. 
if self.pci_devices: for pci_id in self.pci_devices: virt_install_cmd += add_pcidevice(help, pci_id) for image_name in params.objects("images"): image_params = params.object_params(image_name) filename = storage.get_image_filename(image_params, root_dir) if image_params.get("use_storage_pool") == "yes": filename = None virt_install_cmd += add_drive(help, filename, image_params.get("image_pool"), image_params.get("image_vol"), image_params.get("image_device"), image_params.get("image_bus"), image_params.get("image_perms"), image_params.get("image_size"), image_params.get("drive_sparse"), image_params.get("drive_cache"), image_params.get("image_format")) if image_params.get("boot_drive") == "no": continue if filename: virt_install_cmd += add_drive(help, filename, None, None, None, image_params.get("drive_format"), None, image_params.get("image_size"), image_params.get("drive_sparse"), image_params.get("drive_cache"), image_params.get("image_format")) if (params.get('unattended_delivery_method') != 'integrated' and not (self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv')): for cdrom in params.objects("cdroms"): cdrom_params = params.object_params(cdrom) iso = cdrom_params.get("cdrom") if params.get("use_libvirt_cdrom_switch") == 'yes': # we don't want to skip the winutils iso if not cdrom == 'winutils': logging.debug( "Using --cdrom instead of --disk for install") logging.debug("Skipping CDROM:%s:%s", cdrom, iso) continue if params.get("medium") == 'cdrom_no_kernel_initrd': if iso == params.get("cdrom_cd1"): logging.debug("Using cdrom or url for install") logging.debug("Skipping CDROM: %s", iso) continue if iso: virt_install_cmd += add_drive( help, utils_misc.get_path(root_dir, iso), image_params.get("iso_image_pool"), image_params.get("iso_image_vol"), 'cdrom', None, None, None, None, None, None) # We may want to add {floppy_otps} parameter for -fda # {fat:floppy:}/path/. However vvfat is not usually recommended. # Only support to add the main floppy if you want to add the second # one please modify this part. floppy = params.get("floppy_name") if floppy: floppy = utils_misc.get_path(root_dir, floppy) virt_install_cmd += add_drive(help, floppy, None, None, 'floppy', None, None, None, None, None, None) # setup networking parameters for nic in vm.virtnet: # __make_libvirt_command can be called w/o vm.create() nic = vm.add_nic(**dict(nic)) logging.debug("__make_libvirt_command() setting up command for" " nic: %s" % str(nic)) virt_install_cmd += add_nic(help, nic) if params.get("use_no_reboot") == "yes": virt_install_cmd += " --noreboot" if params.get("use_autostart") == "yes": virt_install_cmd += " --autostart" if params.get("virt_install_debug") == "yes": virt_install_cmd += " --debug" # bz still open, not fully functional yet if params.get("use_virt_install_wait") == "yes": virt_install_cmd += (" --wait %s" % params.get("virt_install_wait_time")) kernel_params = params.get("kernel_params") if kernel_params: virt_install_cmd += " --extra-args '%s'" % kernel_params virt_install_cmd += " --noautoconsole" return virt_install_cmd
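The `--network` handling above is the fiddliest part of the command assembly, since old and new virt-install syntaxes differ. A minimal standalone sketch of the same composition logic; build_network_option() and the old-syntax branch are illustrative assumptions, not part of the framework, which branches on what "virt-install --help" advertises and carries more nic attributes:

# Minimal sketch of the --network option composition (hypothetical helper).
def build_network_option(has_network_switch, nettype, netdst=None,
                         nic_model=None, mac=None):
    result = ""
    if not has_network_switch:
        # old syntax: separate --bridge / --mac switches (assumed branch)
        if nettype == "bridge" and netdst:
            result += " --bridge=%s" % netdst
        if mac:
            result += " --mac=%s" % mac
        return result
    # new syntax: --network=<type>[=<dst>][,model=...][,mac=...]
    result += " --network=%s" % nettype
    if nettype != "user" and netdst:
        result += "=%s" % netdst
    if nettype and nic_model:
        result += ",model=%s" % nic_model
    if nettype and mac:
        result += ",mac=%s" % mac
    return result

# build_network_option(True, "bridge", "virbr0", "virtio", "00:11:22:33:44:55")
# -> " --network=bridge=virbr0,model=virtio,mac=00:11:22:33:44:55"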
def run(test, params, env): """ Run qemu-io blkdebug tests: 1. Create image with given parameters 2. Write the blkdebug config file 3. Try to do operate in image with qemu-io and get the error message 4. Get the error message from perror by error number set in config file 5. Compare the error message :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ if params.get("blkdebug_event_name_separator") == 'underscore': blkdebug_event = params.get('err_event') if "." in blkdebug_event: params['err_event'] = blkdebug_event.replace(".", "_") tmp_dir = params.get("tmp_dir", "/tmp") blkdebug_cfg = utils_misc.get_path( tmp_dir, params.get("blkdebug_cfg", "blkdebug.cfg")) err_command = params["err_command"] err_event = params["err_event"] errn_list = re.split("\s+", params["errn_list"].strip()) re_std_msg = params["re_std_msg"] test_timeout = int(params.get("test_timeout", "60")) pre_err_commands = params.get("pre_err_commands") image = params.get("images") blkdebug_default = params.get("blkdebug_default") session_reload = params.get("session_reload", "no") == "yes" pre_snapshot = params.get("pre_snapshot", "no") == "yes" del_snapshot = params.get("del_snapshot", "no") == "yes" error.context("Create image", logging.info) image_io = QemuImg(params.object_params(image), data_dir.get_data_dir(), image) image_name, _ = image_io.create(params.object_params(image)) template_name = utils_misc.get_path(test.virtdir, blkdebug_default) template = ConfigParser.ConfigParser() template.read(template_name) for errn in errn_list: log_filename = utils_misc.get_path(test.outputdir, "qemu-io-log-%s" % errn) error.context("Write the blkdebug config file", logging.info) template.set("inject-error", "event", '"%s"' % err_event) template.set("inject-error", "errno", '"%s"' % errn) error.context("Write blkdebug config file", logging.info) blkdebug = None try: blkdebug = open(blkdebug_cfg, 'w') template.write(blkdebug) finally: if blkdebug is not None: blkdebug.close() error.context("Create image", logging.info) image_io = QemuImg(params.object_params(image), data_dir.get_data_dir(), image) image_name = image_io.create(params.object_params(image))[0] error.context("Operate in qemu-io to trigger the error", logging.info) session = qemu_io.QemuIOShellSession(test, params, image_name, blkdebug_cfg=blkdebug_cfg, log_filename=log_filename) if pre_err_commands: for cmd in re.split(",", pre_err_commands.strip()): session.cmd_output(cmd, timeout=test_timeout) if session_reload or pre_snapshot: session.close() if pre_snapshot: image_io.snapshot_create() image_sn = image_io.snapshot_tag session = qemu_io.QemuIOShellSession(test, params, image_name, blkdebug_cfg=blkdebug_cfg, log_filename=log_filename) if not del_snapshot: output = session.cmd_output(err_command, timeout=test_timeout) session.close() else: session.close() try: image_io.snapshot_del(blkdebug_cfg=blkdebug_cfg) output = "" except process.CmdError, err: output = err.result.stderr # Remove the snapshot and base image after a round of test image_io.remove() if pre_snapshot and not del_snapshot: params_sn = params.object_params(image_sn) image_snapshot = QemuImg(params_sn, data_dir.get_data_dir(), image_sn) image_snapshot.remove() error.context("Get error message from command perror", logging.info) perror_cmd = "perror %s" % errn std_msg = utils.system_output(perror_cmd) std_msg = re.findall(re_std_msg, std_msg) if std_msg: std_msg = std_msg[0] else: std_msg = "" logging.warning("Can not find error 
message from perror") session.close() error.context("Compare the error message", logging.info) if std_msg: if std_msg in output: logging.info("Error message is correct in qemu-io") else: fail_log = "The error messages do not match: " fail_log += "qemu-io reports: '%s', " % output fail_log += "perror reports: '%s'" % std_msg raise error.TestFail(fail_log) else: logging.warning("Can not find error message from perror." " The output from qemu-io is %s" % output)
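The test resolves errno values by shelling out to perror. If perror is unavailable, the equivalent lookup can be done in-process; a minimal sketch (os.strerror messages come from the host libc, so the wording may differ slightly from perror's):

import os

def errno_to_message(errn):
    """Map a numeric errno to its strerror text."""
    return os.strerror(int(errn))

# errno_to_message("28") -> 'No space left on device' on a glibc host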
def get_image_filename(params, root_dir): """ Generate an image path from params and root_dir. @param params: Dictionary containing the test parameters. @param root_dir: Base directory for relative filenames. @note: params should contain: image_name -- the name of the image file, without extension image_format -- the format of the image (qcow2, raw etc) @raise VMDeviceError: When no matching disk found (in indirect method). """ def sort_cmp(x, y): """ This function used for sort to suit for this test, first sort by len then by value. """ has_digit_x = re.findall('[vhs]d[a-z]*[\d]+', x) has_digit_y = re.findall('[vhs]d[a-z]*[\d]+', y) if not has_digit_x and not has_digit_y: if len(x) > len(y): return 1 elif len(x) < len(y): return -1 if len(x) == len(y): if has_digit_x and has_digit_y: return cmp(x, y) elif has_digit_x: return -1 elif has_digit_y: return 1 return cmp(x, y) image_name = params.get("image_name", "image") indirect_image_select = params.get("indirect_image_select") if indirect_image_select: re_name = image_name indirect_image_select = int(indirect_image_select) matching_images = utils.system_output("ls -1d %s" % re_name) matching_images = sorted(matching_images.split('\n'), cmp=sort_cmp) if matching_images[-1] == '': matching_images = matching_images[:-1] try: image_name = matching_images[indirect_image_select] except IndexError: raise virt_vm.VMDeviceError( "No matching disk found for " "name = '%s', matching = '%s' and " "selector = '%s'" % (re_name, matching_images, indirect_image_select)) for protected in params.get('indirect_image_blacklist', '').split(' '): match_image = re.match(protected, image_name) if match_image and match_image.group(0) == image_name: """ We just need raise an error if it is totally match, such as sda sda1 and so on, but sdaa should not raise an error. """ raise virt_vm.VMDeviceError( "Matching disk is in blacklist. " "name = '%s', matching = '%s' and " "selector = '%s'" % (re_name, matching_images, indirect_image_select)) image_format = params.get("image_format", "qcow2") gluster_image = params.get("gluster_brick") if params.get("image_raw_device") == "yes": return image_name if image_format: image_filename = "%s.%s" % (image_name, image_format) else: image_filename = image_name if gluster_image: image_filename = gluster.get_image_filename(params, image_name, image_format) else: image_filename = utils_misc.get_path(root_dir, image_filename) return image_filename
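One portability note on the sort above: sorted(..., cmp=...) and the cmp() builtin exist only on Python 2. A hedged sketch of the Python 3 equivalent, assuming sort_cmp were ported to use the comparator below:

# Portability sketch: wrap the comparator with functools.cmp_to_key and
# replace cmp(x, y) with (x > y) - (x < y) inside sort_cmp.
from functools import cmp_to_key

def cmp_compat(x, y):
    return (x > y) - (x < y)

sample = ['sdb', 'sda2', 'sda10']
# With sort_cmp ported to call cmp_compat(), the call becomes:
#     sorted(matching_images, key=cmp_to_key(sort_cmp))
print(sorted(sample, key=cmp_to_key(cmp_compat)))  # ['sda10', 'sda2', 'sdb']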
def run(test, params, env): """ block_stream_without_backingfile test: 1). boot up guest 2). create snapshot chain (base->sn1->sn2), verify the backing file should be sn1 3). merge sn1 into sn2 (sn1->sn2), i.e. block stream with a specific base; after the job is done, check the backing file is base and sn1 is not opened by qemu 4). merge base into sn2 (base->sn2); after this step sn2 should have no backing file, and sn1 and base should not be opened by qemu 5). reboot guest and verify it works correctly 6). verify there is no backing file with the qemu-img command too; :param test: Qemu test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) alive_check_cmd = params.get("alive_check_cmd", "dir") image_file = storage.get_image_filename(params, data_dir.get_data_dir()) image_dir = os.path.dirname(image_file) qemu_img = utils_misc.get_qemu_img_binary(params) speed = int(params.get("limited_speed", 0)) wait_timeout = int(params.get("wait_timeout", 3600)) def wait_job_done(timeout=3600): """ Wait for the job on the device to finish; raise a TestFail exception on timeout; """ if utils_misc.wait_for( lambda: not vm.monitor.query_block_job(device_id), timeout, first=0.2, step=2.0, text="Wait for block job to finish") is None: raise error.TestFail("Block job did not finish in %ss" % timeout) def verify_backingfile(except_backingfile): """ Get the backing file from the monitor and verify it matches except_backingfile; if not, raise a TestFail exception; """ backing_file = vm.monitor.get_backingfile(device_id) if backing_file != except_backingfile: raise error.TestFail("Unexpected backing file (%s)" % backing_file) def get_openingfiles(): """ Return files currently opened by the qemu process; """ pid = vm.get_pid() cmd = params.get("snapshot_check_cmd") % pid return set(utils.system_output(cmd, ignore_status=True).splitlines()) snapshots = map(lambda x: os.path.join(image_dir, x), ["sn1", "sn2"]) try: error.context("Create snapshot chain (base->sn1->sn2)", logging.info) for index, snapshot in enumerate(snapshots): base_file = index and snapshots[index - 1] or image_file device_id = vm.live_snapshot(base_file, snapshot) if not device_id: raise error.TestFail("Failed to create %s" % snapshot) error.context("Check backing-file of sn2", logging.info) verify_backingfile(snapshots[0]) error.context("Merge sn1 to sn2", logging.info) vm.monitor.block_stream(device_id, base=image_file, speed=speed) wait_job_done(wait_timeout) error.context("Check backing-file of sn2", logging.info) verify_backingfile(image_file) error.context("Check sn1 is not opened by qemu process", logging.info) if snapshots[0] in get_openingfiles(): raise error.TestFail("sn1 (%s) is opened by qemu" % snapshots[0]) error.context("Merge base to sn2", logging.info) vm.monitor.block_stream(device_id) wait_job_done(wait_timeout) error.context("Check backing-file of sn2", logging.info) verify_backingfile(None) error.context("Check sn1 and base are not opened by qemu process", logging.info) if set([snapshots[0], image_file]).issubset(get_openingfiles()): raise error.TestFail("%s is opened by qemu" % set([snapshots[0], image_file])) error.context("Check backing-file of sn2 by qemu-img", logging.info) cmd = "%s info %s" % (qemu_img, snapshots[1]) if re.search("backing file", utils.system_output(cmd, ignore_status=True)): raise error.TestFail("There should be no backing file in this step") error.context("Reboot VM to check it works fine",
logging.info) session = vm.reboot(session=session, timeout=timeout) session.cmd(alive_check_cmd) finally: map(lambda x: utils.system("rm -rf %s" % x), snapshots)
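The last qemu-img check greps the raw info output. A small self-contained sketch of the same idea, usable outside the test framework; get_backing_file is a hypothetical helper and assumes qemu-img is on PATH:

import re
import subprocess

def get_backing_file(qemu_img, image):
    """Return the backing file reported by 'qemu-img info', or None."""
    output = subprocess.check_output([qemu_img, "info", image])
    match = re.search(r"backing file:\s*(\S+)", output.decode("utf-8", "replace"))
    return match.group(1) if match else None

# After the second block-stream job completes, get_backing_file("qemu-img", "sn2")
# should return None, mirroring the re.search() check in the test.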
def pre_pool(pool_name, pool_type, pool_target, emulated_image): """ Preapare the specific type pool Note: 1. For scsi type pool, it only could be created from xml file 2. Other type pools can be created by pool_creat_as function """ extra = "" if pool_type == "dir": logging.info(test.tmpdir) pool_target = os.path.join(test.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) elif pool_type == "disk": device_name = login_iscsi(emulated_image, "1G") mk_part(device_name) extra = " --source-dev %s" % device_name elif pool_type == "fs": device_name = login_iscsi(emulated_image, "1G") cmd = "mkfs.ext4 -F %s" % device_name pool_target = os.path.join(test.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) extra = " --source-dev %s" % device_name utils.run(cmd) elif pool_type == "logical": logical_device = login_iscsi(emulated_image, "1G") cmd_pv = "pvcreate %s" % logical_device vg_name = "vg_%s" % pool_type cmd_vg = "vgcreate %s %s" % (vg_name, logical_device) extra = "--source-name %s" % vg_name utils.run(cmd_pv) utils.run(cmd_vg) elif pool_type == "netfs": nfs_path = os.path.join(test.tmpdir, nfs_server_dir) if not os.path.exists(nfs_path): os.mkdir(nfs_path) pool_target = os.path.join(test.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) set_nfs_server("%s *(rw,async,no_root_squash)" % nfs_path) extra = "--source-host %s --source-path %s" % (source_host, nfs_path) elif pool_type == "iscsi": logical_device = login_iscsi(emulated_image, "100M") iscsi_session = iscsi.iscsi_get_sessions() iscsi_device = () for iscsi_node in iscsi_session: if iscsi_node[1].count(emulated_image): iscsi_device = iscsi_node break if iscsi_device == (): raise error.TestFail("No iscsi device.") if "::" in iscsi_device[0]: iscsi_device = ('localhost', iscsi_device[1]) extra = " --source-host %s --source-dev %s" % iscsi_device elif pool_type == "scsi": scsi_xml_file = params.get("scsi_xml_file") if not os.path.exists(scsi_xml_file): scsi_xml_file = os.path.join(test.tmpdir, scsi_xml_file) logical_device = login_iscsi(emulated_image, "100M") cmd = "iscsiadm -m session -P 3 |grep -B3 %s| \ grep Host|awk '{print $3}'" % logical_device.split('/')[2] scsi_host = utils.system_output(cmd) scsi_xml = """ <pool type='scsi'> <name>%s</name> <source> <adapter type='scsi_host' name='host%s'/> </source> <target> <path>/dev/disk/by-path</path> </target> </pool> """ % (pool_name, scsi_host) logging.debug("Prepare the scsi pool xml: %s", scsi_xml) xml_object = open(scsi_xml_file, 'w') xml_object.write(scsi_xml) xml_object.close() # Create pool if pool_type == "scsi": re_v = virsh.pool_create(scsi_xml_file) else: re_v = virsh.pool_create_as(pool_name, pool_type, pool_target, extra) if not re_v: raise error.TestFail("Create pool failed.") # Check the created pool if not check_pool(pool_name): raise error.TestFail("Can't find active pool: %s", pool_name)
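For orientation, the non-scsi branches above all funnel into a single pool-create-as call; a rough sketch of the command line it amounts to, with hypothetical values (option spelling follows the virsh man page rather than this test):

# Rough CLI equivalent of the dir-pool branch (hypothetical names/paths).
pool_name = "dir-pool"
pool_type = "dir"
pool_target = "/tmp/dir-pool-target"
extra = ""  # e.g. " --source-dev /dev/sdb" for the disk/fs branches
cmd = "virsh pool-create-as %s %s --target %s%s" % (
    pool_name, pool_type, pool_target, extra)
# -> "virsh pool-create-as dir-pool dir --target /tmp/dir-pool-target"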
def netload_kill_problem(session_serial): netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2") setup_cmd = params.get("setup_cmd") clean_cmd = params.get("clean_cmd") firewall_flush = "iptables -F" try: utils.run(firewall_flush) except Exception: logging.warning("Could not flush firewall rules on host") try: session_serial.cmd(firewall_flush) except aexpect.ShellError: logging.warning("Could not flush firewall rules on guest") for i in params.get("netperf_files").split(): vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp") guest_ip = vm.get_address(0) server_ip = get_corespond_ip(guest_ip) logging.info("Setup and run netperf on host and guest") session_serial.cmd(setup_cmd % "/tmp", timeout=200) utils.run(setup_cmd % netperf_dir) try: session_serial.cmd(clean_cmd) except Exception: pass session_serial.cmd(params.get("netserver_cmd") % "/tmp") utils.run(clean_cmd, ignore_status=True) utils.run(params.get("netserver_cmd") % netperf_dir) server_netperf_cmd = params.get("netperf_cmd") % ( netperf_dir, "TCP_STREAM", guest_ip, params.get("packet_size", "1500")) guest_netperf_cmd = params.get("netperf_cmd") % ( "/tmp", "TCP_STREAM", server_ip, params.get("packet_size", "1500")) tcpdump = env.get("tcpdump") pid = None if tcpdump: # Stop the background tcpdump process try: pid = int(utils.system_output("pidof tcpdump")) logging.debug("Stopping the background tcpdump") os.kill(pid, signal.SIGSTOP) except Exception: pass try: logging.info("Start heavy network load host <=> guest.") session_serial.sendline(guest_netperf_cmd) utils.BgJob(server_netperf_cmd) #Wait for create big network usage. time.sleep(10) kill_and_check(vm) finally: utils.run(clean_cmd, ignore_status=True) if tcpdump and pid: logging.debug("Resuming the background tcpdump") logging.info("pid is %s" % pid) os.kill(pid, signal.SIGCONT)
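The tcpdump handling relies on plain job-control signals: SIGSTOP parks the capture during the heavy load, SIGCONT resumes it. A self-contained sketch of that pattern, with sleep standing in for the real tcpdump process:

import os
import signal
import subprocess

# SIGSTOP freezes the background process so it does not perturb the
# measurement; SIGCONT resumes it afterwards.
proc = subprocess.Popen(["sleep", "600"])
try:
    os.kill(proc.pid, signal.SIGSTOP)   # freeze the background process
    # ... generate the network load here ...
finally:
    os.kill(proc.pid, signal.SIGCONT)   # let it continue
    proc.terminate()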
timerdevice_drift_threshold = float(params.get("timerdevice_drift_threshold", 3)) error.context("Check the system time on guest and host", logging.info) (host_time, guest_time) = utils_test.get_time(session, time_command, time_filter_re, time_format) drift = abs(float(host_time) - float(guest_time)) if drift > timerdevice_drift_threshold: raise error.TestFail("The guest's system time differs from the" " host's. Host time: '%s', guest time:" " '%s'" % (host_time, guest_time)) get_hw_time_cmd = params.get("get_hw_time_cmd") if get_hw_time_cmd: error.context("Check the hardware time on guest and host", logging.info) host_time = utils.system_output(get_hw_time_cmd) guest_time = session.cmd(get_hw_time_cmd) drift = abs(float(host_time) - float(guest_time)) if drift > timerdevice_drift_threshold: raise error.TestFail("The guest's hardware time differs from the" " host's. Host time: '%s', guest time:" " '%s'" % (host_time, guest_time)) if params.get("timerdevice_reboot_test") == "yes": sleep_time = params.get("timerdevice_sleep_time") if sleep_time: error.context("Sleep '%s' secs before reboot" % sleep_time, logging.info) sleep_time = int(sleep_time) time.sleep(sleep_time)
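The drift check itself is just an absolute difference compared against the threshold; a minimal sketch, assuming both readings are already comparable floats such as epoch seconds:

def time_drift_exceeds(host_time, guest_time, threshold=3):
    """Return (drift, over_threshold) for two comparable timestamps."""
    drift = abs(float(host_time) - float(guest_time))
    return drift, drift > threshold

# time_drift_exceeds("1000000010.0", "1000000012.5") -> (2.5, False)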
def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, image_size="100M", pre_disk_vol=[], source_name=None, source_path=None): """ Preapare the specific type pool Note: 1. For scsi type pool, it only could be created from xml file 2. Other type pools can be created by pool_creat_as function 3. Disk pool will not allow to create volume with virsh commands So we can prepare it before pool created :param pool_name: created pool name :param pool_type: dir, disk, logical, fs, netfs or else :param pool_target: target of storage pool :param emulated_image: use an image file to simulate a scsi disk it could be used for disk, logical pool :param image_size: the size for emulated image :param pre_disk_vol: a list include partition size to be created no more than 4 partition because msdos label """ extra = "" if pool_type == "dir": logging.info("Pool path:%s", self.tmpdir) pool_target = os.path.join(self.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) elif pool_type == "disk": device_name = setup_or_cleanup_iscsi(is_setup=True, emulated_image=emulated_image, image_size=image_size) # If pre_vol is None, disk pool will have no volume if type(pre_disk_vol) == list and len(pre_disk_vol): for vol in pre_disk_vol: mk_part(device_name, vol) extra = " --source-dev %s" % device_name elif pool_type == "fs": device_name = setup_or_cleanup_iscsi(is_setup=True, emulated_image=emulated_image, image_size=image_size) cmd = "mkfs.ext4 -F %s" % device_name pool_target = os.path.join(self.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) extra = " --source-dev %s" % device_name utils.run(cmd) elif pool_type == "logical": logical_device = setup_or_cleanup_iscsi( is_setup=True, emulated_image=emulated_image, image_size=image_size) cmd_pv = "pvcreate %s" % logical_device vg_name = "vg_%s" % pool_type cmd_vg = "vgcreate %s %s" % (vg_name, logical_device) extra = "--source-name %s" % vg_name utils.run(cmd_pv) utils.run(cmd_vg) # Create a small volume for verification # And VG path will not exist if no any volume in.(bug?) 
cmd_lv = "lvcreate --name default_lv --size 1M %s" % vg_name utils.run(cmd_lv) elif pool_type == "netfs": nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server") nfs_path = os.path.join(self.tmpdir, nfs_server_dir) if not os.path.exists(nfs_path): os.mkdir(nfs_path) pool_target = os.path.join(self.tmpdir, pool_target) if not os.path.exists(pool_target): os.mkdir(pool_target) setup_or_cleanup_nfs(is_setup=True, export_options="rw,async,no_root_squash", mount_src=nfs_path) source_host = self.params.get("source_host", "localhost") extra = "--source-host %s --source-path %s" % (source_host, nfs_path) elif pool_type == "iscsi": setup_or_cleanup_iscsi(is_setup=True, emulated_image=emulated_image, image_size=image_size) # Verify if expected iscsi device has been set iscsi_sessions = iscsi.iscsi_get_sessions() iscsi_target = () for iscsi_node in iscsi_sessions: if iscsi_node[1].count(emulated_image): # Remove port for pool operations ip_addr = iscsi_node[0].split(":3260")[0] iscsi_device = (ip_addr, iscsi_node[1]) break if iscsi_device == (): raise error.TestFail("No matched iscsi device.") if "::" in iscsi_device[0]: iscsi_device = ('localhost', iscsi_device[1]) extra = " --source-host %s --source-dev %s" % iscsi_device elif pool_type == "scsi": scsi_xml_file = self.params.get("scsi_xml_file") if not os.path.exists(scsi_xml_file): scsi_xml_file = os.path.join(self.tmpdir, scsi_xml_file) logical_device = setup_or_cleanup_iscsi( is_setup=True, emulated_image=emulated_image, image_size=image_size) cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| grep Host|awk " "'{print $3}'" % logical_device.split('/')[2]) scsi_host = utils.system_output(cmd) scsi_xml = """ <pool type='scsi'> <name>%s</name> <source> <adapter type='scsi_host' name='host%s'/> </source> <target> <path>/dev/disk/by-path</path> </target> </pool> """ % (pool_name, scsi_host) logging.debug("Prepare the scsi pool xml: %s", scsi_xml) xml_object = open(scsi_xml_file, 'w') xml_object.write(scsi_xml) xml_object.close() elif pool_type == "gluster": # Prepare gluster service and create volume hostip = setup_or_cleanup_gluster(True, source_name, pool_name=pool_name) logging.debug("hostip is %s", hostip) cleanup_gluster = True extra = "--source-host %s --source-path %s --source-name %s" % \ (hostip, source_path, source_name) # Create pool if pool_type == "scsi": re_v = virsh.pool_create(scsi_xml_file) else: re_v = virsh.pool_create_as(pool_name, pool_type, pool_target, extra) if not re_v: raise error.TestFail("Create pool failed.") # Check the created pool check_actived_pool(pool_name)
def get_disk_list(std_mounts_only=True, get_all_disks=False): """ Get a list of dictionaries with information about disks on this system. :param std_mounts_only: Whether the function should return only disks that have a mount point defined (True) or even devices that doesn't (False). :param get_all_disks: Whether the function should return only partitioned disks (False) or return every disk, regardless of being partitioned or not (True). :return: List of dictionaries with disk information (see more below). The 'disk_list' array returned by get_disk_list() has an entry for each disk drive we find on the box. Each of these entries is a map with the following 3 string values: 'device' disk device name (i.e. the part after /dev/) 'mountpt' disk mount path 'tunable' disk name for setting scheduler tunables (/sys/block/sd??) The last value is an integer that indicates the current mount status of the drive: 'mounted' 0 = not currently mounted 1 = mounted r/w on the expected path -1 = mounted readonly or at an unexpected path When the 'std_mounts_only' argument is True we don't include drives mounted on 'unusual' mount points in the result. If a given device is partitioned, it will return all partitions that exist on it. If it's not, it will return the device itself (ie, if there are /dev/sdb1 and /dev/sdb2, those will be returned but not /dev/sdb. if there is only a /dev/sdc, that one will be returned). """ # Get hold of the currently mounted file systems mounts = utils.system_output('mount').splitlines() # Grab all the interesting disk partition names from /proc/partitions, # and build up the table of drives present in the system. hd_list = [] # h for IDE drives, s for SATA/SCSI drives, v for Virtio drives hd_regexp = re.compile("([hsv]d[a-z]+3)$") partfile = open(_DISKPART_FILE) for partline in partfile: parts = partline.strip().split() if len(parts) != 4 or partline.startswith('major'): continue # Get hold of the partition name partname = parts[3] if not get_all_disks: # The partition name better end with a digit # (get only partitioned disks) if not partname[-1:].isdigit(): continue # Process any site-specific filters on the partition name if not fd_mgr.use_partition(partname): continue # We need to know the IDE/SATA/... device name for setting tunables tunepath = fd_mgr.map_drive_name(partname) # Check whether the device is mounted (and how) mstat = 0 fstype = '' fsopts = '' fsmkfs = '?' # Prepare the full device path for matching chkdev = '/dev/' + partname mountpt = None for mln in mounts: splt = mln.split() # Typical 'mount' output line looks like this (indices # for the split() result shown below): # # <device> on <mount_point> type <fstp> <options> # 0 1 2 3 4 5 if splt[0].strip() == chkdev.strip(): # Make sure the mount point looks reasonable mountpt = fd_mgr.check_mount_point(partname, splt[2]) if not mountpt: mstat = -1 break # Grab the file system type and mount options fstype = splt[4] fsopts = splt[5] # Check for something other than a r/w mount if fsopts[:3] != '(rw': mstat = -1 break # The drive is mounted at the 'normal' mount point mstat = 1 # Does the caller only want to allow 'standard' mount points? if std_mounts_only and mstat < 0: continue if not get_all_disks: # Was this partition mounted at all? 
if not mountpt: mountpt = fd_mgr.check_mount_point(partname, None) # Ask the client where we should mount this partition if not mountpt: continue # Looks like we have a valid disk drive, add it to the list hd_list.append({ 'device': partname, 'mountpt': mountpt, 'tunable': tunepath, 'fs_type': fstype, 'fs_opts': fsopts, 'fs_mkfs': fsmkfs, 'mounted': mstat }) return hd_list
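The mount matching above depends on the classic textual layout of mount output. A standalone sketch of the same parsing, for reference; parse_mounts is a hypothetical helper, not part of this module:

import subprocess

def parse_mounts():
    """Return {device: (mountpoint, fstype, options)} parsed from 'mount'.

    Relies on the "<dev> on <mnt> type <fs> (<opts>)" layout, exactly as
    the loop above does.
    """
    table = {}
    output = subprocess.check_output(["mount"]).decode("utf-8", "replace")
    for line in output.splitlines():
        parts = line.split()
        if len(parts) >= 6 and parts[1] == "on" and parts[3] == "type":
            table[parts[0]] = (parts[2], parts[4], parts[5])
    return table

# parse_mounts().get("/dev/sda1") might yield ("/boot", "ext4", "(rw,relatime)")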
def run(test, params, env): """ Timer device check TSC synchronity after change host clocksource: 1) Check for an appropriate clocksource on host. 2) Boot the guest. 3) Check the guest is using vsyscall. 4) Copy time-warp-test.c to guest. 5) Compile the time-warp-test.c. 6) Switch host to hpet clocksource. 6) Run time-warp-test. 7) Check the guest is using vsyscall. :param test: QEMU test object. :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ error.context("Check for an appropriate clocksource on host", logging.info) host_cmd = "cat /sys/devices/system/clocksource/" host_cmd += "clocksource0/current_clocksource" if not "tsc" in utils.system_output(host_cmd): raise error.TestNAError("Host must use 'tsc' clocksource") error.context("Boot the guest with one cpu socket", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) error.context("Check the guest is using vsyscall", logging.info) date_cmd = "strace date 2>&1|egrep 'clock_gettime|gettimeofday'|wc -l" output = session.cmd(date_cmd) if not '0' in output: raise error.TestFail("Failed to check vsyscall. Output: '%s'" % output) error.context("Copy time-warp-test.c to guest", logging.info) src_file_name = os.path.join(data_dir.get_deps_dir(), "tsc_sync", "time-warp-test.c") vm.copy_files_to(src_file_name, "/tmp") error.context("Compile the time-warp-test.c", logging.info) cmd = "cd /tmp/;" cmd += " yum install -y popt-devel;" cmd += " rm -f time-warp-test;" cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt" session.cmd(cmd) error.context("Run time-warp-test", logging.info) test_run_timeout = int(params.get("test_run_timeout", 10)) session.sendline("$(sleep %d; pkill time-warp-test) &" % test_run_timeout) cmd = "/tmp/time-warp-test" _, output = session.cmd_status_output(cmd, timeout=(test_run_timeout + 60)) re_str = "fail:(\d+).*?fail:(\d+).*fail:(\d+)" fail_cnt = re.findall(re_str, output) if not fail_cnt: raise error.TestError("Could not get correct test output." " Output: '%s'" % output) tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]] if tsc_cnt or tod_cnt or clk_cnt: msg = output.splitlines()[-5:] raise error.TestFail("Get error when running time-warp-test." " Output (last 5 lines): '%s'" % msg) try: error.context("Switch host to hpet clocksource", logging.info) cmd = "echo hpet > /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" utils.system(cmd) error.context("Run time-warp-test after change the host clock source", logging.info) cmd = "$(sleep %d; pkill time-warp-test) &" session.sendline(cmd % test_run_timeout) cmd = "/tmp/time-warp-test" _, output = session.cmd_status_output(cmd, timeout=(test_run_timeout + 60)) fail_cnt = re.findall(re_str, output) if not fail_cnt: raise error.TestError("Could not get correct test output." " Output: '%s'" % output) tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]] if tsc_cnt or tod_cnt or clk_cnt: msg = output.splitlines()[-5:] raise error.TestFail("Get error when running time-warp-test." " Output (last 5 lines): '%s'" % msg) output = session.cmd(date_cmd) if not "1" in output: raise error.TestFail("Failed to check vsyscall." 
" Output: '%s'" % output) finally: error.context("Restore host to tsc clocksource", logging.info) cmd = "echo tsc > /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" try: utils.system(cmd) except Exception, detail: logging.error("Failed to restore host clocksource." "Detail: %s" % detail)
def run(test, params, env): """ Network stress test with netperf. 1) Boot up VM(s), setup SSH authorization between host and guest(s)/external host 2) Prepare the test environment in server/client/host 3) Execute netperf tests, collect and analyze the results :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def env_setup(session, ip, user, port, password): error.context("Setup env for %s" % ip) ssh_cmd(session, "iptables -F", ignore_status=True) ssh_cmd(session, "service iptables stop", ignore_status=True) ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore") download_link = params.get("netperf_download_link") download_dir = data_dir.get_download_dir() md5sum = params.get("pkg_md5sum") pkg = utils.unmap_url_cache(download_dir, download_link, md5sum) remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp") ssh_cmd(session, params.get("setup_cmd")) agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py") remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp") def _pin_vm_threads(vm, node): if node: if not isinstance(node, utils_misc.NumaNode): node = utils_misc.NumaNode(int(node)) utils_test.qemu.pin_vm_threads(vm, node) return node vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=login_timeout) queues = int(params.get("queues", 1)) if queues > 1: if params.get("os_type") == "linux": ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0)) session.cmd_status_output("ethtool -L %s combined %s" % (ethname, queues)) else: logging.info("FIXME: support to enable MQ for Windows guest!") config_cmds = params.get("config_cmds") if config_cmds: for config_cmd in config_cmds.split(","): cmd = params.get(config_cmd.strip()) if cmd: s, o = session.cmd_status_output(cmd) if s: msg = "Config command %s failed. 
Output: %s" % (cmd, o) raise error.TestError(msg) if params.get("reboot_after_config", "yes") == "yes": session = vm.reboot(session=session, timeout=login_timeout) if params.get("rh_perf_envsetup_script"): utils_test.service_setup(vm, session, test.virtdir) session.close() server_ip = vm.wait_for_get_address(0, timeout=5) server_ctl = vm.wait_for_login(timeout=login_timeout) server_ctl_ip = server_ip if (params.get("os_type") == "windows" and params.get("use_cygwin") == "yes"): cygwin_prompt = params.get("cygwin_prompt", r"\$\s+$") cygwin_start = params.get("cygwin_start") server_cyg = vm.wait_for_login(timeout=login_timeout) server_cyg.set_prompt(cygwin_prompt) server_cyg.cmd_output(cygwin_start) else: server_cyg = None if len(params.get("nics", "").split()) > 1: vm.wait_for_login(nic_index=1, timeout=login_timeout) server_ctl_ip = vm.wait_for_get_address(1, timeout=5) logging.debug(commands.getoutput("numactl --hardware")) logging.debug(commands.getoutput("numactl --show")) # pin guest vcpus/memory/vhost threads to last numa node of host by default numa_node = _pin_vm_threads(vm, params.get("numa_node")) host = params.get("host", "localhost") host_ip = host if host != "localhost": params_host = params.object_params("host") host = remote.wait_for_login(params_host.get("shell_client"), host_ip, params_host.get("shell_port"), params_host.get("username"), params_host.get("password"), params_host.get("shell_prompt")) client = params.get("client", "localhost") client_ip = client clients = [] # client session 1 for control, session 2 for data communication for i in range(2): if client in params.get("vms"): vm_client = env.get_vm(client) tmp = vm_client.wait_for_login(timeout=login_timeout) client_ip = vm_client.wait_for_get_address(0, timeout=5) elif client != "localhost": tmp = remote.wait_for_login(params.get("shell_client_client"), client_ip, params.get("shell_port_client"), params.get("username_client"), params.get("password_client"), params.get("shell_prompt_client")) else: tmp = "localhost" clients.append(tmp) client = clients[0] vms_list = params["vms"].split() if len(vms_list) > 1: vm2 = env.get_vm(vms_list[-1]) vm2.verify_alive() session2 = vm2.wait_for_login(timeout=login_timeout) if params.get("rh_perf_envsetup_script"): utils_test.service_setup(vm2, session2, test.virtdir) client = vm2.wait_for_login(timeout=login_timeout) client_ip = vm2.get_address() session2.close() _pin_vm_threads(vm2, numa_node) error.context("Prepare env of server/client/host", logging.info) prepare_list = set([server_ctl, client, host]) tag_dict = {server_ctl: "server", client: "client", host: "host"} ip_dict = {server_ctl: server_ctl_ip, client: client_ip, host: host_ip} for i in prepare_list: params_tmp = params.object_params(tag_dict[i]) if params_tmp.get("os_type") == "linux": shell_port = int(params_tmp["shell_port"]) password = params_tmp["password"] username = params_tmp["username"] env_setup(i, ip_dict[i], username, shell_port, password) env.stop_tcpdump() error.context("Start netperf testing", logging.info) start_test(server_ip, server_ctl, host, clients, test.resultsdir, l=int(params.get('l')), sessions_rr=params.get('sessions_rr'), sessions=params.get('sessions'), sizes_rr=params.get('sizes_rr'), sizes=params.get('sizes'), protocols=params.get('protocols'), ver_cmd=params.get('ver_cmd', "rpm -q qemu-kvm"), netserver_port=params.get('netserver_port', "12865"), params=params, server_cyg=server_cyg, test=test) if params.get("log_hostinfo_script"): src = os.path.join(test.virtdir, 
params.get("log_hostinfo_script")) path = os.path.join(test.resultsdir, "sysinfo") utils.system_output("bash %s %s &> %s" % (src, test.resultsdir, path)) if params.get("log_guestinfo_script") and params.get("log_guestinfo_exec"): src = os.path.join(test.virtdir, params.get("log_guestinfo_script")) path = os.path.join(test.resultsdir, "sysinfo") destpath = params.get("log_guestinfo_path", "/tmp/log_guestinfo.sh") vm.copy_files_to(src, destpath) logexec = params.get("log_guestinfo_exec", "bash") output = server_ctl.cmd_output("%s %s" % (logexec, destpath)) logfile = open(path, "a+") logfile.write(output) logfile.close()
def is_down(self): output = utils.system_output('ifconfig %s' % self._name) if output: return 'UP' not in output return False
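On systems without ifconfig, the same check can be expressed with ip link; a hedged alternative sketch, not part of the original class:

import subprocess

def is_down_iproute2(ifname):
    """Alternative check using 'ip -o link show' instead of ifconfig.

    The flags field is printed as e.g. "<BROADCAST,MULTICAST,UP,LOWER_UP>";
    the interface counts as down when the UP flag is absent.
    """
    output = subprocess.check_output(
        ["ip", "-o", "link", "show", ifname]).decode("utf-8", "replace")
    flags = output.split("<", 1)[-1].split(">", 1)[0].split(",")
    return "UP" not in flags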
def run_once(self, test_name): stress_ng = os.path.join(self.srcdir, 'stress-ng', 'stress-ng') cmd = '%s/ubuntu_overlayfs_smoke_test.sh' % (self.bindir) self.results = utils.system_output(cmd, retain_output=True) print self.results
def run(test, params, env): """ soft lockup/drift test with stress. 1) Boot up a VM. 2) Build stress on host and guest. 3) run heartbeat with the given options on server and host. 3) Run for a relatively long time length. ex: 12, 18 or 24 hours. 4) Output the test result and observe drift. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ stress_setup_cmd = params.get("stress_setup_cmd", None) stress_cmd = params.get("stress_cmd") server_setup_cmd = params.get("server_setup_cmd") drift_cmd = params.get("drift_cmd") kill_stress_cmd = params.get("kill_stress_cmd") kill_monitor_cmd = params.get("kill_monitor_cmd") threshold = int(params.get("stress_threshold")) monitor_log_file_server = params.get("monitor_log_file_server") monitor_log_file_client = params.get("monitor_log_file_client") test_length = int(3600 * float(params.get("test_length"))) monitor_port = int(params.get("monitor_port")) vm = env.get_vm(params["main_vm"]) login_timeout = int(params.get("login_timeout", 360)) auto_dir = os.environ.get("AUTODIR", os.environ.get("AUTOTEST_PATH")) stress_dir = os.path.join(auto_dir, "tests", "stress") monitor_dir = params.get("monitor_dir", data_dir.get_deps_dir("softlockup")) def _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd): logging.info("Kill stress and monitor on guest") try: session.cmd(kill_stress_cmd) except Exception: pass try: session.cmd(kill_monitor_cmd) except Exception: pass def _kill_host_programs(kill_stress_cmd, kill_monitor_cmd): logging.info("Kill stress and monitor on host") utils.run(kill_stress_cmd, ignore_status=True) utils.run(kill_monitor_cmd, ignore_status=True) def host(): logging.info("Setup monitor server on host") # Kill previous instances of the host load programs, if any _kill_host_programs(kill_stress_cmd, kill_monitor_cmd) # Cleanup previous log instances if os.path.isfile(monitor_log_file_server): os.remove(monitor_log_file_server) # Opening firewall ports on host utils.run("iptables -F", ignore_status=True) # Run heartbeat on host utils.run( server_setup_cmd % (monitor_dir, threshold, monitor_log_file_server, monitor_port)) if stress_setup_cmd is not None: logging.info("Build stress on host") # Uncompress and build stress on host utils.run(stress_setup_cmd % stress_dir) logging.info("Run stress on host") # stress_threads = 2 * n_cpus threads_host = 2 * utils.count_cpus() # Run stress test on host utils.run(stress_cmd % (stress_dir, threads_host)) def guest(): try: host_ip = socket.gethostbyname(socket.gethostname()) except socket.error: try: # Hackish, but works well on stand alone (laptop) setups # with access to the internet. If this fails, well, then # not much else can be done... 
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("redhat.com", 80)) host_ip = s.getsockname()[0] except socket.error, (value, e): raise error.TestError("Could not determine host IP: %d %s" % (value, e)) # Now, starting the guest vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) # Kill previous instances of the load programs, if any _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd) # Clean up previous log instances session.cmd("rm -f %s" % monitor_log_file_client) # Opening firewall ports on guest try: session.cmd("iptables -F") except Exception: pass # Get monitor files and copy them from host to guest monitor_path = os.path.join(data_dir.get_deps_dir(), 'softlockup', 'heartbeat_slu.py') vm.copy_files_to(monitor_path, "/tmp") logging.info("Setup monitor client on guest") # Start heartbeat on guest session.cmd( params.get("client_setup_cmd") % ("/tmp", host_ip, monitor_log_file_client, monitor_port)) if stress_setup_cmd is not None: # Copy, uncompress and build stress on guest stress_source = params.get("stress_source") stress_path = os.path.join(stress_dir, stress_source) vm.copy_files_to(stress_path, "/tmp") logging.info("Build stress on guest") session.cmd(stress_setup_cmd % "/tmp", timeout=200) logging.info("Run stress on guest") # stress_threads = 2 * n_vcpus threads_guest = 2 * int(params.get("smp", 1)) # Run stress test on guest session.cmd(stress_cmd % ("/tmp", threads_guest)) # Wait and report logging.debug("Wait for %d s", test_length) time.sleep(test_length) # Kill instances of the load programs on both guest and host _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd) _kill_host_programs(kill_stress_cmd, kill_monitor_cmd) # Collect drift drift = utils.system_output(drift_cmd % monitor_log_file_server) logging.info("Drift noticed: %s", drift)
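The connected-UDP-socket fallback used above is a handy trick for discovering the host's outbound address; a minimal standalone sketch (the probe address is arbitrary, and connecting a UDP socket sends no packets, it only selects a route):

import socket

def get_outbound_ip(probe_host="8.8.8.8", probe_port=80):
    """Return the local IP the kernel would use to reach probe_host."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((probe_host, probe_port))
        return s.getsockname()[0]
    finally:
        s.close()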
def run_once(self, test_name): os.chdir(os.path.join(self.srcdir, 'stress-ng')) cmd = '%s/ubuntu_stress_smoke_test.sh' % (self.bindir) self.results = utils.system_output(cmd, retain_output=True)