def install_packages(self):
    '''
    Install the packages the test depends on: ksh everywhere, plus
    distro-specific extras (debs on Ubuntu, an rpm-delivered ibmtools
    bundle elsewhere).
    '''
    smm = SoftwareManager()
    dist = distro.detect()
    self.log.info("Test is running on %s", dist.name)
    # ksh is mandatory on every distribution.
    if not smm.check_installed("ksh") and not smm.install("ksh"):
        self.cancel('ksh is needed for the test to be run')
    if dist.name == "Ubuntu":
        if not smm.check_installed("python-paramiko") and not \
                smm.install("python-paramiko"):
            self.cancel('python-paramiko is needed for the test to be run')
        ubuntu_url = self.params.get('ubuntu_url', default=None)
        debs = self.params.get('debs', default=None)
        if not ubuntu_url or not debs:
            self.cancel("No url specified")
        # Fetch every requested deb from the base url and install it.
        for deb in debs:
            fetched = self.fetch_asset(os.path.join(ubuntu_url, deb),
                                       expire='7d')
            shutil.copy(fetched, self.workdir)
            process.system("dpkg -i %s/%s" % (self.workdir, deb),
                           ignore_status=True, sudo=True)
    else:
        url = self.params.get('url', default=None)
        if not url:
            self.cancel("No url specified")
        fetched = self.fetch_asset(url, expire='7d')
        shutil.copy(fetched, self.workdir)
        os.chdir(self.workdir)
        process.run('chmod +x ibmtools')
        process.run('./ibmtools --install --managed')
def setUp(self):
    '''
    Set up required packages and gather necessary test inputs.

    Installs ksh (plus python-paramiko and extra debs on Ubuntu, or an
    rpm-delivered ibmtools bundle elsewhere), reads the HMC/LPAR
    parameters, resolves the PCI slot location code, and caches the DRC
    index and LPAR id of the device under test.
    '''
    sm = SoftwareManager()
    detected_distro = distro.detect()
    self.log.info("Test is running on: %s", detected_distro.name)
    if not sm.check_installed("ksh") and not sm.install("ksh"):
        self.cancel('ksh is needed for the test to be run')
    if detected_distro.name == "Ubuntu":
        if not sm.check_installed("python-paramiko") and not \
                sm.install("python-paramiko"):
            self.cancel('python-paramiko is needed for the test to be run')
        ubuntu_url = self.params.get('ubuntu_url', default=None)
        debs = self.params.get('debs', default=None)
        # Guard against iterating a missing 'debs' list (TypeError in
        # the old code when the parameter was absent).
        if not ubuntu_url or not debs:
            self.cancel("No url specified")
        for deb in debs:
            deb_url = os.path.join(ubuntu_url, deb)
            deb_install = self.fetch_asset(deb_url, expire='7d')
            shutil.copy(deb_install, self.srcdir)
            process.system("dpkg -i %s/%s" % (self.srcdir, deb),
                           ignore_status=True, sudo=True)
    else:
        url = self.params.get('url', default=None)
        if not url:
            self.cancel("No url specified")
        rpm_install = self.fetch_asset(url, expire='7d')
        shutil.copy(rpm_install, self.srcdir)
        os.chdir(self.srcdir)
        process.run('chmod +x ibmtools')
        process.run('./ibmtools --install --managed')
    # HMC credentials and LPAR/device parameters from the yaml.
    self.hmc_ip = self.params.get("hmc_ip", '*', default=None)
    self.hmc_pwd = self.params.get("hmc_pwd", '*', default=None)
    self.hmc_username = self.params.get("hmc_username", '*', default=None)
    self.lpar_1 = self.params.get("lpar_1", '*', default=None)
    self.lpar_2 = self.params.get("lpar_2", '*', default=None)
    self.pci_device = self.params.get("pci_device", '*', default=None)
    self.server = self.params.get("server", '*', default=None)
    self.loc_code = pci.get_slot_from_sysfs(self.pci_device)
    self.num_of_dlpar = int(self.params.get("num_of_dlpar", default='1'))
    if self.loc_code is None:
        self.cancel("Failed to get the location code for the pci device")
    self.login(self.hmc_ip, self.hmc_username, self.hmc_pwd)
    self.run_command("uname -a")
    # Query the HMC for the slot matching our location code and cache
    # the DRC index / owning LPAR id for later dlpar operations.
    cmd = 'lshwres -r io -m ' + self.server + \
        ' --rsubtype slot --filter lpar_names=' + self.lpar_1 + \
        ' -F drc_index,lpar_id,drc_name | grep -i %s ' % self.loc_code
    output = self.run_command(cmd)
    self.drc_index = output[-1].split(',')[0]
    self.lpar_id = output[-1].split(',')[1]
    self.log.info("lpar_id : %s, loc_code: %s, drc_index: %s",
                  self.lpar_id, self.loc_code, self.drc_index)
def setUp(self):
    """
    Install all the required dependencies.

    Building the source tarball requires OS-specific packages; if they
    cannot be installed the test is cancelled. Finally clones ltrace
    and builds it in the scratch directory.
    """
    backend = SoftwareManager()
    dist = distro.detect()
    if not backend.check_installed("gcc") and not backend.install("gcc"):
        # cancel (not error): a missing compiler is an environment
        # problem, consistent with the rest of this suite.
        self.cancel("gcc is needed for the test to be run")
    pkgs = ['git', 'wget', 'autoconf', 'automake', 'dejagnu', 'binutils']
    if dist.name == 'sles':
        pkgs += ['build', 'libdw-devel', 'libelf-devel', 'elfutils',
                 'binutils-devel', 'libtool', 'gcc-c++']
    elif dist.name in ("redhat", "fedora"):
        pkgs += ['elfutils-devel', 'elfutils-libelf-devel',
                 'elfutils-libelf', 'elfutils-libs', 'libtool-ltdl']
    elif dist.name == 'ubuntu':
        pkgs += ['elfutils', 'libelf-dev', 'libtool', 'libelf1',
                 'librpmbuild3', 'binutils-dev']
    else:
        # logger.warn() is deprecated in favour of warning().
        self.log.warning("Unsupported OS!")
    for pkg in pkgs:
        if not backend.check_installed(pkg):
            if backend.install(pkg):
                # A successful install is informational, not a warning.
                self.log.info("%s installed successfully", pkg)
            else:
                self.cancel("Fail to install package- %s required for "
                            "this test" % pkg)
    # Source: git clone git://git.debian.org/git/collab-maint/ltrace.git
    git.get_repo('git://git.debian.org/git/collab-maint/ltrace.git',
                 destination_dir=os.path.join(self.srcdir, 'ltrace'))
    self.src_lt = os.path.join(self.srcdir, "ltrace")
    os.chdir(self.src_lt)
    process.run('./autogen.sh')
    process.run('./configure')
    build.make(self.src_lt)
def setUp(self):
    """
    Check pre-requisites before running sensors command
    Testcase should be executed only on bare-metal environment.
    """
    s_mg = SoftwareManager()
    d_distro = distro.detect()
    # The lm-sensors utility ships under a different package name on
    # each distribution family.
    if d_distro.name == "Ubuntu":
        if not s_mg.check_installed("lm-sensors") and not s_mg.install(
                "lm-sensors"):
            self.cancel('Need sensors to run the test')
    elif d_distro.name == "SuSE":
        if not s_mg.check_installed("sensors") and not s_mg.install(
                "sensors"):
            self.cancel('Need sensors to run the test')
    else:
        if not s_mg.check_installed("lm_sensors") and not s_mg.install(
                "lm_sensors"):
            self.cancel('Need sensors to run the test')
    if d_distro.arch in ["ppc64", "ppc64le"]:
        # On Power the test only applies to bare metal: /proc/cpuinfo
        # reports the PowerNV platform there; guests/LPARs are skipped.
        # NOTE(review): cpu._list_matches is a private avocado helper —
        # consider a public alternative.
        if not cpu._list_matches(open('/proc/cpuinfo').readlines(),
                                 'platform\t: PowerNV\n'):
            self.cancel(
                'sensors test is applicable to bare-metal environment.')
        # The ibmpowernv driver exposes the on-chip sensors; verify the
        # kernel either built it in or can load it as a module.
        config_check = linux_modules.check_kernel_config(
            'CONFIG_SENSORS_IBMPOWERNV')
        if config_check == 0:
            # assumes 0 == "config option not set" — TODO confirm
            # against linux_modules.check_kernel_config().
            self.cancel('Config is not set')
        elif config_check == 1:
            # Built as a module: load it and confirm it registered.
            if linux_modules.load_module('ibmpowernv'):
                if linux_modules.module_is_loaded('ibmpowernv'):
                    self.log.info('Module Loaded Successfully')
                else:
                    self.cancel('Module Loading Failed')
        else:
            self.log.info('Module is Built In')
    if not d_distro.name == "Ubuntu":
        # Restart the service so freshly loaded modules are picked up;
        # a failure here usually means the module is not loaded.
        try:
            process.run('service lm_sensors stop', sudo=True)
            process.run('service lm_sensors start', sudo=True)
            process.run('service lm_sensors status', sudo=True)
        except process.CmdError:
            self.error(
                'Starting Service Failed. Make sure module is loaded')
    # Auto-accept every sensors-detect prompt; bail out when the scan
    # finds nothing to monitor.
    # NOTE(review): on Python 3, .stdout is bytes, so the substring
    # check below would raise TypeError — confirm target interpreter.
    cmd = "yes | sudo sensors-detect"
    det_op = process.run(cmd, shell=True, ignore_status=True).stdout
    if 'no sensors were detected' in det_op:
        self.cancel('No sensors found to test !')
def setUp(self):
    '''
    Install the basic packages to support perf
    '''
    # Basic build utilities plus the distro-specific perf package.
    smm = SoftwareManager()
    detected_distro = distro.detect()
    kernel_ver = platform.uname()[2]
    required = ['gcc', 'make']
    if 'Ubuntu' in detected_distro.name:
        # Ubuntu ships perf inside the kernel-versioned tools packages.
        required += ['linux-tools-common', 'linux-tools-%s' % kernel_ver]
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos',
                                  'redhat']:
        required.append('perf')
    else:
        self.cancel("Install the package for perf supported\
                by %s" % detected_distro.name)
    for pkg in required:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel('%s is needed for the test to be run' % pkg)
    # Fetch the perftool testsuite the test methods will drive.
    locations = ["https://github.com/rfmvh/perftool-testsuite/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("perftool.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.sourcedir = os.path.join(self.srcdir, 'perftool-testsuite-master')
def setUp(self):
    '''
    Build interbench
    Source:
    http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2
    '''
    softm = SoftwareManager()
    for dep in ['gcc', 'patch']:
        if not (softm.check_installed(dep) or softm.install(dep)):
            self.cancel("%s is needed for the test to be run" % dep)
    # The benchmark writes a file roughly as large as RAM; make sure
    # the scratch space can hold it.
    disk_free_mb = (disk.freespace(self.teststmpdir) / 1024) / 1024
    if memory.memtotal()/1024 > disk_free_mb:
        self.cancel('Disk space is less than total memory. Skipping test')
    tarball = self.fetch_asset('http://slackware.cs.utah.edu/pub/kernel'
                               '.org/pub/linux/kernel/people/ck/apps/'
                               'interbench/interbench-0.31.tar.gz')
    data_dir = os.path.abspath(self.datadir)
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.sourcedir = os.path.join(self.srcdir, version)
    # Patch for make file
    os.chdir(self.sourcedir)
    makefile_patch = 'patch -p1 < %s ' % (
        os.path.join(data_dir, 'makefile_fix.patch'))
    process.run(makefile_patch, shell=True)
    build.make(self.sourcedir)
def setUp(self):
    """
    Build 'fio and ezfio'.

    Cancels when the target disk is absent, builds fio from git,
    clones ezfio, raises the AIO limit and records the starting cwd.
    """
    self.disk = self.params.get('disk', default='/dev/nvme0n1')
    cmd = 'ls %s' % self.disk
    # Value comparison: the old "is not 0" relied on CPython small-int
    # caching and raises SyntaxWarning on modern interpreters.
    if process.system(cmd, ignore_status=True) != 0:
        self.cancel("%s does not exist" % self.disk)
    fio_path = os.path.join(self.teststmpdir, 'fio')
    fio_link = 'https://github.com/axboe/fio.git'
    git.get_repo(fio_link, destination_dir=fio_path)
    build.make(fio_path, make='./configure')
    build.make(fio_path)
    build.make(fio_path, extra_args='install')
    self.ezfio_path = os.path.join(self.teststmpdir, 'ezfio')
    ezfio_link = 'https://github.com/earlephilhower/ezfio.git'
    git.get_repo(ezfio_link, destination_dir=self.ezfio_path)
    self.utilization = self.params.get('utilization', default='100')
    # aio-max-nr is 65536 by default, and test fails if QD is 256 or above
    genio.write_file("/proc/sys/fs/aio-max-nr", "1048576")
    smm = SoftwareManager()
    # Not a package that must be installed, so not skipping.
    if not smm.check_installed("sdparm") and not smm.install("sdparm"):
        self.log.debug("Can not install sdparm")
    self.cwd = os.getcwd()
def setUp(self):
    '''
    check the availability of perftest package installed
    perftest package should be installed
    '''
    sm = SoftwareManager()
    depends = ["openssh-clients", "perftest"]
    for pkg in depends:
        if not sm.check_installed(pkg) and not sm.install(pkg):
            # self.skip() was removed from the avocado Test API;
            # cancel is the supported way to abort from setUp.
            self.cancel("%s package is need to test" % pkg)
    interfaces = netifaces.interfaces()
    self.flag = self.params.get("ext_flag", default="0")
    self.IF = self.params.get("Iface", default="")
    self.PEER_IP = self.params.get("PEERIP", default="")
    if self.IF not in interfaces:
        self.cancel("%s interface is not available" % self.IF)
    if self.PEER_IP == "":
        self.cancel("%s peer machine is not available" % self.PEER_IP)
    # RDMA adapter / port identifiers for both ends of the link.
    self.CA = self.params.get("CA_NAME", default="mlx4_0")
    self.PORT = self.params.get("PORT_NUM", default="1")
    self.PEER_CA = self.params.get("PEERCA", default="mlx4_0")
    self.PEER_PORT = self.params.get("PEERPORT", default="1")
    self.to = self.params.get("timeout", default="600")
    self.tool_name = self.params.get("tool", default="")
    if self.tool_name == "":
        self.cancel("should specify tool name")
    # Lazy %-style arguments: the string is only built when emitted.
    self.log.info("test with %s", self.tool_name)
    self.test_op = self.params.get("test_opt", default="").split(",")
    self.ext_test_op = self.params.get("ext_opt", default="").split(",")
def setUp(self):
    '''
    Build FileBench
    Source:
    https://github.com/filebench/filebench/releases/download/1.5-alpha3/filebench-1.5-alpha3.tar.gz
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    deps = ['libtool', 'automake', 'autoconf', 'bison', 'gcc', 'flex']
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            # cancel (not error): a missing dependency is an
            # environment problem, consistent with the sibling test.
            self.cancel(package + ' is needed for the test to be run')
    self._testfile = self.params.get('testfile', default='fileserver.f')
    # Fixed URL typo: 'ownload' -> 'download' (the old URL 404'd).
    tarball = self.fetch_asset('https://github.com/filebench/'
                               'filebench/releases/download/1.5-alpha3/'
                               'filebench-1.5-alpha3.tar.gz', expire='7d')
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.srcdir = os.path.join(self.srcdir, version)
    os.chdir(self.srcdir)
    process.run('./configure', shell=True, sudo=True)
    build.make(self.srcdir)
    build.make(self.srcdir, extra_args='install')
    # Setup test file
    t_dir = '/usr/local/share/filebench/workloads/'
    shutil.copyfile(os.path.join(t_dir, self._testfile),
                    os.path.join(self.srcdir, self._testfile))
def setUp(self):
    '''
    Build Stutter Test
    Source: https://github.com/gaowanlong/stutter/archive/master.zip
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.cancel('Gcc is needed for the test to be run')
    locations = ["https://github.com/gaowanlong/stutter/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("stutter.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.srcdir = os.path.join(self.srcdir, 'stutter-master')
    mem_byte = str(memory.memtotal())
    # Log instead of the old bare "print" (debug leftover and
    # Python-2-only syntax).
    self.log.info("MEMTOTAL_BYTES default: %s", mem_byte)
    self._memory = self.params.get('memory', default=mem_byte)
    self._iteration = self.params.get('iteration', default='10')
    self._logdir = self.params.get('logdir', default='/var/tmp/logdir')
    self._rundir = self.params.get('rundir', default='/tmp')
    process.run('mkdir -p %s' % self._logdir)
    # export env variable, used by test script
    os.environ['MEMTOTAL_BYTES'] = self._memory
    os.environ['ITERATIONS'] = self._iteration
    os.environ['LOGDIR_RESULTS'] = self._logdir
    os.environ['TESTDISK_DIR'] = self._rundir
    build.make(self.srcdir)
def setUp(self):
    '''
    Copy the tlbflush sources from the data directory, apply the
    bundled patch and compile the benchmark binary.
    '''
    self.tlbflush_max_entries = self.params.get('entries', default=200)
    self.tlbflush_iteration = self.params.get('iterations', default=50)
    self.nr_threads = self.params.get('nr_threads', default=50)
    # Check for basic utilities. The loop defines 'package'; the old
    # code referenced an undefined name in its error path (NameError)
    # and only checked gcc although make and patch are used below.
    smm = SoftwareManager()
    for package in ['gcc', 'make', 'patch']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Fail to install %s required for this test."
                        % package)
    data_dir = os.path.abspath(self.datadir)
    shutil.copyfile(os.path.join(data_dir, 'tlbflush.c'),
                    os.path.join(self.srcdir, 'tlbflush.c'))
    os.chdir(self.srcdir)
    # (Removed a stray debug copy of tlbflush.c to /root/pp/.)
    tlbflush_patch = 'patch -p1 < %s' % (
        os.path.join(data_dir, 'tlbflush.patch'))
    process.run(tlbflush_patch, shell=True)
    cmd = 'gcc -DFILE_SIZE=$((128*1048576)) -g -O2 tlbflush.c \
        -lpthread -o tlbflush'
    process.run(cmd, shell=True)
def setUp(self):
    """
    Build stressapptest from the upstream master archive and compute
    the defaults (thread count, memory size, target file) for the test.
    """
    self.test_file = self.params.get('tmp_file', default='/tmp/dummy')
    self.duration = self.params.get('duration', default='30')
    self.threads = self.params.get(
        'threads', default=cpu.online_cpus_count())
    self.size = self.params.get(
        'memory_to_test', default=int(0.9 * memory.meminfo.MemFree.m))
    smm = SoftwareManager()
    for package in ['gcc', 'libtool', 'autoconf', 'automake', 'make']:
        if not smm.check_installed(package) and not smm.install(package):
            # Fixed message: the adjacent literals used to join as
            # "...needed forthe test..." (missing space).
            self.cancel("Failed to install %s, which is needed for "
                        "the test to be run" % package)
    if not os.path.exists(self.test_file):
        try:
            os.mknod(self.test_file)
        except OSError:
            self.cancel("Skipping test since test file creation failed")
    loc = ["https://github.com/stressapptest/"
           "stressapptest/archive/master.zip"]
    tarball = self.fetch_asset("stressapp.zip", locations=loc,
                               expire='7d')
    archive.extract(tarball, self.workdir)
    self.sourcedir = os.path.join(self.workdir, 'stressapptest-master')
    os.chdir(self.sourcedir)
    process.run('./configure', shell=True)
    build.make(self.sourcedir)
def setUp(self):
    '''
    Build Bcc Test
    Source: https://github.com/iovisor/bcc
    '''
    # Check for basic utilities
    detected_distro = distro.detect().name.lower()
    smm = SoftwareManager()
    # TODO: Add support for other distributions
    if not detected_distro == "ubuntu":
        # Fixed message typo: "Upsupported" -> "Unsupported".
        self.cancel("Unsupported OS %s" % detected_distro)
    for package in ['bison', 'build-essential', 'cmake', 'flex',
                    'libedit-dev', 'libllvm3.8', 'llvm-3.8-dev',
                    'libclang-3.8-dev', 'python', 'zlib1g-dev',
                    'libelf-dev', 'clang-format-3.8', 'python-netaddr',
                    'python-pyroute2', 'arping', 'iperf', 'netperf',
                    'ethtool']:
        if not smm.check_installed(package) and not smm.install(package):
            # Fixed missing space between the joined literals
            # ("...needed forthe test...").
            self.cancel("Failed to install %s, which is needed for "
                        "the test to be run" % package)
    locations = ["https://github.com/iovisor/bcc/archive/master.zip"]
    tarball = self.fetch_asset("bcc.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.sourcedir = os.path.join(self.srcdir, 'bcc-master')
    # Out-of-tree cmake build under <src>/build.
    os.makedirs('%s/build' % self.sourcedir)
    self.builddir = '%s/build' % self.sourcedir
    os.chdir(self.builddir)
    process.run('cmake .. -DCMAKE_INSTALL_PREFIX=/usr', shell=True)
    build.make(self.builddir)
def setUp(self):
    '''
    Build Connectathon
    Source: git://git.linux-nfs.org/projects/steved/cthon04.git
    '''
    self.nfail = 0
    # Check for root permission; cancel cleanly instead of calling
    # bare exit(), which would kill the whole avocado runner process.
    if os.geteuid() != 0:
        self.cancel("You need to have root privileges to run this script."
                    "\nPlease try again, using 'sudo'. Exiting.")
    # Check for basic utilities
    smm = SoftwareManager()
    detected_distro = distro.detect()
    packages = ['gcc', 'make']
    if detected_distro.name == "SuSE":
        packages.extend(['git-core'])
    else:
        packages.extend(['git'])
    for package in packages:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Fail to install %s required for this test."
                        % package)
    # Scratch directory used by the test methods.
    self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    git.get_repo('git://git.linux-nfs.org/projects/steved/cthon04.git',
                 destination_dir=self.srcdir)
    os.chdir(self.srcdir)
    build.make(self.srcdir)
def setUp(self):
    '''
    Build linsched Test
    Source: https://github.com/thejinxters/linux-scheduler-testing
    '''
    # Install build tooling; SuSE names its git package differently.
    pkg_mgr = SoftwareManager()
    needed = ['gcc', 'make', 'patch']
    needed.append('git-core' if distro.detect().name == "SuSE" else 'git')
    for pkg in needed:
        if not pkg_mgr.check_installed(pkg) and not pkg_mgr.install(pkg):
            self.cancel(
                "Fail to install %s required for this test." % pkg)
    self.args = self.params.get('args', default='pi 100')
    git.get_repo('https://github.com/thejinxters/linux-scheduler-testing',
                 destination_dir=self.srcdir)
    os.chdir(self.srcdir)
    # Best-effort patch: a failure (e.g. already applied) is tolerated.
    fix_patch = 'patch -p1 < %s' % (
        os.path.join(self.datadir, 'fix.patch'))
    process.run(fix_patch, shell=True, ignore_status=True)
    build.make(self.srcdir)
def setUp(self):
    """
    Download 'nvme-cli'.

    Verifies the NVMe controller exists, checks the required python
    tooling, unpacks the nvme-cli test suite and writes its
    config.json pointing at the device under test.
    """
    self.device = self.params.get('device', default='/dev/nvme0')
    self.disk = self.params.get('disk', default='/dev/nvme0n1')
    cmd = 'ls %s' % self.device
    # Value comparison: the old "is not 0" relied on CPython small-int
    # caching and raises SyntaxWarning on modern interpreters.
    if process.system(cmd, ignore_status=True) != 0:
        self.cancel("%s does not exist" % self.device)
    smm = SoftwareManager()
    if not smm.check_installed("nvme-cli") and not \
            smm.install("nvme-cli"):
        # self.skip() was removed from avocado; cancel instead.
        self.cancel('nvme-cli is needed for the test to be run')
    python_packages = pip.get_installed_distributions()
    python_packages_list = [i.key for i in python_packages]
    python_pkgs = ['nose', 'nose2', 'pep8', 'flake8', 'pylint', 'epydoc']
    for py_pkg in python_pkgs:
        if py_pkg not in python_packages_list:
            self.cancel("python package %s not installed" % py_pkg)
    url = 'https://codeload.github.com/linux-nvme/nvme-cli/zip/master'
    tarball = self.fetch_asset("nvme-cli-master.zip", locations=[url],
                               expire='7d')
    archive.extract(tarball, self.teststmpdir)
    self.nvme_dir = os.path.join(self.teststmpdir, "nvme-cli-master")
    # Log instead of the old Python-2-only bare "print".
    self.log.info("%s", os.listdir(self.nvme_dir))
    os.chdir(os.path.join(self.nvme_dir, 'tests'))
    # Write the config consumed by the nvme-cli test suite.
    msg = ['{']
    msg.append('    \"controller\": \"%s\",' % self.device)
    msg.append('    \"ns1\": \"%s\",' % self.disk)
    msg.append('    \"log_dir\": \"%s\"' % self.outputdir)
    msg.append('}')
    with open('config.json', 'w') as config_file:
        config_file.write("\n".join(msg))
    process.system("cat config.json")
def setUp(self):
    '''
    Build IOZone
    Source:
    http://www.iozone.org/src/current/iozone3_434.tar
    '''
    self.base_dir = os.path.abspath(self.basedir)
    pkg_mgr = SoftwareManager()
    for tool in ['gcc', 'make', 'patch']:
        if not pkg_mgr.check_installed(tool) and not pkg_mgr.install(tool):
            self.cancel("%s is needed for the test to be run" % tool)
    tarball = self.fetch_asset(
        'http://www.iozone.org/src/current/iozone3_434.tar')
    archive.extract(tarball, self.teststmpdir)
    version = os.path.basename(tarball.split('.tar')[0])
    self.sourcedir = os.path.join(self.teststmpdir, version)
    make_dir = os.path.join(self.sourcedir, 'src', 'current')
    os.chdir(make_dir)
    patch = self.params.get('patch', default='makefile.patch')
    patch = os.path.join(self.datadir, patch)
    process.run('patch -p3 < %s' % patch, shell=True)
    # Pick the iozone makefile target that matches the host arch.
    arch = distro.detect().arch
    if arch == 'ppc':
        target = 'linux-powerpc'
    elif arch in ('ppc64', 'ppc64le'):
        target = 'linux-powerpc64'
    elif arch == 'x86_64':
        target = 'linux-AMD64'
    else:
        target = 'linux'
    build.make(make_dir, extra_args=target)
def setUp(self):
    """
    Build xfstest
    Source: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
    """
    sm = SoftwareManager()
    packages = ['xfslibs-dev', 'uuid-dev', 'libtool-bin', 'e2fsprogs',
                'automake', 'gcc', 'libuuid1', 'quota', 'attr',
                'libattr1-dev', 'make', 'libacl1-dev', 'xfsprogs',
                'libgdbm-dev', 'gawk', 'fio', 'dbench', 'uuid-runtime']
    for package in packages:
        if not sm.check_installed(package) and not sm.install(package):
            # cancel (not error): missing build dependencies are an
            # environment issue, consistent with the rest of the suite.
            self.cancel("Fail to install %s required for this test."
                        % package)
    self.test_range = self.params.get('test_range', default=None)
    if self.test_range is None:
        # A missing parameter is a configuration problem, not a
        # product failure, so cancel rather than fail.
        self.cancel('Please provide a test_range.')
    self.skip_dangerous = self.params.get('skip_dangerous', default=True)
    git.get_repo('git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git',
                 destination_dir=self.srcdir)
    data_dir = os.path.abspath(self.datadir)
    shutil.copyfile(os.path.join(data_dir, 'group'),
                    os.path.join(self.srcdir, 'group'))
    build.make(self.srcdir)
    self.available_tests = self._get_available_tests()
    self.test_list = self._create_test_list()
    self.log.info("Tests available in srcdir: %s",
                  ", ".join(self.available_tests))
    # The fsgqa users may already exist from a previous run; don't
    # abort setUp when useradd reports that.
    process.run('useradd fsgqa', sudo=True, ignore_status=True)
    process.run('useradd 123456-fsgqa', sudo=True, ignore_status=True)
def setUp(self):
    '''
    Build ebizzy
    Source: http://liquidtelecom.dl.sourceforge.net/project/ebizzy/ebizzy/0.3
    /ebizzy-0.3.tar.gz
    '''
    sm = SoftwareManager()
    if not sm.check_installed("gcc") and not sm.install("gcc"):
        # cancel (not error): a missing compiler is an environment
        # problem, consistent with the rest of this suite.
        self.cancel("Gcc is needed for the test to be run")
    tarball = self.fetch_asset('http://liquidtelecom.dl.sourceforge.net'
                               '/project/ebizzy/ebizzy/0.3'
                               '/ebizzy-0.3.tar.gz')
    data_dir = os.path.abspath(self.datadir)
    archive.extract(tarball, self.srcdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.srcdir = os.path.join(self.srcdir, version)
    patch = self.params.get(
        'patch', default='Fix-build-issues-with-ebizzy.patch')
    os.chdir(self.srcdir)
    p1 = 'patch -p0 < %s/%s' % (data_dir, patch)
    process.run(p1, shell=True)
    # Run ./configure only when the tarball ships one.
    process.run('[ -x configure ] && ./configure', shell=True)
    build.make(self.srcdir)
def setUp(self):
    '''
    Build Rmaptest
    Source:
    https://www.kernel.org/pub/linux/kernel/people/mbligh/tools/rmap-test.c
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.cancel('Gcc is needed for the test to be run')
    rmaptest = self.fetch_asset('https://www.kernel.org/pub/'
                                'linux/kernel/people/mbligh/'
                                'tools/rmap-test.c', expire='7d')
    shutil.copyfile(rmaptest, os.path.join(self.workdir, 'rmap-test.c'))
    os.chdir(self.workdir)
    # Honour a caller-provided compiler.
    if 'CC' in os.environ:
        cc = '$CC'
    else:
        cc = 'cc'
    # Do not ignore the compile status: a failed build should surface
    # here instead of as a confusing failure later in the test.
    process.system('%s -Wall -o rmaptest rmap-test.c' % cc)
def setUp(self):
    '''
    Build numatop Test
    Source: https://github.com/01org/numatop.git
    '''
    # Check for basic utilities
    # TODO: Add support for other distributions
    self.numa_pid = None
    detected_distro = distro.detect().name.lower()
    if not detected_distro == "ubuntu":
        # Fixed message typo: "Upsupported" -> "Unsupported".
        self.cancel("Unsupported OS %s" % detected_distro)
    smm = SoftwareManager()
    for package in ['gcc', 'numatop', 'make', 'libnuma-dev']:
        if not smm.check_installed(package) and not smm.install(package):
            # Fixed missing space between the joined literals
            # ("...needed forthe test...").
            self.cancel("Failed to install %s, which is needed for "
                        "the test to be run" % package)
    locations = ["https://github.com/01org/numatop/archive/master.zip"]
    tarball = self.fetch_asset("numatop.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.sourcedir = os.path.join(self.srcdir, 'numatop-master')
    os.chdir(self.sourcedir)
    build.make(self.sourcedir, extra_args='test')
def setUp(self):
    """
    Read the workload parameters, install the C/C++ tool chain, copy
    the sources into the per-test scratch directory and build them.
    """
    smm = SoftwareManager()
    self.minthreads = self.params.get(
        'minthrd', default=(500 + cpu.online_cpus_count()))
    self.maxthreads = self.params.get('maxthrd', default=None)
    self.iothreads = self.params.get('iothrd', default=self.minthreads/2)
    self.maxmem = self.params.get('maxmem', default=int(
        memory.meminfo.MemFree.m / self.minthreads))
    self.maxio = self.params.get('maxio', default=None)
    self.longthreads = self.params.get('longthrd', default=False)
    self.shrtthreads = self.params.get('shortthrd', default=False)
    self.time = self.params.get('time', default=100)
    self.iotime = self.params.get('iotime', default=50)
    # Long-thread and short-thread modes are mutually exclusive.
    if self.longthreads and self.shrtthreads:
        self.cancel('Please choose right inputs')
    dist = distro.detect()
    # The C++ compiler package is named per distro family.
    pkg_list = ['gcc']
    if dist.name == 'Ubuntu':
        pkg_list.append('g++')
    elif dist.name in ['SuSE', 'fedora', 'rhel']:
        pkg_list.append('gcc-c++')
    for pkg in pkg_list:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel('%s is needed for the test to be run' % pkg)
    for src_file in ['dwh.cpp', 'Makefile']:
        self.copyutil(src_file)
    os.chdir(self.teststmpdir)
    if dist.name in ['fedora', 'rhel']:
        process.system('patch -p0 < %s' % self.get_data('fofd.patch'),
                       shell=True)
    build.make(self.teststmpdir)
def setUp(self):
    '''
    Build Hackbench
    Source:
    http://people.redhat.com/~mingo/cfs-scheduler/tools/hackbench.c
    '''
    self._threshold_time = self.params.get('time_val', default=None)
    self._num_groups = self.params.get('num_groups', default=90)
    self._iterations = self.params.get('iterations', default=1)
    self.results = None
    pkg_mgr = SoftwareManager()
    if not pkg_mgr.check_installed("gcc") and not pkg_mgr.install("gcc"):
        self.cancel("Gcc is needed for the test to be run")
    source = self.fetch_asset('http://people.redhat.com'
                              '/~mingo/cfs-scheduler/'
                              'tools/hackbench.c')
    shutil.copyfile(source, os.path.join(self.workdir, 'hackbench.c'))
    os.chdir(self.workdir)
    # Prefer a caller-supplied compiler, falling back to cc.
    compiler = '$CC' if 'CC' in os.environ else 'cc'
    process.system('%s hackbench.c -o hackbench -lpthread' % compiler)
def setUp(self):
    # Verify memory-hotplug support, build the 'stress' tool if it is
    # not packaged, read the workload parameters and bring
    # hot-pluggable memory blocks online when auto-onlining is off.
    if not memory.check_hotplug():
        self.cancel("UnSupported : memory hotplug not enabled\n")
    sm = SoftwareManager()
    # Build stress from source only when no distro package is usable.
    if not sm.check_installed('stress') and not sm.install('stress'):
        tarball = self.fetch_asset(
            'https://people.seas.harvard.edu/~apw/stress/stress-1.0.4.tar.gz')
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(
            self.teststmpdir, os.path.basename(tarball.split('.tar.')[0]))
        os.chdir(self.sourcedir)
        process.run('[ -x configure ] && ./configure', shell=True)
        build.make(self.sourcedir)
        build.make(self.sourcedir, extra_args='install')
    # Workload knobs: iteration count, stress runtime (seconds), VM/IO
    # worker counts and the fraction of memory blocks to exercise.
    self.iteration = self.params.get('iteration', default=1)
    self.stresstime = self.params.get('stresstime', default=10)
    self.vmcount = self.params.get('vmcount', default=4)
    self.iocount = self.params.get('iocount', default=4)
    self.memratio = self.params.get('memratio', default=5)
    # 'mem_path', get_hotpluggable_blocks() and clear_dmesg() are
    # module-level definitions elsewhere in this file.
    self.blocks_hotpluggable = get_hotpluggable_blocks(
        (os.path.join('%s', 'memory*') % mem_path), self.memratio)
    if os.path.exists("%s/auto_online_blocks" % mem_path):
        # If the kernel does not auto-online new blocks, online the
        # selected blocks ourselves before the test starts.
        if not self.__is_auto_online():
            self.hotplug_all(self.blocks_hotpluggable)
    clear_dmesg()
def setUp(self):
    '''
    Build FileBench
    Source:
    https://github.com/filebench/filebench/releases/download/1.5-alpha3/filebench-1.5-alpha3.tar.gz
    '''
    # Make sure the autotools build chain is present.
    pkg_mgr = SoftwareManager()
    for dep in ['libtool', 'automake', 'autoconf', 'bison', 'gcc',
                'flex']:
        if not pkg_mgr.check_installed(dep) and not pkg_mgr.install(dep):
            self.cancel(dep + ' is needed for the test to be run')
    name_version = 'filebench-1.5-alpha3'
    tarball = self.fetch_asset('https://github.com/filebench/'
                               'filebench/releases/download/1.5-alpha3/'
                               '%s.tar.gz' % name_version)
    archive.extract(tarball, self.workdir)
    # Install into a scratch prefix so nothing leaks onto the system.
    self.install_prefix = os.path.join(self.workdir, 'install_prefix')
    build_dir = os.path.join(self.workdir, name_version)
    os.chdir(build_dir)
    process.run('./configure --prefix=%s' % self.install_prefix,
                shell=True)
    build.make(build_dir)
    build.make(build_dir, extra_args='install')
def setUp(self):
    """
    Copy the tlbflush sources from the data directory, apply the
    bundled patch and compile the benchmark binary.
    """
    self.tlbflush_max_entries = self.params.get('entries', default=200)
    self.tlbflush_iteration = self.params.get('iterations', default=50)
    self.nr_threads = self.params.get('nr_threads', default=50)
    # Check for basic utilities
    pkg_mgr = SoftwareManager()
    for tool in ['gcc', 'make', 'patch']:
        if not pkg_mgr.check_installed(tool) and not pkg_mgr.install(tool):
            self.cancel("%s is needed for this test." % tool)
    data_dir = os.path.abspath(self.datadir)
    shutil.copyfile(os.path.join(data_dir, 'tlbflush.c'),
                    os.path.join(self.srcdir, 'tlbflush.c'))
    os.chdir(self.srcdir)
    process.run('patch -p1 < %s'
                % os.path.join(data_dir, 'tlbflush.patch'), shell=True)
    cmd = 'gcc -DFILE_SIZE=$((128*1048576)) -g -O2 tlbflush.c \
        -lpthread -o tlbflush'
    process.run(cmd, shell=True)
def setUp(self):
    """
    Build 'fio'.
    """
    default_url = "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz"
    url = self.params.get('fio_tool_url', default=default_url)
    self.disk = self.params.get('disk', default=None)
    self.dir = self.params.get('dir', default=self.srcdir)
    fstype = self.params.get('fs', default='ext4')
    # Unpack and build fio from the requested tarball.
    tarball = self.fetch_asset(url)
    archive.extract(tarball, self.teststmpdir)
    fio_version = os.path.basename(tarball.split('.tar.')[0])
    self.sourcedir = os.path.join(self.teststmpdir, fio_version)
    build.make(self.sourcedir)
    smm = SoftwareManager()
    # btrfs needs its userspace tools installed on Ubuntu.
    if fstype == 'btrfs' and distro.detect().name == 'Ubuntu':
        if not smm.check_installed("btrfs-tools") and not \
                smm.install("btrfs-tools"):
            self.cancel('btrfs-tools is needed for the test to be run')
    if self.disk is not None:
        # Re-create the filesystem on the target disk and mount it.
        self.part_obj = Partition(self.disk, mountpoint=self.dir)
        self.log.info("Unmounting disk/dir before creating file system")
        self.part_obj.unmount()
        self.log.info("creating file system")
        self.part_obj.mkfs(fstype)
        self.log.info("Mounting disk %s on directory %s",
                      self.disk, self.dir)
        self.part_obj.mount()
def setUp(self):
    '''
    Install the basic packages to support perf
    '''
    # Basic build utilities plus the distro-specific perf package.
    smm = SoftwareManager()
    detected_distro = distro.detect()
    self.distro_name = detected_distro.name
    deps = ['gcc', 'make']
    if 'Ubuntu' in self.distro_name:
        # Ubuntu ships perf inside the kernel-versioned tools packages.
        deps += ['linux-tools-common',
                 'linux-tools-%s' % platform.uname()[2]]
    elif self.distro_name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.append('perf')
    else:
        self.cancel("Install the package for perf supported\
                by %s" % detected_distro.name)
    for pkg in deps:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel('%s is needed for the test to be run' % pkg)
    # Build the uprobe target program in the scratch directory.
    shutil.copyfile(self.get_data('uprobe.c'),
                    os.path.join(self.teststmpdir, 'uprobe.c'))
    shutil.copyfile(self.get_data('Makefile'),
                    os.path.join(self.teststmpdir, 'Makefile'))
    build.make(self.teststmpdir)
    os.chdir(self.teststmpdir)
    # Command templates used later by the test body.
    self.temp_file = tempfile.NamedTemporaryFile().name
    self.cmdProbe = "perf probe -x"
    self.recProbe = "perf record -o %s -e probe_uprobe_test:doit" % \
        self.temp_file
    self.report = "perf report --input=%s" % self.temp_file
    self.distro_version = detected_distro.version
def setUp(self):
    '''
    Build VA Test
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    self.scenario_arg = int(self.params.get('scenario_arg', default=1))
    # Hugepage counts to configure per scenario.
    dic = {2: 1024, 3: 1024, 4: 131072, 5: 1, 6: 1, 7: 2}
    # range(1, 8) so that scenario 7 (present in 'dic' and in the
    # message below) is accepted; range(1, 7) wrongly rejected it.
    if self.scenario_arg not in range(1, 8):
        self.cancel("Test need to skip as scenario will be 1-7")
    if self.scenario_arg in [2, 3, 4]:
        if memory.meminfo.Hugepagesize.mb != 16:
            self.cancel(
                "Test need to skip as 16MB huge need to configured")
    elif self.scenario_arg in [5, 6, 7]:
        if memory.meminfo.Hugepagesize.gb != 16:
            self.cancel(
                "Test need to skip as 16GB huge need to configured")
    if self.scenario_arg != 1:
        memory.set_num_huge_pages(dic[self.scenario_arg])
    for packages in ['gcc', 'make']:
        if not smm.check_installed(packages) and not smm.install(packages):
            # Fixed AttributeError: the old code called self.cancle().
            self.cancel('%s is needed for the test to be run' % packages)
    shutil.copyfile(os.path.join(self.datadir, 'va_test.c'),
                    os.path.join(self.teststmpdir, 'va_test.c'))
    shutil.copyfile(os.path.join(self.datadir, 'Makefile'),
                    os.path.join(self.teststmpdir, 'Makefile'))
    build.make(self.teststmpdir)
def setUp(self):
    '''
    Use 85% of memory with/without many process forked
    WARNING: System may go out-of-memory based on the available resource
    '''
    pkg_mgr = SoftwareManager()
    self.itern = int(self.params.get('iterations', default='10'))
    self.procs = int(self.params.get('procs', default='1'))
    self.minmem = int(self.params.get('minmem', default='10'))
    self.fails = []
    # All three knobs must be non-zero for the workload to make sense.
    if not (self.itern and self.procs and self.minmem):
        self.cancel(
            'Please use a non-zero value for number'
            ' of iterations, processes and memory to be used')
    # Target 85% of currently free memory (MB).
    self.freemem = int((0.85 * memory.freememtotal()) / 1024)
    # Check for basic utilities
    for tool in ['gcc', 'make']:
        if not pkg_mgr.check_installed(tool) and not pkg_mgr.install(tool):
            self.cancel('%s is needed for the test to be run' % tool)
    shutil.copyfile(self.get_data('forkoff.c'),
                    os.path.join(self.teststmpdir, 'forkoff.c'))
    shutil.copyfile(self.get_data('Makefile'),
                    os.path.join(self.teststmpdir, 'Makefile'))
    build.make(self.teststmpdir)
def setUp(self):
    """
    Checking if the required packages are installed,
    if not found specific packages will be installed.
    """
    smm = SoftwareManager()
    if not smm.check_installed("lsscsi") and not smm.install("lsscsi"):
        self.cancel("Unable to install lsscsi")
    # Test parameters from the yaml file
    self.crtl_no = self.params.get('crtl_no')
    self.channel_no = self.params.get('channel_no')
    self.disk_no = self.params.get('disk_no', default="").split(",")
    self.pci_id = self.params.get('pci_id', default="").split(",")
    self.logicaldrive = self.params.get('logicaldrive')
    self.diskoverwrite = self.params.get('diskoverwrite')
    self.fs_type = self.params.get('fs_type')
    self.mount_point = self.params.get('mount_point')
    self.http_path = self.params.get('http_path')
    self.tool_name = self.params.get('tool_name')
    # Gets the list of PCIIDs on the system (one logical shell line; the
    # original used backslash-newline continuations inside the string,
    # which the shell joined the same way)
    cmd = (r'for device in $(lspci | awk \'{print $1}\') ; do echo '
           r'$(lspci -vmm -nn -s $device | grep "\[" | awk \'{print $NF}\' '
           r'| sed -e "s/\]//g" | sed -e "s/\[//g" | tr \'\\n\' \' \' '
           r'| awk \'{print $2,$3,$4,$5}\') ; done')
    pci_id = self.cmdop_list(cmd)
    pci_id_formatted = []
    for i in pci_id.splitlines():
        pci_id_formatted.append(str(i.replace(" ", ":")))
    # Check if all the yaml parameters are entered.
    # BUGFIX: the original compared with "is ''" (identity, not equality),
    # which is unreliable and a SyntaxWarning on modern Python.
    if (self.crtl_no == '' or self.channel_no == '' or
            self.disk_no == '' or self.pci_id == '' or
            len(self.disk_no) <= 1 or self.fs_type == '' or
            self.mount_point == '' or self.tool_name == '' or
            self.http_path == ''):
        self.cancel(" Test skipped!!, please ensure yaml parameters are "
                    "entered or disk count is more than 1")
    elif self.comp(self.pci_id, pci_id_formatted) == 1:
        self.cancel(" Test skipped!!, PMC controller not available")
    detected_distro = distro.detect()
    if not smm.check_installed("Arcconf"):
        if detected_distro.name == "Ubuntu":
            http_repo = "%s%s.deb" % (self.http_path, self.tool_name)
            self.repo = self.fetch_asset(http_repo, expire='10d')
            cmd = "dpkg -i %s" % self.repo
        else:
            http_repo = "%s%s.rpm" % (self.http_path, self.tool_name)
            self.repo = self.fetch_asset(http_repo, expire='10d')
            cmd = "rpm -ivh %s" % self.repo
        # BUGFIX: process.run() returns a CmdResult object (never == 0)
        # and raises on failure, so the original cancel could never fire;
        # check the exit status explicitly instead.
        if process.system(cmd, shell=True, ignore_status=True) != 0:
            self.cancel("Unable to install arcconf")
    cmd = "lsscsi | grep LogicalDrv | awk '{print $7}'"
    self.os_drive = self.cmdop_list(cmd)
    if self.diskoverwrite == 'Y' and self.os_drive != "":
        # Delete a logical drive only if it is not OS drive
        cmd = "df -h /boot | grep %s" % self.os_drive
        if process.system(cmd, timeout=300, ignore_status=True,
                          shell=True) == 0:
            self.cancel("Test Skipped!! OS disk requested for removal")
        self.log.info("Deleting the default logical drive %s"
                      % (self.logicaldrive))
        cmd = "echo y | arcconf delete %s logicaldrive %s" % \
            (self.crtl_no, self.logicaldrive)
        self.check_pass(cmd, "Failed to delete Logical drive")
def setUp(self):
    '''
    To check and install dependencies for the test
    '''
    detected_distro = distro.detect()
    smm = SoftwareManager()
    depends = []
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0.  This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    if detected_distro.name == "Ubuntu":
        depends.extend(["openssh-client", "iputils-ping"])
    elif detected_distro.name in ["rhel", "fedora", "centos", "redhat"]:
        depends.extend(["openssh-clients", "iputils"])
    else:
        depends.extend(["openssh", "iputils"])
    for pkg in depends:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("%s package is need to test" % pkg)
    self.mode = self.params.get("bonding_mode", default="")
    # The bonding mode is mandatory only for the setup/run phases
    if 'setup' in str(self.name) or 'run' in str(self.name):
        if not self.mode:
            self.cancel("test skipped because mode not specified")
    interfaces = netifaces.interfaces()
    self.peer_public_ip = self.params.get("peer_public_ip", default="")
    self.user = self.params.get("user_name", default="root")
    self.password = self.params.get("peer_password", '*', default="None")
    # NOTE(review): "".split(" ") yields [''], so this emptiness check
    # never fires when the parameter is absent — confirm params are
    # always supplied (same applies to peer_interfaces below)
    self.host_interfaces = self.params.get("bond_interfaces",
                                           default="").split(" ")
    if not self.host_interfaces:
        self.cancel("user should specify host interfaces")
    self.peer_interfaces = self.params.get("peer_interfaces",
                                           default="").split(" ")
    # NOTE(review): the loop variable is deliberately an attribute;
    # after the loop self.host_interface holds the LAST interface and
    # is reused below for the InfiniBand prefix check
    for self.host_interface in self.host_interfaces:
        if self.host_interface not in interfaces:
            self.cancel("interface is not available")
    self.peer_first_ipinterface = self.params.get("peer_ip", default="")
    if not self.peer_interfaces or self.peer_first_ipinterface == "":
        self.cancel("peer machine should available")
    self.ipaddr = self.params.get("host_ips", default="").split(" ")
    self.netmask = self.params.get("netmask", default="")
    self.localhost = LocalHost()
    # Only the setup phase (re)configures the host interfaces
    if 'setup' in str(self.name.name):
        for ipaddr, interface in zip(self.ipaddr, self.host_interfaces):
            networkinterface = NetworkInterface(interface, self.localhost)
            try:
                networkinterface.flush_ipaddr()
                networkinterface.add_ipaddr(ipaddr, self.netmask)
                networkinterface.save(ipaddr, self.netmask)
            except Exception:
                # Best-effort: persist the address even if flush/add failed
                networkinterface.save(ipaddr, self.netmask)
            networkinterface.bring_up()
    # Bonding driver knobs and sysfs/procfs control paths
    self.miimon = self.params.get("miimon", default="100")
    self.fail_over_mac = self.params.get("fail_over_mac", default="2")
    self.downdelay = self.params.get("downdelay", default="0")
    self.bond_name = self.params.get("bond_name", default="tempbond")
    self.net_path = "/sys/class/net/"
    self.bond_status = "/proc/net/bonding/%s" % self.bond_name
    self.bond_dir = os.path.join(self.net_path, self.bond_name)
    self.bonding_slave_file = "%s/bonding/slaves" % self.bond_dir
    self.bonding_masters_file = "%s/bonding_masters" % self.net_path
    self.peer_bond_needed = self.params.get("peer_bond_needed",
                                            default=False)
    self.peer_wait_time = self.params.get("peer_wait_time", default=20)
    self.sleep_time = int(self.params.get("sleep_time", default=10))
    self.mtu = self.params.get("mtu", default=1500)
    # Remove stale "avocado-master-*" files left under /root/.ssh
    for root, dirct, files in os.walk("/root/.ssh"):
        for file in files:
            if file.startswith("avocado-master-"):
                path = os.path.join(root, file)
                os.remove(path)
    # Interfaces named ib* are treated as InfiniBand
    self.ib = False
    if self.host_interface[0:2] == 'ib':
        self.ib = True
    self.log.info("Bond Test on IB Interface? = %s", self.ib)
    '''
    An individual interface, that has a LACP PF, cannot communicate
    without being bonded. So the test uses the public ip address to
    create an SSH session instead of the private one when setting up a
    bonding interface.
    '''
    if self.mode == "4" and "setup" in str(self.name.name):
        self.session = Session(self.peer_public_ip, user=self.user,
                               password=self.password)
    else:
        self.session = Session(self.peer_first_ipinterface, user=self.user,
                               password=self.password)
    if not self.session.connect():
        '''
        LACP bond interface takes some time to get it to ping peer after
        it is setup. This code block tries at most 5 times to get it to
        connect to the peer.
        '''
        if self.mode == "4":
            connect = False
            for _ in range(5):
                if self.session.connect():
                    connect = True
                    self.log.info("Was able to connect to peer.")
                    break
                time.sleep(5)
            if not connect:
                self.cancel("failed connecting to peer")
        else:
            self.cancel("failed connecting to peer")
    self.setup_ip()
    self.err = []
    # Same public-vs-private address choice for the RemoteHost handle
    if self.mode == "4" and "setup" in str(self.name.name):
        self.remotehost = RemoteHost(self.peer_public_ip, self.user,
                                     password=self.password)
    else:
        self.remotehost = RemoteHost(self.peer_first_ipinterface, self.user,
                                     password=self.password)
    if 'setup' in str(self.name.name):
        # Apply the requested MTU on peer first, then on the host side
        for interface in self.peer_interfaces:
            peer_networkinterface = NetworkInterface(
                interface, self.remotehost)
            if peer_networkinterface.set_mtu(self.mtu) is not None:
                self.cancel("Failed to set mtu in peer")
        for host_interface in self.host_interfaces:
            self.networkinterface = NetworkInterface(
                host_interface, self.localhost)
            if self.networkinterface.set_mtu(self.mtu) is not None:
                self.cancel("Failed to set mtu in host")
def test(self):
    """
    Toggle each distro-specific service off/on (or on/off), waiting for
    each transition, and verify its original status is restored.
    Fails with the list of services that could not be toggled/restored.
    """
    detected_distro = distro.detect()
    parser = configparser.ConfigParser()
    parser.read(self.get_data('services.cfg'))
    services_list = parser.get(detected_distro.name,
                               'services').split(',')
    smm = SoftwareManager()
    deps = []
    if detected_distro.name == 'SuSE':
        deps.extend(['ppc64-diag', 'libvirt-daemon'])
        if detected_distro.version >= 15:
            services_list.append('firewalld')
        else:
            services_list.append('SuSEfirewall2')
    elif detected_distro.name == 'Ubuntu':
        deps.extend(['opal-prd'])
        if detected_distro.version >= 17:
            services_list.remove('networking')
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel(' %s is needed for the test to be run' % package)
    # BUGFIX: use a context manager so /proc/cpuinfo is not leaked
    with open('/proc/cpuinfo', 'r') as cpuinfo:
        is_powernv = 'PowerNV' in cpuinfo.read()
    if is_powernv:
        services_list.extend(['opal_errd', 'opal-prd'])
        # BMC-based systems do not run opal_errd
        if os.path.exists('/proc/device-tree/bmc'):
            services_list.remove('opal_errd')
    else:
        services_list.extend(['rtas_errd'])
    services_failed = []
    runner = process.run
    for service in services_list:
        service_obj = SpecificServiceManager(service, runner)
        self.log.info("Checking %s service" % service)
        if service_obj.is_enabled() is False:
            self.log.info("%s service Not Found !!!" % service)
            services_failed.append(service)
            continue
        original_status = service_obj.status()
        if original_status is True:
            # Running service: stop it, verify, then start it again
            service_obj.stop()
            if not wait_for(lambda: not service_obj.status(), 10):
                self.log.info("Fail to stop %s service" % service)
                services_failed.append(service)
                continue
            service_obj.start()
            wait_for(service_obj.status, 10)
        else:
            # Stopped service: start it, verify, then stop it again
            service_obj.start()
            if not wait_for(service_obj.status, 10):
                self.log.info("Fail to start %s service" % service)
                services_failed.append(service)
                continue
            service_obj.stop()
            wait_for(lambda: not service_obj.status(), 10)
        # BUGFIX: idiomatic "is not" (was "not ... is") and restore the
        # missing space in the log message ("%sservice" -> "%s service")
        if service_obj.status() is not original_status:
            self.log.info("Fail to restore original status of the %s "
                          "service" % service)
            services_failed.append(service)
    if services_failed:
        self.fail("List of services failed: %s" % services_failed)
    else:
        self.log.info("All Services Passed the ON/OFF test")
def build_htx(self):
    """
    Build 'HTX'
    """
    # Base build dependencies, plus distro-specific extras below
    packages = ['git', 'gcc', 'make']
    detected_distro = distro.detect()
    if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
        packages.extend(['gcc-c++', 'ncurses-devel', 'tar'])
    elif detected_distro.name == "Ubuntu":
        packages.extend(
            ['libncurses5', 'g++', 'ncurses-dev',
             'libncurses-dev', 'tar'])
    elif detected_distro.name == 'SuSE':
        packages.extend(['libncurses5', 'gcc-c++', 'ncurses-devel', 'tar'])
    else:
        self.cancel("Test not supported in %s" % detected_distro.name)
    smm = SoftwareManager()
    # Install each dependency locally, then the same package on the peer
    # over ssh (self.peer_user/self.peer_ip come from the test params)
    for pkg in packages:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Can not install %s" % pkg)
        cmd = "ssh %s@%s \"%s install %s\"" % (
            self.peer_user, self.peer_ip, smm.backend.base_command, pkg)
        if process.system(cmd, shell=True, ignore_status=True) != 0:
            self.cancel(
                "unable to install the package %s on peer machine "
                % pkg)
    if self.htx_url:
        # Pre-built HTX rpm: install locally, then copy/install on the
        # peer via run_command
        htx = self.htx_url.split("/")[-1]
        htx_rpm = self.fetch_asset(self.htx_url)
        process.system("rpm -ivh --force %s" % htx_rpm)
        self.run_command("wget %s -O /tmp/%s" % (self.htx_url, htx))
        self.run_command("cd /tmp")
        self.run_command("rpm -ivh --force %s" % htx)
    else:
        # Build HTX from the open-power/HTX master branch
        url = "https://github.com/open-power/HTX/archive/master.zip"
        tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
        archive.extract(tarball, self.teststmpdir)
        htx_path = os.path.join(self.teststmpdir, "HTX-master")
        os.chdir(htx_path)
        # Exercisers that cannot be built here; strip them from the
        # Makefile before the build
        exercisers = ["hxecapi_afu_dir", "hxecapi", "hxeocapi"]
        if not smm.check_installed('dapl-devel'):
            exercisers.append("hxedapl")
        for exerciser in exercisers:
            process.run("sed -i 's/%s//g' %s/bin/Makefile" % (exerciser,
                                                              htx_path))
        build.make(htx_path, extra_args='all')
        build.make(htx_path, extra_args='tar')
        process.run('tar --touch -xvzf htx_package.tar.gz')
        os.chdir('htx_package')
        if process.system('./installer.sh -f'):
            self.fail("Installation of htx fails:please refer job.log")
        # Repeat the same source build on the peer via run_command.
        # NOTE(review): this block uses 'url', which is only defined in
        # this source-build branch — confirm against upstream that it is
        # indeed nested here and not at function level.
        try:
            self.run_command("wget %s -O /tmp/master.zip" % url)
            self.run_command("cd /tmp")
            self.run_command("unzip master.zip")
            self.run_command("cd HTX-master")
            for exerciser in exercisers:
                self.run_command("sed -i 's/%s//g' bin/Makefile"
                                 % exerciser)
            self.run_command("make all")
            self.run_command("make tar")
            self.run_command("tar --touch -xvzf htx_package.tar.gz")
            self.run_command("cd htx_package")
            self.run_command("./installer.sh -f")
        except CommandFailed:
            self.cancel("HTX is not installed on Peer")
def setUp(self):
    """
    Build xfstest
    Source: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git

    Installs the distro-specific build dependencies, prepares the test
    and scratch devices (loop devices or real disks), rewrites
    local.config with the chosen devices/mount points, builds xfstests
    and creates the users/mount points the suite expects.
    """
    self.use_dd = False
    root_fs = process.system_output(
        "df -T / | awk 'END {print $2}'", shell=True).decode("utf-8")
    if root_fs in ['ext3', 'ext4']:
        self.use_dd = True
    sm = SoftwareManager()
    self.detected_distro = distro.detect()
    packages = ['e2fsprogs', 'automake', 'gcc', 'quota', 'attr',
                'make', 'xfsprogs', 'gawk']
    if 'Ubuntu' in self.detected_distro.name:
        packages.extend(
            ['xfslibs-dev', 'uuid-dev', 'libuuid1',
             'libattr1-dev', 'libacl1-dev', 'libgdbm-dev',
             'uuid-runtime', 'libaio-dev', 'fio', 'dbench',
             'btrfs-tools'])
        if '14' in self.detected_distro.version:
            packages.extend(['libtool'])
        elif '18' in self.detected_distro.version:
            packages.extend(['libtool-bin', 'libgdbm-compat-dev'])
        else:
            packages.extend(['libtool-bin'])
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    elif self.detected_distro.name in ['centos', 'fedora', 'rhel',
                                       'SuSE']:
        packages.extend(['acl', 'bc', 'dump', 'indent', 'libtool',
                         'lvm2', 'xfsdump', 'psmisc', 'sed',
                         'libacl-devel', 'libattr-devel', 'libaio-devel',
                         'libuuid-devel', 'openssl-devel',
                         'xfsprogs-devel'])
        if self.detected_distro.name == 'SuSE':
            packages.extend(['libbtrfs-devel', 'libcap-progs'])
        else:
            packages.extend(['btrfs-progs-devel'])
        if self.detected_distro.name == 'rhel' and\
                self.detected_distro.version.startswith('8'):
            packages.remove('indent')
            packages.remove('btrfs-progs-devel')
        if self.detected_distro.name in ['centos', 'fedora']:
            packages.extend(['fio', 'dbench'])
    else:
        self.cancel("test not supported in %s"
                    % self.detected_distro.name)
    for package in packages:
        if not sm.check_installed(package) and not sm.install(package):
            self.cancel("Fail to install %s required for this test."
                        % package)
    self.skip_dangerous = self.params.get('skip_dangerous', default=True)
    self.test_range = self.params.get('test_range', default=None)
    self.scratch_mnt = self.params.get(
        'scratch_mnt', default='/mnt/scratch')
    self.test_mnt = self.params.get('test_mnt', default='/mnt/test')
    self.disk_mnt = self.params.get('disk_mnt',
                                    default='/mnt/loop_device')
    self.dev_type = self.params.get('type', default='loop')
    self.fs_to_test = self.params.get('fs', default='ext4')
    if process.system('which mkfs.%s' % self.fs_to_test,
                      ignore_status=True):
        self.cancel('Unknown filesystem %s' % self.fs_to_test)
    mount = True
    self.devices = []
    self.log_devices = []
    shutil.copyfile(self.get_data('local.config'),
                    os.path.join(self.teststmpdir, 'local.config'))
    shutil.copyfile(self.get_data('group'),
                    os.path.join(self.teststmpdir, 'group'))
    if self.dev_type == 'loop':
        base_disk = self.params.get('disk', default=None)
        loop_size = self.params.get('loop_size', default='7GiB')
        if not base_disk:
            # Using root for file creation by default
            check = (int(loop_size.split('GiB')[0]) * 2) + 1
            if disk.freespace('/') / 1073741824 > check:
                self.disk_mnt = ''
                mount = False
            else:
                self.cancel('Need %s GB to create loop devices' % check)
        self._create_loop_device(base_disk, loop_size, mount)
    else:
        self.test_dev = self.params.get('disk_test', default=None)
        self.scratch_dev = self.params.get('disk_scratch', default=None)
        self.devices.extend([self.test_dev, self.scratch_dev])
    # mkfs for devices
    if self.devices:
        cfg_file = os.path.join(self.teststmpdir, 'local.config')
        self.mkfs_opt = self.params.get('mkfs_opt', default='')
        self.mount_opt = self.params.get('mount_opt', default='')
        self.log_test = self.params.get('log_test', default='')
        self.log_scratch = self.params.get('log_scratch', default='')
        # Substitute the TEST/SCRATCH device and mount-point exports in
        # local.config with the values chosen above
        with open(cfg_file, "r") as sources:
            lines = sources.readlines()
        with open(cfg_file, "w") as sources:
            for line in lines:
                if line.startswith('export TEST_DEV'):
                    sources.write(
                        re.sub(r'export TEST_DEV=.*',
                               'export TEST_DEV=%s' % self.devices[0],
                               line))
                elif line.startswith('export TEST_DIR'):
                    sources.write(
                        re.sub(r'export TEST_DIR=.*',
                               'export TEST_DIR=%s' % self.test_mnt,
                               line))
                elif line.startswith('export SCRATCH_DEV'):
                    sources.write(re.sub(
                        r'export SCRATCH_DEV=.*',
                        'export SCRATCH_DEV=%s' % self.devices[1], line))
                elif line.startswith('export SCRATCH_MNT'):
                    sources.write(
                        re.sub(r'export SCRATCH_MNT=.*',
                               'export SCRATCH_MNT=%s' % self.scratch_mnt,
                               line))
                    # SCRATCH_MNT is the last export we rewrite
                    break
        with open(cfg_file, "a") as sources:
            if self.log_test:
                sources.write('export TEST_LOGDEV="%s"\n' % self.log_test)
                self.log_devices.append(self.log_test)
            if self.log_scratch:
                sources.write('export SCRATCH_LOGDEV="%s"\n'
                              % self.log_scratch)
                self.log_devices.append(self.log_scratch)
            if self.mkfs_opt:
                sources.write('MKFS_OPTIONS="%s"\n' % self.mkfs_opt)
            if self.mount_opt:
                sources.write('MOUNT_OPTIONS="%s"\n' % self.mount_opt)
        self.logdev_opt = self.params.get('logdev_opt', default='')
        for dev in self.log_devices:
            dev_obj = partition.Partition(dev)
            dev_obj.mkfs(fstype=self.fs_to_test, args=self.mkfs_opt)
        for ite, dev in enumerate(self.devices):
            dev_obj = partition.Partition(dev)
            if self.logdev_opt:
                dev_obj.mkfs(fstype=self.fs_to_test,
                             args='%s %s=%s' % (self.mkfs_opt,
                                                self.logdev_opt,
                                                self.log_devices[ite]))
            else:
                dev_obj.mkfs(fstype=self.fs_to_test, args=self.mkfs_opt)
    git.get_repo('git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git',
                 destination_dir=self.teststmpdir)
    build.make(self.teststmpdir)
    self.available_tests = self._get_available_tests()
    self.test_list = self._create_test_list(self.test_range)
    self.log.info("Tests available in srcdir: %s",
                  ", ".join(self.available_tests))
    if not self.test_range:
        self.exclude = self.params.get('exclude', default=None)
        self.gen_exclude = self.params.get('gen_exclude', default=None)
        self.share_exclude = self.params.get('share_exclude',
                                             default=None)
        if self.exclude or self.gen_exclude or self.share_exclude:
            self.exclude_file = os.path.join(self.teststmpdir, 'exclude')
            if self.exclude:
                self._create_test_list(self.exclude, self.fs_to_test,
                                       dangerous=False)
            if self.gen_exclude:
                self._create_test_list(self.gen_exclude, "generic",
                                       dangerous=False)
            if self.share_exclude:
                self._create_test_list(self.share_exclude, "shared",
                                       dangerous=False)
    # Create the users/groups the suite expects.
    # BUGFIX: was "is not 'SuSE'" — identity comparison with a literal
    # (SyntaxWarning on modern Python, result not guaranteed); use !=
    if self.detected_distro.name != 'SuSE':
        process.run('useradd 123456-fsgqa', sudo=True)
        process.run('useradd fsgqa', sudo=True)
    else:
        process.run('useradd -m -U fsgqa', sudo=True)
        process.run('groupadd sys', sudo=True)
    if not os.path.exists(self.scratch_mnt):
        os.makedirs(self.scratch_mnt)
    if not os.path.exists(self.test_mnt):
        os.makedirs(self.test_mnt)
def setUp(self):
    """
    Set up.

    Validates the host interface and peer parameters, configures IP
    addresses and MTU on both ends, installs the required packages, and
    on SuSE builds nmap/nping from source.
    """
    self.iface = self.params.get("interface", default="")
    self.count = self.params.get("count", default="500")
    self.nping_count = self.params.get("nping_count", default="")
    self.peer_ip = self.params.get("peer_ip", default="")
    self.peer_public_ip = self.params.get("peer_public_ip", default="")
    self.drop = self.params.get("drop_accepted", default="10")
    self.host_ip = self.params.get("host_ip", default="")
    self.option = self.params.get("option", default='')
    # Check if interface exists in the system
    interfaces = netifaces.interfaces()
    if self.iface not in interfaces:
        self.cancel("%s interface is not available" % self.iface)
    if not self.peer_ip:
        self.cancel("peer ip should specify in input")
    # NOTE(review): "host_ip" is read twice (self.host_ip above and
    # self.ipaddr here) — apparently intentional duplication
    self.ipaddr = self.params.get("host_ip", default="")
    self.netmask = self.params.get("netmask", default="")
    localhost = LocalHost()
    self.networkinterface = NetworkInterface(self.iface, localhost)
    try:
        self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
        self.networkinterface.save(self.ipaddr, self.netmask)
    except Exception:
        # Best-effort: persist the address even if adding it failed
        self.networkinterface.save(self.ipaddr, self.netmask)
    self.networkinterface.bring_up()
    if not wait.wait_for(self.networkinterface.is_link_up, timeout=120):
        self.cancel(
            "Link up of interface is taking longer than 120 seconds")
    self.peer_user = self.params.get("peer_user", default="root")
    self.peer_password = self.params.get("peer_password", '*',
                                         default="None")
    self.mtu = self.params.get("mtu", default=1500)
    # Remote handles for the peer: one over the private test IP, one
    # over the public management IP
    self.remotehost = RemoteHost(self.peer_ip, self.peer_user,
                                 password=self.peer_password)
    self.peer_interface = self.remotehost.get_interface_by_ipaddr(
        self.peer_ip).name
    self.peer_networkinterface = NetworkInterface(self.peer_interface,
                                                  self.remotehost)
    self.remotehost_public = RemoteHost(self.peer_public_ip,
                                        self.peer_user,
                                        password=self.peer_password)
    self.peer_public_networkinterface = NetworkInterface(
        self.peer_interface, self.remotehost_public)
    # Apply the requested MTU on peer first, then on the host
    if self.peer_networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in peer")
    if self.networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in host")
    # Install needed packages
    smm = SoftwareManager()
    detected_distro = distro.detect()
    pkgs = ['tcpdump', 'flex', 'bison', 'gcc', 'gcc-c++', 'nmap']
    for pkg in pkgs:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("%s package Can not install" % pkg)
    if detected_distro.name == "SuSE":
        # No usable nmap package on SuSE here: fetch and build from
        # source, then smoke-test nping
        self.nmap = os.path.join(self.teststmpdir, 'nmap')
        nmap_download = self.params.get("nmap_download", default="https:"
                                        "//nmap.org/dist/"
                                        "nmap-7.80.tar.bz2")
        tarball = self.fetch_asset(nmap_download)
        self.version = os.path.basename(tarball.split('.tar')[0])
        self.n_map = os.path.join(self.nmap, self.version)
        archive.extract(tarball, self.nmap)
        os.chdir(self.n_map)
        process.system('./configure ppc64le', shell=True)
        build.make(self.n_map)
        process.system('./nping/nping -h', shell=True)
class NdctlTest(Test):
    """
    Ndctl user space tooling for Linux, which handles NVDIMM devices.
    """

    def get_default_region(self):
        """
        Get the largest region if not provided
        """
        self.plib.enable_region()
        region = self.params.get('region', default=None)
        if region:
            return region
        # No region given: pick the biggest one reported by ndctl
        regions = self.plib.run_ndctl_list('-R')
        regions = sorted(regions, key=lambda i: i['size'], reverse=True)
        return self.plib.run_ndctl_list_val(regions[0], 'dev')

    def get_unsupported_alignval(self):
        """
        Return unsupported size align based on platform
        """
        # Whichever alignment the platform does NOT use is "unsupported"
        align = self.get_size_alignval()
        if align == self.align_val['hash']:
            return self.align_val['radix']
        else:
            return self.align_val['hash']

    def get_size_alignval(self):
        """
        Return the size align restriction based on platform
        """
        # Hash MMU -> 16M alignment, radix MMU -> 2M alignment
        self.align_val = {'hash': 16777216, 'radix': 2097152}
        if 'Hash' in genio.read_file('/proc/cpuinfo').rstrip('\t\r\n\0'):
            def_align = self.align_val['hash']
        else:
            def_align = self.align_val['radix']
        return def_align

    def build_fio(self):
        """
        Install fio or build if not possible
        """
        pkg = "fio"
        # Only act when no fio binary is already on PATH
        if process.system("which %s" % pkg, ignore_status=True):
            if not self.smm.check_installed(pkg) \
                    and not self.smm.install(pkg):
                # Package install failed: build fio 2.1.10 from source
                for package in ["autoconf", "libtool", "make"]:
                    if not self.smm.check_installed(package) \
                            and not self.smm.install(package):
                        self.cancel(
                            "Fail to install %s required for this test."
                            "" % package)
                tarball = self.fetch_asset(
                    "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz")
                archive.extract(tarball, self.teststmpdir)
                fio_version = os.path.basename(tarball.split('.tar.')[0])
                sourcedir = os.path.join(self.teststmpdir, fio_version)
                build.make(sourcedir)
                return os.path.join(sourcedir, "fio")
        return pkg

    def setUp(self):
        """
        Build 'ndctl' and setup the binary.
        """
        deps = []
        self.dist = distro.detect()
        self.package = self.params.get('package', default='upstream')
        self.preserve_setup = self.params.get('preserve_change',
                                              default=False)
        self.mode_to_use = self.params.get('modes', default='fsdax')
        if self.dist.name not in ['SuSE', 'rhel']:
            self.cancel('Unsupported OS %s' % self.dist.name)
        # DAX wont work with reflink, disabling here
        self.reflink = '-m reflink=0'
        self.smm = SoftwareManager()
        if self.package == 'upstream':
            # Build ndctl/daxctl from the pmem/ndctl GitHub branch
            deps.extend(['gcc', 'make', 'automake', 'autoconf'])
            if self.dist.name == 'SuSE':
                deps.extend(['libtool', 'libkmod-devel', 'libudev-devel',
                             'systemd-devel', 'libuuid-devel-static',
                             'libjson-c-devel', 'keyutils-devel',
                             'kmod-bash-completion',
                             'bash-completion-devel'])
            elif self.dist.name == 'rhel':
                deps.extend(['libtool', 'kmod-devel', 'libuuid-devel',
                             'json-c-devel', 'systemd-devel',
                             'keyutils-libs-devel', 'jq', 'parted',
                             'libtool'])
            for pkg in deps:
                if not self.smm.check_installed(pkg) and not \
                        self.smm.install(pkg):
                    self.cancel('%s is needed for the test to be run'
                                % pkg)
            git_branch = self.params.get('git_branch', default='pending')
            location = "https://github.com/pmem/ndctl/archive/"
            location = location + git_branch + ".zip"
            tarball = self.fetch_asset("ndctl.zip", locations=location,
                                       expire='7d')
            archive.extract(tarball, self.teststmpdir)
            os.chdir("%s/ndctl-%s" % (self.teststmpdir, git_branch))
            process.run('./autogen.sh', sudo=True, shell=True)
            process.run(
                "./configure CFLAGS='-g -O2' --prefix=/usr "
                "--disable-docs "
                "--sysconfdir=/etc --libdir="
                "/usr/lib64", shell=True, sudo=True)
            build.make(".")
            # Use the freshly built binaries
            self.ndctl = os.path.abspath('./ndctl/ndctl')
            self.daxctl = os.path.abspath('./daxctl/daxctl')
        else:
            # Use the distro-packaged tools instead of building
            deps.extend(['ndctl'])
            if self.dist.name == 'rhel':
                deps.extend(['daxctl'])
            for pkg in deps:
                if not self.smm.check_installed(pkg) and not \
                        self.smm.install(pkg):
                    self.cancel('%s is needed for the test to be run'
                                % pkg)
            self.ndctl = 'ndctl'
            self.daxctl = 'daxctl'
        # ndctl list option -> json key used when extracting values
        self.opt_dict = {'-B': 'provider', '-D': 'dev', '-R': 'dev',
                         '-N': 'dev'}
        self.modes = ['raw', 'sector', 'fsdax', 'devdax']
        self.part = None
        self.disk = None
        self.plib = pmem.PMem(self.ndctl, self.daxctl)
        if not self.plib.check_buses():
            self.cancel("Test needs atleast one region")

    @avocado.fail_on(pmem.PMemException)
    def test_bus_ids(self):
        """
        Test the bus id info
        """
        vals = self.plib.run_ndctl_list('-B')
        if not vals:
            self.fail('Failed to fetch bus IDs')
        self.log.info('Available Bus provider IDs: %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_dimms(self):
        """
        Test the dimms info
        """
        vals = self.plib.run_ndctl_list('-D')
        if not vals:
            self.fail('Failed to fetch DIMMs')
        self.log.info('Available DIMMs: %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_dimm_health(self):
        """
        Test the dimm health
        """
        dimms = self.plib.run_ndctl_list('-DH')
        if not dimms:
            self.fail('Failed to fetch DIMMs')
        for dimm in dimms:
            health = self.plib.run_ndctl_list_val(dimm, 'health')
            if not health:
                self.cancel(
                    "kernel/ndctl does not support health reporting")
            if 'life_used_percentage' in health:
                if health['life_used_percentage'] != 0:
                    self.fail(
                        "DIMM has not been used, reporting says otherwise")
            if 'health_state' in health:
                if health['health_state'] != "ok":
                    self.fail("DIMM health is bad")
            if 'shutdown_state' in health:
                if health['shutdown_state'] != "clean":
                    self.fail("DIMM shutdown state is dirty")
            dim = self.plib.run_ndctl_list_val(dimm, 'dev')
            self.log.info("Dimm %s Health info: %s", dim, health)

    @avocado.fail_on(pmem.PMemException)
    def test_regions(self):
        """
        Test the regions info
        """
        # Disable then re-enable: the enabled listing must grow
        self.plib.disable_region()
        old = self.plib.run_ndctl_list('-R')
        self.plib.enable_region()
        new = self.plib.run_ndctl_list('-R')
        if len(new) <= len(old):
            self.fail('Failed to fetch regions')
        self.log.info('Available regions: %s', new)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace(self):
        """
        Test namespace
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        # Recreate one namespace per region from a clean slate
        for val in regions:
            region = self.plib.run_ndctl_list_val(val, 'dev')
            self.plib.disable_namespace(region=region)
            self.plib.destroy_namespace(region=region)
            self.plib.create_namespace(region=region)
        namespaces = self.plib.run_ndctl_list('-N')
        self.log.info('Created namespace %s', namespaces)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_unaligned(self):
        """
        Test namespace
        """
        self.plib.enable_region()
        # Use an default unaligned pagesize and make sure it fails
        align_size = memory.get_page_size()
        size = (64 * 1024 * 1024) + align_size
        regions = self.plib.run_ndctl_list('-R')
        for val in regions:
            region = self.plib.run_ndctl_list_val(val, 'dev')
            self.plib.disable_namespace(region=region)
            self.plib.destroy_namespace(region=region)
            try:
                self.plib.create_namespace(region=region, size=size,
                                           align=align_size)
            except pmem.PMemException:
                # NOTE(review): adjacent literals concatenate to
                # "failedas expected" — message is missing a space
                self.log.info("Unaligned namespace creation failed"
                              "as expected")
            else:
                self.fail(
                    "Unaligned namespace creation must have failed! ")

    @avocado.fail_on(pmem.PMemException)
    def test_disable_enable_ns(self):
        """
        Test enable disable namespace
        """
        region = self.get_default_region()
        # Legacy regions come with a fixed namespace; only create
        # namespaces on non-legacy regions.
        # NOTE(review): nesting of the creation loop inferred from the
        # flattened source — confirm against upstream.
        if (not self.plib.is_region_legacy(region)):
            size = self.plib.run_ndctl_list_val(
                self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
            if size < (3 * 64 * 1024 * 1024):
                self.cancel('Not enough memory to create namespaces')
            for _ in range(0, 3):
                self.plib.create_namespace(region=region, size='64M')
        namespaces = self.plib.run_ndctl_list('-N')
        ns_names = []
        for ns in namespaces:
            ns_names.append(self.plib.run_ndctl_list_val(ns, 'dev'))
        # 'all' exercises the bulk disable/enable path as well
        ns_names.append('all')
        for namespace in ns_names:
            self.plib.disable_namespace(namespace=namespace)
            self.plib.enable_namespace(namespace=namespace)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_modes(self):
        """
        Create different namespace types
        """
        failed_modes = []
        region = self.get_default_region()
        self.log.info("Using %s for different namespace modes", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        # Create, verify and destroy a namespace for every mode
        for mode in self.modes:
            self.plib.create_namespace(region=region, mode=mode)
            ns_json = self.plib.run_ndctl_list('-r %s -N' % region)[0]
            created_mode = self.plib.run_ndctl_list_val(ns_json, 'mode')
            if mode != created_mode:
                failed_modes.append(mode)
                self.log.error("Expected mode %s, Got %s", mode,
                               created_mode)
            else:
                self.log.info("Namespace with %s mode: %s", mode, ns_json)
            ns_name = self.plib.run_ndctl_list_val(ns_json, 'dev')
            self.plib.disable_namespace(namespace=ns_name, region=region)
            self.plib.destroy_namespace(namespace=ns_name, region=region)
        if failed_modes:
            self.fail("Namespace for %s mode failed!" % failed_modes)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_devmap(self):
        """
        Test metadata device mapping option with a namespace
        """
        region = self.get_default_region()
        m_map = self.params.get('map', default='mem')
        self.log.info("Using %s for checking device mapping", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode=self.mode_to_use,
                                   memmap=m_map)
        self.log.info("Validating device mapping")
        map_val = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s -N' % region)[0], 'map')
        if map_val != m_map:
            self.fail("Expected map:%s, Got %s" % (m_map, map_val))
        else:
            self.log.info("Metadata mapped as expected")

    def multiple_namespaces_region(self, region):
        """
        Test multiple namespace with single region
        """
        namespace_size = self.params.get('size', default=None)
        size_align = self.get_size_alignval()
        slot_count = self.plib.get_slot_count(region)
        self.log.info("Using %s for muliple namespace regions", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        # NOTE(review): cancel() is passed %-style args but the format
        # string is never interpolated — message likely prints verbatim
        if namespace_size and ((namespace_size % size_align) != 0):
            self.cancel("Size value not %d aligned %d \n",
                        size_align, namespace_size)
        region_size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        if not namespace_size:
            namespace_size = region_size // slot_count
            # Now align the namespace size
            namespace_size = (namespace_size // size_align) * size_align
        else:
            slot_count = region_size // namespace_size
        if namespace_size <= size_align:
            self.log.warn(
                "Ns size equal to pagesize, hence skipping region")
            return
        self.log.info("Creating %s namespaces", slot_count)
        for count in range(0, slot_count):
            self.plib.create_namespace(region=region,
                                       mode=self.mode_to_use,
                                       size=namespace_size)
            self.log.info("Namespace %s created", count + 1)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_namespaces_region(self):
        """
        Test multiple namespace with single region
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        self.multiple_namespaces_region(region)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_ns_multiple_region(self):
        """
        Test multiple namespace with multiple region
        """
        self.plib.enable_region()
        if len(self.plib.run_ndctl_list('-R')) <= 1:
            self.cancel("Test not applicable without multiple regions")
        regions = self.plib.run_ndctl_list('-R')
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        for val in regions:
            region = self.plib.run_ndctl_list_val(val, 'dev')
            if (self.plib.is_region_legacy(region)):
                self.cancel("Legacy config skipping the test")
            self.multiple_namespaces_region(region)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_ns_modes_region(self):
        """
        Test multiple namespace modes with single region
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        self.log.info("Using %s for muliple namespace regions", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        # One 64M namespace per mode must fit in the region
        if size < (len(self.modes) * 64 * 1024 * 1024):
            self.cancel('Not enough memory to create namespaces')
        for mode in self.modes:
            self.plib.create_namespace(region=region, mode=mode,
                                       size='64M')
            self.log.info("Namespace of type %s created", mode)

    @avocado.fail_on(pmem.PMemException)
    def test_nslot_namespace(self):
        """
        Test max namespace with nslot value
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        size_align = self.get_size_alignval()
        slot_count = self.plib.get_slot_count(region)
        self.log.info("Using %s for max namespace creation", region)
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        region_size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        namespace_size = region_size // slot_count
        # Now align the namespace size
        namespace_size = (namespace_size // size_align) * size_align
        self.log.info("Creating %s namespace", slot_count)
        for count in range(0, slot_count):
            self.plib.create_namespace(region=region, mode='fsdax',
                                       size=namespace_size)
            self.log.info("Namespace %s created", count)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_reconfigure(self):
        """
        Test namespace reconfiguration
        """
        region = self.get_default_region()
        self.log.info("Using %s for reconfiguring namespace", region)
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        self.plib.create_namespace(region=region, mode='fsdax',
                                   align='64k')
        old_ns = self.plib.run_ndctl_list()[0]
        old_ns_dev = self.plib.run_ndctl_list_val(old_ns, 'dev')
        self.log.info("Re-configuring namespace %s", old_ns_dev)
        self.plib.create_namespace(region=region, mode='fsdax',
                                   name='test_ns', reconfig=old_ns_dev,
                                   force=True)
        new_ns = self.plib.run_ndctl_list()[0]
        self.log.info("Checking namespace changes")
        failed_vals = []
        # Every pre-existing key except uuid/dev must be unchanged
        for key, val in new_ns.items():
            if key in list(set(old_ns.keys()) - set(['uuid', 'dev'])):
                if old_ns[key] != val:
                    failed_vals.append({key: val})
            else:
                self.log.info("Newly added filed %s:%s", key, val)
        if failed_vals:
            self.fail("New namespace unexpected change(s): %s"
                      % failed_vals)
    @avocado.fail_on(pmem.PMemException)
    def test_check_namespace(self):
        """
        Verify metadata for sector mode namespaces
        """
        region = self.get_default_region()
        # Start from a clean slate: drop all existing namespaces first
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        self.log.info("Creating sector namespace using %s", region)
        self.plib.create_namespace(region=region, mode='sector')
        ns_sec_dev = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list()[0], 'dev')
        # check-namespace needs the namespace disabled before inspection
        self.plib.disable_namespace(namespace=ns_sec_dev)
        self.log.info("Checking BTT metadata")
        if process.system("%s check-namespace %s" % (self.ndctl, ns_sec_dev),
                          ignore_status=True):
            self.fail("Failed to check namespace metadata")

    @avocado.fail_on(pmem.PMemException)
    def test_check_numa(self):
        """
        Cross-check each region's NUMA node between ndctl and sysfs.
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        if not os.path.exists('/sys/bus/nd/devices/region0/numa_node'):
            self.fail("Numa node entries not found!")
        for val in regions:
            reg = self.plib.run_ndctl_list_val(val, 'dev')
            numa = genio.read_one_line('/sys/bus/nd/devices/%s/numa_node'
                                       % reg)
            # Check numa config in ndctl and sys interface: filtering by
            # region name and NUMA node must match exactly one region
            if len(self.plib.run_ndctl_list('-r %s -R -U %s'
                                            % (reg, numa))) != 1:
                self.fail('Region mismatch between ndctl and sys interface')

    @avocado.fail_on(pmem.PMemException)
    def test_check_ns_numa(self):
        """
        Cross-check namespace NUMA nodes between ndctl and sysfs for
        every region (3 fsdax namespaces of 64M are created per region).
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        for dev in regions:
            region = self.plib.run_ndctl_list_val(dev, 'dev')
            # Legacy regions cannot be reconfigured, so only clean
            # namespaces on non-legacy ones
            if not self.plib.is_region_legacy(region):
                self.plib.disable_namespace(region=region)
                self.plib.destroy_namespace(region=region)
            size = self.plib.run_ndctl_list_val(dev, 'size')
            # Need room for three 64M namespaces
            if size < (3 * 64 * 1024 * 1024):
                self.log.warn('Skipping region due to insufficient memory')
                continue
            for _ in range(3):
                self.plib.create_namespace(region=region, mode='fsdax',
                                           size='64M')
            namespaces = self.plib.run_ndctl_list('-N -r %s' % region)
            if not os.path.exists(
                    '/sys/bus/nd/devices/namespace0.0/numa_node'):
                self.fail("Numa node entries not found!")
            for val in namespaces:
                ns_name = self.plib.run_ndctl_list_val(val, 'dev')
                numa = genio.read_one_line(
                    '/sys/bus/nd/devices/%s/numa_node' % ns_name)
                # Check numa config in ndctl and sys interface
                if len(self.plib.run_ndctl_list('-N -n %s -U %s'
                                                % (ns_name, numa))) != 1:
                    self.fail('Numa mismatch between ndctl and sys interface')

    @avocado.fail_on(pmem.PMemException)
    def test_label_read_write(self):
        """
        Round-trip the NVDIMM label area: zero-fill, create a namespace,
        read labels out, zero again, write labels back and verify both the
        label checksum and the namespace survive the restore.
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        # NOTE(review): assumes the nmem device index matches the region
        # index (nmemN for regionN) -- confirm on multi-dimm configs
        nmem = "nmem%s" % re.findall(r'\d+', region)[0]
        self.log.info("Using %s for testing labels", region)
        self.plib.disable_region(name=region)
        self.log.info("Filling zeros to start test")
        if process.system('%s zero-labels %s' % (self.ndctl, nmem),
                          shell=True):
            self.fail("Label zero-fill failed")
        self.plib.enable_region(name=region)
        self.plib.create_namespace(region=region)
        self.log.info("Storing labels with a namespace")
        old_op = process.system_output('%s check-labels %s'
                                       % (self.ndctl, nmem), shell=True)
        if process.system('%s read-labels %s -o output'
                          % (self.ndctl, nmem), shell=True):
            self.fail("Label read failed")
        self.log.info("Refilling zeroes before a restore")
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.disable_region(name=region)
        if process.system('%s zero-labels %s' % (self.ndctl, nmem),
                          shell=True):
            self.fail("Label zero-fill failed after read")
        self.log.info("Re-storing labels with a namespace")
        if process.system('%s write-labels %s -i output'
                          % (self.ndctl, nmem), shell=True):
            self.fail("Label write failed")
        self.plib.enable_region(name=region)
        self.log.info("Checking mismatch after restore")
        new_op = process.system_output('%s check-labels %s'
                                       % (self.ndctl, nmem), shell=True)
        if new_op != old_op:
            self.fail("Label read and write mismatch")
        self.log.info("Checking created namespace after restore")
        if len(self.plib.run_ndctl_list('-N -r %s' % region)) != 1:
            self.fail("Created namespace not found after label restore")
    @avocado.fail_on(pmem.PMemException)
    def test_daxctl_list(self):
        """
        Test daxctl list
        """
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        # daxctl addresses regions by numeric index, not by name
        index = re.findall(r'\d+', region)[0]
        vals = self.plib.run_daxctl_list('-r %s' % (index))
        if len(vals) != 1:
            self.fail('Failed daxctl list')
        self.log.info('Created dax device %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_region_capabilities(self):
        """
        Test region capabilities
        """
        self.plib.enable_region()
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        # -C adds the per-region "capabilities" array to the listing
        regions = self.plib.run_ndctl_list('-R -C')
        for region in regions:
            cap = self.plib.run_ndctl_list_val(region, 'capabilities')
            sec_sizes = []
            fsdax_align = []
            devdax_align = []
            # Collect advertised alignments / sector sizes per mode
            for typ in cap:
                mode = self.plib.run_ndctl_list_val(typ, 'mode')
                if mode == 'fsdax':
                    fsdax_align = self.plib.run_ndctl_list_val(
                        typ, 'alignments')
                elif mode == 'devdax':
                    devdax_align = self.plib.run_ndctl_list_val(
                        typ, 'alignments')
                elif mode == 'sector':
                    sec_sizes = self.plib.run_ndctl_list_val(
                        typ, 'sector_sizes')
            reg_name = self.plib.run_ndctl_list_val(region, 'dev')
            self.log.info("Creating namespaces with possible sizes")
            # Exercise every advertised capability, destroying each
            # namespace before creating the next
            for size in sec_sizes:
                self.plib.create_namespace(region=reg_name, mode='sector',
                                           sector_size=size)
                self.plib.destroy_namespace(region=reg_name, force=True)
            for size in fsdax_align:
                self.plib.create_namespace(region=reg_name, mode='fsdax',
                                           align=size)
                self.plib.destroy_namespace(region=reg_name, force=True)
            for size in devdax_align:
                self.plib.create_namespace(region=reg_name, mode='devdax',
                                           align=size)
                self.plib.destroy_namespace(region=reg_name, force=True)

    @avocado.fail_on(pmem.PMemException)
    def test_daxctl_memhotplug_unplug(self):
        """
        Test devdax memory hotplug/unplug
        """
        # All three subcommands are required for the hotplug round trip
        for cmd in ["reconfigure-device", "offline-memory", "online-memory"]:
            if not self.plib.check_daxctl_subcmd(cmd):
                self.cancel("Binary does not support %s" % cmd)
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        daxdev = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'chardev')
        old_mem = memory.meminfo.MemTotal.b
        # Turn the devdax device into system RAM and expect MemTotal to grow
        dev_prop = self.plib.reconfigure_dax_device(daxdev, mode="system-ram")
        self.log.info("Reconfigured device %s", dev_prop)
        new_mem = memory.meminfo.MemTotal.b
        self.log.info("Memory Before:%s, Memory After:%s", old_mem, new_mem)
        if new_mem <= old_mem:
            self.log.warn("Memorysize not increased %s<=%s", new_mem, old_mem)
        self.plib.set_dax_memory_offline(daxdev)
        unplug_mem = memory.meminfo.MemTotal.b
        if unplug_mem != old_mem:
            self.fail("Memory after unplug is not same as system memory")
        self.log.info("Memory restored to base memory after unplug")
        self.plib.set_dax_memory_online(daxdev)
        hplug_mem = memory.meminfo.MemTotal.b
        if hplug_mem != new_mem:
            self.fail("Memory after hotplug is not same as device size memory")
        self.log.info("Memory hotplug successful with pmem device")
        self.log.info("Restoring pmem device in devdax mode")
        self.plib.set_dax_memory_offline(daxdev)
        self.plib.reconfigure_dax_device(daxdev, mode="devdax")

    @avocado.fail_on(pmem.PMemException)
    def write_read_infoblock(self, ns_name, align='', size=''):
        """
        Write_infoblock on given namespace

        :param ns_name: namespace device name to write the infoblock to
        :param align: optional alignment; '' (empty) leaves the default
        :param size: optional size; '' (empty) leaves the default
        :return: the first namespace dict read back from the infoblock
        """
        self.plib.write_infoblock(namespace=ns_name, align=align, size=size,
                                  mode='devdax')
        read_out = self.plib.read_infoblock(namespace=ns_name)
        if align:
            # Read-back alignment must reflect the requested value
            if align != int(self.plib.run_ndctl_list_val(read_out[0],
                                                         'align')):
                self.fail("Alignment has not changed")
        return read_out[0]

    @avocado.fail_on(pmem.PMemException)
    def test_write_infoblock_supported_align(self):
        """
        Test write_infoblock with align size
        """
        if not self.plib.check_ndctl_subcmd("write-infoblock"):
            self.cancel("Binary does not support write-infoblock")
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        ns_name = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'dev')
        # Infoblock writes require the namespace to be disabled
        self.plib.disable_namespace(namespace=ns_name)
        self.write_read_infoblock(ns_name, align=self.get_size_alignval())
        # A supported alignment must re-enable cleanly
        self.plib.enable_namespace(namespace=ns_name)

    @avocado.fail_on(pmem.PMemException)
    def test_write_infoblock_unalign(self):
        """
        Test write_infoblock with unsupported align size
        """
        if not self.plib.check_ndctl_subcmd("write-infoblock"):
            self.cancel("Binary does not support write-infoblock")
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        ns_name = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'dev')
        self.plib.disable_namespace(namespace=ns_name)
        self.write_read_infoblock(ns_name,
                                  align=self.get_unsupported_alignval())
        # Enabling with a bad alignment is expected to raise
        try:
            self.plib.enable_namespace(namespace=ns_name)
        except pmem.PMemException:
            self.log.info("Failed as expected")
        else:
            self.log.info(self.plib.run_ndctl_list())
            self.fail("Enabling namespace must have failed")
        # The namespace should still exist in the idle list with a
        # non-zero size
        idle_ns = self.plib.run_ndctl_list('-Ni -r %s' % region)
        if len(idle_ns) > 1:
            found = False
            for namespace in idle_ns:
                if int(self.plib.run_ndctl_list_val(namespace, 'size')) != 0:
                    found = True
                    break
        else:
            self.fail("Created namespace is not found")
        if not found:
            self.fail("Namespace with infoblock written not found")
        self.plib.destroy_namespace(namespace=ns_name, force=True)

    @avocado.fail_on(pmem.PMemException)
    def test_write_infoblock_align_default(self):
        """
        Test write_infoblock with align size
        """
        if not self.plib.check_ndctl_subcmd("write-infoblock"):
            self.cancel("Binary does not support write-infoblock")
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region,
                                   mode='devdax')
        ns_name = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'dev')
        align = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'align')
        self.plib.disable_namespace(namespace=ns_name)
        # No align passed -> the written infoblock must keep the default
        write_block = self.write_read_infoblock(ns_name)
        if align != self.plib.run_ndctl_list_val(write_block, 'align'):
            self.fail("Alignment is not same as default alignment")

    @avocado.fail_on(pmem.PMemException)
    def test_write_infoblock_size(self):
        """
        Test write_infoblock with align size
        """
        if not self.plib.check_ndctl_subcmd("write-infoblock"):
            self.cancel("Binary does not support write-infoblock")
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        ns_name = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'dev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.plib.disable_namespace(namespace=ns_name)
        align = self.get_size_alignval()
        # Shrink by one alignment unit so the size stays aligned
        size = size - align
        self.write_read_infoblock(ns_name, size=size, align=align)
        self.plib.enable_namespace(namespace=ns_name)

    @avocado.fail_on(pmem.PMemException)
    def test_write_infoblock_size_unaligned(self):
        """
        Test write_infoblock with align size
        """
        if not self.plib.check_ndctl_subcmd("write-infoblock"):
            self.cancel("Binary does not support write-infoblock")
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        ns_name = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'dev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.plib.disable_namespace(namespace=ns_name)
        # Page-size alignment is below the platform minimum, producing an
        # unaligned size on purpose
        align = memory.get_page_size()
        size = size - align
        self.write_read_infoblock(ns_name, size=size, align=align)
        try:
            self.plib.enable_namespace(namespace=ns_name)
        except pmem.PMemException:
            self.log.info("Failed as expected")
        else:
            self.log.info(self.plib.run_ndctl_list())
            self.fail("Enabling namespace must have failed")

    @avocado.fail_on(pmem.PMemException)
    def test_sector_write(self):
        """
        Test write on a sector mode device
        """
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='sector',
                                   sector_size='512')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.part = partition.Partition(self.disk)
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512'
                       % memory.get_page_size())
        mnt_path = self.params.get('mnt_point', default='/pmemS')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path)
        self.log.info("Test will run on %s", mnt_path)
        fio_job = self.params.get('fio_job', default='sector-fio.job')
        # Use half the namespace size so the FIO file fits comfortably
        cmd = '%s --directory %s --filename mmap-pmem --size %s %s' % (
            self.build_fio(), mnt_path, size // 2, self.get_data(fio_job))
        if process.system(cmd, ignore_status=True):
            self.fail("FIO mmap workload on fsdax failed")

    @avocado.fail_on(pmem.PMemException)
    def test_fsdax_write(self):
        """
        Test filesystem DAX with a FIO workload
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='fsdax')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.part = partition.Partition(self.disk)
        # self.reflink disables reflink on xfs, which is incompatible
        # with DAX
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512 %s'
                       % (memory.get_page_size(), self.reflink))
        mnt_path = self.params.get('mnt_point', default='/pmem')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path, args='-o dax')
        self.log.info("Test will run on %s", mnt_path)
        fio_job = self.params.get('fio_job', default='ndctl-fio.job')
        cmd = '%s --directory %s --filename mmap-pmem --size %s %s' % (
            self.build_fio(), mnt_path, size // 2, self.get_data(fio_job))
        if process.system(cmd, ignore_status=True):
            self.fail("FIO mmap workload on fsdax failed")

    @avocado.fail_on(pmem.PMemException)
    def test_map_sync(self):
        """
        Test MAP_SYNC flag with sample mmap write
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='fsdax')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        self.part = partition.Partition(self.disk)
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512 %s'
                       % (memory.get_page_size(), self.reflink))
        mnt_path = self.params.get('mnt_point', default='/pmem_map')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path, args='-o dax')
        self.log.info("Testing MAP_SYNC on %s", mnt_path)
        # Build the bundled MAP_SYNC sample and run it against a file on
        # the DAX mount
        src_file = os.path.join(self.teststmpdir, 'map_sync.c')
        shutil.copyfile(self.get_data('map_sync.c'), src_file)
        process.system('gcc %s -o map_sync' % src_file)
        process.system('fallocate -l 64k %s/new_file' % mnt_path)
        if process.system('./map_sync %s/new_file' % mnt_path,
                          ignore_status=True):
            self.fail('Write with MAP_SYNC flag failed')

    @avocado.fail_on(pmem.PMemException)
    def test_devdax_write(self):
        """
        Test device DAX with a daxio binary
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='devdax')
        daxdev = "/dev/%s" % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'chardev')
        if process.system("%s -b no -i /dev/urandom -o %s"
                          % (self.get_data("daxio.static"), daxdev),
                          ignore_status=True):
            self.fail("DAXIO write on devdax failed")

    @avocado.fail_on(pmem.PMemException)
    def tearDown(self):
        # Unmount and wipe any filesystem the tests created, then tear
        # down namespaces/regions unless the user asked to preserve them
        if self.part:
            self.part.unmount()
        if self.disk:
            self.log.info("Removing the FS meta created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=1M count=1024 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                self.fail("Failed to delete filesystem on %s" % self.disk)
        if not self.preserve_setup:
            if self.plib.run_ndctl_list('-N'):
                self.plib.destroy_namespace(force=True)
            self.plib.disable_region()
def setUp(self): """ To check and install dependencies for the test """ self.peer_ip = self.params.get("peer_ip", default="") self.peer_user = self.params.get("peer_user_name", default="root") self.peer_password = self.params.get("peer_password", '*', default="None") interfaces = netifaces.interfaces() self.iface = self.params.get("interface", default="") if self.iface not in interfaces: self.cancel("%s interface is not available" % self.iface) self.ipaddr = self.params.get("host_ip", default="") self.netmask = self.params.get("netmask", default="") configure_network.set_ip(self.ipaddr, self.netmask, self.iface) self.session = Session(self.peer_ip, user=self.peer_user, password=self.peer_password) smm = SoftwareManager() detected_distro = distro.detect() pkgs = ["gcc", "autoconf", "perl", "m4", "git-core", "automake"] if detected_distro.name == "Ubuntu": pkgs.extend(["libsctp1", "libsctp-dev", "lksctp-tools"]) else: pkgs.extend(["lksctp-tools", "lksctp-tools-devel"]) for pkg in pkgs: if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("%s package is need to test" % pkg) cmd = "%s install %s" % (smm.backend.base_command, pkg) output = self.session.cmd(cmd) if not output.exit_status == 0: self.cancel( "unable to install the package %s on peer machine " % pkg) if self.peer_ip == "": self.cancel("%s peer machine is not available" % self.peer_ip) self.mtu = self.params.get("mtu", default=1500) self.peerinfo = PeerInfo(self.peer_ip, peer_user=self.peer_user, peer_password=self.peer_password) self.peer_interface = self.peerinfo.get_peer_interface(self.peer_ip) if not self.peerinfo.set_mtu_peer(self.peer_interface, self.mtu): self.cancel("Failed to set mtu in peer") if not configure_network.set_mtu_host(self.iface, self.mtu): self.cancel("Failed to set mtu in host") uperf_download = self.params.get("uperf_download", default="https:" "//github.com/uperf/uperf/" "archive/master.zip") tarball = self.fetch_asset("uperf.zip", locations=[uperf_download], 
expire='7d') archive.extract(tarball, self.teststmpdir) self.uperf_dir = os.path.join(self.teststmpdir, "uperf-master") cmd = "scp -r %s %s@%s:/tmp" % (self.uperf_dir, self.peer_user, self.peer_ip) if process.system(cmd, shell=True, ignore_status=True) != 0: self.cancel("unable to copy the uperf into peer machine") cmd = "cd /tmp/uperf-master;autoreconf -fi;./configure ppc64le;make" output = self.session.cmd(cmd) if not output.exit_status == 0: self.cancel("Unable to compile Uperf into peer machine") self.uperf_run = str(self.params.get("UPERF_SERVER_RUN", default=0)) if self.uperf_run == '1': cmd = "/tmp/uperf-master/src/uperf -s &" cmd = self.session.get_raw_ssh_command(cmd) self.obj = SubProcess(cmd) self.obj.start() os.chdir(self.uperf_dir) process.system('autoreconf -fi', shell=True) process.system('./configure ppc64le', shell=True) build.make(self.uperf_dir) self.expected_tp = self.params.get("EXPECTED_THROUGHPUT", default="85")
def setUp(self): ''' To check and install dependencies for the test ''' smm = SoftwareManager() pkgs = ["ethtool", "net-tools"] detected_distro = distro.detect() if detected_distro.name == "Ubuntu": pkgs.extend(["openssh-client", "iputils-ping"]) elif detected_distro.name == "SuSE": pkgs.extend(["openssh", "iputils"]) else: pkgs.extend(["openssh-clients", "iputils"]) for pkg in pkgs: if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("%s package is need to test" % pkg) interfaces = netifaces.interfaces() interface = self.params.get("interface") if interface not in interfaces: self.cancel("%s interface is not available" % interface) self.iface = interface self.ipaddr = self.params.get("host_ip", default="") self.netmask = self.params.get("netmask", default="") local = LocalHost() self.networkinterface = NetworkInterface(self.iface, local) try: self.networkinterface.add_ipaddr(self.ipaddr, self.netmask) self.networkinterface.save(self.ipaddr, self.netmask) except Exception: self.networkinterface.save(self.ipaddr, self.netmask) self.networkinterface.bring_up() if not wait.wait_for(self.networkinterface.is_link_up, timeout=120): self.fail("Link up of interface is taking longer than 120 seconds") self.peer = self.params.get("peer_ip") if not self.peer: self.cancel("No peer provided") self.mtu = self.params.get("mtu", default=1500) self.peer_public_ip = self.params.get("peer_public_ip", default="") self.peer_user = self.params.get("peer_user", default="root") self.peer_password = self.params.get("peer_password", '*', default=None) if 'scp' or 'ssh' in str(self.name.name): self.session = Session(self.peer, user=self.peer_user, password=self.peer_password) if not self.session.connect(): self.cancel("failed connecting to peer") self.remotehost = RemoteHost(self.peer, self.peer_user, password=self.peer_password) self.peer_interface = self.remotehost.get_interface_by_ipaddr( self.peer).name self.peer_networkinterface = NetworkInterface(self.peer_interface, 
self.remotehost) self.remotehost_public = RemoteHost(self.peer_public_ip, self.peer_user, password=self.peer_password) self.peer_public_networkinterface = NetworkInterface( self.peer_interface, self.remotehost_public) self.mtu = self.params.get("mtu", default=1500) self.mtu_set() if self.networkinterface.ping_check(self.peer, count=5) is not None: self.cancel("No connection to peer")
def setUp(self): """ Build 'fio'. """ default_url = "https://brick.kernel.dk/snaps/fio-git-latest.tar.gz" url = self.params.get('fio_tool_url', default=default_url) self.disk = self.params.get('disk', default=None) self.disk_type = self.params.get('disk_type', default='') fs_args = self.params.get('fs_args', default='') mnt_args = self.params.get('mnt_args', default='') self.fio_file = 'fiotest-image' self.fs_create = False self.lv_create = False self.raid_create = False self.devdax_file = None fstype = self.params.get('fs', default='') if fstype == 'btrfs': ver = int(distro.detect().version) rel = int(distro.detect().release) if distro.detect().name == 'rhel': if (ver == 7 and rel >= 4) or ver > 7: self.cancel("btrfs is not supported with \ RHEL 7.4 onwards") lv_needed = self.params.get('lv', default=False) raid_needed = self.params.get('raid', default=False) if distro.detect().name in ['Ubuntu', 'debian']: pkg_list = ['libaio-dev'] if fstype == 'btrfs': pkg_list.append('btrfs-progs') elif distro.detect().name is 'SuSE': pkg_list = ['libaio1', 'libaio-devel'] else: pkg_list = ['libaio', 'libaio-devel'] if self.disk_type == 'nvdimm': pkg_list.extend(['autoconf', 'pkg-config']) if distro.detect().name == 'SuSE': pkg_list.extend(['ndctl', 'libnuma-devel', 'libndctl-devel']) else: pkg_list.extend(['ndctl', 'daxctl', 'numactl-devel', 'ndctl-devel', 'daxctl-devel']) if raid_needed: pkg_list.append('mdadm') smm = SoftwareManager() for pkg in pkg_list: if pkg and not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("Package %s is missing and could not be installed" % pkg) tarball = self.fetch_asset(url) archive.extract(tarball, self.teststmpdir) self.sourcedir = os.path.join(self.teststmpdir, "fio") fio_flags = "" self.ld_path = "" if self.disk_type == 'nvdimm': self.setup_pmem_disk(mnt_args) self.log.info("Building PMDK for NVDIMM fio engines") pmdk_url = self.params.get('pmdk_url', default='') tar = self.fetch_asset(pmdk_url, expire='7d') 
archive.extract(tar, self.teststmpdir) version = os.path.basename(tar.split('.tar.')[0]) pmdk_src = os.path.join(self.teststmpdir, version) build.make(pmdk_src) build.make(pmdk_src, extra_args='install prefix=%s' % self.teststmpdir) os.chdir(self.sourcedir) ext_flags = '`PKG_CONFIG_PATH=%s/lib/pkgconfig pkg-config --cflags\ --libs libpmem libpmemblk`' % self.teststmpdir self.ld_path = "LD_LIBRARY_PATH=%s/lib" % self.teststmpdir out = process.system_output('./configure --extra-cflags=' '"%s"' % ext_flags, shell=True) fio_flags = "LDFLAGS='%s'" % ext_flags for eng in ['PMDK libpmem', 'PMDK dev-dax', 'libnuma']: for line in out.decode().splitlines(): if line.startswith(eng) and 'no' in line: self.cancel("PMEM engines not built with fio") if not self.disk: self.disk = self.workdir self.dirs = self.disk if self.disk in disk.get_disks(): if raid_needed: raid_name = '/dev/md/mdsraid' self.create_raid(self.disk, raid_name) self.raid_create = True self.disk = raid_name if lv_needed: self.disk = self.create_lv(self.disk) self.lv_create = True self.dirs = self.disk if fstype: self.dirs = self.workdir self.create_fs(self.disk, self.dirs, fstype, fs_args, mnt_args) self.fs_create = True build.make(self.sourcedir, extra_args=fio_flags)
def setUp(self): # Check for basic utilities sm = SoftwareManager() detected_distro = distro.detect() deps = ['gcc', 'make', 'patch'] cpuinfo = genio.read_file("/proc/cpuinfo").strip() if detected_distro.name == "Ubuntu": deps += ['libpthread-stubs0-dev', 'git'] elif detected_distro.name == "SuSE": deps += ['glibc-devel-static', 'git-core'] else: deps += ['glibc-static', 'git'] for package in deps: if not sm.check_installed(package) and not sm.install(package): self.cancel(' %s is needed for the test to be run' % package) kernel.check_version("2.6.16") if detected_distro.name == "Ubuntu": op = glob.glob("/usr/lib/*/libpthread.a") else: op = glob.glob("/usr/lib*/libpthread.a") if not op: self.cancel("libpthread.a is required!!!" "\nTry installing glibc-static") self.page_sizes = ['16'] if 'POWER9' in cpuinfo and 'PowerNV' in cpuinfo: self.page_sizes = ['2', '1024'] # Get arguments: self.hugetlbfs_dir = self.params.get('hugetlbfs_dir', default=None) pages_requested = self.params.get('pages_requested', default=20) # Check hugepages: pages_available = 0 if os.path.exists('/proc/sys/vm/nr_hugepages'): Hugepages_support = genio.read_file("/proc/meminfo").rstrip("\n") if 'HugePages_' not in Hugepages_support: self.cancel("No Hugepages Configured") else: self.cancel("Kernel does not support hugepages") if not self.hugetlbfs_dir: self.hugetlbfs_dir = os.path.join(self.teststmpdir, 'hugetlbfs') os.makedirs(self.hugetlbfs_dir) for hp_size in self.page_sizes: if process.system('mount -t hugetlbfs -o pagesize=%sM none %s' % (hp_size, self.hugetlbfs_dir), sudo=True, ignore_status=True): self.cancel("hugetlbfs mount failed") genio.write_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(int(hp_size) * 1024), str(pages_requested)) pages_available = int( genio.read_file('/sys/kernel/mm/hugepages/huge' 'pages-%skB/nr_hugepages' % str(int(hp_size) * 1024).strip())) if pages_available < pages_requested: self.cancel('%d pages available, < %d pages requested' % 
(pages_available, pages_requested)) git.get_repo('https://github.com/libhugetlbfs/libhugetlbfs.git', destination_dir=self.workdir) os.chdir(self.workdir) patch = self.params.get('patch', default='elflink.patch') process.run('patch -p1 < %s' % self.get_data(patch), shell=True) build.make(self.workdir, extra_args='BUILDTYPE=NATIVEONLY')
    def setup_htx(self):
        """
        Builds HTX

        Installs build prerequisites, then either builds HTX from the
        open-power git master ('git' run type) or installs/reuses an HTX
        rpm ('rpm' run type), and finally starts the HTX daemon and
        creates the mdt files.
        """
        detected_distro = distro.detect()
        packages = ['git', 'gcc', 'make']
        if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
            packages.extend(['gcc-c++', 'ncurses-devel', 'tar'])
        elif detected_distro.name == "Ubuntu":
            packages.extend(
                ['libncurses5', 'g++', 'ncurses-dev', 'libncurses-dev'])
        elif detected_distro.name == 'SuSE':
            packages.extend(['libncurses5', 'gcc-c++', 'ncurses-devel',
                             'tar'])
        else:
            self.cancel("Test not supported in %s" % detected_distro.name)
        smm = SoftwareManager()
        for pkg in packages:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Can not install %s" % pkg)
        if self.run_type == 'git':
            url = "https://github.com/open-power/HTX/archive/master.zip"
            tarball = self.fetch_asset("htx.zip", locations=[url],
                                       expire='7d')
            archive.extract(tarball, self.teststmpdir)
            htx_path = os.path.join(self.teststmpdir, "HTX-master")
            os.chdir(htx_path)
            # Strip exercisers that need CAPI/OpenCAPI/dapl support from
            # the build, since they may not compile on this setup
            exercisers = ["hxecapi_afu_dir", "hxedapl", "hxecapi",
                          "hxeocapi"]
            for exerciser in exercisers:
                process.run("sed -i 's/%s//g' %s/bin/Makefile"
                            % (exerciser, htx_path))
            build.make(htx_path, extra_args='all')
            build.make(htx_path, extra_args='tar')
            # --touch avoids "file from the future" mtime warnings
            process.run('tar --touch -xvzf htx_package.tar.gz')
            os.chdir('htx_package')
            if process.system('./installer.sh -f'):
                self.fail("Installation of htx fails:please refer job.log")
        else:
            # RPM run type: reuse a matching installed rpm, otherwise
            # remove the stale one and install from the provided link
            dist_name = detected_distro.name.lower()
            if dist_name == 'suse':
                dist_name = 'sles'
            rpm_check = "htx%s%s" % (dist_name, detected_distro.version)
            skip_install = False
            ins_htx = process.system_output('rpm -qa | grep htx',
                                            shell=True,
                                            ignore_status=True).decode()
            if ins_htx:
                if not smm.check_installed(rpm_check):
                    self.log.info("Clearing existing HTX rpm")
                    process.system('rpm -e %s' % ins_htx,
                                   shell=True, ignore_status=True)
                    if os.path.exists('/usr/lpp/htx'):
                        shutil.rmtree('/usr/lpp/htx')
                else:
                    self.log.info("Using existing HTX")
                    skip_install = True
            if not skip_install:
                rpm_loc = self.params.get('rpm_link', default=None)
                if rpm_loc:
                    if process.system('rpm -ivh --nodeps %s '
                                      '--force' % rpm_loc,
                                      shell=True, ignore_status=True):
                        self.cancel("Installing rpm failed")
                else:
                    self.cancel("RPM link is required for RPM run type")
        self.log.info("Starting the HTX Deamon")
        process.run('/usr/lpp/htx/etc/scripts/htxd_run')
        self.log.info("Creating the HTX mdt files")
        process.run('htxcmdline -createmdt')
def setUp(self): ''' To check and install dependencies for the test ''' detected_distro = distro.detect() smm = SoftwareManager() depends = [] # FIXME: "redhat" as the distro name for RHEL is deprecated # on Avocado versions >= 50.0. This is a temporary compatibility # enabler for older runners, but should be removed soon if detected_distro.name == "Ubuntu": depends.extend(["openssh-client", "iputils-ping"]) elif detected_distro.name in ["rhel", "fedora", "centos", "redhat"]: depends.extend(["openssh-clients", "iputils"]) else: depends.extend(["openssh", "iputils"]) for pkg in depends: if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("%s package is need to test" % pkg) interfaces = netifaces.interfaces() self.user = self.params.get("user_name", default="root") self.host_interfaces = self.params.get("host_interfaces", default="").split(",") if not self.host_interfaces: self.cancel("user should specify host interfaces") self.peer_interfaces = self.params.get("peer_interfaces", default="").split(",") for self.host_interface in self.host_interfaces: if self.host_interface not in interfaces: self.cancel("interface is not available") self.peer_first_ipinterface = self.params.get("peer_ip", default="") if not self.peer_interfaces or self.peer_first_ipinterface == "": self.cancel("peer machine should available") msg = "ip addr show | grep %s | grep -oE '[^ ]+$'"\ % self.peer_first_ipinterface cmd = "ssh %s@%s %s" % (self.user, self.peer_first_ipinterface, msg) self.peer_first_interface = process.system_output(cmd, shell=True).strip() if self.peer_first_interface == "": self.fail("test failed because peer interface can not retrieved") self.bond_name = self.params.get("bond_name", default="tempbond") self.bond_status = "cat /proc/net/bonding/%s" % self.bond_name self.bond_dir = os.path.join("/sys/class/net/", self.bond_name) self.mode = self.params.get("bonding_mode", default="") if self.mode == "": self.cancel("test skipped because mode not specified") 
self.host_ips = [] self.peer_ips = [self.peer_first_ipinterface] for val in self.host_interfaces: cmd = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d /\ -f1" % val local_ip = process.system_output(cmd, shell=True).strip() if local_ip == "" and val == self.host_interfaces[0]: self.fail("test failed because local ip can not retrieved") self.host_ips.append(local_ip) for val in self.peer_interfaces: msg = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d /\ -f1" % val cmd = "ssh %s@%s \"%s\""\ % (self.user, self.peer_first_ipinterface, msg) peer_ip = process.system_output(cmd, shell=True).strip() cmd = 'echo %s | cut -d " " -f4' % peer_ip peer_ip = process.system_output(cmd, shell=True).strip() if peer_ip == "" and val == self.peer_first_interface: self.fail("test failed because peer ip can not retrieved") self.peer_ips.append(peer_ip) self.peer_interfaces.insert(0, self.peer_first_interface) self.net_mask = [] stf = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) for val1, val2 in map(None, self.host_interfaces, self.host_ips): mask = "" if val2: tmp = fcntl.ioctl(stf.fileno(), 0x891b, struct.pack('256s', val1)) mask = socket.inet_ntoa(tmp[20:24]).strip('\n') self.net_mask.append(mask) self.bonding_slave_file = "/sys/class/net/%s/bonding/slaves"\ % self.bond_name self.peer_bond_needed = self.params.get("peer_bond_needed", default=False) self.peer_wait_time = self.params.get("peer_wait_time", default=5) self.sleep_time = int(self.params.get("sleep_time", default=5)) cmd = "route -n | grep %s | grep -w UG | awk "\ "'{ print $2 }'" % self.host_interfaces[0] self.gateway = process.system_output( '%s' % cmd, shell=True)
class NdctlTest(Test):
    """
    Ndctl user space tooling for Linux, which handles NVDIMM devices.

    Builds ndctl/daxctl from upstream git master (or installs the distro
    packages, per the 'package' parameter) and exercises region,
    namespace and label management through avocado's pmem wrapper
    (self.plib).
    """

    def get_default_region(self):
        """
        Get the largest region if not provided.

        Returns the 'region' test parameter when set; otherwise the
        enabled region with the largest 'size' from `ndctl list -R`.
        """
        self.plib.enable_region()
        region = self.params.get('region', default=None)
        if region:
            return region
        regions = self.plib.run_ndctl_list('-R')
        regions = sorted(regions, key=lambda i: i['size'], reverse=True)
        return self.plib.run_ndctl_list_val(regions[0], 'dev')

    @staticmethod
    def get_size_alignval():
        """
        Return the size align restriction based on platform.

        16M when /proc/cpuinfo mentions 'Hash' (assumed Hash MMU on
        ppc64 -- TODO confirm), else 2M.
        """
        if 'Hash' in genio.read_file('/proc/cpuinfo').rstrip('\t\r\n\0'):
            def_align = 16 * 1024 * 1024
        else:
            def_align = 2 * 1024 * 1024
        return def_align

    def build_fio(self):
        """
        Install fio or build if not possible.

        Returns either the package name "fio" (when already on PATH or
        installable) or the path of a fio-2.1.10 binary built from
        source under teststmpdir.
        """
        pkg = "fio"
        # Non-zero exit from 'which' means fio is not on PATH.
        if process.system("which %s" % pkg, ignore_status=True):
            if not self.smm.check_installed(pkg) \
                    and not self.smm.install(pkg):
                # Fall back to building from source.
                for package in ["autoconf", "libtool", "make"]:
                    if not self.smm.check_installed(package) \
                            and not self.smm.install(package):
                        self.cancel(
                            "Fail to install %s required for this test."
                            "" % package)
                tarball = self.fetch_asset(
                    "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz")
                archive.extract(tarball, self.teststmpdir)
                fio_version = os.path.basename(tarball.split('.tar.')[0])
                sourcedir = os.path.join(self.teststmpdir, fio_version)
                build.make(sourcedir)
                return os.path.join(sourcedir, "fio")
        return pkg

    def setUp(self):
        """
        Build 'ndctl' and setup the binary.

        Supported distros: SuSE and rhel only. With package=upstream the
        tools are built from the pmem/ndctl git master; otherwise the
        distro packages are used.
        """
        deps = []
        self.dist = distro.detect()
        self.package = self.params.get('package', default='upstream')
        self.preserve_setup = self.params.get('preserve_change',
                                              default=False)
        self.mode_to_use = self.params.get('modes', default='fsdax')
        if self.dist.name not in ['SuSE', 'rhel']:
            self.cancel('Unsupported OS %s' % self.dist.name)
        # DAX wont work with reflink, disabling here
        self.reflink = '-m reflink=0'
        self.smm = SoftwareManager()
        if self.package == 'upstream':
            deps.extend(['gcc', 'make', 'automake', 'autoconf'])
            if self.dist.name == 'SuSE':
                deps.extend(['ruby2.5-rubygem-asciidoctor', 'libtool',
                             'libkmod-devel', 'libudev-devel',
                             'systemd-devel', 'libuuid-devel-static',
                             'libjson-c-devel', 'keyutils-devel',
                             'kmod-bash-completion'])
            elif self.dist.name == 'rhel':
                deps.extend(['rubygem-asciidoctor', 'automake', 'libtool',
                             'kmod-devel', 'libuuid-devel', 'json-c-devel',
                             'systemd-devel', 'keyutils-libs-devel', 'jq',
                             'parted', 'libtool'])
            for pkg in deps:
                if not self.smm.check_installed(pkg) and not \
                        self.smm.install(pkg):
                    self.cancel('%s is needed for the test to be run' % pkg)
            locations = ["https://github.com/pmem/ndctl/archive/master.zip"]
            tarball = self.fetch_asset("ndctl.zip", locations=locations,
                                       expire='7d')
            archive.extract(tarball, self.teststmpdir)
            os.chdir("%s/ndctl-master" % self.teststmpdir)
            process.run('./autogen.sh', sudo=True, shell=True)
            process.run(
                "./configure CFLAGS='-g -O2' --prefix=/usr "
                "--sysconfdir=/etc --libdir="
                "/usr/lib64", shell=True, sudo=True)
            build.make(".")
            # Use the freshly built binaries for the rest of the test.
            self.ndctl = os.path.abspath('./ndctl/ndctl')
            self.daxctl = os.path.abspath('./daxctl/daxctl')
        else:
            deps.extend(['ndctl'])
            if self.dist.name == 'rhel':
                deps.extend(['daxctl'])
            for pkg in deps:
                if not self.smm.check_installed(pkg) and not \
                        self.smm.install(pkg):
                    self.cancel('%s is needed for the test to be run' % pkg)
            # Distro-packaged tools, resolved via PATH.
            self.ndctl = 'ndctl'
            self.daxctl = 'daxctl'
        # ndctl list option -> json key holding that object's identifier
        self.opt_dict = {'-B': 'provider', '-D': 'dev',
                         '-R': 'dev', '-N': 'dev'}
        self.modes = ['raw', 'sector', 'fsdax', 'devdax']
        self.part = None
        self.disk = None
        self.plib = pmem.PMem(self.ndctl, self.daxctl)
        if not self.plib.check_buses():
            self.cancel("Test needs atleast one region")

    @avocado.fail_on(pmem.PMemException)
    def test_bus_ids(self):
        """
        Test the bus id info
        """
        vals = self.plib.run_ndctl_list('-B')
        if not vals:
            self.fail('Failed to fetch bus IDs')
        self.log.info('Available Bus provider IDs: %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_dimms(self):
        """
        Test the dimms info
        """
        vals = self.plib.run_ndctl_list('-D')
        if not vals:
            self.fail('Failed to fetch DIMMs')
        self.log.info('Available DIMMs: %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_regions(self):
        """
        Test the regions info

        Expects enabling regions to expose strictly more regions than
        the disabled state.
        """
        self.plib.disable_region()
        old = self.plib.run_ndctl_list('-R')
        self.plib.enable_region()
        new = self.plib.run_ndctl_list('-R')
        if len(new) <= len(old):
            self.fail('Failed to fetch regions')
        self.log.info('Available regions: %s', new)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace(self):
        """
        Test namespace

        Recreates one default namespace per region.
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        for val in regions:
            region = self.plib.run_ndctl_list_val(val, 'dev')
            self.plib.disable_namespace(region=region)
            self.plib.destroy_namespace(region=region)
            self.plib.create_namespace(region=region)
        namespaces = self.plib.run_ndctl_list('-N')
        self.log.info('Created namespace %s', namespaces)

    @avocado.fail_on(pmem.PMemException)
    def test_disable_enable_ns(self):
        """
        Test enable disable namespace

        Cycles disable/enable for each namespace and for the special
        'all' target.
        """
        region = self.get_default_region()
        # Legacy regions keep their pre-existing namespaces.
        if (not self.plib.is_region_legacy(region)):
            for _ in range(0, 3):
                self.plib.create_namespace(region=region, size='128M')
        namespaces = self.plib.run_ndctl_list('-N')
        ns_names = []
        for ns in namespaces:
            ns_names.append(self.plib.run_ndctl_list_val(ns, 'dev'))
        ns_names.append('all')
        for namespace in ns_names:
            self.plib.disable_namespace(namespace=namespace)
            self.plib.enable_namespace(namespace=namespace)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_modes(self):
        """
        Create different namespace types

        Creates one namespace per mode in self.modes and verifies ndctl
        reports the requested mode back.
        """
        failed_modes = []
        region = self.get_default_region()
        self.log.info("Using %s for different namespace modes", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        for mode in self.modes:
            self.plib.create_namespace(region=region, mode=mode)
            ns_json = self.plib.run_ndctl_list()[0]
            created_mode = self.plib.run_ndctl_list_val(ns_json, 'mode')
            if mode != created_mode:
                failed_modes.append(mode)
                self.log.error("Expected mode %s, Got %s",
                               mode, created_mode)
            else:
                self.log.info("Namespace with %s mode: %s", mode, ns_json)
            # Clean up before creating the next mode.
            ns_name = self.plib.run_ndctl_list_val(ns_json, 'dev')
            self.plib.disable_namespace(namespace=ns_name, region=region)
            self.plib.destroy_namespace(namespace=ns_name, region=region)
        if failed_modes:
            self.fail("Namespace for %s mode failed!" % failed_modes)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_devmap(self):
        """
        Test metadata device mapping option with a namespace
        """
        region = self.get_default_region()
        m_map = self.params.get('map', default='mem')
        size_align = self.get_size_alignval()
        # Size input in MB
        namespace_size = int(self.params.get('size', default=128) * 1048576)
        if namespace_size and (namespace_size % size_align):
            self.cancel("Size value %s not %s aligned \n"
                        % (namespace_size, size_align))
        self.log.info("Using %s for checking device mapping", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode=self.mode_to_use,
                                   memmap=m_map, size='%s' % namespace_size)
        self.log.info("Validating device mapping")
        map_val = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s -N' % region)[0], 'map')
        if map_val != m_map:
            self.fail("Expected map:%s, Got %s" % (m_map, map_val))
        else:
            self.log.info("Metadata mapped as expected")

    def multiple_namespaces_region(self, region):
        """
        Test multiple namespace with single region

        Fills the region with as many namespaces as fit, using either
        the 'size' parameter or region_size/slot_count (aligned down).
        """
        namespace_size = self.params.get('size', default=None)
        size_align = self.get_size_alignval()
        slot_count = self.plib.get_slot_count(region)
        self.log.info("Using %s for muliple namespace regions", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        if namespace_size and ((namespace_size % size_align) != 0):
            # NOTE(review): cancel() is given printf-style args here,
            # unlike other call sites which pre-format with % -- confirm
            # avocado supports lazy formatting in cancel().
            self.cancel("Size value not %d aligned %d \n",
                        size_align, namespace_size)
        region_size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        if not namespace_size:
            namespace_size = region_size // slot_count
            # Now align the namespace size
            namespace_size = (namespace_size // size_align) * size_align
        else:
            slot_count = region_size // namespace_size
        self.log.info("Creating %s namespaces", slot_count)
        for count in range(0, slot_count):
            self.plib.create_namespace(region=region, mode=self.mode_to_use,
                                       size=namespace_size)
            self.log.info("Namespace %s created", count + 1)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_namespaces_region(self):
        """
        Test multiple namespace with single region
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        self.multiple_namespaces_region(region)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_ns_multiple_region(self):
        """
        Test multiple namespace with multiple region
        """
        self.plib.enable_region()
        if len(self.plib.run_ndctl_list('-R')) <= 1:
            self.cancel("Test not applicable without multiple regions")
        regions = self.plib.run_ndctl_list('-R')
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        for val in regions:
            region = self.plib.run_ndctl_list_val(val, 'dev')
            if (self.plib.is_region_legacy(region)):
                self.cancel("Legacy config skipping the test")
            self.multiple_namespaces_region(region)

    @avocado.fail_on(pmem.PMemException)
    def test_multiple_ns_modes_region(self):
        """
        Test multiple namespace modes with single region

        Creates one 64M namespace per supported mode in the same region.
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        self.log.info("Using %s for muliple namespace regions", region)
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        # Need at least 64M per mode.
        if size < (len(self.modes) * 64 * 1024 * 1024):
            self.cancel('Not enough memory to create namespaces')
        for mode in self.modes:
            self.plib.create_namespace(region=region, mode=mode,
                                       size='64M')
            self.log.info("Namespace of type %s created", mode)

    @avocado.fail_on(pmem.PMemException)
    def test_nslot_namespace(self):
        """
        Test max namespace with nslot value
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        size_align = self.get_size_alignval()
        slot_count = self.plib.get_slot_count(region)
        self.log.info("Using %s for max namespace creation", region)
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        region_size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list('-r %s' % region)[0], 'size')
        namespace_size = region_size // slot_count
        # Now align the namespace size
        namespace_size = (namespace_size // size_align) * size_align
        self.log.info("Creating %s namespace", slot_count)
        for count in range(0, slot_count):
            self.plib.create_namespace(region=region, mode='fsdax',
                                       size=namespace_size)
            self.log.info("Namespace %s created", count)

    @avocado.fail_on(pmem.PMemException)
    def test_namespace_reconfigure(self):
        """
        Test namespace reconfiguration

        Reconfigures an existing namespace and verifies only 'uuid' and
        'dev' (plus newly added fields) may differ.
        """
        region = self.get_default_region()
        self.log.info("Using %s for reconfiguring namespace", region)
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        self.plib.create_namespace(region=region, mode='fsdax',
                                   align='64k')
        old_ns = self.plib.run_ndctl_list()[0]
        old_ns_dev = self.plib.run_ndctl_list_val(old_ns, 'dev')
        self.log.info("Re-configuring namespace %s", old_ns_dev)
        self.plib.create_namespace(region=region, mode='fsdax',
                                   name='test_ns', reconfig=old_ns_dev,
                                   force=True)
        new_ns = self.plib.run_ndctl_list()[0]
        self.log.info("Checking namespace changes")
        failed_vals = []
        for key, val in new_ns.items():
            # uuid/dev are expected to change across a reconfigure.
            if key in list(set(old_ns.keys()) - set(['uuid', 'dev'])):
                if old_ns[key] != val:
                    failed_vals.append({key: val})
            else:
                self.log.info("Newly added filed %s:%s", key, val)
        if failed_vals:
            self.fail("New namespace unexpected change(s): %s"
                      % failed_vals)

    @avocado.fail_on(pmem.PMemException)
    def test_check_namespace(self):
        """
        Verify metadata for sector mode namespaces
        """
        region = self.get_default_region()
        self.plib.disable_namespace()
        self.plib.destroy_namespace()
        self.log.info("Creating sector namespace using %s", region)
        self.plib.create_namespace(region=region, mode='sector')
        ns_sec_dev = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list()[0], 'dev')
        # check-namespace requires the namespace to be disabled.
        self.plib.disable_namespace(namespace=ns_sec_dev)
        self.log.info("Checking BTT metadata")
        if process.system("%s check-namespace %s" % (self.ndctl, ns_sec_dev),
                          ignore_status=True):
            self.fail("Failed to check namespace metadata")

    @avocado.fail_on(pmem.PMemException)
    def test_check_numa(self):
        """
        Cross-check region NUMA nodes between ndctl and sysfs.
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        if not os.path.exists('/sys/bus/nd/devices/region0/numa_node'):
            self.fail("Numa node entries not found!")
        for val in regions:
            reg = self.plib.run_ndctl_list_val(val, 'dev')
            numa = genio.read_one_line('/sys/bus/nd/devices/%s/numa_node'
                                       % reg)
            # Check numa config in ndctl and sys interface
            if len(self.plib.run_ndctl_list('-r %s -R -U %s'
                                            % (reg, numa))) != 1:
                self.fail('Region mismatch between ndctl and sys interface')

    @avocado.fail_on(pmem.PMemException)
    def test_check_ns_numa(self):
        """
        Cross-check namespace NUMA nodes between ndctl and sysfs.
        """
        self.plib.enable_region()
        regions = self.plib.run_ndctl_list('-R')
        for dev in regions:
            region = self.plib.run_ndctl_list_val(dev, 'dev')
            # Legacy regions keep their pre-existing namespaces.
            if not self.plib.is_region_legacy(region):
                self.plib.disable_namespace(region=region)
                self.plib.destroy_namespace(region=region)
                for _ in range(3):
                    self.plib.create_namespace(region=region, mode='fsdax',
                                               size='128M')
            namespaces = self.plib.run_ndctl_list('-N -r %s' % region)
            if not os.path.exists(
                    '/sys/bus/nd/devices/namespace0.0/numa_node'):
                self.fail("Numa node entries not found!")
            for val in namespaces:
                ns_name = self.plib.run_ndctl_list_val(val, 'dev')
                numa = genio.read_one_line(
                    '/sys/bus/nd/devices/%s/numa_node' % ns_name)
                # Check numa config in ndctl and sys interface
                if len(self.plib.run_ndctl_list(
                        '-N -n %s -U %s' % (ns_name, numa))) != 1:
                    self.fail('Numa mismatch between ndctl and '
                              'sys interface')

    @avocado.fail_on(pmem.PMemException)
    def test_label_read_write(self):
        """
        Zero, read, restore and re-verify the NVDIMM label area.
        """
        region = self.get_default_region()
        if (self.plib.is_region_legacy(region)):
            self.cancel("Legacy config skipping the test")
        # Region dev name carries the nmem index (e.g. region0 -> nmem0).
        nmem = "nmem%s" % re.findall(r'\d+', region)[0]
        self.log.info("Using %s for testing labels", region)
        self.plib.disable_region(name=region)
        self.log.info("Filling zeros to start test")
        if process.system('%s zero-labels %s' % (self.ndctl, nmem),
                          shell=True):
            self.fail("Label zero-fill failed")
        self.plib.enable_region(name=region)
        self.plib.create_namespace(region=region)
        self.log.info("Storing labels with a namespace")
        old_op = process.system_output('%s check-labels %s'
                                       % (self.ndctl, nmem), shell=True)
        if process.system('%s read-labels %s -o output'
                          % (self.ndctl, nmem), shell=True):
            self.fail("Label read failed")
        self.log.info("Refilling zeroes before a restore")
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.disable_region(name=region)
        if process.system('%s zero-labels %s' % (self.ndctl, nmem),
                          shell=True):
            self.fail("Label zero-fill failed after read")
        self.log.info("Re-storing labels with a namespace")
        if process.system('%s write-labels %s -i output'
                          % (self.ndctl, nmem), shell=True):
            self.fail("Label write failed")
        self.plib.enable_region(name=region)
        self.log.info("Checking mismatch after restore")
        new_op = process.system_output('%s check-labels %s'
                                       % (self.ndctl, nmem), shell=True)
        if new_op != old_op:
            self.fail("Label read and write mismatch")
        self.log.info("Checking created namespace after restore")
        if len(self.plib.run_ndctl_list('-N -r %s' % region)) != 1:
            self.fail("Created namespace not found after label restore")

    @avocado.fail_on(pmem.PMemException)
    def test_daxctl_list(self):
        """
        Test daxctl list
        """
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='devdax')
        index = re.findall(r'\d+', region)[0]
        vals = self.plib.run_daxctl_list('-r %s' % (index))
        if len(vals) != 1:
            self.fail('Failed daxctl list')
        self.log.info('Created dax device %s', vals)

    @avocado.fail_on(pmem.PMemException)
    def test_sector_write(self):
        """
        Test write on a sector mode device

        Creates a sector namespace, mounts an xfs on it and runs a fio
        job against the mount point.
        """
        region = self.get_default_region()
        self.plib.disable_namespace(region=region)
        self.plib.destroy_namespace(region=region)
        self.plib.create_namespace(region=region, mode='sector',
                                   sector_size='512')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.part = partition.Partition(self.disk)
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512'
                       % memory.get_page_size())
        mnt_path = self.params.get('mnt_point', default='/pmemS')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path)
        self.log.info("Test will run on %s", mnt_path)
        fio_job = self.params.get('fio_job', default='sector-fio.job')
        # Use only half the namespace size for the fio file.
        cmd = '%s --directory %s --filename mmap-pmem --size %s %s' % (
            self.build_fio(), mnt_path, size // 2, self.get_data(fio_job))
        if process.system(cmd, ignore_status=True):
            self.fail("FIO mmap workload on fsdax failed")

    @avocado.fail_on(pmem.PMemException)
    def test_fsdax_write(self):
        """
        Test filesystem DAX with a FIO workload
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='fsdax')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        size = self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'size')
        self.part = partition.Partition(self.disk)
        # reflink is disabled (see setUp) since DAX does not support it.
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512 %s'
                       % (memory.get_page_size(), self.reflink))
        mnt_path = self.params.get('mnt_point', default='/pmem')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path, args='-o dax')
        self.log.info("Test will run on %s", mnt_path)
        fio_job = self.params.get('fio_job', default='ndctl-fio.job')
        cmd = '%s --directory %s --filename mmap-pmem --size %s %s' % (
            self.build_fio(), mnt_path, size // 2, self.get_data(fio_job))
        if process.system(cmd, ignore_status=True):
            self.fail("FIO mmap workload on fsdax failed")

    @avocado.fail_on(pmem.PMemException)
    def test_map_sync(self):
        """
        Test MAP_SYNC flag with sample mmap write

        Compiles the bundled map_sync.c and runs it against a file on a
        DAX-mounted filesystem.
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='fsdax')
        self.disk = '/dev/%s' % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'blockdev')
        self.part = partition.Partition(self.disk)
        self.part.mkfs(fstype='xfs', args='-b size=%s -s size=512 %s'
                       % (memory.get_page_size(), self.reflink))
        mnt_path = self.params.get('mnt_point', default='/pmem_map')
        if not os.path.exists(mnt_path):
            os.makedirs(mnt_path)
        self.part.mount(mountpoint=mnt_path, args='-o dax')
        self.log.info("Testing MAP_SYNC on %s", mnt_path)
        src_file = os.path.join(self.teststmpdir, 'map_sync.c')
        shutil.copyfile(self.get_data('map_sync.c'), src_file)
        process.system('gcc %s -o map_sync' % src_file)
        process.system('fallocate -l 64k %s/new_file' % mnt_path)
        if process.system('./map_sync %s/new_file' % mnt_path,
                          ignore_status=True):
            self.fail('Write with MAP_SYNC flag failed')

    @avocado.fail_on(pmem.PMemException)
    def test_devdax_write(self):
        """
        Test device DAX with a daxio binary
        """
        region = self.get_default_region()
        self.plib.create_namespace(region=region, mode='devdax')
        daxdev = "/dev/%s" % self.plib.run_ndctl_list_val(
            self.plib.run_ndctl_list("-N -r %s" % region)[0], 'chardev')
        if process.system("%s -b no -i /dev/urandom -o %s"
                          % (self.get_data("daxio.static"), daxdev),
                          ignore_status=True):
            self.fail("DAXIO write on devdax failed")

    @avocado.fail_on(pmem.PMemException)
    def tearDown(self):
        """
        Unmount, wipe filesystem metadata and (unless preserve_change
        is set) destroy the namespaces/regions created by the test.
        """
        if self.part:
            self.part.unmount()
        if self.disk:
            self.log.info("Removing the FS meta created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=1M count=1024 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                self.fail("Failed to delete filesystem on %s" % self.disk)
        if not self.preserve_setup:
            if self.plib.run_ndctl_list('-N'):
                self.plib.destroy_namespace(force=True)
            self.plib.disable_region()
def setUp(self): """ Setup and install dependencies for the test. """ self.test_name = "ucmatose" self.basic = self.params.get("basic_option", default="None") self.ext = self.params.get("ext_option", default="None") self.flag = self.params.get("ext_flag", default="0") if self.basic == "None" and self.ext == "None": self.cancel("No option given") if self.flag == "1" and self.ext != "None": self.option = self.ext else: self.option = self.basic if process.system("ibstat", shell=True, ignore_status=True) != 0: self.cancel("MOFED is not installed. Skipping") detected_distro = distro.detect() pkgs = [] smm = SoftwareManager() if detected_distro.name == "Ubuntu": pkgs.extend(["openssh-client", "iputils-ping"]) elif detected_distro.name == "SuSE": pkgs.extend(["openssh", "iputils"]) else: pkgs.extend(["openssh-clients", "iputils"]) for pkg in pkgs: if not smm.check_installed(pkg) and not smm.install(pkg): self.cancel("Not able to install %s" % pkg) interfaces = netifaces.interfaces() self.flag = self.params.get("ext_flag", default="0") self.iface = self.params.get("interface", default="") self.peer_ip = self.params.get("peer_ip", default="") self.peer_user = self.params.get("peer_user_name", default="root") self.peer_password = self.params.get("peer_password", '*', default="passw0rd") self.peer_login(self.peer_ip, self.peer_user, self.peer_password) if self.iface not in interfaces: self.cancel("%s interface is not available" % self.iface) if self.peer_ip == "": self.cancel("%s peer machine is not available" % self.peer_ip) self.timeout = "2m" self.local_ip = netifaces.ifaddresses(self.iface)[AF_INET][0]['addr'] if detected_distro.name == "Ubuntu": cmd = "service ufw stop" # FIXME: "redhat" as the distro name for RHEL is deprecated # on Avocado versions >= 50.0. 
This is a temporary compatibility # enabler for older runners, but should be removed soon elif detected_distro.name in ['rhel', 'fedora', 'redhat']: cmd = "systemctl stop firewalld" elif detected_distro.name == "SuSE": if detected_distro.version == 15: cmd = "systemctl stop firewalld" else: cmd = "rcSuSEfirewall2 stop" elif detected_distro.name == "centos": cmd = "service iptables stop" else: self.cancel("Distro not supported") if process.system(cmd, ignore_status=True, shell=True) != 0: self.cancel("Unable to disable firewall") output, exitcode = self.run_command(cmd) if exitcode != 0: self.cancel("Unable to disable firewall on peer")
    def setUp(self):
        '''
        To check and install dependencies for the test.

        Installs ssh/ping packages, validates the bonding parameters,
        assigns the configured IPs to the host interfaces (setup phase
        only), and opens Session/RemoteHost connections to the peer.
        '''
        detected_distro = distro.detect()
        smm = SoftwareManager()
        depends = []
        # FIXME: "redhat" as the distro name for RHEL is deprecated
        # on Avocado versions >= 50.0. This is a temporary compatibility
        # enabler for older runners, but should be removed soon
        if detected_distro.name == "Ubuntu":
            depends.extend(["openssh-client", "iputils-ping"])
        elif detected_distro.name in ["rhel", "fedora", "centos", "redhat"]:
            depends.extend(["openssh-clients", "iputils"])
        else:
            depends.extend(["openssh", "iputils"])
        for pkg in depends:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("%s package is need to test" % pkg)
        self.mode = self.params.get("bonding_mode", default="")
        # The bonding mode is only mandatory for the setup/run variants
        # of this test (self.name holds the avocado test id).
        if 'setup' in str(self.name) or 'run' in str(self.name):
            if not self.mode:
                self.cancel("test skipped because mode not specified")
        interfaces = netifaces.interfaces()
        self.peer_public_ip = self.params.get("peer_public_ip", default="")
        self.user = self.params.get("user_name", default="root")
        self.password = self.params.get("peer_password", '*',
                                        default="None")
        self.host_interfaces = self.params.get("bond_interfaces",
                                               default="").split(" ")
        # NOTE(review): "".split(" ") == [""] is truthy, so this check
        # cannot trigger on an empty parameter -- confirm intent.
        if not self.host_interfaces:
            self.cancel("user should specify host interfaces")
        self.peer_interfaces = self.params.get("peer_interfaces",
                                               default="").split(" ")
        for self.host_interface in self.host_interfaces:
            if self.host_interface not in interfaces:
                self.cancel("interface is not available")
        self.peer_first_ipinterface = self.params.get("peer_ip", default="")
        if not self.peer_interfaces or self.peer_first_ipinterface == "":
            self.cancel("peer machine should available")
        self.ipaddr = self.params.get("host_ips", default="").split(" ")
        self.netmask = self.params.get("netmask", default="")
        self.localhost = LocalHost()
        # Only the setup phase assigns IPs to the host interfaces.
        if 'setup' in str(self.name.name):
            for ipaddr, interface in zip(self.ipaddr, self.host_interfaces):
                networkinterface = NetworkInterface(interface,
                                                    self.localhost)
                try:
                    networkinterface.add_ipaddr(ipaddr, self.netmask)
                    networkinterface.save(ipaddr, self.netmask)
                except Exception:
                    # Address may already be configured; persist anyway.
                    networkinterface.save(ipaddr, self.netmask)
                networkinterface.bring_up()
        self.miimon = self.params.get("miimon", default="100")
        self.fail_over_mac = self.params.get("fail_over_mac", default="2")
        self.downdelay = self.params.get("downdelay", default="0")
        self.bond_name = self.params.get("bond_name", default="tempbond")
        self.net_path = "/sys/class/net/"
        self.bond_status = "/proc/net/bonding/%s" % self.bond_name
        self.bond_dir = os.path.join(self.net_path, self.bond_name)
        self.bonding_slave_file = "%s/bonding/slaves" % self.bond_dir
        self.bonding_masters_file = "%s/bonding_masters" % self.net_path
        self.peer_bond_needed = self.params.get("peer_bond_needed",
                                                default=False)
        self.peer_wait_time = self.params.get("peer_wait_time", default=5)
        self.sleep_time = int(self.params.get("sleep_time", default=5))
        self.mtu = self.params.get("mtu", default=1500)
        self.ib = False
        # NOTE(review): self.host_interface is the leftover variable from
        # the validation loop above, i.e. the LAST bond interface --
        # confirm the 'ib' prefix check is meant to look at that one.
        if self.host_interface[0:2] == 'ib':
            self.ib = True
        self.log.info("Bond Test on IB Interface? = %s", self.ib)
        self.session = Session(self.peer_first_ipinterface, user=self.user,
                               password=self.password)
        self.setup_ip()
        self.err = []
        self.remotehost = RemoteHost(self.peer_first_ipinterface, self.user,
                                     password=self.password)
        self.remotehost_public = RemoteHost(self.peer_public_ip, self.user,
                                            password=self.password)
        # Setup phase also aligns the MTU on both ends of every link.
        if 'setup' in str(self.name.name):
            for interface in self.peer_interfaces:
                peer_networkinterface = NetworkInterface(
                    interface, self.remotehost)
                if peer_networkinterface.set_mtu(self.mtu) is not None:
                    self.cancel("Failed to set mtu in peer")
            for host_interface in self.host_interfaces:
                self.networkinterface = NetworkInterface(
                    host_interface, self.localhost)
                if self.networkinterface.set_mtu(self.mtu) is not None:
                    self.cancel("Failed to set mtu in host")
    def setUp(self):
        '''
        To check and install dependencies for the test.

        Installs compiler/ssh packages, validates interface and peer
        parameters, disables the firewall on both ends, then fetches,
        copies and builds netperf-2.7.0 and iperf (git master) on both
        the local machine and the peer.
        '''
        smm = SoftwareManager()
        detected_distro = distro.detect()
        pkgs = ["gcc"]
        if detected_distro.name == "Ubuntu":
            pkgs.append('openssh-client')
        elif detected_distro.name == "SuSE":
            pkgs.append('openssh')
        else:
            pkgs.append('openssh-clients')
        for pkg in pkgs:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("%s package is need to test" % pkg)
        interfaces = netifaces.interfaces()
        self.iface = self.params.get("interface", default="")
        self.peer_ip = self.params.get("peer_ip", default="")
        if self.iface not in interfaces:
            self.cancel("%s interface is not available" % self.iface)
        if self.peer_ip == "":
            self.cancel("%s peer machine is not available" % self.peer_ip)
        self.tmo = self.params.get("TIMEOUT", default="600")
        self.iperf_run = self.params.get("IPERF_RUN", default="0")
        self.netserver_run = self.params.get("NETSERVER_RUN", default="0")
        # Extraction targets for the two tools.
        self.iper = os.path.join(self.teststmpdir, 'iperf')
        self.netperf = os.path.join(self.teststmpdir, 'netperf')
        if detected_distro.name == "Ubuntu":
            cmd = "service ufw stop"
        # FIXME: "redhat" as the distro name for RHEL is deprecated
        # on Avocado versions >= 50.0. This is a temporary compatibility
        # enabler for older runners, but should be removed soon
        elif detected_distro.name in ['rhel', 'fedora', 'redhat']:
            cmd = "systemctl stop firewalld"
        elif detected_distro.name == "SuSE":
            cmd = "rcSuSEfirewall2 stop"
        elif detected_distro.name == "centos":
            cmd = "service iptables stop"
        else:
            self.cancel("Distro not supported")
        # Stop the firewall locally and on the peer in one shot.
        if process.system("%s && ssh %s %s" % (cmd, self.peer_ip, cmd),
                          ignore_status=True, shell=True) != 0:
            self.cancel("Unable to disable firewall")
        tarball = self.fetch_asset(
            'ftp://ftp.netperf.org/netperf/'
            'netperf-2.7.0.tar.bz2', expire='7d')
        archive.extract(tarball, self.netperf)
        version = os.path.basename(tarball.split('.tar.')[0])
        self.neperf = os.path.join(self.netperf, version)
        # Copy the netperf tree to the peer's /root and build it there.
        tmp = "scp -r %s root@%s:" % (self.neperf, self.peer_ip)
        if process.system(tmp, shell=True, ignore_status=True) != 0:
            self.cancel("unable to copy the netperf into peer machine")
        # NOTE(review): the remote path hardcodes netperf-2.7.0 rather
        # than reusing `version` -- fine while the fetched tarball stays
        # at 2.7.0, but they must be kept in sync.
        tmp = "cd /root/netperf-2.7.0;./configure ppc64le;make"
        cmd = "ssh %s \"%s\"" % (self.peer_ip, tmp)
        if process.system(cmd, shell=True, ignore_status=True) != 0:
            self.fail("test failed because command failed in peer machine")
        time.sleep(5)
        # Local netperf build.
        os.chdir(self.neperf)
        process.system('./configure ppc64le', shell=True)
        build.make(self.neperf)
        self.perf = os.path.join(self.neperf, 'src')
        time.sleep(5)
        tarball = self.fetch_asset(
            'iperf.zip',
            locations=['https://github.com/esnet/iperf/archive/master.zip'],
            expire='7d')
        archive.extract(tarball, self.iper)
        self.ipe = os.path.join(self.iper, 'iperf-master')
        # Copy the iperf tree to the peer's /root and build it there.
        tmp = "scp -r %s root@%s:" % (self.ipe, self.peer_ip)
        if process.system(tmp, shell=True, ignore_status=True) != 0:
            self.cancel("unable to copy the iperf into peer machine")
        tmp = "cd /root/iperf-master;./configure;make"
        cmd = "ssh %s \"%s\"" % (self.peer_ip, tmp)
        if process.system(cmd, shell=True, ignore_status=True) != 0:
            self.fail("test failed because command failed in peer machine")
        time.sleep(5)
        # Local iperf build.
        os.chdir(self.ipe)
        process.system('./configure', shell=True)
        build.make(self.ipe)
        self.iperf = os.path.join(self.ipe, 'src')
    def setUp(self):
        '''
        To check and install dependencies for the test.

        Validates interface/peer parameters, collects the InfiniBand
        CA/GID/port settings for both ends, installs the ibverbs
        packages, disables the firewall on both machines and discovers
        which peer interface carries peer_ip.
        '''
        interfaces = netifaces.interfaces()
        self.flag = self.params.get("ext_flag", default="0")
        self.iface = self.params.get("interface", default="")
        self.peer_ip = self.params.get("peer_ip", default="")
        if self.iface not in interfaces:
            self.cancel("%s interface is not available" % self.iface)
        if self.peer_ip == "":
            self.cancel("%s peer machine is not available" % self.peer_ip)
        # Local and peer IB adapter coordinates.
        self.ca_name = self.params.get("CA_NAME", default="mlx4_0")
        self.gid = int(self.params.get("GID_NUM", default="0"))
        self.port = int(self.params.get("PORT_NUM", default="1"))
        self.peer_ca = self.params.get("PEERCA", default="mlx4_0")
        self.peer_gid = int(self.params.get("PEERGID", default="0"))
        self.peer_port = int(self.params.get("PEERPORT", default="1"))
        self.tmo = self.params.get("TIMEOUT", default="120")
        smm = SoftwareManager()
        detected_distro = distro.detect()
        pkgs = []
        if detected_distro.name == "Ubuntu":
            pkgs.extend(["ibverbs-utils", 'openssh-client'])
            cmd = "service ufw stop"
        # FIXME: "redhat" as the distro name for RHEL is deprecated
        # on Avocado versions >= 50.0. This is a temporary compatibility
        # enabler for older runners, but should be removed soon
        elif detected_distro.name in ['rhel', 'fedora', 'redhat']:
            pkgs.extend(["libibverbs", 'openssh-clients'])
            cmd = "systemctl stop firewalld"
        elif detected_distro.name == "SuSE":
            pkgs.append('openssh')
            # NOTE(review): detected_distro.version may be a string on
            # some avocado versions; confirm this int comparison matches.
            if detected_distro.version == 15:
                cmd = "systemctl stop firewalld"
            else:
                cmd = "rcSuSEfirewall2 stop"
        elif detected_distro.name == "centos":
            pkgs.extend(['libibverbs', 'openssh-clients'])
            cmd = "service iptables stop"
        else:
            self.cancel("Distro not supported")
        # Stop the firewall locally and on the peer in one shot.
        if process.system("%s && ssh %s %s" % (cmd, self.peer_ip, cmd),
                          ignore_status=True, shell=True) != 0:
            self.cancel("Unable to disable firewall")
        for pkg in pkgs:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("%s package is need to test" % pkg)
        if process.system("ibstat", shell=True, ignore_status=True) != 0:
            self.cancel("infiniband adaptors not available")
        self.tool_name = self.params.get("tool")
        self.log.info("test with %s", self.tool_name)
        # Find the peer interface carrying peer_ip (last field of the
        # matching "ip addr show" line).
        self.peer_iface = ''
        cmd = "ssh %s \"ip addr show\"" % self.peer_ip
        output = process.system_output(cmd, shell=True).strip()
        for line in output.splitlines():
            # NOTE(review): system_output returns bytes on modern
            # avocado, in which case `self.peer_ip in line` raises
            # TypeError (str in bytes) -- confirm avocado version or
            # decode the output.
            if self.peer_ip in line:
                self.peer_iface = line.split()[-1]
                break
def setUp(self):
    '''
    Build lmbench
    Source:
    http://www.bitmover.com/lmbench/lmbench3.tar.gz
    '''
    fsdir = self.params.get('fsdir', default=None)
    temp_file = self.params.get('temp_file', default=None)
    memory_size_mb = self.params.get('MB', default=125)
    self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    smm = SoftwareManager()
    for package in ['gcc', 'make', 'patch']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("%s is needed for the test to be run" % package)
    tarball = self.fetch_asset('http://www.bitmover.com'
                               '/lmbench/lmbench3.tar.gz')
    archive.extract(tarball, self.workdir)
    version = os.path.basename(tarball.split('.tar.')[0])
    self.sourcedir = os.path.join(self.workdir, version)
    # Patch for lmbench
    os.chdir(self.sourcedir)
    makefile_patch = 'patch -p1 < %s' % self.get_data('makefile.patch')
    build_patch = 'patch -p1 < %s' % self.get_data(
        '0001-Fix-build-issues-with-lmbench.patch')
    lmbench_fix_patch = 'patch -p1 < %s' % self.get_data(
        '0002-Changing-shebangs-on-lmbench-scripts.patch')
    ostype_fix_patch = 'patch -p1 < %s' % self.get_data(
        'fix_add_os_type.patch')
    process.run(makefile_patch, shell=True)
    process.run(build_patch, shell=True)
    process.run(lmbench_fix_patch, shell=True)
    process.run(ostype_fix_patch, shell=True)
    build.make(self.sourcedir)
    # configure lmbench
    os.chdir(self.sourcedir)
    process.system('yes "" | make config', shell=True, ignore_status=True)
    # Find the lmbench config file.
    # IMPROVED: the original shelled out via os.popen('ls -1 ...');
    # glob does the same expansion in-process (cwd is self.sourcedir).
    # sorted() mirrors ls's lexicographic ordering.
    config_files = sorted(glob.glob('bin/*/CONFIG*'))
    if len(config_files) != 1:
        # BUG FIX: '% s' typo in the format specifier -> '%s'
        self.error('Config not found : %s' % config_files)
    config_file = config_files[0]
    if not fsdir:
        fsdir = self.tmpdir
    if not temp_file:
        temp_file = os.path.join(self.tmpdir, 'XXX')
    # patch the resulted config to use the proper temporary directory and
    # file locations
    with open(config_file, "r+") as cfg_file:
        lines = cfg_file.readlines()
        cfg_file.seek(0)
        cfg_file.truncate()
        for line in lines:
            if line.startswith("FSDIR="):
                cfg_file.write("FSDIR=%s\n" % fsdir)
            elif line.startswith("FILE="):
                cfg_file.write("FILE=%s\n" % temp_file)
            elif line.startswith("MB="):
                cfg_file.write("MB=%s\n" % memory_size_mb)
            else:
                cfg_file.write(line)
        # Printing the config file
        cfg_file.seek(0)
        for line in cfg_file.readlines():
            print(line)
def setUp(self):
    """
    Install all the required dependencies
    Building source tarball requires packages specific to os
    that needs to be installed, if not installed test will stop.
    """
    smm = SoftwareManager()
    dist = distro.detect()
    dist_name = dist.name.lower()
    # Toolchain packages common to every supported distro
    packages = [
        'gcc', 'wget', 'autoconf', 'automake', 'dejagnu',
        'binutils', 'patch'
    ]
    if dist_name == 'suse':
        packages.extend([
            'libdw-devel', 'libelf-devel', 'git-core', 'elfutils',
            'binutils-devel', 'libtool', 'gcc-c++'
        ])
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    elif dist_name in ("rhel", "fedora", "redhat"):
        packages.extend([
            'elfutils-devel', 'elfutils-libelf-devel', 'git',
            'elfutils-libelf', 'elfutils-libs', 'libtool-ltdl'
        ])
    elif dist_name == 'ubuntu':
        packages.extend([
            'elfutils', 'libelf-dev', 'libtool', 'git', 'libelf1',
            'librpmbuild3', 'binutils-dev'
        ])
    else:
        self.cancel("Unsupported OS!")
    for package in packages:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Fail to install %s required for this test."
                        % package)
    # "upstream" clones the git tree and applies a local patch;
    # "distro" builds from the vendor source package instead.
    run_type = self.params.get("type", default="upstream")
    if run_type == "upstream":
        source = self.params.get('url', default="https://gitlab.com/"
                                 "cespedes/ltrace.git")
        git.get_repo(source, destination_dir=os.path.join(self.workdir,
                                                          'ltrace'))
        self.src_lt = os.path.join(self.workdir, "ltrace")
        os.chdir(self.src_lt)
        process.run('patch -p1 < %s' % self.get_data('ltrace.patch'),
                    shell=True)
    elif run_type == "distro":
        self.src_lt = os.path.join(self.workdir, "ltrace-distro")
        if not os.path.exists(self.src_lt):
            # get_source() returns the populated source directory
            self.src_lt = smm.get_source("ltrace", self.src_lt)
        os.chdir(self.src_lt)
    # Standard autotools build; cwd is the ltrace source tree here
    process.run('./autogen.sh')
    process.run('./configure')
    build.make(self.src_lt)
def setUp(self):
    """
    Resolve the packages dependencies and download the source.
    """
    smg = SoftwareManager()
    # Optional "-C <dir>" component filter passed through to make later
    self.comp = self.params.get('comp', default='')
    run_type = self.params.get('type', default='upstream')
    if self.comp:
        self.comp = '-C %s' % self.comp
    detected_distro = distro.detect()
    deps = ['gcc', 'make', 'automake', 'autoconf']
    if 'Ubuntu' in detected_distro.name:
        deps.extend(['libpopt0', 'libc6', 'libc6-dev', 'libcap-dev',
                     'libpopt-dev', 'libcap-ng0', 'libcap-ng-dev',
                     'libnuma-dev', 'libfuse-dev', 'elfutils', 'libelf1'])
    elif 'SuSE' in detected_distro.name:
        deps.extend(['popt', 'glibc', 'glibc-devel', 'popt-devel', 'sudo',
                     'libcap2', 'libcap-devel', 'libcap-ng-devel',
                     'fuse', 'fuse-devel', 'glibc-devel-static'])
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    elif detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
        deps.extend(['popt', 'glibc', 'glibc-devel', 'glibc-static',
                     'libcap-ng', 'libcap', 'libcap-devel', 'fuse-devel',
                     'libcap-ng-devel'])
    for package in deps:
        if not smg.check_installed(package) and not smg.install(package):
            self.cancel(
                "Fail to install %s required for this test." % (package))
    if run_type == 'upstream':
        # Full mainline kernel tree: kselftests live under tools/testing
        location = ["https://github.com/torvalds/linux/archive/master.zip"]
        tarball = self.fetch_asset("kselftest.zip", locations=location,
                                   expire='1d')
        archive.extract(tarball, self.workdir)
        self.buldir = os.path.join(self.workdir, 'linux-master')
    else:
        # Make sure kernel source repo is configured
        if detected_distro.name in ['centos', 'fedora', 'rhel']:
            self.buldir = smg.get_source('kernel', self.workdir)
            # get_source leaves a single versioned subdirectory
            self.buldir = os.path.join(
                self.buldir, os.listdir(self.buldir)[0])
        elif 'Ubuntu' in detected_distro.name:
            self.buldir = smg.get_source('linux', self.workdir)
        elif 'SuSE' in detected_distro.name:
            # SuSE ships the kernel source as an rpm spec that must be
            # regenerated (mkspec) and prepared before it can be built
            smg.get_source('kernel-source', self.workdir)
            packages = '/usr/src/packages/'
            os.chdir(os.path.join(packages, 'SOURCES'))
            process.system('./mkspec', ignore_status=True)
            shutil.copy(os.path.join(packages, 'SOURCES/kernel'
                                     '-default.spec'),
                        os.path.join(packages, 'SPECS/kernel'
                                     '-default.spec'))
            self.buldir = smg.prepare_source(os.path.join(
                packages, 'SPECS/kernel' '-default.spec'),
                dest_path=self.teststmpdir)
            # Descend into the extracted linux* tree that has a Makefile
            for l_dir in glob.glob(os.path.join(self.buldir, 'linux*')):
                if os.path.isdir(l_dir) and 'Makefile' in os.listdir(l_dir):
                    self.buldir = os.path.join(
                        self.buldir, os.listdir(self.buldir)[0])
    # self.testdir is set elsewhere on this class; points at the
    # selftest subdirectory to build
    self.sourcedir = os.path.join(self.buldir, self.testdir)
    result = build.run_make(self.sourcedir)
    for line in str(result).splitlines():
        if 'ERROR' in line:
            self.fail("Compilation failed, Please check the build logs !!")
def setUp(self):
    """
    Install all the dependency packages required for building
    source tarball specific to os,if not tests will stop.
    """
    smm = SoftwareManager()
    dist = distro.detect()
    packages = ['gcc', 'dejagnu', 'flex', 'bison', 'sharutils']
    if dist.name in ['Ubuntu', 'debian']:
        packages.extend([
            'libmpfr-dev', 'libgmp-dev', 'libmpc-dev', 'zip',
            'libc6-dev', 'libelf1', 'elfutils', 'autogen'
        ])
        if dist.name == 'Ubuntu':
            packages.extend(['texinfo', 'gnat'])
    elif dist.name.lower() == 'suse':
        packages.extend([
            'glibc-devel-static', 'zlib-devel', 'elfutils',
            'libelf-devel', 'gcc-c++', 'isl-devel', 'gmp-devel',
            'glibc-devel', 'mpfr-devel', 'makeinfo', 'texinfo',
            'mpc-devel'
        ])
        # isl-devel is not shipped on SLES 15 SP4 and later
        if (int(dist.version) == 15 and int(dist.release) > 3):
            packages.remove('isl-devel')
    else:
        packages.extend([
            'glibc-static', 'elfutils-devel', 'texinfo-tex', 'texinfo',
            'elfutils-libelf-devel', 'gmp-devel', 'mpfr-devel',
            'libmpc-devel', 'zlib-devel', 'gettext', 'libgcc', 'libgomp',
            'dblatex', 'doxygen', 'texlive-collection-latex',
            'python3-sphinx', 'systemtap-sdt-devel'
        ])
        if dist.name == 'rhel' and \
                (int(dist.version) == 8 and int(dist.release) >= 6):
            packages.extend([
                'autogen', 'guile', 'guile-devel', 'isl-devel',
                'docbook5-style-xsl'
            ])
    for package in packages:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Failed to install %s required for this test."
                        % package)
    run_type = self.params.get('type', default='distro')
    if run_type == "upstream":
        url = 'https://github.com/gcc-mirror/gcc/archive/master.zip'
        tarball = self.fetch_asset('gcc.zip', locations=[url], expire='7d')
        archive.extract(tarball, self.workdir)
        self.sourcedir = os.path.join(self.workdir, 'gcc-master')
    elif run_type == "distro":
        self.sourcedir = os.path.join(self.workdir, 'gcc-distro')
        if not os.path.exists(self.sourcedir):
            os.makedirs(self.sourcedir)
        """ FIXME. On certain distros I have observed get_source()
        API fails to populate source tree. This can be an avocado
        utils issue. Explicitly fail this testcase until it has
        been root caused. """
        if (int(dist.version) == 15 and int(dist.release) > 3):
            self.fail('Test case is broken for this release')
        else:
            self.sourcedir = smm.get_source("gcc", self.sourcedir)
    os.chdir(self.sourcedir)
    # configure failures are tolerated here; build.make reports them
    process.run('./configure', ignore_status=True, sudo=True)
    build.make(self.sourcedir, ignore_status=True)
def setUp(self):
    """
    Set up.

    Installs the distro's multipath package, validates the wwids given
    via parameters against the system, restarts multipathd, backs up
    /etc/multipath.conf and records name/paths/policy/size for each
    multipath device under test.
    """
    self.policy = self.params.get('policy', default='service-time')
    self.policies = ["service-time", "round-robin", "queue-length"]
    # We will remove and add the policy back, so that this becomes
    # the last member of the list. This is done so that in the
    # policy change test later, this policy is set in the last
    # iteration.
    self.policies.remove(self.policy)
    self.policies.append(self.policy)
    self.op_shot_sleep_time = 60
    self.op_long_sleep_time = 180
    # Install needed packages
    dist = distro.detect()
    pkg_name = ""
    svc_name = ""
    if dist.name in ['Ubuntu', 'debian']:
        pkg_name += "multipath-tools"
        svc_name = "multipath-tools"
    elif dist.name == 'SuSE':
        pkg_name += "multipath-tools"
        svc_name = "multipathd"
    else:
        pkg_name += "device-mapper-multipath"
        svc_name = "multipathd"
    smm = SoftwareManager()
    if not smm.check_installed(pkg_name) and not smm.install(pkg_name):
        self.cancel("Can not install %s" % pkg_name)
    # Check if given multipath devices are present in system
    self.wwids = self.params.get('wwids', default='').split(',')
    system_wwids = multipath.get_multipath_wwids()
    wwids_to_remove = []
    for wwid in self.wwids:
        if wwid not in system_wwids:
            self.log.info("%s not present in the system", wwid)
            wwids_to_remove.append(wwid)
    for wwid in wwids_to_remove:
        self.wwids.remove(wwid)
    if self.wwids == ['']:
        self.cancel("No Multipath Devices Given")
    # Create service object
    self.mpath_svc = service.SpecificServiceManager(svc_name)
    self.mpath_svc.restart()
    wait.wait_for(self.mpath_svc.status, timeout=10)
    # Take a backup of current config file
    self.mpath_file = "/etc/multipath.conf"
    if os.path.isfile(self.mpath_file):
        shutil.copyfile(self.mpath_file, "%s.bkp" % self.mpath_file)
    self.mpath_list = []
    # PERF FIX: `multipath -ll` output is loop-invariant; the original
    # re-ran the command once per wwid. Capture it once up front.
    mpath_ll_output = process.system_output(
        'multipath -ll', ignore_status=True,
        shell=True).decode('utf-8')
    # Find all details of multipath devices
    for wwid in self.wwids:
        if wwid not in mpath_ll_output:
            continue
        self.mpath_dic = {}
        self.mpath_dic["wwid"] = wwid
        self.mpath_dic["name"] = multipath.get_mpath_name(wwid)
        self.mpath_dic["paths"] = multipath.get_paths(wwid)
        self.mpath_dic["policy"] = multipath.get_policy(wwid)
        self.mpath_dic["size"] = multipath.get_size(wwid)
        self.mpath_list.append(self.mpath_dic)
    pprint(self.mpath_list)
def setUp(self):
    """
    Use distro provided bonnie++ bin
    if not available Build bonnie++ from below
    Source:
    http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
    """
    fstype = self.params.get('fs', default='ext4')
    smm = SoftwareManager()
    # Install the package from web
    deps = ['gcc', 'make']
    if distro.detect().name == 'Ubuntu':
        deps.extend(['g++'])
    else:
        deps.extend(['gcc-c++'])
    if fstype == 'btrfs':
        ver = int(distro.detect().version)
        rel = int(distro.detect().release)
        # btrfs was dropped from RHEL starting with 7.4
        if distro.detect().name == 'rhel':
            if (ver == 7 and rel >= 4) or ver > 7:
                self.cancel("btrfs not supported with RHEL 7.4 onwards")
        elif distro.detect().name == 'Ubuntu':
            deps.extend(['btrfs-tools'])
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("%s package required for this test" % package)
    # Non-zero exit from `which` means no system bonnie++; build from source
    if process.system("which bonnie++", ignore_status=True):
        tarball = self.fetch_asset(
            'http://www.coker.com.au/bonnie++/'
            'bonnie++-1.03e.tgz', expire='7d')
        archive.extract(tarball, self.teststmpdir)
        self.source = os.path.join(
            self.teststmpdir,
            os.path.basename(tarball.split('.tgz')[0]))
        os.chdir(self.source)
        process.run('./configure')
        build.make(self.source)
        build.make(self.source, extra_args='install')
    self.disk = self.params.get('disk', default=None)
    self.scratch_dir = self.params.get('dir', default=self.workdir)
    self.uid_to_use = self.params.get('uid-to-use',
                                      default=getpass.getuser())
    self.number_to_stat = self.params.get('number-to-stat', default=2048)
    self.data_size = self.params.get('data_size_to_pass', default=0)
    # Optional dedicated disk: recreate the filesystem and mount it on
    # the scratch directory before the benchmark runs
    if self.disk is not None:
        self.part_obj = Partition(self.disk, mountpoint=self.scratch_dir)
        self.log.info("Test will run on %s", self.scratch_dir)
        self.log.info("Unmounting disk/dir before creating file system")
        self.part_obj.unmount()
        self.log.info("creating %s file system on %s disk",
                      fstype, self.disk)
        self.part_obj.mkfs(fstype)
        self.log.info("Mounting disk %s on directory %s",
                      self.disk, self.scratch_dir)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (self.disk, self.scratch_dir))
def run(test, params, env):
    """
    Test different hmi injections with guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def set_condn(action, recover=False):
        """
        Set/reset guest state/action

        :param action: Guest state change/action
        :param recover: whether to recover given state default: False
        """
        if not recover:
            if action == "pin_vcpu":
                for i in range(cur_vcpu):
                    virsh.vcpupin(vm_name, i, hmi_cpu, "--live",
                                  ignore_status=False, debug=True)
                # NOTE(review): "live" here vs "--live" above — looks
                # inconsistent; confirm against virsh.emulatorpin's API.
                virsh.emulatorpin(vm_name, hmi_cpu, "live",
                                  ignore_status=False, debug=True)
            elif action == "filetrans":
                utils_test.run_file_transfer(test, params, env)
            elif action == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result)
                time.sleep(10)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True, debug=True)
                    utils_test.libvirt.check_exit_status(result)
                    os.remove(save_file)
            elif action == "suspend":
                result = virsh.suspend(vm_name,
                                       ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result)
                time.sleep(10)
                result = virsh.resume(vm_name,
                                      ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result)
        return

    host_version = params.get("host_version")
    guest_version = params.get("guest_version", "")
    max_vcpu = int(params.get("ppchmi_vcpu_max", '1'))
    cur_vcpu = int(params.get("ppchmi_vcpu_cur", "1"))
    cores = int(params.get("ppchmi_cores", '1'))
    sockets = int(params.get("ppchmi_sockets", '1'))
    threads = int(params.get("ppchmi_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condition = params.get("condn", "")
    inject_code = params.get("inject_code", "")
    scom_base = params.get("scom_base", "")
    hmi_name = params.get("hmi_name", "")
    hmi_iterations = int(params.get("hmi_iterations", 1))

    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sm = SoftwareManager()
    if not sm.check_installed("opal-utils") and \
            not sm.install("opal-utils"):
        test.cancel("opal-utils package install failed")
    cpus_list = cpu.cpu_online_list()
    cpu_idle_state = cpu.get_cpuidle_state()
    cpu.set_cpuidle_state()
    # Lets use second available host cpu
    hmi_cpu = cpus_list[1]
    # BUG FIX: the original left the sysfs file handle open; use a
    # context manager so it is closed deterministically.
    with open('/sys/devices/system/cpu/cpu%s/pir' % hmi_cpu) as pir_fd:
        pir = int(pir_fd.read().strip(), 16)
    if host_version == 'power9':
        coreid = (((pir) >> 2) & 0x3f)
        nodeid = (((pir) >> 8) & 0x7f) & 0xf
        # FIXME(review): '&' binds looser than '+', so this evaluates as
        # coreid & (0x1f + 0x20); the intent may have been
        # ((coreid & 0x1f) + 0x20). Left unchanged pending confirmation
        # against the P9 scom addressing scheme.
        hmi_scom_addr = hex(((coreid & 0x1f + 0x20) << 24) |
                            int(scom_base, 16))
    if host_version == 'power8':
        coreid = (((pir) >> 3) & 0xf)
        nodeid = (((pir) >> 7) & 0x3f)
        hmi_scom_addr = hex(((coreid & 0xf) << 24) |
                            int(scom_base, 16))
    hmi_cmd = "putscom -c %s %s %s" % (nodeid, hmi_scom_addr, inject_code)

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        session = None
        libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, cur_vcpu,
                                       sockets=sockets, cores=cores,
                                       threads=threads, add_topology=True)
        if guest_version:
            libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        vm.start()
        # Lets clear host and guest dmesg
        process.system("dmesg -C", verbose=False)
        session = vm.wait_for_login()
        session.cmd("dmesg -C")

        # Set condn
        if "vcpupin" in condition:
            set_condn("pin_vcpu")
        if "stress" in condition:
            utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
        if "save" in condition:
            set_condn("save")
        if "suspend" in condition:
            set_condn("suspend")

        # hmi inject
        logging.debug("Injecting %s HMI on cpu %s", hmi_name, hmi_cpu)
        logging.debug("HMI Command: %s", hmi_cmd)
        process.run(hmi_cmd)

        # Check host and guest dmesg
        host_dmesg = process.run("dmesg -c", verbose=False).stdout_text
        guest_dmesg = session.cmd_output("dmesg")
        if "Unrecovered" in host_dmesg:
            # BUG FIX: test.fail() takes a single message argument; the
            # original passed the dmesg as a second positional arg
            # (lazy-%% style), which raises TypeError instead of failing.
            test.fail("Unrecovered host hmi\n%s" % host_dmesg)
        else:
            logging.debug("Host dmesg: %s", host_dmesg)
            logging.debug("Guest dmesg: %s", guest_dmesg)
        if "save" in condition:
            set_condn("save")
        if "suspend" in condition:
            set_condn("suspend")
    finally:
        if "stress" in condition:
            utils_test.unload_stress("stress_in_vms", params=params,
                                     vms=[vm])
        if session:
            session.close()
        # Restore the original domain XML and host cpuidle state
        org_xml.sync()
        cpu.set_cpuidle_state(setstate=cpu_idle_state)
def setUp(self):
    """
    Prepare the libhugetlbfs test: verify root, install toolchain
    packages, configure hugepages, mount hugetlbfs if needed, then
    clone and build libhugetlbfs with local patches applied.
    """
    # Check for root permission
    # BUG FIX: the original called exit(), which kills the whole test
    # runner process; cancel the test instead.
    if os.geteuid() != 0:
        self.cancel("You need to have root privileges to run this script."
                    "\nPlease try again, using 'sudo'. Exiting.")
    # Check for basic utilities
    sm = SoftwareManager()
    detected_distro = distro.detect()
    deps = ['gcc', 'make', 'patch']
    if detected_distro.name == "Ubuntu":
        deps += ['libpthread-stubs0-dev', 'git']
    elif detected_distro.name == "SuSE":
        deps += ['glibc-devel-static', 'git-core']
    else:
        deps += ['glibc-static', 'git']
    for package in deps:
        if not sm.check_installed(package) and not sm.install(package):
            self.cancel(' %s is needed for the test to be run' % package)
    kernel.check_version("2.6.16")
    if detected_distro.name == "Ubuntu":
        op = glob.glob("/usr/lib/*/libpthread.a")
    else:
        op = glob.glob("/usr/lib*/libpthread.a")
    if not op:
        self.error("libpthread.a is required!!!"
                   "\nTry installing glibc-static")
    # Get arguments:
    self.hugetlbfs_dir = self.params.get('hugetlbfs_dir', default=None)
    pages_requested = self.params.get('pages_requested', default=20)
    # Check hugepages:
    pages_available = 0
    if os.path.exists('/proc/sys/vm/nr_hugepages'):
        # BUG FIX: system_output() returns bytes on Python 3; the str-in-
        # bytes membership test below raised TypeError. Decode first.
        hugepages_support = process.system_output('cat /proc/meminfo',
                                                  verbose=False,
                                                  shell=True).decode()
        if 'HugePages_' not in hugepages_support:
            self.error("No Hugepages Configured")
        memory.set_num_huge_pages(pages_requested)
        pages_available = memory.get_num_huge_pages()
    else:
        self.error("Kernel does not support hugepages")
    # Check no of hugepages :
    if pages_available < pages_requested:
        # BUG FIX: the original applied '%' only to pages_available and
        # passed pages_requested as a stray second argument to
        # self.error() -> TypeError. Format with the full tuple.
        self.error('%d pages available, < %d pages requested'
                   % (pages_available, pages_requested))
    # Check if hugetlbfs is mounted
    # BUG FIX: without ignore_status, process.run raises CmdError when
    # grep finds nothing, so the mount-on-demand branch below was
    # unreachable; also CmdResult is always truthy, so `if not
    # cmd_result` never fired. Test the exit status explicitly.
    cmd_result = process.run('grep hugetlbfs /proc/mounts',
                             verbose=False, ignore_status=True)
    if cmd_result.exit_status != 0:
        if not self.hugetlbfs_dir:
            self.hugetlbfs_dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(self.hugetlbfs_dir)
        process.system('mount -t hugetlbfs none %s' % self.hugetlbfs_dir)
    data_dir = os.path.abspath(self.datadir)
    git.get_repo('https://github.com/libhugetlbfs/libhugetlbfs.git',
                 destination_dir=self.srcdir)
    os.chdir(self.srcdir)
    patch = self.params.get('patch', default='elflink.patch')
    process.run('patch -p1 < %s' % data_dir + '/' + patch, shell=True)
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    if detected_distro.name in ["rhel", "fedora", "redhat"]:
        falloc_patch = 'patch -p1 < %s ' % (os.path.join(
            data_dir, 'falloc.patch'))
        process.run(falloc_patch, shell=True)
    build.make(self.srcdir, extra_args='BUILDTYPE=NATIVEONLY')
def setUp(self):
    """Ensure the diagnostic tooling packages are present, installing
    any that are missing; cancel the test if installation fails."""
    required = ("ppc64-diag", "powerpc-utils", "lsvpd", "ipmitool")
    manager = SoftwareManager()
    for pkg in required:
        # Install only when the package is not already on the system
        present = manager.check_installed(pkg)
        if not (present or manager.install(pkg)):
            self.cancel("Fail to install %s required for this test." % pkg)
def setUp(self):
    """
    To check and install dependencies for the test
    """
    self.peer_user = self.params.get("peer_user", default="root")
    self.peer_public_ip = self.params.get("peer_public_ip", default="")
    self.peer_ip = self.params.get("peer_ip", default="")
    self.peer_password = self.params.get("peer_password", '*',
                                         default="None")
    interfaces = netifaces.interfaces()
    self.iface = self.params.get("interface", default="")
    if self.iface not in interfaces:
        self.cancel("%s interface is not available" % self.iface)
    self.ipaddr = self.params.get("host_ip", default="")
    self.netmask = self.params.get("netmask", default="")
    local = LocalHost()
    self.networkinterface = NetworkInterface(self.iface, local)
    try:
        self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
        self.networkinterface.save(self.ipaddr, self.netmask)
    except Exception:
        # add_ipaddr fails when the address is already configured;
        # persist the config regardless
        self.networkinterface.save(self.ipaddr, self.netmask)
    self.networkinterface.bring_up()
    self.session = Session(self.peer_ip, user=self.peer_user,
                           password=self.peer_password)
    if not self.session.connect():
        self.cancel("failed connecting to peer")
    smm = SoftwareManager()
    detected_distro = distro.detect()
    pkgs = ['gcc']
    if detected_distro.name == "Ubuntu":
        pkgs.append('openssh-client')
    elif detected_distro.name == "SuSE":
        pkgs.append('openssh')
    else:
        pkgs.append('openssh-clients')
    for pkg in pkgs:
        # Install locally, then mirror the install on the peer over ssh
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("%s package is need to test" % pkg)
        cmd = "%s install %s" % (smm.backend.base_command, pkg)
        output = self.session.cmd(cmd)
        if not output.exit_status == 0:
            self.cancel("unable to install the package %s on peer machine "
                        % pkg)
    if self.peer_ip == "":
        self.cancel("%s peer machine is not available" % self.peer_ip)
    self.timeout = self.params.get("TIMEOUT", default="600")
    self.mtu = self.params.get("mtu", default=1500)
    self.remotehost = RemoteHost(self.peer_ip, self.peer_user,
                                 password=self.peer_password)
    self.peer_interface = self.remotehost.get_interface_by_ipaddr(
        self.peer_ip).name
    self.peer_networkinterface = NetworkInterface(self.peer_interface,
                                                  self.remotehost)
    self.remotehost_public = RemoteHost(self.peer_public_ip,
                                        self.peer_user,
                                        password=self.peer_password)
    self.peer_public_networkinterface = NetworkInterface(
        self.peer_interface, self.remotehost_public)
    # set_mtu returns None on success
    if self.peer_networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in peer")
    if self.networkinterface.set_mtu(self.mtu) is not None:
        self.cancel("Failed to set mtu in host")
    self.netperf_run = str(self.params.get("NETSERVER_RUN", default=0))
    self.netperf = os.path.join(self.teststmpdir, 'netperf')
    netperf_download = self.params.get("netperf_download", default="https:"
                                       "//github.com/HewlettPackard/"
                                       "netperf/archive/netperf-2.7.0.zip")
    tarball = self.fetch_asset(netperf_download, expire='7d')
    archive.extract(tarball, self.netperf)
    self.version = "%s-%s" % ("netperf",
                              os.path.basename(tarball.split('.zip')[0]))
    self.neperf = os.path.join(self.netperf, self.version)
    # Copy the extracted sources to the peer and build them there too
    destination = "%s:/tmp" % self.peer_ip
    output = self.session.copy_files(self.neperf, destination,
                                     recursive=True)
    if not output:
        self.cancel("unable to copy the netperf into peer machine")
    cmd = "cd /tmp/%s;./configure ppc64le;make" % self.version
    output = self.session.cmd(cmd)
    if not output.exit_status == 0:
        self.fail("test failed because command failed in peer machine")
    # Build netperf locally
    os.chdir(self.neperf)
    process.system('./configure ppc64le', shell=True)
    build.make(self.neperf)
    self.perf = os.path.join(self.neperf, 'src', 'netperf')
    self.expected_tp = self.params.get("EXPECTED_THROUGHPUT", default="90")
    self.duration = self.params.get("duration", default="300")
    self.min = self.params.get("minimum_iterations", default="1")
    self.max = self.params.get("maximum_iterations", default="15")
    self.option = self.params.get("option", default='')
def setUp(self):
    """
    Setup and install dependencies for the test.
    """
    self.test_name = "mckey"
    self.basic = self.params.get("basic_option", default="None")
    self.ext = self.params.get("ext_option", default="None")
    self.flag = self.params.get("ext_flag", default="0")
    if self.basic == "None" and self.ext == "None":
        self.cancel("No option given")
    # Extended options win over basic ones when the ext flag is set
    if self.flag == "1" and self.ext != "None":
        self.option = self.ext
    else:
        self.option = self.basic
    # ibstat only works when the MOFED/verbs stack is installed
    if process.system("ibstat", shell=True, ignore_status=True) != 0:
        self.cancel("MOFED is not installed. Skipping")
    pkgs = []
    detected_distro = distro.detect()
    if detected_distro.name == "Ubuntu":
        pkgs.extend(["openssh-client", "iputils-ping"])
    elif detected_distro.name == "SuSE":
        pkgs.extend(["openssh", "iputils"])
    else:
        pkgs.extend(["openssh-clients", "iputils"])
    smm = SoftwareManager()
    for pkg in pkgs:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Not able to install %s" % pkg)
    interfaces = netifaces.interfaces()
    self.iface = self.params.get("interface", default="")
    self.peer_ip = self.params.get("peer_ip", default="")
    self.peer_user = self.params.get("peer_user_name", default="root")
    self.peer_password = self.params.get("peer_password", '*',
                                         default="None")
    self.ipaddr = self.params.get("host_ip", default="")
    self.netmask = self.params.get("netmask", default="")
    # "ib*" interface names get InfiniBand-specific configuration
    if self.iface[0:2] == 'ib':
        configure_network.set_ip(self.ipaddr, self.netmask, self.iface,
                                 interface_type='Infiniband')
    else:
        configure_network.set_ip(self.ipaddr, self.netmask, self.iface,
                                 interface_type='Ethernet')
    self.session = Session(self.peer_ip, user=self.peer_user,
                           password=self.peer_password)
    if self.iface not in interfaces:
        self.cancel("%s interface is not available" % self.iface)
    if self.peer_ip == "":
        self.cancel("%s peer machine is not available" % self.peer_ip)
    self.timeout = "2m"
    self.local_ip = netifaces.ifaddresses(self.iface)[AF_INET][0]['addr']
    # Last octet of the local IP, used as a placeholder substitution
    self.ip_val = self.local_ip.split(".")[-1]
    self.mtu = self.params.get("mtu", default=1500)
    self.peerinfo = PeerInfo(self.peer_ip, peer_user=self.peer_user,
                             peer_password=self.peer_password)
    self.peer_interface = self.peerinfo.get_peer_interface(self.peer_ip)
    # Expand the PEERIP/LOCALIP/IPVAL placeholders in the tool options
    self.option = self.option.replace("PEERIP", self.peer_ip)
    self.option = self.option.replace("LOCALIP", self.local_ip)
    self.option = self.option.replace("IPVAL", self.ip_val)
    self.option_list = self.option.split(",")
    if detected_distro.name == "Ubuntu":
        cmd = "service ufw stop"
    # FIXME: "redhat" as the distro name for RHEL is deprecated
    # on Avocado versions >= 50.0. This is a temporary compatibility
    # enabler for older runners, but should be removed soon
    elif detected_distro.name in ['rhel', 'fedora', 'redhat']:
        cmd = "systemctl stop firewalld"
    elif detected_distro.name == "SuSE":
        if detected_distro.version == 15:
            cmd = "systemctl stop firewalld"
        else:
            cmd = "rcSuSEfirewall2 stop"
    elif detected_distro.name == "centos":
        cmd = "service iptables stop"
    else:
        self.cancel("Distro not supported")
    # Firewall has to be down on both ends for the RDMA traffic
    if process.system(cmd, ignore_status=True, shell=True) != 0:
        self.cancel("Unable to disable firewall")
    output = self.session.cmd(cmd)
    if not output.exit_status == 0:
        self.cancel("Unable to disable firewall on peer")
def setUp(self):
    """
    Use distro provided bonnie++ bin
    if not available Build bonnie++ from below
    Source:
    http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
    """
    fstype = self.params.get('fs', default='')
    # Flags recording what this setUp created, so teardown can undo it
    self.fs_create = False
    lv_needed = self.params.get('lv', default=False)
    self.lv_create = False
    raid_needed = self.params.get('raid', default=False)
    self.raid_create = False
    smm = SoftwareManager()
    # Install the package from web
    deps = ['gcc', 'make']
    if distro.detect().name == 'Ubuntu':
        deps.extend(['g++'])
    else:
        deps.extend(['gcc-c++'])
    if fstype == 'btrfs':
        ver = int(distro.detect().version)
        rel = int(distro.detect().release)
        # btrfs was dropped from RHEL starting with 7.4
        if distro.detect().name == 'rhel':
            if (ver == 7 and rel >= 4) or ver > 7:
                self.cancel("btrfs not supported with RHEL 7.4 onwards")
        elif distro.detect().name == 'Ubuntu':
            deps.extend(['btrfs-tools'])
    if raid_needed:
        deps.append('mdadm')
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("%s package required for this test" % package)
    # Non-zero exit from `which` means no system bonnie++; build it
    if process.system("which bonnie++", ignore_status=True):
        tarball = self.fetch_asset(
            'http://www.coker.com.au/bonnie++/'
            'bonnie++-1.03e.tgz', expire='7d')
        archive.extract(tarball, self.teststmpdir)
        self.source = os.path.join(
            self.teststmpdir,
            os.path.basename(tarball.split('.tgz')[0]))
        os.chdir(self.source)
        process.run('./configure')
        build.make(self.source)
        build.make(self.source, extra_args='install')
    self.disk = self.params.get('disk', default=None)
    self.uid_to_use = self.params.get('uid-to-use',
                                      default=getpass.getuser())
    self.number_to_stat = self.params.get('number-to-stat', default=2048)
    self.data_size = self.params.get('data_size_to_pass', default=0)
    self.scratch_dir = self.disk
    # Optionally layer raid -> lv -> filesystem on the given disk;
    # each step retargets self.disk/self.scratch_dir for the next one
    if self.disk is not None:
        if self.disk in disk.get_disks():
            if raid_needed:
                raid_name = '/dev/md/mdsraid'
                self.create_raid(self.disk, raid_name)
                self.raid_create = True
                self.disk = raid_name
                self.scratch_dir = self.disk
            if lv_needed:
                self.disk = self.create_lv(self.disk)
                self.lv_create = True
                self.scratch_dir = self.disk
            if fstype:
                # With a filesystem the benchmark runs on a mounted dir
                self.scratch_dir = self.workdir
                self.create_fs(self.disk, self.scratch_dir, fstype)
                self.fs_create = True