class fs_mark(Test):

    """
    The fs_mark program is meant to give a low level bashing to file
    systems. The write pattern that we concentrate on is heavily
    synchronous IO across multiple directories, drives, etc.
    """

    def setUp(self):
        """
        Download and build fs_mark, then read the test parameters.
        """
        smm = SoftwareManager()
        tarball = self.fetch_asset('http://prdownloads.source'
                                   'forge.net/fsmark/fs_mark-3.3.tar.gz')
        archive.extract(tarball, self.srcdir)
        fs_version = os.path.basename(tarball.split('.tar.')[0])
        self.sourcedir = os.path.join(self.srcdir, fs_version)
        # build.make() already runs make inside sourcedir; the previous
        # explicit `process.run('make')` compiled the tree twice.
        build.make(self.sourcedir)
        self.disk = self.params.get('disk', default=None)
        self.fstype = self.params.get('fs', default='ext4')
        if self.fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')

    def test(self):
        """
        Run fs_mark, optionally on a freshly formatted disk.
        """
        os.chdir(self.sourcedir)
        # Just provide a sample run parameters
        num_files = self.params.get('num_files', default='1024')
        size = self.params.get('size', default='1000')
        self.dir = self.params.get('dir', default=self.teststmpdir)
        self.log.info("Test will run on %s", self.dir)
        # Only reformat/mount when a disk was actually supplied;
        # mkfs/mount on a None device would fail obscurely.
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.log.info("Unmounting the disk/dir before creating "
                          "file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.dir)
            self.part_obj.mount()
        cmd = './fs_mark -d %s -s %s -n %s' % (self.dir, size, num_files)
        process.run(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # test() may never have mounted anything (no disk supplied, or an
        # earlier failure), so guard the attribute lookups.
        if getattr(self, 'part_obj', None) is not None:
            self.log.info("Unmounting directory %s", self.dir)
            self.part_obj.unmount()
class FioTest(Test):

    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tool_url: URL of the fio source tarball to fetch
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio' and, when a disk is supplied, put a file system on it.
        """
        # Defined first so tearDown() can always reference it, even when
        # test() never ran (the original only set it inside test()).
        self.fio_file = 'fiotest-image'
        default_url = "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default=self.srcdir)
        fstype = self.params.get('fs', default='ext4')
        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        fio_version = os.path.basename(tarball.split('.tar.')[0])
        self.sourcedir = os.path.join(self.teststmpdir, fio_version)
        build.make(self.sourcedir)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.dir)
            self.part_obj.mount()

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dir)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        cmd = '%s/fio %s %s --filename=%s' % (self.sourcedir,
                                              os.path.join(self.datadir,
                                                           fio_job),
                                              self.dir, self.fio_file)
        process.system(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting directory %s", self.dir)
            self.part_obj.unmount()
        if os.path.exists(self.fio_file):
            os.remove(self.fio_file)
class Thp_Defrag(Test):

    '''
    Defrag test enables THP and fragments the system memory using dd
    load and turns on THP defrag and checks whether defrag occured.

    :avocado: tags=memory,privileged
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    def setUp(self):
        '''
        Sets required params for dd workload and mounts the tmpfs
        '''
        # Get required mem info
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        # dd block size in KB; floor division keeps it an int so that
        # range(self.count) below works on Python 3 (true division
        # would make count a float and raise TypeError).
        self.block_size = int(mmap.PAGESIZE) // 1024
        # Create the mount point when it is MISSING. The original had
        # the condition inverted (`if os.path.exists: os.makedirs`),
        # which never created a fresh dir and raised when it existed.
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs")
        free_space = disk.freespace(self.mem_path) // 1024
        # Leaving out some free space in tmpfs
        self.count = (free_space // self.block_size) - 3

    @avocado.fail_on
    def test(self):
        '''
        Enables THP, Turns off the defrag and fragments the memory.
        Once the memory gets fragmented turns on the defrag and checks
        whether defrag happened.
        '''
        # Enables THP
        memory.set_thp_value("enabled", "always")
        # Turns off Defrag
        memory.set_thp_value("khugepaged/defrag", "0")
        # Fragments The memory
        self.log.info("Fragmenting the memory Using dd command \n")
        for iterator in range(self.count):
            defrag_cmd = 'dd if=/dev/urandom of=%s/%d bs=%dK count=1'\
                % (self.mem_path, iterator, self.block_size)
            if process.system(defrag_cmd, timeout=900,
                              verbose=False, ignore_status=True, shell=True):
                self.fail('Defrag command Failed %s' % defrag_cmd)

        total = memory.memtotal()
        hugepagesize = memory.get_huge_page_size()
        nr_full = int(0.8 * (total / hugepagesize))

        # Sets max possible hugepages before defrag on
        nr_hp_before = self.set_max_hugepages(nr_full)

        # Turns Defrag ON
        memory.set_thp_value("khugepaged/defrag", "1")

        self.log.info("Sleeping %d seconds to settle out things", 10)
        time.sleep(10)

        # Sets max hugepages after defrag on
        nr_hp_after = self.set_max_hugepages(nr_full)

        # Check for memory defragmentation
        if nr_hp_before >= nr_hp_after:
            e_msg = "No Memory Defragmentation\n"
            e_msg += "%d hugepages before turning khugepaged on,\n"\
                     "%d After it" % (nr_hp_before, nr_hp_after)
            self.fail(e_msg)

        self.log.info("Defrag test passed")

    @staticmethod
    def set_max_hugepages(nr_full):
        '''
        Tries to set the hugepages to the nr_full value and returns
        the max possible value set.
        '''
        memory.set_num_huge_pages(nr_full)
        return memory.get_num_huge_pages()

    def tearDown(self):
        '''
        Removes files and unmounts the tmpfs.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            memory.set_num_huge_pages(0)
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class FSMark(Test):

    """
    The fs_mark program is meant to give a low level bashing to file
    systems. The write pattern that we concentrate on is heavily
    synchronous IO across multiple directories, drives, etc.
    """

    def setUp(self):
        """
        Fetch and build fs_mark; optionally format and mount a disk.
        """
        smm = SoftwareManager()
        tarball = self.fetch_asset('https://github.com/josefbacik/fs_mark/'
                                   'archive/master.zip')
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, 'fs_mark-master')
        # build.make() already runs make inside sourcedir; the previous
        # explicit `process.run('make')` compiled the tree twice.
        build.make(self.sourcedir)
        self.disk = self.params.get('disk', default=None)
        self.num = self.params.get('num_files', default='1024')
        self.size = self.params.get('size', default='1000')
        self.dirs = self.params.get('dir', default=self.workdir)
        self.fstype = self.params.get('fs', default='ext4')
        if self.fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dirs)
            self.log.info("Test will run on %s", self.dirs)
            self.log.info("Unmounting the disk before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on dir %s", self.disk, self.dirs)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.dirs))

    def test(self):
        """
        Run fs_mark
        """
        os.chdir(self.sourcedir)
        cmd = "./fs_mark -d %s -s %s -n %s" % (self.dirs, self.size, self.num)
        process.run(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.dirs)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # Test.fail() takes a single message; the original passed
                # the disk as a second positional argument (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class Thp(Test):

    '''
    The test enables THP and stress the system using dd load
    and verifies whether THP has been allocated for usage or not
    '''

    def setUp(self):
        '''
        Sets all the reqd parameter and also
        mounts the tmpfs to be used in test.
        '''
        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900
        # Set block size as hugepage size * 2; keep it an integer so
        # range(self.count) works on Python 3 (true division would make
        # count a float and raise TypeError).
        self.block_size = (memory.get_huge_page_size() // 1024) * 2
        self.count = free_mem // self.block_size
        # Mount device as per free memory size; the path may survive a
        # previous run, and os.mkdir would fail on an existing dir.
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)

    def test(self):
        '''
        Enables THP , Runs the dd workload and checks whether THP
        has been allocated.
        '''
        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed %s" % details)

        # Read thp values before stressing the system
        thp_alloted_before = int(memory.read_from_vmstat("thp_fault_alloc"))
        thp_split_before = int(memory.read_from_vmstat("thp_split_page"))
        thp_collapse_alloc_before = int(
            memory.read_from_vmstat("thp_collapse_alloc"))

        # Start Stresssing the System
        self.log.info('Stress testing using dd command')
        for iterator in range(self.count):
            stress_cmd = 'dd if=/dev/zero of=%s/%d bs=%dM count=1'\
                % (self.mem_path, iterator, self.block_size)
            if process.system(stress_cmd, timeout=self.dd_timeout,
                              verbose=False, ignore_status=True, shell=True):
                self.fail('dd command failed %s' % stress_cmd)

        # Read thp values after stressing the system
        thp_alloted_after = int(memory.read_from_vmstat("thp_fault_alloc"))
        thp_split_after = int(memory.read_from_vmstat("thp_split_page"))
        thp_collapse_alloc_after = int(
            memory.read_from_vmstat("thp_collapse_alloc"))

        # Check whether THP is Used or not
        if thp_alloted_after <= thp_alloted_before:
            e_msg = "Thp usage count has not increased\n"
            e_msg += "Before Stress:%d\nAfter stress:%d" % (thp_alloted_before,
                                                           thp_alloted_after)
            self.fail(e_msg)
        else:
            thp_fault_alloc = thp_alloted_after - thp_alloted_before
            thp_split = thp_split_after - thp_split_before
            thp_collapse_alloc = (thp_collapse_alloc_after -
                                  thp_collapse_alloc_before)
            self.log.info("\nTest statistics, changes during test run:")
            self.log.info(
                "thp_fault_alloc=%d\nthp_split=%d\n"
                "thp_collapse_alloc=%d\n",
                thp_fault_alloc, thp_split, thp_collapse_alloc)

    def tearDown(self):
        '''
        Removes the files created and unmounts the tmpfs.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class Tiobench(Test):

    """
    Avocado test for tiobench.
    """

    def setUp(self):
        """
        Build tiobench.
        Source:
        https://github.com/mkuoppal/tiobench.git
        """
        self.fstype = self.params.get('fs', default='ext4')
        smm = SoftwareManager()
        packages = ['gcc']
        if self.fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                packages.extend(['btrfs-tools'])
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test." % package)
        locations = ["https://github.com/mkuoppal/tiobench/archive/"
                     "master.zip"]
        tarball = self.fetch_asset("tiobench.zip", locations=locations)
        archive.extract(tarball, self.srcdir)
        os.chdir(os.path.join(self.srcdir, "tiobench-master"))
        build.make(".")

    def test(self):
        """
        Test execution with necessary arguments.

        :params dir: The directory in which to test.
                     Defaults to ., the current directory.
        :params blocks: The blocksize in Bytes to use. Defaults to 4096.
        :params threads: The number of concurrent test threads.
        :params size: The total size in MBytes of the files may use together.
        :params num_runs: This number specifies over how many runs
                          each test should be averaged.
        """
        self.target = self.params.get('dir', default=self.srcdir)
        self.disk = self.params.get('disk', default=None)
        blocks = self.params.get('blocks', default=4096)
        threads = self.params.get('threads', default=10)
        size = self.params.get('size', default=1024)
        num_runs = self.params.get('numruns', default=2)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.target)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system", self.fstype)
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.target)
            self.part_obj.mount()
        # Lazy %-style args instead of eager string interpolation in the
        # logging call.
        self.log.info("Test will run on %s", self.target)
        self.whiteboard = process.system_output('perl ./tiobench.pl '
                                                '--target {} --block={} '
                                                '--threads={} --size={} '
                                                '--numruns={}'
                                                .format(self.target, blocks,
                                                        threads, size,
                                                        num_runs))

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.target)
            self.part_obj.unmount()
class FioTest(Test):

    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tool_url: URL of the fio source tarball
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio' (with PMDK engines for nvdimm disks) and prepare the
        target: optional software RAID, LVM and file system on the disk.
        """
        default_url = "https://brick.kernel.dk/snaps/fio-git-latest.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default='/mnt')
        self.disk_type = self.params.get('disk_type', default='')
        fstype = self.params.get('fs', default='')
        fs_args = self.params.get('fs_args', default='')
        mnt_args = self.params.get('mnt_args', default='')
        lv_needed = self.params.get('lv', default=False)
        raid_needed = self.params.get('raid', default=False)
        self.fio_file = 'fiotest-image'
        self.fs_create = False
        self.lv_create = False
        self.raid_create = False
        self.devdax_file = None

        detected = distro.detect()
        if fstype == 'btrfs':
            if detected.name == 'rhel':
                # Convert version/release only for RHEL; other distros
                # may carry non-numeric strings and int() would raise.
                ver = int(detected.version)
                rel = int(detected.release)
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with "
                                "RHEL 7.4 onwards")
        if detected.name in ['Ubuntu', 'debian']:
            pkg_list = ['libaio-dev']
            if fstype == 'btrfs':
                pkg_list.append('btrfs-progs')
        elif detected.name == 'SuSE':
            # NOTE: the original used `is 'SuSE'`, an identity comparison
            # with a string literal, which is unreliable; use equality.
            pkg_list = ['libaio1', 'libaio-devel']
        else:
            pkg_list = ['libaio', 'libaio-devel']
        if self.disk_type == 'nvdimm':
            pkg_list.extend(['autoconf', 'pkg-config'])
            if detected.name == 'SuSE':
                pkg_list.extend(['ndctl', 'libnuma-devel', 'libndctl-devel'])
            else:
                pkg_list.extend(['ndctl', 'daxctl', 'numactl-devel',
                                 'ndctl-devel', 'daxctl-devel'])
        if raid_needed:
            pkg_list.append('mdadm')

        smm = SoftwareManager()
        for pkg in pkg_list:
            if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Package %s is missing and could not be installed"
                            % pkg)

        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, "fio")

        fio_flags = ""
        self.ld_path = ""
        self.raid_name = '/dev/md/sraid'
        self.vgname = 'avocado_vg'
        self.lvname = 'avocado_lv'
        self.err_mesg = []

        if self.disk_type == 'nvdimm':
            self.setup_pmem_disk(mnt_args)
            self.log.info("Building PMDK for NVDIMM fio engines")
            pmdk_url = self.params.get('pmdk_url', default='')
            tar = self.fetch_asset(pmdk_url, expire='7d')
            archive.extract(tar, self.teststmpdir)
            version = os.path.basename(tar.split('.tar.')[0])
            pmdk_src = os.path.join(self.teststmpdir, version)
            build.make(pmdk_src)
            build.make(pmdk_src, extra_args='install prefix=%s' %
                       self.teststmpdir)
            os.chdir(self.sourcedir)
            # Backquoted so the pkg-config expansion happens in the shell
            # at configure time.
            ext_flags = ('`PKG_CONFIG_PATH=%s/lib/pkgconfig pkg-config '
                         '--cflags --libs libpmem libpmemblk`'
                         % self.teststmpdir)
            self.ld_path = "LD_LIBRARY_PATH=%s/lib" % self.teststmpdir
            out = process.system_output('./configure --extra-cflags='
                                        '"%s"' % ext_flags, shell=True)
            fio_flags = "LDFLAGS='%s'" % ext_flags
            for eng in ['PMDK libpmem', 'PMDK dev-dax', 'libnuma']:
                for line in out.decode().splitlines():
                    if line.startswith(eng) and 'no' in line:
                        self.cancel("PMEM engines not built with fio")

        if not self.disk:
            # No disk supplied: fio runs against a plain directory.
            self.dir = self.workdir
        self.target = self.disk
        self.lv_disk = self.disk
        if self.disk:
            # Only build Partition/RAID helpers when a real disk exists;
            # the original called self.disk.split() unconditionally and
            # would crash on None.
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.sraid = softwareraid.SoftwareRaid(self.raid_name, '0',
                                                   self.disk.split(), '1.2')
            dmesg.clear_dmesg()
            if self.disk in disk.get_disks():
                self.pre_cleanup()
                if raid_needed:
                    self.create_raid(self.target, self.raid_name)
                    self.raid_create = True
                    self.target = self.raid_name
                if lv_needed:
                    self.lv_disk = self.target
                    self.target = self.create_lv(self.target)
                    self.lv_create = True
                if fstype:
                    self.create_fs(self.target, self.dir, fstype, fs_args,
                                   mnt_args)
                    self.fs_create = True
            else:
                self.cancel("Missing disk %s in OS" % self.disk)
        build.make(self.sourcedir, extra_args=fio_flags)

    @avocado.fail_on(pmem.PMemException)
    def setup_pmem_disk(self, mnt_args):
        """
        Create a pmem namespace to test on when no disk was supplied.

        :param mnt_args: mount arguments; 'dax' selects fsdax (block
                         device), otherwise a devdax char device is made.
        """
        if not self.disk:
            self.plib = pmem.PMem()
            regions = sorted(self.plib.run_ndctl_list('-R'),
                             key=lambda i: i['size'], reverse=True)
            if not regions:
                self.plib.enable_region()
                regions = sorted(self.plib.run_ndctl_list('-R'),
                                 key=lambda i: i['size'], reverse=True)
            region = self.plib.run_ndctl_list_val(regions[0], 'dev')
            if self.plib.run_ndctl_list("-N -r %s" % region):
                self.plib.destroy_namespace(region=region, force=True)
            if 'dax' in mnt_args:
                self.plib.create_namespace(region=region)
                self.disk = "/dev/%s" % self.plib.run_ndctl_list_val(
                    self.plib.run_ndctl_list('-N -r %s' % region)[0],
                    'blockdev')
            else:
                self.plib.create_namespace(region=region, mode='devdax')
                self.devdax_file = "/dev/%s" % self.plib.run_ndctl_list_val(
                    self.plib.run_ndctl_list('-N -r %s' % region)[0],
                    'chardev')

    def pre_cleanup(self):
        """
        cleanup the disk and directory before test starts on it
        """
        self.log.info("Pre_cleaning of disk and diretories...")
        # Loop variable renamed from `disk`, which shadowed the avocado
        # `disk` utility module used elsewhere in this class.
        disk_list = ['/dev/mapper/avocado_vg-avocado_lv', self.raid_name,
                     self.disk]
        for dev in disk_list:
            self.delete_fs(dev)
        self.log.info("checking ...lv/vg existance...")
        if lv_utils.lv_check(self.vgname, self.lvname):
            self.log.info("found lv existance... deleting it")
            self.delete_lv()
        elif lv_utils.vg_check(self.vgname):
            self.log.info("found vg existance ... deleting it")
            lv_utils.vg_remove(self.vgname)
        else:
            self.log.info("No VG/LV detected")
        self.log.info("checking for sraid existance...")
        if self.sraid.exists():
            self.log.info("found sraid existance... deleting it")
            self.delete_raid()
        else:
            self.log.info("No softwareraid detected ")
        self.log.info("\n End of pre_cleanup")

    def create_raid(self, l_disk, l_raid_name):
        """
        creates a softwareraid with given raid name on given disk

        :param l_disk: disk name on which raid will be created
        :l_raid_name: name of the softwareraid
        :return: None
        """
        self.log.info("creating softwareraid on {}".format(l_disk))
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def create_lv(self, l_disk):
        """
        creates a volume group then logical volume on it and returns lv.

        :param l_disk: disk name on which a lv will be created
        :returns: Returns the lv name
        :rtype: str
        """
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(self.vgname, l_disk)
        lv_utils.lv_create(self.vgname, self.lvname, lv_size)
        return '/dev/%s/%s' % (self.vgname, self.lvname)

    def create_fs(self, l_disk, mountpoint, fstype, fs_args='', mnt_args=''):
        """
        umounts the given disk if mounted then creates a filesystem on it
        and then mounts it on given directory

        :param l_disk: disk name on which fs will be created
        :param mountpoint: directory name on which the disk will be mounted
        :param fstype: filesystem type like ext4,xfs,btrfs etc
        :param fs_args: filesystem related extra arguments like -f -b -s etc
        :param mnt_args: mounting arguments like -o etc
        :returns: None
        """
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype, args=fs_args)
        try:
            self.part_obj.mount(args=mnt_args)
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_raid(self):
        """
        it checks for existing of raid and deletes it if exists
        """
        self.log.info("deleting Sraid %s" % self.raid_name)

        def is_raid_deleted():
            # Stop + wipe metadata, then report whether the raid is gone.
            self.sraid.stop()
            self.sraid.clear_superblock()
            self.log.info("checking for raid metadata")
            cmd = "wipefs -af %s" % self.disk
            process.system(cmd, shell=True, ignore_status=True)
            if self.sraid.exists():
                return False
            return True
        self.log.info("checking lvm_metadata on %s" % self.raid_name)
        cmd = 'blkid -o value -s TYPE %s' % self.raid_name
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.raid_name
            process.system(cmd, shell=True, ignore_status=True)
        if wait.wait_for(is_raid_deleted, timeout=10):
            self.log.info("software raid %s deleted" % self.raid_name)
        else:
            self.err_mesg.append("failed to delete raid %s" % self.raid_name)

    def delete_lv(self):
        """
        checks if lv/vg exists and delete them along with its metadata
        if exists.
        """
        def is_lv_deleted():
            lv_utils.lv_remove(self.vgname, self.lvname)
            time.sleep(5)
            lv_utils.vg_remove(self.vgname)
            if lv_utils.lv_check(self.vgname, self.lvname):
                return False
            return True
        if wait.wait_for(is_lv_deleted, timeout=10):
            self.log.info("lv %s deleted" % self.lvname)
        else:
            self.err_mesg.append("lv %s not deleted" % self.lvname)
        # checking and deleteing if lvm_meta_data exists after lv removed
        cmd = 'blkid -o value -s TYPE %s' % self.lv_disk
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.lv_disk
            process.system(cmd, shell=True, ignore_status=True)

    def delete_fs(self, l_disk):
        """
        checks for disk/dir mount, unmount if mounted and checks for
        filesystem exitance and wipe it off after dir/disk unmount.

        :param l_disk: disk name for which you want to check the mount status
        :return: None
        """
        def is_fs_deleted():
            cmd = "wipefs -af %s" % l_disk
            process.system(cmd, shell=True, ignore_status=True)
            if disk.fs_exists(l_disk):
                return False
            return True

        def is_disk_unmounted():
            cmd = "umount %s" % l_disk
            cmd1 = 'umount /dev/mapper/avocado_vg-avocado_lv'
            process.system(cmd, shell=True, ignore_status=True)
            process.system(cmd1, shell=True, ignore_status=True)
            if disk.is_disk_mounted(l_disk):
                return False
            return True

        def is_dir_unmounted():
            cmd = 'umount %s' % self.dir
            process.system(cmd, shell=True, ignore_status=True)
            if disk.is_dir_mounted(self.dir):
                return False
            return True

        self.log.info("checking if disk is mounted.")
        if disk.is_disk_mounted(l_disk):
            self.log.info("%s is mounted, unmounting it ....", l_disk)
            if wait.wait_for(is_disk_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % l_disk)
            else:
                # list.append() takes one argument; the original passed
                # two and raised TypeError on every failure path here.
                self.err_mesg.append("%s unmount failed" % l_disk)
        else:
            self.log.info("disk %s not mounted." % l_disk)
        self.log.info("checking if dir %s is mounted." % self.dir)
        if disk.is_dir_mounted(self.dir):
            self.log.info("%s is mounted, unmounting it ....", self.dir)
            if wait.wait_for(is_dir_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % self.dir)
            else:
                self.err_mesg.append("failed to unount %s" % self.dir)
        else:
            self.log.info("dir %s not mounted." % self.dir)
        self.log.info("checking if fs exists in {}".format(l_disk))
        if disk.fs_exists(l_disk):
            self.log.info("found fs on %s, removing it....", l_disk)
            if wait.wait_for(is_fs_deleted, timeout=10):
                self.log.info("fs removed successfully..")
            else:
                self.err_mesg.append("failed to delete fs on %s" % l_disk)
        else:
            self.log.info("No fs detected on %s" % self.disk)

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dir)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        # if fs is present create a file on that fs, if no fs
        # self.dirs = path to disk, thus a filename is not needed
        if self.fs_create:
            filename = "%s/%s" % (self.dir, self.fio_file)
        elif self.devdax_file:
            filename = self.devdax_file
        elif self.disk:
            filename = self.target
        else:
            filename = self.dir
        cmd = '%s %s/fio %s --filename=%s' % (self.ld_path, self.sourcedir,
                                              self.get_data(fio_job),
                                              filename)
        self.log.info("running fio test using command : %s" % cmd)
        status = process.system(cmd, ignore_status=True, shell=True)
        if status:
            # status of 3 is a common warning with iscsi disks but fio
            # process completes successfully so throw a warning not
            # a fail. For other nonzero statuses we should fail.
            if status == 3:
                self.log.warning("Warnings during fio run")
            else:
                self.fail("fio run failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if os.path.exists(self.fio_file):
            os.remove(self.fio_file)
        if self.fs_create:
            self.delete_fs(self.target)
        if self.lv_create:
            self.delete_lv()
        if self.raid_create:
            self.delete_raid()
        dmesg.clear_dmesg()
        if self.err_mesg:
            # NOTE(review): relies on Test.warn() being available in this
            # avocado version — confirm before changing.
            self.warn("test failed due to following errors %s" %
                      self.err_mesg)
class Thp(Test):

    '''
    The test enables THP and stress the system using dd load and
    verifies whether THP has been allocated for usage or not

    :avocado: tags=memory,privileged,hugepage
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    @skipUnless('Hugepagesize' in dict(memory.meminfo),
                "Hugepagesize not defined in kernel.")
    def setUp(self):
        '''
        Sets all the reqd parameter and also
        mounts the tmpfs to be used in test.
        '''
        # Workload parameters, sized from available memory unless
        # overridden by test params.
        default_dir = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.mem_path = self.params.get("t_dir", default=default_dir)
        free_mem = self.params.get("mem_size",
                                   default=memory.meminfo.MemFree.m)
        self.dd_timeout = self.params.get("dd_timeout", default=900)
        # Kernels differ in the name of the THP split counter; probe
        # which one this kernel exposes.
        try:
            memory.read_from_vmstat("thp_split_page")
            self.thp_split = "thp_split_page"
        except IndexError:
            self.thp_split = "thp_split"
        # dd block size is twice the hugepage size (MB)
        self.block_size = memory.meminfo.Hugepagesize.m * 2
        self.count = free_mem // self.block_size
        # Mount a tmpfs sized to the free memory for the dd workload
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)

    def test(self):
        '''
        Enables THP , Runs the dd workload and checks whether THP
        has been allocated.
        '''
        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed %s" % details)

        def counters():
            # Single snapshot of the three THP vmstat counters compared
            # around the stress run.
            return (int(memory.read_from_vmstat("thp_fault_alloc")),
                    int(memory.read_from_vmstat(self.thp_split)),
                    int(memory.read_from_vmstat("thp_collapse_alloc")))

        faults_pre, splits_pre, collapses_pre = counters()

        # Stress the tmpfs with one dd file per block until it is full
        self.log.info('Stress testing using dd command')
        for idx in range(self.count):
            stress_cmd = ('dd if=/dev/zero of=%s/%d bs=%dM count=1'
                          % (self.mem_path, idx, self.block_size))
            if process.system(stress_cmd, timeout=self.dd_timeout,
                              verbose=False, ignore_status=True, shell=True):
                self.fail('dd command failed %s' % stress_cmd)

        faults_post, splits_post, collapses_post = counters()

        # THP must have been faulted in during the stress run
        if faults_post <= faults_pre:
            e_msg = "Thp usage count has not increased\n"
            e_msg += "Before Stress:%d\nAfter stress:%d" % (faults_pre,
                                                           faults_post)
            self.fail(e_msg)
        self.log.info("\nTest statistics, changes during test run:")
        self.log.info(
            "thp_fault_alloc=%d\nthp_split=%d\n"
            "thp_collapse_alloc=%d\n",
            faults_post - faults_pre,
            splits_post - splits_pre,
            collapses_post - collapses_pre)

    def tearDown(self):
        '''
        Removes the files created and unmounts the tmpfs.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class Tiobench(Test):

    """
    Avocado test for tiobench.
    """

    def setUp(self):
        """
        Build tiobench.
        Source:
        https://github.com/mkuoppal/tiobench.git
        """
        self.fstype = self.params.get('fs', default='ext4')
        smm = SoftwareManager()
        packages = ['gcc']
        if self.fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                packages.extend(['btrfs-tools'])
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test." % package)
        locations = ["https://github.com/mkuoppal/tiobench/archive/"
                     "master.zip"]
        tarball = self.fetch_asset("tiobench.zip", locations=locations)
        archive.extract(tarball, self.teststmpdir)
        os.chdir(os.path.join(self.teststmpdir, "tiobench-master"))
        build.make(".")
        self.target = self.params.get('dir', default=self.workdir)
        self.disk = self.params.get('disk', default=None)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.target)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system", self.fstype)
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.target)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.target))

    def test(self):
        """
        Test execution with necessary arguments.

        :params blocks: The blocksize in Bytes to use. Defaults to 4096.
        :params threads: The number of concurrent test threads.
        :params size: The total size in MBytes of the files may use together.
        :params num_runs: This number specifies over how many runs
                          each test should be averaged.
        """
        blocks = self.params.get('blocks', default=4096)
        threads = self.params.get('threads', default=10)
        size = self.params.get('size', default=1024)
        num_runs = self.params.get('numruns', default=2)
        self.log.info("Test will run on %s", self.target)
        self.whiteboard = process.system_output('perl ./tiobench.pl '
                                                '--target {} --block={} '
                                                '--threads={} --size={} '
                                                '--numruns={}'
                                                .format(self.target, blocks,
                                                        threads, size,
                                                        num_runs))

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.target)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # Test.fail() takes a single message; the original passed
                # the disk as a second positional argument (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class Thp_Defrag(Test):

    '''
    Defrag test enables THP and fragments the system memory using dd
    load and turns on THP defrag and checks whether defrag occured.

    :avocado: tags=memory,privileged
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    def setUp(self):
        '''
        Sets required params for dd workload and mounts the tmpfs
        '''
        # Get required mem info
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        # dd block size in KB; floor division keeps it an int so the
        # range(self.count) loop below works on Python 3.
        self.block_size = int(mmap.PAGESIZE) // 1024
        # Create the mount point when it is MISSING. The original had
        # the condition inverted (`if os.path.exists: os.makedirs`),
        # which never created a fresh dir and raised when it existed.
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs")
        free_space = disk.freespace(self.mem_path) // 1024
        # Leaving out some free space in tmpfs
        self.count = (free_space // self.block_size) - 3

    @avocado.fail_on
    def test(self):
        '''
        Enables THP, Turns off the defrag and fragments the memory.
        Once the memory gets fragmented turns on the defrag and checks
        whether defrag happened.
        '''
        # Enables THP
        memory.set_thp_value("enabled", "always")
        # Turns off Defrag
        memory.set_thp_value("khugepaged/defrag", "0")
        # Fragments The memory
        self.log.info("Fragmenting the memory Using dd command \n")
        for iterator in range(self.count):
            defrag_cmd = 'dd if=/dev/urandom of=%s/%d bs=%dK count=1'\
                % (self.mem_path, iterator, self.block_size)
            if process.system(defrag_cmd, timeout=900, verbose=False,
                              ignore_status=True, shell=True):
                self.fail('Defrag command Failed %s' % defrag_cmd)

        total = memory.memtotal()
        hugepagesize = memory.get_huge_page_size()
        nr_full = int(0.8 * (total / hugepagesize))

        # Sets max possible hugepages before defrag on
        nr_hp_before = self.set_max_hugepages(nr_full)

        # Turns Defrag ON
        memory.set_thp_value("khugepaged/defrag", "1")

        self.log.info("Sleeping %d seconds to settle out things", 10)
        time.sleep(10)

        # Sets max hugepages after defrag on
        nr_hp_after = self.set_max_hugepages(nr_full)

        # Check for memory defragmentation
        if nr_hp_before >= nr_hp_after:
            e_msg = "No Memory Defragmentation\n"
            e_msg += "%d hugepages before turning khugepaged on,\n"\
                     "%d After it" % (nr_hp_before, nr_hp_after)
            self.fail(e_msg)

        self.log.info("Defrag test passed")

    @staticmethod
    def set_max_hugepages(nr_full):
        '''
        Tries to set the hugepages to the nr_full value and returns
        the max possible value set.
        '''
        memory.set_num_huge_pages(nr_full)
        return memory.get_num_huge_pages()

    def tearDown(self):
        '''
        Removes files and unmounts the tmpfs.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            memory.set_num_huge_pages(0)
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class Bonnie(Test):
    """
    Bonnie++ is a benchmark suite that is aimed at performing a number
    of simple tests of hard drive and file system performance.
    """

    def setUp(self):
        """
        Build bonnie++
        Source:
        http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
        """
        fstype = self.params.get('fs', default='ext4')
        smm = SoftwareManager()
        # Bonnie++ is C++, so pull in the distro's C++ compiler as well
        deps = ['gcc', 'make']
        if distro.detect().name == 'Ubuntu':
            deps.extend(['g++'])
        else:
            deps.extend(['gcc-c++'])
        if fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                deps.extend(['btrfs-tools'])
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                # NOTE: the message previously read "...needed forBonnie..."
                # because of a missing space in the implicit string concat
                self.cancel("Fail to install/check %s, which is needed "
                            "for Bonnie test to run" % package)
        self.disk = self.params.get('disk', default=None)
        self.scratch_dir = self.params.get('dir', default=self.srcdir)
        self.uid_to_use = self.params.get('uid-to-use',
                                          default=getpass.getuser())
        self.number_to_stat = self.params.get('number-to-stat', default=2048)
        self.data_size = self.params.get('data_size_to_pass', default=0)
        tarball = self.fetch_asset('http://www.coker.com.au/bonnie++/'
                                   'bonnie++-1.03e.tgz', expire='7d')
        archive.extract(tarball, self.teststmpdir)
        self.source = os.path.join(self.teststmpdir,
                                   os.path.basename(tarball.split('.tgz')[0]))
        os.chdir(self.source)
        process.run('./configure')
        build.make(self.source)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.scratch_dir)
            self.log.info("Test will run on %s", self.scratch_dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s disk",
                          fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            self.part_obj.mount()

    def test(self):
        """
        Run 'bonnie' with its arguments
        """
        args = []
        args.append('-d %s' % self.scratch_dir)
        args.append('-n %s' % self.number_to_stat)
        args.append('-s %s' % self.data_size)
        args.append('-u %s' % self.uid_to_use)
        cmd = ('%s/bonnie++ %s' % (self.source, " ".join(args)))
        if process.system(cmd, shell=True, ignore_status=True):
            self.fail("test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            self.part_obj.unmount()
class LtpFs(Test):
    '''
    Using LTP (Linux Test Project) testsuite to run Filesystem related tests
    '''

    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        smm = SoftwareManager()
        for package in ['gcc', 'make', 'automake', 'autoconf']:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        self.disk = self.params.get('disk', default=None)
        self.mount_point = self.params.get('dir', default=self.workdir)
        self.script = self.params.get('script')
        fstype = self.params.get('fs', default='ext4')
        self.args = self.params.get('args', default='')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.mount_point)
            self.log.info("Unmounting the disk/dir if it is already mounted")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s", fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("mounting %s on %s", self.disk, self.mount_point)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.mount_point))
        url = "https://github.com/linux-test-project/ltp/"
        url += "archive/master.zip"
        tarball = self.fetch_asset("ltp-master.zip", locations=[url],
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        self.ltpbin_dir = os.path.join(ltp_dir, 'bin')
        if not os.path.isdir(self.ltpbin_dir):
            os.mkdir(self.ltpbin_dir)
        process.system('./configure --prefix=%s' % self.ltpbin_dir,
                       ignore_status=True)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')

    def test_fs_run(self):
        '''
        Downloads LTP, compiles, installs and runs filesystem
        tests on a user specified disk
        '''
        if self.script == 'runltp':
            logfile = os.path.join(self.logdir, 'ltp.log')
            failcmdfile = os.path.join(self.logdir, 'failcmdfile')
            self.args += (" -q -p -l %s -C %s -d %s"
                          % (logfile, failcmdfile, self.mount_point))
            self.log.info("Args = %s", self.args)
            cmd = '%s %s' % (os.path.join(self.ltpbin_dir, self.script),
                             self.args)
            result = process.run(cmd, ignore_status=True)
            # Walk the stdout and try detect failed tests from lines
            # like these:
            # aio01       5  TPASS  :  Test 5: 10 reads and
            # writes in  0.000022 sec
            # vhangup02    1  TFAIL  :  vhangup02.c:88:
            # vhangup() failed, errno:1
            # and check for fail_status The first part contain test name
            fail_status = ['TFAIL', 'TBROK', 'TWARN']
            split_lines = (line.split(None, 3)
                           for line in result.stdout.splitlines())
            failed_tests = [items[0] for items in split_lines
                            if len(items) == 4 and items[2] in fail_status]
            if failed_tests:
                self.fail("LTP tests failed: %s" % ", ".join(failed_tests))
            elif result.exit_status != 0:
                self.fail("No test failures detected, but LTP finished with %s"
                          % (result.exit_status))

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.mount_point)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            # FIX: Test.fail() takes a single message string; the original
            # passed self.disk as an extra positional arg (TypeError)
            if process.system(delete_fs, shell=True, ignore_status=True):
                self.fail("Failed to delete filesystem on %s" % self.disk)
class IOZone(Test):
    '''
    IOzone is a filesystem benchmark tool. The benchmark generates and
    measures a variety of file operations. Iozone has been ported to
    many machines and runs under many operating systems.
    '''

    def setUp(self):
        '''
        Build IOZone
        Source:
        http://www.iozone.org/src/current/iozone3_434.tar
        '''
        fstype = self.params.get('fs', default='')
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        self.disk = self.params.get('disk', default=None)
        self.base_dir = os.path.abspath(self.basedir)
        smm = SoftwareManager()
        packages = ['gcc', 'make', 'patch']
        if raid_needed:
            packages.append('mdadm')
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        if fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with "
                                "RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel(
                        'btrfs-tools is needed for the test to be run')
        tarball = self.fetch_asset(
            'http://www.iozone.org/src/current/iozone3_434.tar')
        archive.extract(tarball, self.teststmpdir)
        version = os.path.basename(tarball.split('.tar')[0])
        self.sourcedir = os.path.join(self.teststmpdir, version)
        make_dir = os.path.join(self.sourcedir, 'src', 'current')
        os.chdir(make_dir)
        patch = self.params.get('patch', default='makefile.patch')
        patch = self.get_data(patch)
        process.run('patch -p3 < %s' % patch, shell=True)
        d_distro = distro.detect()
        arch = d_distro.arch
        # Pick the iozone Makefile target matching the host architecture
        if arch == 'ppc':
            build.make(make_dir, extra_args='linux-powerpc')
        elif arch == 'ppc64' or arch == 'ppc64le':
            build.make(make_dir, extra_args='linux-powerpc64')
        elif arch == 'x86_64':
            build.make(make_dir, extra_args='linux-AMD64')
        else:
            build.make(make_dir, extra_args='linux')
        self.dirs = self.disk
        if self.disk is not None:
            if self.disk in disk.get_disks():
                # Build the stack bottom-up: raid -> lv -> filesystem.
                # self.disk is rebound at each layer so teardown can
                # unwind the same stack.
                if raid_needed:
                    raid_name = '/dev/md/mdsraid'
                    self.create_raid(self.disk, raid_name)
                    self.raid_create = True
                    self.disk = raid_name
                    self.dirs = self.disk
                if lv_needed:
                    self.disk = self.create_lv(self.disk)
                    self.lv_create = True
                    self.dirs = self.disk
                if fstype:
                    self.dirs = self.workdir
                    self.create_fs(self.disk, self.dirs, fstype)
                    self.fs_create = True

    def create_raid(self, l_disk, l_raid_name):
        """Assemble a RAID-0 softwareraid device from l_disk."""
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        """Stop the raid and wipe its superblock."""
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        """Create a VG + LV on l_disk, returning the LV device path."""
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/%s/%s' % (vgname, lvname)

    def delete_lv(self):
        """Remove the LV and VG created by create_lv."""
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, mountpoint, fstype):
        """mkfs l_disk with fstype and mount it on mountpoint."""
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_fs(self, l_disk):
        """Unmount and destroy the filesystem created by create_fs."""
        self.part_obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % l_disk
        if process.system(delete_fs, shell=True, ignore_status=True):
            self.fail("Failed to delete filesystem on %s" % l_disk)

    @staticmethod
    def __get_section_name(desc):
        """
        Returns section name with '_' replacing ' '
        """
        return desc.strip().replace(' ', '_')

    def generate_keyval(self):
        """
        Generating key-value list from results and recording it in
        JSON file
        """
        keylist = {}
        if self.auto_mode:
            labels = ('write', 'rewrite', 'read', 'reread', 'randread',
                      'randwrite', 'bkwdread', 'recordrewrite',
                      'strideread', 'fwrite', 'frewrite', 'fread', 'freread')
            for line in self.results.splitlines():
                fields = line.split()
                if len(fields) != 15:
                    continue
                try:
                    fields = tuple([int(i) for i in fields])
                except ValueError:
                    continue
                for lin, val in zip(labels, fields[2:]):
                    key_name = "%d-%d-%s" % (fields[0], fields[1], lin)
                    keylist[key_name] = val
        else:
            # FIX: the patterns had lost their backslashes ('[s]+'/'([d]+)'
            # matched the literal letters 's' and 'd'), so throughput lines
            # never matched and no keyvals were recorded in this mode.
            child_regexp = re.compile(
                r'Children see throughput for[\s]+([\d]+)\s+'
                r'([-\w]+[-\w\s]*)=[\s]+([\d.]*) KB/sec')
            parent_regexp = re.compile(
                r'Parent sees throughput for[\s]+([\d]+)\s+'
                r'([-\w]+[-\w\s]*)=[\s]+([\d.]*) KB/sec')
            kbsec_regexp = re.compile(r'=[\s]+([\d.]*) KB/sec')
            kbval_regexp = re.compile(r'=[\s]+([\d.]*) KB')
            section = None
            w_count = 0
            for line in self.results.splitlines():
                line = line.strip()
                # Check for the beginning of a new result section
                match = child_regexp.search(line)
                if match:
                    # Extract the section name and the worker count
                    w_count = int(match.group(1))
                    section = self.__get_section_name(match.group(2))
                    # Output the appropriate keyval pair
                    key_name = '%s-%d-kids' % (section, w_count)
                    keylist[key_name] = match.group(3)
                    continue
                # Check for any other interesting lines
                if '=' in line:
                    # Is it something we recognize? First check for parent.
                    match = parent_regexp.search(line)
                    if match:
                        # The section name and the worker count better match
                        p_count = int(match.group(1))
                        p_secnt = self.__get_section_name(match.group(2))
                        if p_secnt != section or p_count != w_count:
                            continue
                        # Set the base name for the keyval; parent lines
                        # also end in "KB/sec" (FIX: match_x was left
                        # unset/stale on this branch)
                        basekey = 'parent'
                        match_x = kbsec_regexp
                    else:
                        # Check for the various 'throughput' values
                        if line[3:26] == ' throughput per thread ':
                            basekey = line[0:3]
                            match_x = kbsec_regexp
                        else:
                            # The only other thing we expect is 'Min xfer'
                            if not line.startswith('Min xfer '):
                                continue
                            basekey = 'MinXfer'
                            match_x = kbval_regexp
                    match = match_x.search(line)
                    if match:
                        result = match.group(1)
                        key_name = "%s-%d-%s" % (section, w_count, basekey)
                        keylist[key_name] = result
        self.whiteboard = json.dumps(keylist, indent=1)

    def test(self):
        '''
        Test method for performing IOZone test and analysis.
        '''
        directory = self.params.get('dir', default=None)
        args = self.params.get('args', default=None)
        previous_results = self.params.get('previous_results', default=None)
        if not directory:
            directory = self.base_dir
        os.chdir(directory)
        if not args:
            args = '-a'
        cmd = os.path.join(self.sourcedir, 'src', 'current', 'iozone')
        self.results = process.system_output('%s %s'
                                             % (cmd, args)).decode('utf-8')
        self.auto_mode = ("-a" in args)
        results_path = os.path.join(self.outputdir, 'raw_output')
        analysisdir = os.path.join(self.outputdir, 'analysis')
        with open(results_path, 'w') as r_file:
            r_file.write(self.results)
        self.generate_keyval()
        if self.auto_mode:
            if previous_results:
                analysis = IOzoneAnalyzer(
                    self.log,
                    list_files=[results_path, previous_results],
                    output_dir=analysisdir)
                analysis.analyze()
            else:
                analysis = IOzoneAnalyzer(self.log,
                                          list_files=[results_path],
                                          output_dir=analysisdir)
                analysis.analyze()
            plotter = IOzonePlotter(self.log, results_file=results_path,
                                    output_dir=analysisdir)
            plotter.plot_2d_graphs()

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Unwind the storage stack in reverse order of creation
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.disk)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
class Bonnie(Test):
    """
    Bonnie++ is a benchmark suite that is aimed at performing a number
    of simple tests of hard drive and file system performance.
    """

    def setUp(self):
        """
        Build bonnie++
        Source:
        http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
        """
        fstype = self.params.get('fs', default='ext4')
        smm = SoftwareManager()
        # btrfs userspace tools are only auto-installed on Ubuntu here
        if fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        self.disk = self.params.get('disk', default=None)
        self.scratch_dir = self.params.get('dir', default=self.srcdir)
        # bonnie++ refuses to run as root unless a uid is supplied (-u)
        self.uid_to_use = self.params.get('uid-to-use',
                                          default=getpass.getuser())
        self.number_to_stat = self.params.get('number-to-stat', default=2048)
        self.data_size = self.params.get('data_size_to_pass', default=0)
        tarball = self.fetch_asset(
            'http://www.coker.com.au/bonnie++/'
            'bonnie++-1.03e.tgz', expire='7d')
        archive.extract(tarball, self.teststmpdir)
        self.source = os.path.join(self.teststmpdir,
                                   os.path.basename(tarball.split('.tgz')[0]))
        os.chdir(self.source)
        process.run('./configure')
        build.make(self.source)
        # When a disk is given, reformat it and mount it on scratch_dir
        # before the benchmark runs (order matters: unmount -> mkfs -> mount)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.scratch_dir)
            self.log.info("Test will run on %s", self.scratch_dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s disk",
                          fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            self.part_obj.mount()

    def test(self):
        """
        Run 'bonnie' with its arguments
        """
        args = []
        args.append('-d %s' % self.scratch_dir)
        args.append('-n %s' % self.number_to_stat)
        args.append('-s %s' % self.data_size)
        args.append('-u %s' % self.uid_to_use)
        cmd = ('%s/bonnie++ %s' % (self.source, " ".join(args)))
        if process.system(cmd, shell=True, ignore_status=True):
            self.fail("test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            self.part_obj.unmount()
class Disktest(Test):
    """
    Avocado module for disktest.
    Pattern test of the disk, using unique signatures for each block and each
    iteration of the test. Designed to check for data corruption issues in the
    disk and disk controller.
    It writes 50MB/s of 500KB size ops.
    """

    def setUp(self):
        """
        Verifies if we have gcc to compile disktest.
        :param disk: Disk to be used in test.
        :param dir: Directory of used in test. When the target does not exist,
                    it's created.
        :param gigabytes: Disk space that will be used for the test to run.
        :param chunk_mb: Size of the portion of the disk used to run the test.
                        Cannot be smaller than the total amount of RAM.
        """
        softm = SoftwareManager()
        if not softm.check_installed("gcc") and not softm.install("gcc"):
            self.cancel('Gcc is needed for the test to be run')
        # Log of all the disktest processes
        self.disk_log = os.path.abspath(os.path.join(self.outputdir,
                                                     "log.txt"))
        self._init_params()
        self._compile_disktest()

    def _init_params(self):
        """
        Retrieves and checks the test params
        """
        self.disk = self.params.get('disk', default=None)
        self.dirs = self.params.get('dir', default=self.workdir)
        self.fstype = self.params.get('fs', default='ext4')

        memory_mb = memory.memtotal() // 1024
        # FIX: convert only after the None check; int(None) raised
        # TypeError and made the "default to total RAM" path unreachable
        chunk_mb = self.params.get('chunk_mb', default=None)
        if chunk_mb is None:
            # By default total RAM
            self.chunk_mb = memory_mb
        else:
            self.chunk_mb = int(chunk_mb)
        if self.chunk_mb == 0:
            self.chunk_mb = 1
        if memory_mb > self.chunk_mb:
            self.cancel("Chunk size has to be greater or equal to RAM size. "
                        "(%s > %s)" % (self.chunk_mb, memory_mb))

        gigabytes = self.params.get('gigabytes', default=None)
        if gigabytes is None:
            free = 107374182400  # cap it at 100GB by default
            free = min(utils_disk.freespace(self.dirs) // 1073741824, free)
            gigabytes = free
        else:
            gigabytes = int(gigabytes)

        # Integer division: no_chunks is a range() bound in test()
        self.no_chunks = 1024 * gigabytes // self.chunk_mb
        if self.no_chunks == 0:
            self.cancel("Free disk space is lower than chunk size (%s, %s)"
                        % (1024 * gigabytes, self.chunk_mb))
        self.log.info("Test will use %s chunks %sMB each in %sMB RAM using %s "
                      "GB of disk space on %s dirs (%s).", self.no_chunks,
                      self.chunk_mb, memory_mb,
                      self.no_chunks * self.chunk_mb, len(self.dirs),
                      self.dirs)

        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dirs)
            self.log.info("Unmounting the disk/dir if it is already mounted")
            self.part_obj.unmount()
            self.log.info("creating %s fs on %s", self.fstype, self.disk)
            self.part_obj.mkfs(self.fstype)
            self.log.info("mounting %s on %s", self.disk, self.dirs)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.dirs))

    def _compile_disktest(self):
        """
        Compiles the disktest
        """
        c_file = self.get_data("disktest.c")
        shutil.copy(c_file, self.teststmpdir)
        build.make(self.teststmpdir, extra_args="disktest",
                   env={"CFLAGS": "-O2 -Wall -D_FILE_OFFSET_BITS=64 "
                                  "-D _GNU_SOURCE"})

    def one_disk_chunk(self, disk, chunk):
        """
        Tests one part of the disk by spawning a disktest instance.
        :param disk: Directory (usually a mountpoint).
        :param chunk: Portion of the disk used.
        """
        cmd = ("%s/disktest -m %d -f %s/testfile.%d -i -S >> \"%s\" 2>&1" %
               (self.teststmpdir, self.chunk_mb, disk, chunk, self.disk_log))
        proc = process.get_sub_process_klass(cmd)(cmd, shell=True,
                                                  verbose=False)
        pid = proc.start()
        return pid, proc

    def test(self):
        """
        Runs one iteration of disktest.
        """
        procs = []
        errors = []
        # FIX: xrange does not exist on Python 3
        for i in range(self.no_chunks):
            self.log.debug("Testing chunk %s...", i)
            procs.append(self.one_disk_chunk(self.dirs, i))
        for pid, proc in procs:
            if proc.wait():
                errors.append(str(pid))
        if errors:
            self.fail("The %s pid(s) failed, please check the logs and %s"
                      " for details." % (", ".join(errors), self.disk_log))

    def tearDown(self):
        """
        To clean all the testfiles generated
        """
        # FIX: self.dirs is a single directory path; iterating it directly
        # (as the original did) walked its characters, not its entries
        test_dir = getattr(self, "dirs", None)
        if test_dir:
            for filename in glob.glob("%s/testfile.*" % test_dir):
                os.remove(filename)
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.dirs)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            # FIX: self.fail takes one message; %-format the disk in
            if process.system(delete_fs, shell=True, ignore_status=True):
                self.fail("Failed to delete filesystem on %s" % self.disk)
class LTP(Test):
    """
    LTP (Linux Test Project) testsuite
    :param args: Extra arguments ("runltp" can use with "-f $test")
    """

    # NOTE(review): class-level mutable list shared by all instances of
    # this class in the same process — confirm each test gets a fresh
    # process, otherwise failures leak between tests
    failed_tests = list()
    # Argument sets that trigger the tmpfs / overcommit memory setup below
    mem_tests = ['-f mm', '-f hugetlb']

    @staticmethod
    def mount_point(mount_dir):
        # True when mount_dir appears as a mountpoint in /proc/mounts
        lines = genio.read_file('/proc/mounts').rstrip('\t\r\0').splitlines()
        for substr in lines:
            mop = substr.split(" ")[1]
            if mop == mount_dir:
                return True
        return False

    def check_thp(self):
        # THP page-cache support is detected by the presence of the
        # thp_file_alloc counter in /proc/vmstat
        if 'thp_file_alloc' in genio.read_file('/proc/vm'
                                               'stat').rstrip('\t\r\n\0'):
            self.thp = True
        return self.thp

    def setup_tmpfs_dir(self):
        # check for THP page cache
        self.check_thp()
        if not os.path.isdir(self.mount_dir):
            os.makedirs(self.mount_dir)
        self.device = None
        # Only mount when nothing is mounted there yet; huge=always is
        # used when the kernel supports THP for the page cache
        if not self.mount_point(self.mount_dir):
            if self.thp:
                self.device = Partition(device="none",
                                        mountpoint=self.mount_dir,
                                        mount_options="huge=always")
            else:
                self.device = Partition(device="none",
                                        mountpoint=self.mount_dir)
            self.device.mount(mountpoint=self.mount_dir,
                              fstype="tmpfs", mnt_check=False)

    def setUp(self):
        smg = SoftwareManager()
        dist = distro.detect()
        self.args = self.params.get('args', default='')
        self.mem_leak = self.params.get('mem_leak', default=0)
        deps = ['gcc', 'make', 'automake', 'autoconf', 'psmisc']
        # Distro-specific numa development package names
        if dist.name == "Ubuntu":
            deps.extend(['libnuma-dev'])
        elif dist.name in ["centos", "rhel", "fedora"]:
            deps.extend(['numactl-devel'])
        elif dist.name == "SuSE":
            deps.extend(['libnuma-devel'])
        self.ltpbin_dir = self.mount_dir = None
        self.thp = False
        # Memory tests optionally run against a tmpfs mount and with
        # strict overcommit (vm.overcommit_memory=2)
        if self.args in self.mem_tests:
            self.mount_dir = self.params.get('tmpfs_mount_dir', default=None)
            if self.mount_dir:
                self.setup_tmpfs_dir()
            over_commit = self.params.get('overcommit', default=True)
            if not over_commit:
                process.run('echo 2 > /proc/sys/vm/overcommit_memory',
                            shell=True, ignore_status=True)
        for package in deps:
            if not smg.check_installed(package) and not smg.install(package):
                self.cancel('%s is needed for the test to be run' % package)
        clear_dmesg()
        url = self.params.get(
            'url',
            default='https://github.com/linux-test-project/ltp/archive/master.zip')
        # Only zip/tar archives are accepted
        match = next((ext for ext in [".zip", ".tar"] if ext in url), None)
        tarball = ''
        if match:
            tarball = self.fetch_asset("ltp-master%s" % match,
                                       locations=[url], expire='7d')
        else:
            self.cancel("Provided LTP Url is not valid")
        archive.extract(tarball, self.workdir)
        ltp_dir = os.path.join(self.workdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        if not self.ltpbin_dir:
            self.ltpbin_dir = os.path.join(self.teststmpdir, 'bin')
        if not os.path.exists(self.ltpbin_dir):
            os.mkdir(self.ltpbin_dir)
        process.system('./configure --prefix=%s' % self.ltpbin_dir)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')

    def test(self):
        logfile = os.path.join(self.logdir, 'ltp.log')
        failcmdfile = os.path.join(self.logdir, 'failcmdfile')
        # A skipfile can come from a URL param or from the test's data dir
        skipfileurl = self.params.get('skipfileurl', default=None)
        if skipfileurl:
            skipfilepath = self.fetch_asset("skipfile",
                                            locations=[skipfileurl],
                                            expire='7d')
        else:
            skipfilepath = self.get_data('skipfile')
        os.chmod(self.teststmpdir, 0o755)
        self.args += (" -q -p -l %s -C %s -d %s -S %s"
                      % (logfile, failcmdfile, self.teststmpdir,
                         skipfilepath))
        if self.mem_leak:
            self.args += " -M %s" % self.mem_leak
        cmd = "%s %s" % (os.path.join(self.ltpbin_dir, 'runltp'), self.args)
        process.run(cmd, ignore_status=True)
        # Walk the ltp.log and try detect failed tests from lines like these:
        # msgctl04                                       FAIL       2
        with open(logfile, 'r') as file_p:
            lines = file_p.readlines()
            for line in lines:
                if 'FAIL' in line:
                    value = re.split(r'\s+', line)
                    self.failed_tests.append(value[0])
        collect_dmesg(self)
        if self.failed_tests:
            self.fail("LTP tests failed: %s" % self.failed_tests)

    def tearDown(self):
        # Only the tmpfs mount (memory tests) needs cleanup
        if self.mount_dir:
            self.device.unmount()
class FioTest(Test):
    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tarbal: name of the tarbal of fio suite located in deps path
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio'.
        """
        default_url = "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default=self.srcdir)
        fstype = self.params.get('fs', default='ext4')
        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        fio_version = os.path.basename(tarball.split('.tar.')[0])
        self.sourcedir = os.path.join(self.teststmpdir, fio_version)
        build.make(self.sourcedir)
        smm = SoftwareManager()
        if fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.dir)
            try:
                self.part_obj.mount()
            except PartitionError:
                # FIX: Test.fail() takes a single message string; the
                # original passed the values as extra positional args
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.dir))
        self.fio_file = 'fiotest-image'

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dir)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        cmd = '%s/fio %s %s --filename=%s' % (self.sourcedir,
                                              os.path.join(self.datadir,
                                                           fio_job),
                                              self.dir, self.fio_file)
        process.system(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting directory %s", self.dir)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            # FIX: same positional-arg misuse of self.fail as above
            if process.system(delete_fs, shell=True, ignore_status=True):
                self.fail("Failed to delete filesystem on %s" % self.disk)
        if os.path.exists(self.fio_file):
            os.remove(self.fio_file)
class Tiobench(Test):
    """
    Avocado test for tiobench.
    """

    def setUp(self):
        """
        Build tiobench.
        Source:
        https://github.com/mkuoppal/tiobench.git
        """
        self.fstype = self.params.get('fs', default='')
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        smm = SoftwareManager()
        packages = ['gcc']
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            # btrfs was dropped from RHEL starting with 7.4
            if distro.detect().name == 'rhel':
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                packages.extend(['btrfs-tools'])
        if raid_needed:
            packages.append('mdadm')
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test." % package)
        locations = ["https://github.com/mkuoppal/tiobench/archive/master.zip"]
        tarball = self.fetch_asset("tiobench.zip", locations=locations)
        archive.extract(tarball, self.teststmpdir)
        os.chdir(os.path.join(self.teststmpdir, "tiobench-master"))
        build.make(".")
        self.disk = self.params.get('disk', default=None)
        self.target = self.disk
        if self.disk is not None:
            if self.disk in disk.get_disks():
                # Build the storage stack bottom-up: raid -> lv -> fs;
                # self.disk is rebound at each layer so tearDown can
                # unwind the same stack via the *_create flags
                if raid_needed:
                    raid_name = '/dev/md/mdsraid'
                    self.create_raid(self.disk, raid_name)
                    self.raid_create = True
                    self.disk = raid_name
                    self.target = self.disk
                if lv_needed:
                    self.disk = self.create_lv(self.disk)
                    self.lv_create = True
                    self.target = self.disk
                if self.fstype:
                    # With a filesystem, tiobench targets the mountpoint
                    # rather than the raw device
                    self.target = self.workdir
                    self.create_fs(self.disk, self.target, self.fstype)
                    self.fs_create = True

    def create_raid(self, l_disk, l_raid_name):
        # Assemble a RAID-0 software raid from the given disk(s)
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        # Stop the raid and wipe its superblock
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        # Create a VG + LV on l_disk and return the LV device path
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/%s/%s' % (vgname, lvname)

    def delete_lv(self):
        # Remove the LV and VG created by create_lv
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, mountpoint, fstype):
        # mkfs the device and mount it; failing to mount fails the test
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_fs(self, l_disk):
        # Unmount and destroy the filesystem created by create_fs
        self.part_obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % l_disk
        if process.system(delete_fs, shell=True, ignore_status=True):
            self.fail("Failed to delete filesystem on %s" % l_disk)

    def test(self):
        """
        Test execution with necessary arguments.
        :params blocks: The blocksize in Bytes to use. Defaults to 4096.
        :params threads: The number of concurrent test threads.
        :params size: The total size in MBytes of the files may use together.
        :params num_runs: This number specifies over how many runs
                          each test should be averaged.
        """
        blocks = self.params.get('blocks', default=4096)
        threads = self.params.get('threads', default=10)
        size = self.params.get('size', default=1024)
        num_runs = self.params.get('numruns', default=2)
        self.log.info("Test will run on %s", self.target)
        # tiobench output is stored as the job whiteboard
        self.whiteboard = process.system_output(
            'perl ./tiobench.pl '
            '--target {} --block={} '
            '--threads={} --numruns={} '
            '-size={}'.format(self.target, blocks, threads,
                              num_runs, size)).decode("utf-8")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Unwind the storage stack in reverse order of creation
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.disk)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
class FioTest(Test):
    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tarbal: name of the tarbal of fio suite located in deps path
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio'.
        """
        default_url = "https://brick.kernel.dk/snaps/fio-git-latest.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.disk_type = self.params.get('disk_type', default='')
        fs_args = self.params.get('fs_args', default='')
        mnt_args = self.params.get('mnt_args', default='')
        self.fio_file = 'fiotest-image'
        # Flags record which storage layers setUp built, so tearDown only
        # removes those.
        self.fs_create = False
        self.lv_create = False
        self.raid_create = False
        # Populated by setup_pmem_disk() when a devdax namespace is used.
        self.devdax_file = None
        fstype = self.params.get('fs', default='')
        if fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs support was removed from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with \
                                RHEL 7.4 onwards")
        lv_needed = self.params.get('lv', default=False)
        raid_needed = self.params.get('raid', default=False)
        # Package names differ between Debian- and RPM-based distros.
        if distro.detect().name in ['Ubuntu', 'debian']:
            pkg_list = ['libaio-dev']
            if fstype == 'btrfs':
                pkg_list.append('btrfs-progs')
        else:
            pkg_list = ['libaio', 'libaio-devel']
        if self.disk_type == 'nvdimm':
            # Extra tooling needed to build and manage persistent-memory
            # namespaces.
            pkg_list.extend(['autoconf', 'pkg-config'])
            if distro.detect().name == 'SuSE':
                pkg_list.extend(
                    ['ndctl', 'libnuma-devel', 'libndctl-devel'])
            else:
                pkg_list.extend([
                    'ndctl', 'daxctl', 'numactl-devel', 'ndctl-devel',
                    'daxctl-devel'
                ])
        if raid_needed:
            pkg_list.append('mdadm')
        smm = SoftwareManager()
        for pkg in pkg_list:
            if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel(
                    "Package %s is missing and could not be installed" % pkg)
        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, "fio")
        if self.disk_type == 'nvdimm':
            self.setup_pmem_disk(mnt_args)
            self.log.info("Building PMDK for NVDIMM fio engines")
            pmdk_url = self.params.get('pmdk_url', default='')
            tar = self.fetch_asset(pmdk_url, expire='7d')
            archive.extract(tar, self.teststmpdir)
            version = os.path.basename(tar.split('.tar.')[0])
            pmdk_src = os.path.join(self.teststmpdir, version)
            build.make(pmdk_src)
            build.make(pmdk_src, extra_args='install prefix=/usr')
            os.chdir(self.sourcedir)
            # Scrape fio's configure output to confirm the PMEM engines
            # were actually enabled; cancel if any report 'no'.
            out = process.system_output("./configure --prefix=/usr",
                                        shell=True)
            for eng in ['PMDK libpmem', 'PMDK dev-dax', 'libnuma']:
                for line in out.decode().splitlines():
                    if line.startswith(eng) and 'no' in line:
                        self.cancel("PMEM engines not built with fio")
        if not self.disk:
            # No disk given: run fio against the test's work directory.
            self.disk = self.workdir
        self.dirs = self.disk
        if self.disk in disk.get_disks():
            if raid_needed:
                raid_name = '/dev/md/mdsraid'
                self.create_raid(self.disk, raid_name)
                self.raid_create = True
                self.disk = raid_name
            if lv_needed:
                self.disk = self.create_lv(self.disk)
                self.lv_create = True
                self.dirs = self.disk
            if fstype:
                # With a filesystem, fio writes into the mounted workdir.
                self.dirs = self.workdir
                self.create_fs(self.disk, self.dirs, fstype, fs_args,
                               mnt_args)
                self.fs_create = True
        build.make(self.sourcedir)

    @avocado.fail_on(pmem.PMemException)
    def setup_pmem_disk(self, mnt_args):
        # Prepare an NVDIMM namespace when no disk was supplied:
        # pick the largest region, recreate its namespace, and expose it
        # either as a block device (fsdax, for 'dax' mounts) or as a
        # devdax character device.
        if not self.disk:
            self.plib = pmem.PMem()
            regions = sorted(self.plib.run_ndctl_list('-R'),
                             key=lambda i: i['size'], reverse=True)
            if not regions:
                # No regions visible yet; enable them and rescan.
                self.plib.enable_region()
                regions = sorted(self.plib.run_ndctl_list('-R'),
                                 key=lambda i: i['size'], reverse=True)
            region = self.plib.run_ndctl_list_val(regions[0], 'dev')
            if self.plib.run_ndctl_list("-N -r %s" % region):
                # Remove any pre-existing namespace on the region.
                self.plib.destroy_namespace(region=region, force=True)
            if 'dax' in mnt_args:
                self.plib.create_namespace(region=region)
                self.disk = "/dev/%s" % self.plib.run_ndctl_list_val(
                    self.plib.run_ndctl_list('-N -r %s' % region)[0],
                    'blockdev')
            else:
                self.plib.create_namespace(region=region, mode='devdax')
                self.devdax_file = "/dev/%s" % self.plib.run_ndctl_list_val(
                    self.plib.run_ndctl_list('-N -r %s' % region)[0],
                    'chardev')

    def create_raid(self, l_disk, l_raid_name):
        # RAID-0 array over the given disk(s); kept on self for teardown.
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        # Stop the array and wipe its superblock metadata.
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        # Create a VG on the disk and one LV using part of its capacity.
        # NOTE(review): divisor 2330168 is an inherited size scale —
        # confirm before changing.
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/%s/%s' % (vgname, lvname)

    def delete_lv(self):
        # Remove the LV, then the VG that hosted it.
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, mountpoint, fstype, fs_args='', mnt_args=''):
        # mkfs the device with optional extra args and mount it.
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype, args=fs_args)
        try:
            self.part_obj.mount(args=mnt_args)
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_fs(self, l_disk):
        # Unmount and destroy the filesystem signature by zeroing the
        # start of the device.
        self.part_obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % l_disk
        if process.system(delete_fs, shell=True, ignore_status=True):
            self.fail("Failed to delete filesystem on %s" % l_disk)

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dirs)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        # if fs is present create a file on that fs, if no fs
        # self.dirs = path to disk, thus a filename is not needed
        if self.fs_create:
            filename = "%s/%s" % (self.dirs, self.fio_file)
        elif self.devdax_file:
            filename = self.devdax_file
        else:
            filename = self.dirs
        cmd = '%s/fio %s --filename=%s' % (self.sourcedir,
                                           self.get_data(fio_job),
                                           filename)
        status = process.system(cmd, ignore_status=True, shell=True)
        if status:
            # status of 3 is a common warning with iscsi disks but fio
            # process completes successfully so throw a warning not
            # a fail. For other nonzero statuses we should fail.
            if status == 3:
                self.log.warning("Warnings during fio run")
            else:
                self.fail("fio run failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Remove the scratch file, then tear down fs -> LV -> RAID in
        # reverse order of creation.
        if os.path.exists(self.fio_file):
            os.remove(self.fio_file)
        if self.fs_create:
            self.delete_fs(self.disk)
        if self.lv_create:
            self.delete_lv()
        if self.raid_create:
            self.delete_raid()
class Bonnie(Test):
    """
    Bonnie++ is a benchmark suite that is aimed at performing a number
    of simple tests of hard drive and file system performance.

    The test can optionally stack software RAID, LVM and a filesystem
    on a user-supplied disk before running the benchmark.
    """

    def setUp(self):
        """
        Use distro provided bonnie++ bin
        if not available Build bonnie++ from below
        Source:
        http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
        """
        self.fstype = self.params.get('fs', default='')
        # Flags record which layers setUp actually created so tearDown
        # only tears down what was built.
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default='/mnt')
        self.uid_to_use = self.params.get('uid-to-use',
                                          default=getpass.getuser())
        self.number_to_stat = self.params.get('number-to-stat', default=2048)
        self.data_size = self.params.get('data_size_to_pass', default=0)
        smm = SoftwareManager()
        # Install the package from web
        deps = ['gcc', 'make']
        if distro.detect().name == 'Ubuntu':
            deps.extend(['g++'])
        else:
            deps.extend(['gcc-c++'])
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs support was removed from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs not supported with RHEL 7.4 onwards")
            elif distro.detect().name == 'Ubuntu':
                deps.extend(['btrfs-tools'])
        if raid_needed:
            deps.append('mdadm')
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test" % package)
        # Build bonnie++ from source only when the distro binary is absent.
        if process.system("which bonnie++", ignore_status=True):
            tarball = self.fetch_asset('http://www.coker.com.au/bonnie++/'
                                       'bonnie++-1.03e.tgz', expire='7d')
            archive.extract(tarball, self.teststmpdir)
            self.source = os.path.join(
                self.teststmpdir,
                os.path.basename(tarball.split('.tgz')[0]))
            os.chdir(self.source)
            process.run('./configure')
            build.make(self.source)
            build.make(self.source, extra_args='install')
        if not os.path.exists(self.dir):
            os.mkdir(self.dir)
        self.raid_name = '/dev/md/sraid'
        self.vgname = 'avocado_vg'
        self.lvname = 'avocado_lv'
        # Non-fatal cleanup problems are collected here and reported in
        # tearDown.
        self.err_mesg = []
        self.target = self.disk
        self.lv_disk = self.disk
        self.part_obj = Partition(self.disk, mountpoint=self.dir)
        self.sw_raid = softwareraid.SoftwareRaid(self.raid_name, '0',
                                                 self.disk.split(), '1.2')
        dmesg.clear_dmesg()
        if self.disk is not None:
            # Remove any stale fs/LV/RAID left behind by earlier runs.
            self.pre_cleanup()
            if self.disk in disk.get_disks():
                if raid_needed:
                    self.create_raid(self.disk, self.raid_name)
                    self.raid_create = True
                    self.target = self.raid_name
                if lv_needed:
                    self.lv_disk = self.target
                    self.target = self.create_lv(self.target)
                    self.lv_create = True
                if self.fstype:
                    self.create_fs(self.target, self.dir, self.fstype)
                    self.fs_create = True
            else:
                self.cancel("Missing disk %s in OS" % self.disk)
        else:
            self.cancel("please provide valid disk")

    def create_raid(self, l_disk, l_raid_name):
        """
        creates a softwareraid with given raid name on given disk

        :param l_disk: disk name on which raid will be created
        :l_raid_name: name of the softwareraid
        :return: None
        """
        self.log.info("creating softwareraid on {}".format(l_disk))
        self.sw_raid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                                 l_disk.split(), '1.2')
        self.sw_raid.create()

    def create_lv(self, l_disk):
        """
        creates a volume group then logical volume on it and returns lv.

        :param l_disk: disk name on which a lv will be created
        :returns: Returns the lv name
        :rtype: str
        """
        # NOTE(review): divisor 2330168 is an inherited size scale —
        # confirm before changing.
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(self.vgname, l_disk)
        lv_utils.lv_create(self.vgname, self.lvname, lv_size)
        return '/dev/%s/%s' % (self.vgname, self.lvname)

    def create_fs(self, l_disk, mountpoint, fstype):
        """
        umounts the given disk if mounted then creates a filesystem on it
        and then mounts it on given directory

        :param l_disk: disk name on which fs will be created
        :param mountpoint: directory name on which the disk will be mounted
        :param fstype: filesystem type like ext4,xfs,btrfs etc
        :returns: None
        """
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def pre_cleanup(self):
        """
        cleanup the disk and directory before test starts on it
        """
        self.log.info("Pre_cleaning of disk and diretories...")
        disk_list = ['/dev/mapper/avocado_vg-avocado_lv', self.raid_name,
                     self.disk]
        # FIX: loop variable renamed from `disk` to `dev` so it no longer
        # shadows the `disk` utility module used elsewhere in this class.
        for dev in disk_list:
            self.delete_fs(dev)
        self.log.info("checking ...lv/vg existance...")
        if lv_utils.lv_check(self.vgname, self.lvname):
            self.log.info("found lv existance... deleting it")
            self.delete_lv()
        elif lv_utils.vg_check(self.vgname):
            self.log.info("found vg existance ... deleting it")
            lv_utils.vg_remove(self.vgname)
        self.log.info("checking for softwareraid existance...")
        if self.sw_raid.exists():
            self.log.info("found softwareraid existance... deleting it")
            self.delete_raid()
        else:
            self.log.info("No softwareraid detected ")
        self.log.info("\n End of pre_cleanup")

    def delete_raid(self):
        """
        it checks for existing of raid and deletes it if exists
        """
        self.log.info("deleting Sraid %s" % self.raid_name)

        def is_raid_deleted():
            # Retried by wait_for until the array no longer exists.
            self.sw_raid.stop()
            self.sw_raid.clear_superblock()
            self.log.info("checking for raid metadata")
            cmd = "wipefs -af %s" % self.disk
            process.system(cmd, shell=True, ignore_status=True)
            if self.sw_raid.exists():
                return False
            return True

        self.log.info("checking lvm_metadata on %s" % self.raid_name)
        cmd = 'blkid -o value -s TYPE %s' % self.raid_name
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.raid_name
            process.system(cmd, shell=True, ignore_status=True)
        if wait.wait_for(is_raid_deleted, timeout=10):
            self.log.info("software raid %s deleted" % self.raid_name)
        else:
            self.err_mesg.append("failed to delete sraid %s" % self.raid_name)

    def delete_lv(self):
        """
        checks if lv/vg exists and delete them along with its metadata
        if exists.
        """
        def is_lv_deleted():
            # Retried by wait_for until the LV no longer exists.
            lv_utils.lv_remove(self.vgname, self.lvname)
            time.sleep(5)
            lv_utils.vg_remove(self.vgname)
            if lv_utils.lv_check(self.vgname, self.lvname):
                return False
            return True

        if wait.wait_for(is_lv_deleted, timeout=10):
            self.log.info("lv %s deleted" % self.lvname)
        else:
            self.err_mesg.append("failed to delete lv %s" % self.lvname)
        # checking and deleteing if lvm_meta_data exists after lv removed
        cmd = 'blkid -o value -s TYPE %s' % self.lv_disk
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.lv_disk
            process.system(cmd, shell=True, ignore_status=True)

    def delete_fs(self, l_disk):
        """
        checks for disk/dir mount, unmount if mounted and checks for
        filesystem exitance and wipe it off after dir/disk unmount.

        :param l_disk: disk name for which you want to check the mount status
        :return: None
        """
        def is_fs_deleted():
            cmd = "wipefs -af %s" % l_disk
            process.system(cmd, shell=True, ignore_status=True)
            if disk.fs_exists(l_disk):
                return False
            return True

        def is_disk_unmounted():
            cmd = "umount %s" % l_disk
            cmd1 = 'umount /dev/mapper/avocado_vg-avocado_lv'
            process.system(cmd, shell=True, ignore_status=True)
            process.system(cmd1, shell=True, ignore_status=True)
            if disk.is_disk_mounted(l_disk):
                return False
            return True

        def is_dir_unmounted():
            cmd = 'umount %s' % self.dir
            process.system(cmd, shell=True, ignore_status=True)
            if disk.is_dir_mounted(self.dir):
                return False
            return True

        self.log.info("checking if disk is mounted.")
        if disk.is_disk_mounted(l_disk):
            self.log.info("%s is mounted, unmounting it ....", l_disk)
            if wait.wait_for(is_disk_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % l_disk)
            else:
                # BUG FIX: list.append takes one argument; the message
                # must be %-formatted before appending.
                self.err_mesg.append("%s unmount failed" % l_disk)
        else:
            self.log.info("disk %s not mounted." % l_disk)
        self.log.info("checking if dir %s is mounted." % self.dir)
        if disk.is_dir_mounted(self.dir):
            self.log.info("%s is mounted, unmounting it ....", self.dir)
            if wait.wait_for(is_dir_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % self.dir)
            else:
                # BUG FIX: %-format the message (was a two-argument
                # append) and correct the "unount" typo.
                self.err_mesg.append("failed to unmount %s" % self.dir)
        else:
            self.log.info("dir %s not mounted." % self.dir)
        self.log.info("checking if fs exists in {}".format(l_disk))
        if disk.fs_exists(l_disk):
            self.log.info("found fs on %s, removing it....", l_disk)
            if wait.wait_for(is_fs_deleted, timeout=10):
                self.log.info("fs removed successfully..")
            else:
                self.err_mesg.append(f'failed to delete fs on {l_disk}')
        else:
            self.log.info(f'No fs detected on {self.disk}')
        self.log.info("Running dd...")

    def test(self):
        """
        Run 'bonnie' with its arguments
        """
        args = []
        args.append('-d %s' % self.dir)
        args.append('-n %s' % self.number_to_stat)
        args.append('-s %s' % self.data_size)
        args.append('-u %s' % self.uid_to_use)
        cmd = ('bonnie++ %s' % " ".join(args))
        if process.system(cmd, shell=True, ignore_status=True):
            self.fail("test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Tear down in reverse order of creation: fs -> LV -> RAID.
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.target)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
        dmesg.clear_dmesg()
        if self.err_mesg:
            self.warn("test failed due to following errors %s" % self.err_mesg)
class LtpFs(Test):
    '''
    Using LTP (Linux Test Project) testsuite to run Filesystem related tests

    Optionally stacks software RAID, LVM and a filesystem on a
    user-supplied disk before running the suite.
    '''

    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        # Flags record which layers setUp actually created so tearDown
        # only tears down what was built.
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default='/mnt')
        self.fstype = self.params.get('fs', default='ext4')
        self.args = self.params.get('args', default='')
        smm = SoftwareManager()
        packages = ['gcc', 'make', 'automake', 'autoconf']
        if raid_needed:
            packages.append('mdadm')
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs support was removed from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with \
                                RHEL 7.4 onwards")
        self.raid_name = '/dev/md/sraid'
        self.vgname = 'avocado_vg'
        self.lvname = 'avocado_lv'
        # Non-fatal cleanup problems are collected here and reported in
        # tearDown.
        self.err_mesg = []
        self.target = self.disk
        self.lv_disk = self.disk
        self.part_obj = Partition(self.disk, mountpoint=self.dir)
        self.sw_raid = softwareraid.SoftwareRaid(self.raid_name, '0',
                                                 self.disk.split(), '1.2')
        dmesg.clear_dmesg()
        if self.disk is not None:
            # Remove any stale fs/LV/RAID left behind by earlier runs.
            self.pre_cleanup()
            if self.disk in disk.get_disks():
                if raid_needed:
                    self.create_raid(self.disk, self.raid_name)
                    self.raid_create = True
                    self.target = self.raid_name
                if lv_needed:
                    self.lv_disk = self.target
                    self.target = self.create_lv(self.target)
                    self.lv_create = True
                if self.fstype:
                    self.create_fs(self.target, self.dir, self.fstype)
                    self.fs_create = True
            else:
                self.cancel("Missing disk %s in OS" % self.disk)
        else:
            self.cancel("please provide valid disk")
        url = "https://github.com/linux-test-project/ltp/"
        url += "archive/master.zip"
        tarball = self.fetch_asset("ltp-master.zip", locations=[url],
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        self.ltpbin_dir = os.path.join(ltp_dir, 'bin')
        if not os.path.isdir(self.ltpbin_dir):
            os.mkdir(self.ltpbin_dir)
        process.system('./configure --prefix=%s' % self.ltpbin_dir,
                       ignore_status=True)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')

    def create_raid(self, l_disk, l_raid_name):
        """
        creates a softwareraid with given raid name on given disk

        :param l_disk: disk name on which raid will be created
        :l_raid_name: name of the softwareraid
        :return: None
        """
        self.log.info("creating softwareraid on {}".format(l_disk))
        self.sw_raid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                                 l_disk.split(), '1.2')
        self.sw_raid.create()

    def create_lv(self, l_disk):
        """
        creates a volume group then logical volume on it and returns lv.

        :param l_disk: disk name on which a lv will be created
        :returns: Returns the lv name
        :rtype: str
        """
        # NOTE(review): divisor 2330168 is an inherited size scale —
        # confirm before changing.
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(self.vgname, l_disk)
        lv_utils.lv_create(self.vgname, self.lvname, lv_size)
        return '/dev/%s/%s' % (self.vgname, self.lvname)

    def create_fs(self, l_disk, mountpoint, fstype):
        """
        umounts the given disk if mounted then creates a filesystem on it
        and then mounts it on given directory

        :param l_disk: disk name on which fs will be created
        :param mountpoint: directory name on which the disk will be mounted
        :param fstype: filesystem type like ext4,xfs,btrfs etc
        :returns: None
        """
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def pre_cleanup(self):
        """
        cleanup the disk and directory before test starts on it
        """
        self.log.info("Pre_cleaning of disk and diretories...")
        disk_list = [
            '/dev/mapper/avocado_vg-avocado_lv', self.raid_name, self.disk
        ]
        # FIX: loop variable renamed from `disk` to `dev` so it no longer
        # shadows the `disk` utility module used elsewhere in this class.
        for dev in disk_list:
            self.delete_fs(dev)
        self.log.info("checking ...lv/vg existance...")
        if lv_utils.lv_check(self.vgname, self.lvname):
            self.log.info("found lv existance... deleting it")
            self.delete_lv()
        elif lv_utils.vg_check(self.vgname):
            self.log.info("found vg existance ... deleting it")
            lv_utils.vg_remove(self.vgname)
        self.log.info("checking for softwareraid existance...")
        if self.sw_raid.exists():
            self.log.info("found softwareraid existance... deleting it")
            self.delete_raid()
        else:
            self.log.info("No softwareraid detected ")
        self.log.info("\n End of pre_cleanup")

    def delete_raid(self):
        """
        it checks for existing of raid and deletes it if exists
        """
        self.log.info("deleting Sraid %s" % self.raid_name)

        def is_raid_deleted():
            # Retried by wait_for until the array no longer exists.
            self.sw_raid.stop()
            self.sw_raid.clear_superblock()
            self.log.info("checking for raid metadata")
            cmd = "wipefs -af %s" % self.disk
            process.system(cmd, shell=True, ignore_status=True)
            if self.sw_raid.exists():
                return False
            return True

        self.log.info("checking lvm_metadata on %s" % self.raid_name)
        cmd = 'blkid -o value -s TYPE %s' % self.raid_name
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.raid_name
            process.system(cmd, shell=True, ignore_status=True)
        if wait.wait_for(is_raid_deleted, timeout=10):
            self.log.info("software raid %s deleted" % self.raid_name)
        else:
            self.err_mesg.append("failed to delete swraid %s"
                                 % self.raid_name)

    def delete_lv(self):
        """
        checks if lv/vg exists and delete them along with its metadata
        if exists.
        """
        def is_lv_deleted():
            # Retried by wait_for until the LV no longer exists.
            lv_utils.lv_remove(self.vgname, self.lvname)
            time.sleep(5)
            lv_utils.vg_remove(self.vgname)
            if lv_utils.lv_check(self.vgname, self.lvname):
                return False
            return True

        if wait.wait_for(is_lv_deleted, timeout=10):
            self.log.info("lv %s deleted", self.lvname)
        else:
            self.err_mesg.append("failed to delete lv %s" % self.lvname)
        # checking and deleteing if lvm_meta_data exists after lv removed
        cmd = 'blkid -o value -s TYPE %s' % self.lv_disk
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.lv_disk
            process.system(cmd, shell=True, ignore_status=True)

    def delete_fs(self, l_disk):
        """
        checks for disk/dir mount, unmount if mounted and checks for
        filesystem exitance and wipe it off after dir/disk unmount.

        :param l_disk: disk name for which you want to check the mount status
        :return: None
        """
        def is_fs_deleted():
            cmd = "wipefs -af %s" % l_disk
            process.system(cmd, shell=True, ignore_status=True)
            if disk.fs_exists(l_disk):
                return False
            return True

        def is_disk_unmounted():
            cmd = "umount %s" % l_disk
            cmd1 = 'umount /dev/mapper/avocado_vg-avocado_lv'
            process.system(cmd, shell=True, ignore_status=True)
            process.system(cmd1, shell=True, ignore_status=True)
            if disk.is_disk_mounted(l_disk):
                return False
            return True

        def is_dir_unmounted():
            cmd = 'umount %s' % self.dir
            process.system(cmd, shell=True, ignore_status=True)
            if disk.is_dir_mounted(self.dir):
                return False
            return True

        self.log.info("checking if disk is mounted.")
        if disk.is_disk_mounted(l_disk):
            self.log.info("%s is mounted, unmounting it ....", l_disk)
            if wait.wait_for(is_disk_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % l_disk)
            else:
                # BUG FIX: list.append takes one argument; the message
                # must be %-formatted before appending.
                self.err_mesg.append("%s unmount failed" % l_disk)
        else:
            self.log.info("disk %s not mounted." % l_disk)
        self.log.info("checking if dir %s is mounted." % self.dir)
        if disk.is_dir_mounted(self.dir):
            self.log.info("%s is mounted, unmounting it ....", self.dir)
            if wait.wait_for(is_dir_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % self.dir)
            else:
                # BUG FIX: %-format the message (was a two-argument
                # append) and correct the "unount" typo.
                self.err_mesg.append("failed to unmount %s" % self.dir)
        else:
            self.log.info("dir %s not mounted." % self.dir)
        self.log.info("checking if fs exists in {}".format(l_disk))
        if disk.fs_exists(l_disk):
            self.log.info("found fs on %s, removing it....", l_disk)
            if wait.wait_for(is_fs_deleted, timeout=10):
                self.log.info("fs removed successfully..")
            else:
                self.err_mesg.append(f'failed to delete fs on {l_disk}')
        else:
            self.log.info("No fs detected on %s" % self.disk)

    def test_fs_run(self):
        '''
        Downloads LTP, compiles, installs and runs filesystem
        tests on a user specified disk
        '''
        logfile = os.path.join(self.logdir, 'ltp.log')
        failcmdfile = os.path.join(self.logdir, 'failcmdfile')
        self.args += (" -q -p -l %s -C %s -d %s"
                      % (logfile, failcmdfile, self.dir))
        self.log.info("Args = %s", self.args)
        cmd = '%s %s' % (os.path.join(self.ltpbin_dir, 'runltp'), self.args)
        result = process.run(cmd, ignore_status=True)
        # Walk the stdout and try detect failed tests from lines
        # like these:
        # aio01       5  TPASS  :  Test 5: 10 reads and
        # writes in  0.000022 sec
        # vhangup02    1  TFAIL  :  vhangup02.c:88:
        # vhangup() failed, errno:1
        # and check for fail_status The first part contain test name
        # NOTE(review): result.stdout may be bytes on newer avocado
        # releases, in which case the str comparison below never matches
        # — confirm against the avocado version in use.
        fail_status = ['TFAIL', 'TBROK', 'TWARN']
        split_lines = (line.split(None, 3)
                       for line in result.stdout.splitlines())
        failed_tests = [
            items[0] for items in split_lines
            if len(items) == 4 and items[2] in fail_status
        ]
        if failed_tests:
            self.fail("LTP tests failed: %s" % ", ".join(failed_tests))
        elif result.exit_status != 0:
            self.fail("No test failures detected, but LTP finished with %s"
                      % (result.exit_status))

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Tear down in reverse order of creation: fs -> LV -> RAID.
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.target)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
        dmesg.clear_dmesg()
        if self.err_mesg:
            self.warn("test failed due to following errors %s" % self.err_mesg)
class FSMark(Test):
    """
    The fs_mark program is meant to give a low level bashing to file
    systems. The write pattern that we concentrate on is heavily
    synchronous IO across mutiple directories, drives, etc.
    """

    def setUp(self):
        """
        fs_mark

        Fetch and build fs_mark, then (optionally) create and mount a
        filesystem on a user-supplied disk.
        """
        smm = SoftwareManager()
        tarball = self.fetch_asset('https://github.com/josefbacik/fs_mark/'
                                   'archive/master.zip')
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, 'fs_mark-master')
        os.chdir(self.sourcedir)
        # FIX: the source previously ran `process.run('make')` AND
        # `build.make(self.sourcedir)`, building twice; one build suffices.
        build.make(self.sourcedir)
        self.disk = self.params.get('disk', default=None)
        self.num = self.params.get('num_files', default='1024')
        self.size = self.params.get('size', default='1000')
        self.dir = self.params.get('dir', default=self.srcdir)
        self.fstype = self.params.get('fs', default='ext4')
        if self.fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.log.info("Test will run on %s", self.dir)
            self.log.info("Unmounting the disk before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on dir %s", self.disk, self.dir)
            self.part_obj.mount()

    def test(self):
        """
        Run fs_mark

        -d: target directory, -s: file size, -n: number of files.
        """
        os.chdir(self.sourcedir)
        cmd = ('./fs_mark -d %s -s %s -n %s' % (self.dir, self.size,
                                                self.num))
        process.run(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.dir)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: Test.fail() takes a single message argument;
                # the old call passed the disk as a second positional arg.
                self.fail("Failed to delete filesystem on %s" % self.disk)
class ThpSwapping(Test):
    '''
    The test fills out the total avl memory and tries to swap the thp out.

    :avocado: tags=memory,privileged,hugepage
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    @skipUnless('Hugepagesize' in dict(memory.meminfo),
                "Hugepagesize not defined in kernel.")
    def setUp(self):
        '''
        Sets the Required params for dd and mounts the tmpfs dir
        '''
        # swap_free[0] is the baseline SwapFree; test() appends the value
        # after the dd writes and compares the two.
        self.swap_free = []
        mem_free = memory.meminfo.MemFree.m
        mem = memory.meminfo.MemTotal.m
        swap = memory.meminfo.SwapTotal.m
        self.hugepage_size = memory.meminfo.Hugepagesize.m
        self.swap_free.append(memory.meminfo.SwapFree.m)
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.dd_timeout = 900

        # If swap is enough fill all memory with dd
        # count is halved because each dd file below is written with a
        # block size of 2x Hugepagesize.
        if self.swap_free[0] > (mem - mem_free):
            self.count = (mem // self.hugepage_size) // 2
            tmpfs_size = mem
        else:
            self.count = (mem_free // self.hugepage_size) // 2
            tmpfs_size = mem_free

        if swap <= 0:
            self.cancel("Swap is not enabled in the system")

        # Mount a tmpfs sized to the memory we intend to fill; skipped if
        # a previous run left it mounted.
        if not os.path.ismount(self.mem_path):
            if not os.path.isdir(self.mem_path):
                os.makedirs(self.mem_path)
            self.device = Partition(device="none", mountpoint=self.mem_path)
            self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                              args="-o size=%sM" % tmpfs_size,
                              mnt_check=False)

    def test(self):
        '''
        Enables THP Runs dd, fills out the available memory and checks
        whether THP is swapped out.
        '''
        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed %s" % details)

        # Each dd writes one file of 2x Hugepagesize into the tmpfs,
        # consuming memory until swap is forced.
        for iterator in range(self.count):
            swap_cmd = "dd if=/dev/zero of=%s/%d bs=%sM "\
                       "count=1" % (self.mem_path, iterator,
                                    self.hugepage_size * 2)
            if (process.system(swap_cmd, timeout=self.dd_timeout,
                               verbose=False, ignore_status=True,
                               shell=True)):
                self.fail('Swap command Failed %s' % swap_cmd)

        self.swap_free.append(memory.meminfo.SwapFree.m)

        # Checks Swap is used or not: SwapFree must have decreased from
        # the baseline recorded in setUp.
        if self.swap_free[1] - self.swap_free[0] >= 0:
            self.fail("Swap Space remains untouched")

    def tearDown(self):
        '''
        Removes directories in tmpfs and unmounts it.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class LtpFs(Test):
    '''
    Using LTP (Linux Test Project) testsuite to run Filesystem related tests

    This variant runs the fsstress workload from the LTP tree, optionally
    stacking software RAID, LVM and a filesystem on a user-supplied disk.
    '''

    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        # Flags record which layers setUp actually created so tearDown
        # only removes those.
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        smm = SoftwareManager()
        packages = ['gcc', 'make', 'automake', 'autoconf']
        if raid_needed:
            packages.append('mdadm')
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        self.disk = self.params.get('disk', default=None)
        self.mount_point = self.params.get('dir', default=self.workdir)
        self.script = self.params.get('script')
        fstype = self.params.get('fs', default='')
        self.fsstress_run = self.params.get('fsstress_loop', default='1')
        if fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs support was removed from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with \
                                RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        if self.disk is not None:
            if self.disk in disk.get_disks():
                if raid_needed:
                    raid_name = '/dev/md/mdsraid'
                    self.create_raid(self.disk, raid_name)
                    self.raid_create = True
                    self.disk = raid_name
                    self.mount_point = self.disk
                if lv_needed:
                    self.disk = self.create_lv(self.disk)
                    self.lv_create = True
                    self.mount_point = self.disk
                if fstype:
                    # With a filesystem, fsstress operates on the mounted
                    # work directory rather than the raw device.
                    self.mount_point = self.workdir
                    self.create_fs(self.disk, self.mount_point, fstype)
                    self.fs_create = True
        url = "https://github.com/linux-test-project/ltp/"
        url += "archive/master.zip"
        tarball = self.fetch_asset("ltp-master.zip", locations=[url],
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        process.system('./configure', ignore_status=True)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')
        # test_fsstress_run() executes ./fsstress from this directory.
        fsstress_dir = os.path.join(ltp_dir, 'testcases/kernel/fs/fsstress')
        os.chdir(fsstress_dir)

    def create_raid(self, l_disk, l_raid_name):
        # RAID-0 array over the given disk(s); kept on self for teardown.
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        # Stop the array and wipe its superblock metadata.
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        # Create a VG on the disk and one LV using part of its capacity.
        # NOTE(review): divisor 2330168 is an inherited size scale —
        # confirm before changing.
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/%s/%s' % (vgname, lvname)

    def delete_lv(self):
        # Remove the LV, then the VG that hosted it.
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, mountpoint, fstype):
        # mkfs the device and mount it; unmount first in case a stale
        # mount is present.
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_fs(self, l_disk):
        # Unmount and destroy the filesystem signature by zeroing the
        # start of the device.
        self.part_obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % l_disk
        if process.system(delete_fs, shell=True, ignore_status=True):
            self.fail("Failed to delete filesystem on %s" % l_disk)

    def test_fsstress_run(self):
        '''
        Downloads LTP, compiles, installs and runs filesystem
        tests on a user specified disk
        '''
        if self.script == 'fsstress':
            arg = (" -d %s -n 500 -p 500 -r -l %s"
                   % (self.mount_point, self.fsstress_run))
            self.log.info("Args = %s" % arg)
            # Clear the kernel ring buffer so only errors produced during
            # this run are considered.
            cmd = "dmesg -C"
            process.system(cmd, shell=True, ignore_status=True, sudo=True)
            cmd = './%s %s' % (self.script, arg)
            result = process.run(cmd, ignore_status=True)
            # Pass/fail is judged from kernel error messages, not from
            # the fsstress exit status.
            cmd = "dmesg --level=err"
            if process.system_output(cmd, shell=True, ignore_status=True,
                                     sudo=False):
                self.fail("FSSTRESS test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # Tear down in reverse order of creation: fs -> LV -> RAID.
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.disk)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
class Tiobench(Test):
    """
    Avocado test for tiobench.

    Optionally assembles a software RAID, a logical volume and/or a
    filesystem on a user supplied disk before running the benchmark.
    """

    def setUp(self):
        """
        Build tiobench.
        Source:
        https://github.com/mkuoppal/tiobench.git
        """
        self.fstype = self.params.get('fs', default='')
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default="/mnt")
        self.raid_name = '/dev/md/sraid'
        self.vgname = 'avocado_vg'
        self.lvname = 'avocado_lv'
        # collected non-fatal cleanup failures, reported in tearDown
        self.err_mesg = []
        # BUG FIX: cancel before any helper touches the disk; the original
        # reached self.disk.split() / Partition(None, ...) first and crashed
        # with AttributeError instead of cancelling.
        if self.disk is None:
            self.cancel("Please provide a valid disk name")
        smm = SoftwareManager()
        packages = ['gcc', 'mdadm']
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                packages.extend(['btrfs-tools'])
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test." % package)
        locations = ["https://github.com/mkuoppal/tiobench/archive/master.zip"]
        tarball = self.fetch_asset("tiobench.zip", locations=locations)
        archive.extract(tarball, self.teststmpdir)
        os.chdir(os.path.join(self.teststmpdir, "tiobench-master"))
        build.make(".")
        self.target = self.disk
        self.lv_disk = self.disk
        self.part_obj = Partition(self.disk, mountpoint=self.dir)
        self.sw_raid = softwareraid.SoftwareRaid(self.raid_name, '0',
                                                 self.disk.split(), '1.2')
        dmesg.clear_dmesg()
        if self.disk:
            self.pre_cleanup()
            if raid_needed:
                self.create_raid(self.disk, self.raid_name)
                self.raid_create = True
                self.target = self.raid_name
            if lv_needed:
                self.lv_disk = self.target
                self.target = self.create_lv(self.target)
                self.lv_create = True
            if self.fstype:
                self.create_fs(self.target, self.dir, self.fstype)
                self.fs_create = True
        else:
            self.cancel("Missing disk %s in OS" % self.disk)

    def create_raid(self, l_disk, l_raid_name):
        """
        creates a softwareraid with given raid name on given disk

        :param l_disk: disk name on which raid will be created
        :l_raid_name: name of the softwareraid
        :return: None
        """
        self.log.info("creating softwareraid on {}".format(l_disk))
        self.sw_raid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                                 l_disk.split(), '1.2')
        self.sw_raid.create()

    def create_lv(self, l_disk):
        """
        creates a volume group then logical volume on it and returns lv.

        :param l_disk: disk name on which a lv will be created
        :returns: Returns the lv name
        :rtype: str
        """
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(self.vgname, l_disk)
        lv_utils.lv_create(self.vgname, self.lvname, lv_size)
        return '/dev/%s/%s' % (self.vgname, self.lvname)

    def create_fs(self, l_disk, mountpoint, fstype):
        """
        umounts the given disk if mounted then creates a filesystem on it
        and then mounts it on given directory

        :param l_disk: disk name on which fs will be created
        :param mountpoint: directory name on which the disk will be mounted
        :param fstype: filesystem type like ext4,xfs,btrfs etc
        :returns: None
        """
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def pre_cleanup(self):
        """
        cleanup the disk and directory before test starts on it
        """
        self.log.info("Pre_cleaning of disk and diretories...")
        disk_list = ['/dev/mapper/avocado_vg-avocado_lv', self.raid_name,
                     self.disk]
        # loop variable renamed from `disk` so it does not shadow the
        # avocado.utils.disk module used by delete_fs()
        for dev in disk_list:
            self.delete_fs(dev)
        self.log.info("checking ...lv/vg existance...")
        if lv_utils.lv_check(self.vgname, self.lvname):
            self.log.info("found lv existance... deleting it")
            self.delete_lv()
        elif lv_utils.vg_check(self.vgname):
            self.log.info("found vg existance ... deleting it")
            lv_utils.vg_remove(self.vgname)
        self.log.info("checking for softwareraid existance...")
        if self.sw_raid.exists():
            self.log.info("found softwareraid existance... deleting it")
            self.delete_raid()
        else:
            self.log.info("No softwareraid detected ")
        self.log.info("\n End of pre_cleanup")

    def delete_raid(self):
        """
        it checks for existing of raid and deletes it if exists
        """
        self.log.info("deleting Sraid %s" % self.raid_name)

        def is_raid_deleted():
            # stop the array, wipe superblock and member-disk metadata,
            # then re-check; used as a wait.wait_for predicate
            self.sw_raid.stop()
            self.sw_raid.clear_superblock()
            self.log.info("checking for raid metadata")
            cmd = "wipefs -af %s" % self.disk
            process.system(cmd, shell=True, ignore_status=True)
            if self.sw_raid.exists():
                return False
            return True
        self.log.info("checking lvm_metadata on %s" % self.raid_name)
        cmd = 'blkid -o value -s TYPE %s' % self.raid_name
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.raid_name
            process.system(cmd, shell=True, ignore_status=True)
        if wait.wait_for(is_raid_deleted, timeout=10):
            self.log.info("software raid %s deleted" % self.raid_name)
        else:
            self.err_mesg.append("failed to delete sraid %s" % self.raid_name)

    def delete_lv(self):
        """
        checks if lv/vg exists and delete them along with its metadata
        if exists.
        """
        def is_lv_deleted():
            lv_utils.lv_remove(self.vgname, self.lvname)
            time.sleep(5)
            lv_utils.vg_remove(self.vgname)
            if lv_utils.lv_check(self.vgname, self.lvname):
                return False
            return True
        if wait.wait_for(is_lv_deleted, timeout=10):
            self.log.info("lv %s deleted" % self.lvname)
        else:
            self.err_mesg.append("failed to delete lv %s" % self.lvname)
        # checking and deleteing if lvm_meta_data exists after lv removed
        cmd = 'blkid -o value -s TYPE %s' % self.lv_disk
        out = process.system_output(cmd, shell=True,
                                    ignore_status=True).decode("utf-8")
        if out == 'LVM2_member':
            cmd = "wipefs -af %s" % self.lv_disk
            process.system(cmd, shell=True, ignore_status=True)

    def delete_fs(self, l_disk):
        """
        checks for disk/dir mount, unmount if mounted and checks for
        filesystem exitance and wipe it off after dir/disk unmount.

        :param l_disk: disk name for which you want to check the mount status
        :return: None
        """
        def is_fs_deleted():
            cmd = "wipefs -af %s" % l_disk
            process.system(cmd, shell=True, ignore_status=True)
            if disk.fs_exists(l_disk):
                return False
            return True

        def is_disk_unmounted():
            cmd = "umount %s" % l_disk
            cmd1 = 'umount /dev/mapper/avocado_vg-avocado_lv'
            process.system(cmd, shell=True, ignore_status=True)
            process.system(cmd1, shell=True, ignore_status=True)
            if disk.is_disk_mounted(l_disk):
                return False
            return True

        def is_dir_unmounted():
            cmd = 'umount %s' % self.dir
            process.system(cmd, shell=True, ignore_status=True)
            if disk.is_dir_mounted(self.dir):
                return False
            return True

        self.log.info("checking if disk is mounted.")
        if disk.is_disk_mounted(l_disk):
            self.log.info("%s is mounted, unmounting it ....", l_disk)
            if wait.wait_for(is_disk_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % l_disk)
            else:
                # BUG FIX: list.append() takes exactly one argument; the
                # original passed the disk as a second argument and raised
                # TypeError instead of recording the failure.
                self.err_mesg.append("%s unmount failed" % l_disk)
        else:
            self.log.info("disk %s not mounted." % l_disk)
        self.log.info("checking if dir %s is mounted." % self.dir)
        if disk.is_dir_mounted(self.dir):
            self.log.info("%s is mounted, unmounting it ....", self.dir)
            if wait.wait_for(is_dir_unmounted, timeout=10):
                self.log.info("%s unmounted successfully" % self.dir)
            else:
                # BUG FIX: same two-argument append; also fixed the
                # "unount" typo in the recorded message.
                self.err_mesg.append("failed to unmount %s" % self.dir)
        else:
            self.log.info("dir %s not mounted." % self.dir)
        self.log.info("checking if fs exists in {}".format(l_disk))
        if disk.fs_exists(l_disk):
            self.log.info("found fs on %s, removing it....", l_disk)
            if wait.wait_for(is_fs_deleted, timeout=10):
                self.log.info("fs removed successfully..")
            else:
                self.err_mesg.append(f'failed to delete fs on {l_disk}')
        else:
            self.log.info(f'No fs detected on {self.disk}')

    def test(self):
        """
        Test execution with necessary arguments.

        :params blocks: The blocksize in Bytes to use. Defaults to 4096.
        :params threads: The number of concurrent test threads.
        :params size: The total size in MBytes of the files may use together.
        :params num_runs: This number specifies over how many runs
                          each test should be averaged.
        """
        blocks = self.params.get('blocks', default=4096)
        threads = self.params.get('threads', default=10)
        size = self.params.get('size', default=1024)
        num_runs = self.params.get('numruns', default=2)
        self.log.info("Test will run on %s and %s", self.target, self.dir)
        self.whiteboard = process.system_output(
            'perl ./tiobench.pl '
            '--target {} --block={} '
            '--threads={} --numruns={} '
            '-size={}'.format(self.dir, blocks, threads,
                              num_runs, size)).decode("utf-8")

    def tearDown(self):
        """
        Cleanup of disk used to perform this test
        """
        if self.disk is not None:
            if self.fs_create:
                self.delete_fs(self.target)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
        dmesg.clear_dmesg()
        if self.err_mesg:
            self.warn("test failed due to following errors %s" % self.err_mesg)
class Thp(Test):
    '''
    The test enables THP and stress the system using dd load
    and verifies whether THP has been allocated for usage or not

    :avocado: tags=memory,privileged
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    def setUp(self):
        '''
        Sets all the reqd parameter and also
        mounts the tmpfs to be used in test.
        '''
        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900
        # Set block size as hugepage size * 2 (in MB)
        self.block_size = (memory.get_huge_page_size() // 1024) * 2
        # BUG FIX: use floor division so count is an int; the original
        # true-division produced a float and range(self.count) in test()
        # raised "TypeError: 'float' object cannot be interpreted as an
        # integer" on Python 3.
        self.count = free_mem // self.block_size
        # Mount device as per free memory size
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)

    def test(self):
        '''
        Enables THP, runs the dd workload and checks whether THP
        has been allocated.
        '''
        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed %s" % details)

        # Read thp values before stressing the system
        thp_alloted_before = int(memory.read_from_vmstat("thp_fault_alloc"))
        thp_split_before = int(memory.read_from_vmstat("thp_split_page"))
        thp_collapse_alloc_before = int(memory.read_from_vmstat
                                        ("thp_collapse_alloc"))

        # Start stressing the system by filling the tmpfs mount with
        # block_size-MB files until free memory is consumed
        self.log.info('Stress testing using dd command')
        for iterator in range(self.count):
            stress_cmd = 'dd if=/dev/zero of=%s/%d bs=%dM count=1'\
                         % (self.mem_path, iterator, self.block_size)
            if process.system(stress_cmd, timeout=self.dd_timeout,
                              verbose=False, ignore_status=True, shell=True):
                self.fail('dd command failed %s' % stress_cmd)

        # Read thp values after stressing the system
        thp_alloted_after = int(memory.read_from_vmstat("thp_fault_alloc"))
        thp_split_after = int(memory.read_from_vmstat("thp_split_page"))
        thp_collapse_alloc_after = int(memory.read_from_vmstat
                                       ("thp_collapse_alloc"))

        # Check whether THP is used or not
        if thp_alloted_after <= thp_alloted_before:
            e_msg = "Thp usage count has not increased\n"
            e_msg += "Before Stress:%d\nAfter stress:%d" % (thp_alloted_before,
                                                            thp_alloted_after)
            self.fail(e_msg)
        else:
            thp_fault_alloc = thp_alloted_after - thp_alloted_before
            thp_split = thp_split_after - thp_split_before
            thp_collapse_alloc = (thp_collapse_alloc_after -
                                  thp_collapse_alloc_before)
            self.log.info("\nTest statistics, changes during test run:")
            self.log.info("thp_fault_alloc=%d\nthp_split=%d\n"
                          "thp_collapse_alloc=%d\n",
                          thp_fault_alloc, thp_split, thp_collapse_alloc)

    def tearDown(self):
        '''
        Removes the files created and unmounts the tmpfs.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            self.device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class NXGZipTests(Test):
    """
    nx-gzip test cases make use of testsuite provided by the
    library source package and performs functional tests.
    """

    def download_tarball(self):
        '''
        Get linux source tarball for compress/decompress
        '''
        url = 'https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.15.tar.gz'
        tarball = self.fetch_asset(self.params.get("linuxsrc_url",
                                                   default=url))
        os.chdir(self.workdir)
        archive.extract(tarball, self.workdir)
        # re-pack the extracted tree as linux-src.tar for the test scripts
        archive.compress("%s/linux-src.tar" % self.workdir, self.workdir)

    def create_ddfile(self):
        '''
        create dd file for compress/decompress
        '''
        blk_size = self.params.get('blk_size', default='1073741824')
        file_size = self.params.get('file_size', default='5')
        count = self.params.get('count', default='150')
        dd_cmd = 'dd if=/dev/urandom of=%sgb-file bs=%s count=%s'\
                 % (file_size, blk_size, count)
        if process.system(dd_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: create_ddfile: dd file creation failed")

    def build_tests(self, testdir_name):
        '''
        build different test builds

        :param testdir_name: subdirectory (relative to the power-gzip
                             checkout, or to the kernel tree for the
                             kself test) whose make target is run
        '''
        # test uid 13 (test_kself_nxgzip) builds inside the extracted
        # kernel tree; every other test builds in the power-gzip checkout
        if self.name.uid == 13:
            test_dir = os.path.join(self.buldir, testdir_name)
        else:
            test_dir = os.path.join(self.teststmpdir, testdir_name)
        os.chdir(test_dir)
        # map of subdirectory -> make target/extra args
        testdir_dict = {
            "": "check",
            "selftest": "run_tests",
            "test": "unsafe-check",
            "samples": "bench",
            "oct": "-j16",
            "tools/testing/selftests/powerpc/nx-gzip": "run_tests",
        }
        failed_tests = []
        output = build.run_make(test_dir,
                                extra_args=testdir_dict[testdir_name],
                                process_kwargs={"ignore_status": True})
        for line in output.stdout.decode('utf-8').splitlines():
            if "failed" in line:
                failed_tests.append(line)
        if failed_tests:
            self.fail("%s" % failed_tests)

    @skipUnless(IS_POWER_NV | IS_POWER10,
                "NX-GZIP tests are supported only on PowerNV(POWER9) or "
                "POWER10 platform.")
    def setUp(self):
        """
        Install pre-requisite packages
        """
        smg = SoftwareManager()
        self.dist = distro.detect()
        if self.dist.name not in ['rhel', 'SuSE']:
            self.cancel('Unsupported OS %s' % self.dist.name)
        deps = ['gcc', 'make', 'glibc-static', 'zlib', 'zlib-devel']
        for package in deps:
            if not smg.check_installed(package) and not smg.install(package):
                self.cancel("Fail to install %s required for this test."
                            % (package))
        self.url = self.params.get(
            'url', default="https://github.com/libnxz/power-gzip")
        self.branch = self.params.get('git_branch', default='master')
        git.get_repo(self.url, branch=self.branch,
                     destination_dir=self.teststmpdir)
        # BUG FIX: chdir into the checkout *before* running ./configure;
        # the relative path otherwise resolves against whatever the
        # process' previous working directory happened to be.
        os.chdir(self.teststmpdir)
        if self.branch == 'develop':
            process.run('./configure', sudo=True, shell=True)
        build.make(self.teststmpdir)

    def test_inflate_deflate(self):
        '''
        Running NX-GZIP: Inflate and Deflate tests
        '''
        self.log.info("NX-GZIP: test_inflate_deflate: "
                      "Inflate and Deflate tests")
        self.build_tests("")

    def test_basic_comp_decomp(self):
        '''
        Running NX-GZIP: Simple compression/decompression
        '''
        self.log.info("NX-GZIP: test_basic_comp_decomp: "
                      "basic compression/decompression tests")
        self.build_tests("selftest")

    def test_kernel_oops(self):
        '''
        Running NX-GZIP: testing kernel oops
        '''
        self.log.info("NX-GZIP: test_kernel_oops: testing kernel oops tests")
        self.build_tests("test")

    def test_zpipe(self):
        '''
        Running NX-GZIP: Compress/Decompress using zpipe which uses nx-gzip
        '''
        self.log.info("NX-GZIP: test_zpipe: "
                      "Compress/Decompress using zpipe which uses nx-gzip")
        self.build_tests("samples")
        file_size = self.params.get('file_size', default='5')
        out_file = self.params.get('out_file', default='out_file')
        new_file = self.params.get('new_file', default='new_file')
        self.create_ddfile()
        comp_cmd = './zpipe < %sgb-file > %s' % (file_size, out_file)
        if process.system(comp_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zpipe: zpipe compress failed")
        decomp_cmd = './zpipe -d < %s > %s' % (out_file, new_file)
        if process.system(decomp_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zpipe: zpipe decompress failed")

    def test_gzip_series(self):
        '''
        Running NX-GZIP: Compress/Decompress using gzip series
        '''
        self.log.info("NX-GZIP: test_gzip_series: Running gzip series")
        self.build_tests("samples")
        self.create_ddfile()
        mnt_path = "/mnt/ramdisk"
        file_size = self.params.get('file_size', default='5')
        tmpfs_size = self.params.get('tmpfs_size', default='50')
        free_mem = memory.meminfo.MemFree.m
        if int(tmpfs_size) > free_mem:
            self.cancel("NX-GZIP: test_gzip_series: Test needs minimum %s "
                        "memory" % tmpfs_size)
        if not os.path.ismount(mnt_path):
            if not os.path.exists(mnt_path):
                os.makedirs(mnt_path)
            self.device = Partition(device="none", mountpoint=mnt_path)
            self.device.mount(mountpoint=mnt_path, fstype="tmpfs",
                              args="-o size=%sG" % tmpfs_size,
                              mnt_check=False)
        gzip_series_cmd = './gzip-series.sh %sgb-file' % file_size
        if process.system(gzip_series_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_gzip_series: gzip_series tests failed")
        self.log.info("NX-GZIP: test_gzip_series: Cleaning..")
        if os.path.exists(mnt_path):
            self.device.unmount()
            shutil.rmtree(mnt_path)

    def test_numamany(self):
        '''
        Running NX-GZIP: Run compress/decompress on multiple numa nodes
        '''
        self.log.info("NX-GZIP: test_numamany: "
                      "Run compress/decompress on multiple numa nodes")
        self.build_tests("samples")
        self.create_ddfile()
        file_size = self.params.get('file_size', default='5')
        numamany_cmd = './runnumamany.sh %sgb-file' % file_size
        if process.system(numamany_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_numamany: numa node tests failed")

    def test_simple_decomp(self):
        '''
        Running NX-GZIP: Run simple decomp tests
        '''
        self.log.info("NX-GZIP: test_simple_decomp:simple decomp tests")
        self.build_tests("samples")
        self.create_ddfile()
        file_size = self.params.get('file_size', default='5')
        dcomp_cmd = 'sh ./rundecomp.sh %sgb-file' % file_size
        if process.system(dcomp_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_simple_dcomp: dcomp tests failed")

    def test_zpipe_repeat(self):
        '''
        Running NX-GZIP: Run zpipe repeates tests
        '''
        # intentionally cancelled upstream; the body below is kept for
        # when the test-case issues are resolved
        self.cancel("NX-GZIP:Intentionally Cancelled test_zpipe_repeat "
                    "tests, due to test case issues.")
        self.log.info("NX-GZIP: test_zpipe: Repeated zpipe tests")
        self.download_tarball()
        self.build_tests("samples")
        gcc_cmd = 'gcc -O3 -I../inc_nx -I../ -L../ -L/usr/lib/ ' \
                  '-o zpipe-repeat-test zpipe-repeat-test.c ' \
                  '../lib/libnxz.a -lpthread'
        if process.system(gcc_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zpipe_repeat: zpipe repeat tests failed")
        zpipe_cmd = './zpipe-repeat-test < %s/linux-src.tar> %s/junk.Z' \
                    % (self.workdir, self.workdir)
        if process.system(zpipe_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zpipe_repeat: zpipe repeat tests failed")
        zpipe_d_cmd = './zpipe-repeat-test -d < %s/junk.Z > /dev/null' \
                      % self.workdir
        if process.system(zpipe_d_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zpipe_repeat: zpipe repeat tests failed")

    def test_compdecomp_threads(self):
        '''
        Running NX-GZIP: Run 100 parallel threads and compress/decompress
        the source file 5 times
        '''
        self.log.info("NX-GZIP: test_compdecomp_threads: Run 100 "
                      "parallel threads and compress/decompress "
                      "the source file 5 times")
        self.download_tarball()
        self.build_tests("samples")
        thr = self.params.get('comp_decomp_thr', default='100')
        iters = self.params.get('comp_decomp_iter', default='5')
        compdecomp_cmd = './compdecomp_th %s/linux-src.tar %s %s'\
                         % (self.workdir, thr, iters)
        if process.system(compdecomp_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_compdecomp_threads: "
                      "compress/decompress with parallel threads failed")

    def test_dictionary(self):
        '''
        Running NX-GZIP: Test deflate/inflate with dictionary file
        '''
        self.log.info("NX-GZIP: test_dictionary: "
                      "Run deflate/inflate with dictionary file")
        self.download_tarball()
        self.build_tests("samples")
        make_cmd = 'make zpipe_dict'
        if process.system(make_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_dictionary: make failed")
        dict_cmd = './dict-test.sh alice29.txt %s/linux-src.tar'\
                   % self.workdir
        if process.system(dict_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_test_dictionary: "
                      "deflate/inflate with dictionary tests failed")

    def test_nxdht(self):
        '''
        Running NX-GZIP: Run nxdht tests
        '''
        self.log.info("NX-GZIP: test_nxdht: Run nxdht tests")
        self.download_tarball()
        self.build_tests("samples")
        nxdht_cmd = './gzip_nxdht_test %s/linux-src.tar' % self.workdir
        if process.system(nxdht_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_nxdht: nxdht tests failed")

    def test_oct(self):
        '''
        Running NX-GZIP: Run OCT - Libnxz Output Comparison Tests
        '''
        self.log.info("NX-GZIP: test_oct: "
                      "Libnxz Output Comparison Tests")
        test_dir = os.path.join(self.teststmpdir, "oct")
        # the helper script shipped with the test data must be executable
        # inside the oct directory before make runs
        shutil.copyfile(self.get_data('minigzipsh'),
                        os.path.join(test_dir, 'minigzipsh'))
        os.chdir(test_dir)
        os.chmod('minigzipsh', 0o777)
        self.build_tests("oct")

    def test_kself_nxgzip(self):
        '''
        nx-gzip tests from kself tests
        '''
        self.testdir = "tools/testing/selftests/powerpc/nx-gzip"
        linux_src = 'https://github.com/torvalds/linux/archive/master.zip'
        self.output = "linux-master"
        match = next((ext for ext in [".zip", ".tar"]
                      if ext in linux_src), None)
        if match:
            tarball = self.fetch_asset("kselftest%s" % match,
                                       locations=[linux_src], expire='1d')
            archive.extract(tarball, self.teststmpdir)
        else:
            git.get_repo(linux_src, destination_dir=self.teststmpdir)
        self.buldir = os.path.join(self.teststmpdir, self.output)
        self.build_tests(self.testdir)

    def test_bench_initend(self):
        '''
        Running NX-GZIP: Running bench tests with deflat/inflate - init/end
        '''
        self.log.info("NX-GZIP: test_bench_initend: "
                      "Running tests with Deflat/Inflate InitEnd")
        self.build_tests("samples")
        bench_cmd = './bench_initend'
        if process.system(bench_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP:test_bench_initend:bench_initend tests failed")

    def test_compdecomp_2nx(self):
        '''
        Running NX-GZIP: Run compress/decompress on 2nx devices
        '''
        self.log.info("NX-GZIP: test_compdecomp_2nx: "
                      "Run compress/decompress on 2nx devices")
        self.build_tests("samples")
        self.create_ddfile()
        file_size = self.params.get('file_size', default='5')
        nx2_cmd = './run-series_2nx.sh %sgb-file' % file_size
        if process.system(nx2_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_compdecomp_2nx: "
                      "comp/decomp on 2nx devices tests failed")

    def test_zlib_series(self):
        '''
        Running NX-GZIP: Run compress/decompress zlib series
        '''
        self.log.info("NX-GZIP: test_zlib_series: "
                      "Run compress/decomp zlib test series")
        self.build_tests("samples")
        self.create_ddfile()
        file_size = self.params.get('file_size', default='5')
        zlib_cmd = './zlib-run-series.sh %sgb-file' % file_size
        if process.system(zlib_cmd, shell=True, ignore_status=True):
            self.fail("NX-GZIP: test_zlib_series: zlib test series failed")
class FioTest(Test):
    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tarbal: name of the tarbal of fio suite located in deps path
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio'.
        """
        default_url = "http://brick.kernel.dk/snaps/fio-2.1.10.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default=self.srcdir)
        fstype = self.params.get('fs', default='ext4')
        # BUG FIX: define the scratch-file name here instead of in test();
        # tearDown() references self.fio_file, and would have raised
        # AttributeError whenever it ran before test() set it.  This also
        # matches the sibling FioTest variant which sets it in setUp().
        self.fio_file = 'fiotest-image'
        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        fio_version = os.path.basename(tarball.split('.tar.')[0])
        self.sourcedir = os.path.join(self.teststmpdir, fio_version)
        build.make(self.sourcedir)
        smm = SoftwareManager()
        if fstype == 'btrfs':
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.dir)
            self.part_obj.mount()

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dir)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        cmd = '%s/fio %s %s --filename=%s' % (self.sourcedir,
                                              os.path.join(self.datadir,
                                                           fio_job),
                                              self.dir, self.fio_file)
        process.system(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting directory %s", self.dir)
            self.part_obj.unmount()
        if os.path.exists(self.fio_file):
            os.remove(self.fio_file)
class FSMark(Test):
    """
    The fs_mark program is meant to give a low level bashing to file
    systems. The write pattern that we concentrate on is heavily
    synchronous IO across mutiple directories, drives, etc.
    """

    def setUp(self):
        """
        fs_mark
        """
        # initialize early so tearDown() is safe even if setUp cancels
        # before the symlink is created
        self.link = None
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        smm = SoftwareManager()
        if raid_needed:
            if not smm.check_installed('mdadm') and not smm.install('mdadm'):
                self.cancel("mdadm is needed for the test to be run")
        tarball = self.fetch_asset('https://github.com/josefbacik/fs_mark/'
                                   'archive/master.zip')
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, 'fs_mark-master')
        os.chdir(self.sourcedir)
        # build.make() runs make in sourcedir; the original additionally
        # ran process.run('make') first and compiled everything twice.
        build.make(self.sourcedir)
        self.disk = self.params.get('disk', default=None)
        self.num = self.params.get('num_files', default='1024')
        self.size = self.params.get('size', default='1000')
        self.fstype = self.params.get('fs', default='')
        self.fs_create = False
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with "
                                "RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                if not smm.check_installed("btrfs-tools") and not \
                        smm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        self.dirs = self.disk
        if self.disk is not None:
            if self.disk in disk.get_disks():
                if raid_needed:
                    raid_name = '/dev/md/mdsraid'
                    self.create_raid(self.disk, raid_name)
                    self.raid_create = True
                    self.disk = raid_name
                    self.dirs = self.disk
                if lv_needed:
                    self.disk = self.create_lv(self.disk)
                    self.lv_create = True
                    self.dirs = self.disk
                if self.fstype:
                    self.dirs = self.workdir
                    self.create_fs(self.disk, self.dirs, self.fstype)
                    self.fs_create = True
        # fs_mark is run through a fixed-path symlink to the target dir
        self.link = "/tmp/link"
        os.symlink(self.dirs, self.link)

    def create_raid(self, l_disk, l_raid_name):
        """
        Create a level-0 software raid named l_raid_name on l_disk.
        """
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        """
        Stop the software raid and clear its superblock.
        """
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        """
        Create a volume group and a logical volume on l_disk.

        :returns: the lv device path
        :rtype: str
        """
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/%s/%s' % (vgname, lvname)

    def delete_lv(self):
        """
        Remove the logical volume and its volume group.
        """
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, mountpoint, fstype):
        """
        Unmount l_disk if mounted, mkfs it with fstype and mount it
        on mountpoint.
        """
        self.part_obj = Partition(l_disk, mountpoint=mountpoint)
        self.part_obj.unmount()
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, mountpoint))

    def delete_fs(self, l_disk):
        """
        Unmount l_disk and wipe the filesystem signature off it.
        """
        self.part_obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % l_disk
        if process.system(delete_fs, shell=True, ignore_status=True):
            # BUG FIX: Test.fail() takes a single message argument; the
            # original two-argument call raised TypeError instead of
            # failing with the intended message.
            self.fail("Failed to delete filesystem on %s" % l_disk)

    def test(self):
        """
        Run fs_mark
        """
        os.chdir(self.sourcedir)
        cmd = "./fs_mark -d %s -s %s -n %s" % (self.link, self.size, self.num)
        process.run(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        # use getattr: setUp may have cancelled before these were assigned
        if getattr(self, 'link', None):
            os.unlink(self.link)
        if getattr(self, 'disk', None) is not None:
            if self.fs_create:
                self.delete_fs(self.disk)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
class Tiobench(Test):
    """
    Avocado test for tiobench.
    """

    def setUp(self):
        """
        Build tiobench.
        Source:
        https://github.com/mkuoppal/tiobench.git
        """
        self.fstype = self.params.get('fs', default='ext4')
        smm = SoftwareManager()
        packages = ['gcc']
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                packages.extend(['btrfs-tools'])
        for package in packages:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s package required for this test." % package)
        locations = ["https://github.com/mkuoppal/tiobench/archive/master.zip"]
        tarball = self.fetch_asset("tiobench.zip", locations=locations)
        archive.extract(tarball, self.teststmpdir)
        os.chdir(os.path.join(self.teststmpdir, "tiobench-master"))
        build.make(".")
        self.target = self.params.get('dir', default=self.workdir)
        self.disk = self.params.get('disk', default=None)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.target)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system", self.fstype)
            self.part_obj.mkfs(self.fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.target)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.target))

    def test(self):
        """
        Test execution with necessary arguments.

        :params blocks: The blocksize in Bytes to use. Defaults to 4096.
        :params threads: The number of concurrent test threads.
        :params size: The total size in MBytes of the files may use together.
        :params num_runs: This number specifies over how many runs
                          each test should be averaged.
        """
        blocks = self.params.get('blocks', default=4096)
        threads = self.params.get('threads', default=10)
        size = self.params.get('size', default=1024)
        num_runs = self.params.get('numruns', default=2)
        self.log.info("Test will run on %s", self.target)
        self.whiteboard = process.system_output(
            'perl ./tiobench.pl '
            '--target {} --block={} '
            '--threads={} --numruns={} '
            '-size={}'.format(self.target, blocks, threads,
                              num_runs, size)).decode("utf-8")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.target)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: Test.fail() takes a single message argument; the
                # original two-argument call raised TypeError instead of
                # failing with the intended message.
                self.fail("Failed to delete filesystem on %s" % self.disk)
class FioTest(Test):
    """
    fio is an I/O tool meant to be used both for benchmark and
    stress/hardware verification.

    :see: http://freecode.com/projects/fio

    :param fio_tarbal: name of the tarbal of fio suite located in deps path
    :param fio_job: config defining set of executed tests located in deps path
    """

    def setUp(self):
        """
        Build 'fio'.
        """
        default_url = "https://brick.kernel.dk/snaps/fio-git-latest.tar.gz"
        url = self.params.get('fio_tool_url', default=default_url)
        self.disk = self.params.get('disk', default=None)
        self.dirs = self.params.get('dir', default=self.workdir)
        fstype = self.params.get('fs', default='ext4')
        # BUG FIX: set before any step that can fail so tearDown never hits
        # an AttributeError on self.fio_file (originally set at the very end).
        self.fio_file = 'fiotest-image'
        tarball = self.fetch_asset(url)
        archive.extract(tarball, self.teststmpdir)
        self.sourcedir = os.path.join(self.teststmpdir, "fio")
        build.make(self.sourcedir)
        pkg_list = ['libaio', 'libaio-devel']
        smm = SoftwareManager()
        if fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs was dropped from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                pkg_list.append('btrfs-tools')
        for pkg in pkg_list:
            if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Package %s is missing and could not be installed"
                            % pkg)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dirs)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating file system")
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.dirs)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.dirs))

    def test(self):
        """
        Execute 'fio' with appropriate parameters.
        """
        self.log.info("Test will run on %s", self.dirs)
        fio_job = self.params.get('fio_job', default='fio-simple.job')
        cmd = '%s/fio %s %s --filename=%s' % (self.sourcedir,
                                              self.get_data(fio_job),
                                              self.dirs, self.fio_file)
        process.system(cmd)

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if getattr(self, "disk", None) is not None:
            self.log.info("Unmounting directory %s", self.dirs)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: Test.fail() takes a single message; %-format it
                # instead of passing extra positional args (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
        fio_file = getattr(self, "fio_file", None)
        if fio_file and os.path.exists(fio_file):
            os.remove(fio_file)
class LtpFs(Test):
    '''
    Using LTP (Linux Test Project) testsuite to run Filesystem related tests
    '''

    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        smm = SoftwareManager()
        for package in ['gcc', 'make', 'automake', 'autoconf']:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        self.disk = self.params.get('disk', default=None)
        self.mount_point = self.params.get('dir', default=self.srcdir)
        self.script = self.params.get('script')
        fstype = self.params.get('fs', default='ext4')
        self.args = self.params.get('args', default='')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.mount_point)
            self.log.info("Unmounting the disk/dir if it is already mounted")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s", fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("mounting %s on %s", self.disk, self.mount_point)
            try:
                self.part_obj.mount()
            except PartitionError:
                # BUG FIX: Test.fail() takes a single message; the original
                # passed logging-style args and would raise TypeError.
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.mount_point))
        url = "https://github.com/linux-test-project/ltp/"
        url += "archive/master.zip"
        tarball = self.fetch_asset("ltp-master.zip", locations=[url],
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        self.ltpbin_dir = os.path.join(ltp_dir, 'bin')
        if not os.path.isdir(self.ltpbin_dir):
            os.mkdir(self.ltpbin_dir)
        process.system('./configure --prefix=%s' % self.ltpbin_dir,
                       ignore_status=True)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')

    def test_fs_run(self):
        '''
        Downloads LTP, compiles, installs and runs filesystem
        tests on a user specified disk
        '''
        if self.script == 'runltp':
            logfile = os.path.join(self.logdir, 'ltp.log')
            failcmdfile = os.path.join(self.logdir, 'failcmdfile')
            self.args += (" -q -p -l %s -C %s -d %s"
                          % (logfile, failcmdfile, self.mount_point))
            self.log.info("Args = %s", self.args)
            cmd = '%s %s' % (os.path.join(self.ltpbin_dir, self.script),
                             self.args)
            result = process.run(cmd, ignore_status=True)
            # Walk the stdout and try detect failed tests from lines
            # like these:
            # aio01       5  TPASS  :  Test 5: 10 reads and
            # writes in  0.000022 sec
            # vhangup02    1  TFAIL  :  vhangup02.c:88:
            # vhangup() failed, errno:1
            # and check for fail_status The first part contain test name
            fail_status = ['TFAIL', 'TBROK', 'TWARN']
            # BUG FIX: result.stdout is bytes on recent avocado releases;
            # comparing byte tokens against the str fail_status entries
            # would never match, silently passing failed runs.
            stdout = result.stdout
            if isinstance(stdout, bytes):
                stdout = stdout.decode("utf-8", "replace")
            split_lines = (line.split(None, 3)
                           for line in stdout.splitlines())
            failed_tests = [items[0] for items in split_lines
                            if len(items) == 4 and items[2] in fail_status]
            if failed_tests:
                self.fail("LTP tests failed: %s" % ", ".join(failed_tests))
            elif result.exit_status != 0:
                self.fail("No test failures detected, but LTP finished with %s"
                          % (result.exit_status))

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.mount_point)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: %-format instead of extra args (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class Bonnie(Test):
    """
    Bonnie++ is a benchmark suite that is aimed at performing a number
    of simple tests of hard drive and file system performance.
    """

    def setUp(self):
        """
        Use distro provided bonnie++ bin
        if not available Build bonnie++ from below
        Source:
        http://www.coker.com.au/bonnie++/experimental/bonnie++-1.03e.tgz
        """
        fstype = self.params.get('fs', default='ext4')
        if process.system("which bonnie++", ignore_status=True):
            smm = SoftwareManager()
            # BUG FIX: the second condition was a duplicated
            # check_installed('bonnie++'); it must attempt install(), so a
            # distro package is used when available before building from
            # source.
            if not smm.check_installed('bonnie++')\
                    and not smm.install('bonnie++'):
                # Install the package from web
                deps = ['gcc', 'make']
                if distro.detect().name == 'Ubuntu':
                    deps.extend(['g++'])
                else:
                    deps.extend(['gcc-c++'])
                if fstype == 'btrfs':
                    if distro.detect().name == 'Ubuntu':
                        deps.extend(['btrfs-tools'])
                for package in deps:
                    if not smm.check_installed(package)\
                            and not smm.install(package):
                        self.cancel("Fail to install/check %s, which is"
                                    " needed for Bonnie test to run" % package)
                tarball = self.fetch_asset('http://www.coker.com.au/bonnie++/'
                                           'bonnie++-1.03e.tgz', expire='7d')
                archive.extract(tarball, self.teststmpdir)
                self.source = os.path.join(
                    self.teststmpdir,
                    os.path.basename(tarball.split('.tgz')[0]))
                os.chdir(self.source)
                process.run('./configure')
                build.make(self.source)
                build.make(self.source, extra_args='install')
        self.disk = self.params.get('disk', default=None)
        self.scratch_dir = self.params.get('dir', default=self.srcdir)
        self.uid_to_use = self.params.get('uid-to-use',
                                          default=getpass.getuser())
        self.number_to_stat = self.params.get('number-to-stat', default=2048)
        self.data_size = self.params.get('data_size_to_pass', default=0)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.scratch_dir)
            self.log.info("Test will run on %s", self.scratch_dir)
            self.log.info("Unmounting disk/dir before creating file system")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s disk",
                          fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("Mounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            try:
                self.part_obj.mount()
            except PartitionError:
                # BUG FIX: Test.fail() takes a single message; %-format it
                # instead of passing extra positional args (TypeError).
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.scratch_dir))

    def test(self):
        """
        Run 'bonnie' with its arguments
        """
        args = []
        args.append('-d %s' % self.scratch_dir)
        args.append('-n %s' % self.number_to_stat)
        args.append('-s %s' % self.data_size)
        args.append('-u %s' % self.uid_to_use)
        cmd = ('bonnie++ %s' % " ".join(args))
        if process.system(cmd, shell=True, ignore_status=True):
            self.fail("test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.scratch_dir)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: %-format instead of extra args (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class DiskInfo(Test):
    """
    DiskInfo test for different storage block device tools
    """

    def setUp(self):
        """
        Verifies if we have list of packages installed on OS
        and also skips the test if user gives the current OS boot disk as
        disk input it may erase the data

        :param disk: test disk where the disk operations can be done
        :param fs: type of filesystem to create
        :param dir: path of the directory to mount the disk device
        """
        smm = SoftwareManager()
        if 'ppc' not in platform.processor():
            self.cancel("Processor is not ppc64")
        self.disk = self.params.get('disk', default=None)
        self.dirs = self.params.get('dir', default=self.workdir)
        self.fstype = self.params.get('fs', default='ext4')
        self.log.info("disk: %s, dir: %s, fstype: %s",
                      self.disk, self.dirs, self.fstype)
        if not self.disk:
            self.cancel("No disk input, please update yaml and re-run")
        # Refuse to run on the OS boot disk: the test wipes the filesystem.
        cmd = "df --output=source"
        if self.disk in process.system_output(cmd, ignore_status=True) \
                .decode("utf-8"):
            self.cancel("Given disk is os boot disk,"
                        "it will be harmful to run this test")
        pkg_list = ["lshw"]
        self.distro = distro.detect().name
        if self.distro == 'Ubuntu':
            pkg_list.append("hwinfo")
        if self.fstype == 'ext4':
            pkg_list.append('e2fsprogs')
        if self.fstype == 'xfs':
            pkg_list.append('xfsprogs')
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs was dropped from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if self.distro == 'Ubuntu':
                pkg_list.append("btrfs-tools")
        for pkg in pkg_list:
            if pkg and not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel(
                    "Package %s is missing and could not be installed" % pkg)
        self.disk_nodes = []
        self.disk_base = os.path.basename(self.disk)
        if multipath.is_mpath_dev(self.disk_base):
            self.mpath = True
            self.disk_abs = os.path.basename(os.readlink(self.disk))
            mwwid = multipath.get_multipath_wwid(self.disk_base)
            self.disk_nodes = multipath.get_paths(mwwid)
        else:
            self.mpath = False
            self.disk_abs = self.disk_base
            self.disk_nodes.append(self.disk_base)

    def run_command(self, cmd):
        """
        Run command and fail the test if any command fails
        """
        try:
            process.run(cmd, shell=True, sudo=True)
        except CmdError as details:
            self.fail("Command %s failed %s" % (cmd, details))

    def test_commands(self):
        """
        Test block device tools to list different disk devices
        """
        cmd_list = ["lsblk -l", "fdisk -l", "sfdisk -l", "parted -l",
                    "df -h", "blkid", "lshw -c disk", "grub2-probe /boot"]
        if self.distro == 'Ubuntu':
            cmd_list.append("hwinfo --block --short")
        for cmd in cmd_list:
            self.run_command(cmd)

    def test(self):
        """
        Test disk devices with different operations of creating filesystem and
        mount it on a directory and verify it with certain parameters name,
        size, UUID and IO sizes etc
        """
        msg = []
        if process.system("ls /dev/disk/by-id -l| grep -i %s" % self.disk_abs,
                          ignore_status=True, shell=True, sudo=True) != 0:
            msg.append("Given disk %s is not in /dev/disk/by-id"
                       % self.disk_abs)
        for disk_node in self.disk_nodes:
            if process.system(
                    "ls /dev/disk/by-path -l| grep -i %s" % disk_node,
                    ignore_status=True, shell=True, sudo=True) != 0:
                msg.append("Given disk %s is not in /dev/disk/by-path"
                           % disk_node)
        # Verify disk listed in all tools
        if self.mpath:
            cmd_list = ["fdisk -l ", "lsblk "]
        else:
            cmd_list = ["fdisk -l ", "parted -l", "lsblk ", "lshw -c disk "]
        if self.distro == 'Ubuntu':
            cmd_list.append("hwinfo --short --block")
        for cmd in cmd_list:
            cmd = cmd + " | grep -i %s" % self.disk_base
            if process.system(cmd, ignore_status=True,
                              shell=True, sudo=True) != 0:
                msg.append("Given disk %s is not present in %s"
                           % (self.disk_base, cmd))
        if self.mpath:
            for disk_node in self.disk_nodes:
                if process.system("lshw -c disk | grep -i %s" % disk_node,
                                  ignore_status=True, shell=True,
                                  sudo=True) != 0:
                    msg.append("Given disk %s is not in lshw -c disk"
                               % disk_node)
        # Get the size and UUID of the disk
        cmd = "lsblk -l %s --output SIZE -b |sed -n 2p" % self.disk
        output = process.system_output(cmd, ignore_status=True,
                                       shell=True, sudo=True).decode("utf-8")
        if not output:
            self.cancel("No information available in lsblk")
        # BUG FIX: the original kept only the first character of the size
        # (`(output.strip("\n"))[0]`), making the size checks below vacuous.
        # lsblk right-pads the SIZE column, so strip all whitespace.
        self.size_b = output.strip()
        self.log.info("Disk: %s Size: %s", self.disk, self.size_b)
        # Get the physical/logical and minimal/optimal sector sizes
        pbs_sysfs = "/sys/block/%s/queue/physical_block_size" % self.disk_abs
        pbs = genio.read_file(pbs_sysfs).rstrip("\n")
        lbs_sysfs = "/sys/block/%s/queue/logical_block_size" % self.disk_abs
        lbs = genio.read_file(lbs_sysfs).rstrip("\n")
        mis_sysfs = "/sys/block/%s/queue/minimum_io_size" % self.disk_abs
        mis = genio.read_file(mis_sysfs).rstrip("\n")
        ois_sysfs = "/sys/block/%s/queue/optimal_io_size" % self.disk_abs
        ois = genio.read_file(ois_sysfs).rstrip("\n")
        self.log.info("pbs: %s, lbs: %s, mis: %s, ois: %s",
                      pbs, lbs, mis, ois)
        # Verify sector sizes
        sector_string = "Sector size (logical/physical): %s " \
                        "bytes / %s bytes" % (lbs, pbs)
        output = process.system_output("fdisk -l %s" % self.disk,
                                       ignore_status=True, shell=True,
                                       sudo=True).decode("utf-8")
        if sector_string not in output:
            msg.append("Mismatch in sector sizes of lbs,pbs in "
                       "fdisk o/p w.r.t sysfs paths")
        # BUG FIX: the optimal I/O size slot used `mis` twice; it must use
        # `ois` (which was read from sysfs above but never used).
        io_size_string = "I/O size (minimum/optimal): %s " \
                         "bytes / %s bytes" % (mis, ois)
        if io_size_string not in output:
            msg.append("Mismatch in IO sizes of mis and ois"
                       " in fdisk o/p w.r.t sysfs paths")
        # Verify disk size in other tools
        cmd = "fdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_b not in process.system_output(
                cmd, ignore_status=True, shell=True,
                sudo=True).decode("utf-8"):
            msg.append("Size of disk %s mismatch in fdisk o/p" % self.disk)
        cmd = "sfdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_b not in process.system_output(
                cmd, ignore_status=True, shell=True,
                sudo=True).decode("utf-8"):
            msg.append("Size of disk %s mismatch in sfdisk o/p" % self.disk)
        # Mount
        self.part_obj = Partition(self.disk, mountpoint=self.dirs)
        self.log.info("Unmounting disk/dir before creating file system")
        self.part_obj.unmount()
        self.log.info("creating file system")
        self.part_obj.mkfs(self.fstype)
        self.log.info("Mounting disk %s on directory %s",
                      self.disk, self.dirs)
        try:
            self.part_obj.mount()
        except PartitionError:
            msg.append("failed to mount %s fs on %s to %s"
                       % (self.fstype, self.disk, self.dirs))
        # Get UUID of the disk for each filesystem mount
        cmd = "blkid %s | cut -d '=' -f 2" % self.disk
        output = process.system_output(cmd, ignore_status=True,
                                       shell=True, sudo=True).decode("utf-8")
        self.uuid = output.split('"')[1]
        self.log.info("Disk: %s UUID: %s", self.disk, self.uuid)
        # Verify mount point, filesystem type and UUID for each test variant
        output = process.system_output("lsblk -l %s" % self.disk,
                                       ignore_status=True, shell=True,
                                       sudo=True).decode("utf-8")
        if self.dirs in output:
            self.log.info("Mount point %s for disk %s updated in lsblk o/p",
                          self.dirs, self.disk)
        output = process.system_output("df %s" % self.disk,
                                       ignore_status=True, shell=True,
                                       sudo=True).decode("utf-8")
        if self.dirs in output:
            self.log.info("Mount point %s for disk %s updated in df o/p",
                          self.dirs, self.disk)
        if process.system(
                "ls /dev/disk/by-uuid -l| grep -i %s" % self.disk_abs,
                ignore_status=True, shell=True, sudo=True) != 0:
            msg.append("Given disk %s not having uuid" % self.disk_abs)
        output = process.system_output("blkid %s" % self.disk,
                                       ignore_status=True, shell=True,
                                       sudo=True).decode("utf-8")
        if (self.disk in output and self.fstype in output and
                self.uuid in output):
            self.log.info("Disk %s of file system %s and "
                          "uuid %s is updated in blkid o/p",
                          self.disk, self.fstype, self.uuid)
        if process.system("grub2-probe %s" % self.dirs, ignore_status=True):
            msg.append("Given disk %s's fs not detected by grub2"
                       % self.disk_base)
        # Un-mount the directory
        self.log.info("Unmounting directory %s", self.dirs)
        self.part_obj.unmount()
        # Locate the lshw section for this disk: grep line numbers of the
        # disk entry and all "*-disk" headers, then slice between neighbours.
        cmd = 'lshw -c disk | grep -n "%s" | cut -d ":" -f 1' % self.disk
        middle = process.system_output(cmd, ignore_status=True, shell=True,
                                       sudo=True).decode('utf-8')
        if middle:
            cmd = r'lshw -c disk | grep -n "\-disk" | cut -d ":" -f 1'
            total = process.system_output(cmd, ignore_status=True, shell=True,
                                          sudo=True).decode('utf-8')
            lst = total.splitlines() + middle.splitlines()
            # BUG FIX: line numbers are strings; sort numerically, otherwise
            # lexicographic order ("10" < "9") picks wrong neighbours.
            lst.sort(key=int)
            index = lst.index(middle.splitlines()[0])
            low = lst[index - 1]
            high = lst[index + 1]
            cmd = "lshw -c disk |sed -n '%s, %sp'" % (low, high)
            disk_details = process.system_output(cmd, ignore_status=True,
                                                 shell=True,
                                                 sudo=True).decode('utf-8')
            ls_string = "logicalsectorsize=%s sectorsize=%s" % (lbs, pbs)
            if ls_string not in disk_details:
                msg.append("Mismatch in sector sizes of lbs,pbs"
                           " in lshw o/p w.r.t sysfs paths")
        if msg:
            self.fail("Some tests failed. Details below:\n%s"
                      % "\n".join(msg))

    def tearDown(self):
        '''
        Unmount the directory at the end if incase of test fails in between
        '''
        if hasattr(self, "part_obj"):
            if self.disk is not None:
                self.log.info("Unmounting directory %s", self.dirs)
                self.part_obj.unmount()
                self.log.info("Removing the filesystem created on %s",
                              self.disk)
                delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" \
                    % self.disk
                if process.system(delete_fs, shell=True, ignore_status=True):
                    # BUG FIX: %-format instead of extra args (TypeError).
                    self.fail("Failed to delete filesystem on %s" % self.disk)
class Disktest(Test):
    """
    Avocado module for disktest.
    Pattern test of the disk, using unique signatures for each block and each
    iteration of the test. Designed to check for data corruption issues in the
    disk and disk controller.
    It writes 50MB/s of 500KB size ops.
    """

    def setUp(self):
        """
        Verifies if we have gcc to compile disktest.

        :param disk: Disk to be used in test.
        :param dir: Directory of used in test. When the target does not exist,
                    it's created.
        :param gigabytes: Disk space that will be used for the test to run.
        :param chunk_mb: Size of the portion of the disk used to run the test.
                         Cannot be smaller than the total amount of RAM.
        """
        softm = SoftwareManager()
        if not softm.check_installed("gcc") and not softm.install("gcc"):
            self.cancel('Gcc is needed for the test to be run')
        # Log of all the disktest processes
        self.disk_log = os.path.abspath(os.path.join(self.outputdir,
                                                     "log.txt"))
        self._init_params()
        self._compile_disktest()

    def _init_params(self):
        """
        Retrieves and checks the test params
        """
        self.disk = self.params.get('disk', default=None)
        self.dirs = self.params.get('dir', default=self.workdir)
        self.fstype = self.params.get('fs', default='ext4')
        if self.fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs was dropped from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
        gigabytes = lv_utils.get_device_total_space(self.disk) // 1073741824
        memory_mb = memory.meminfo.MemTotal.m
        self.chunk_mb = gigabytes * 950
        # BUG FIX: with a disk below 1 GiB, chunk_mb is 0 and the division
        # below raised ZeroDivisionError before the cancel could trigger.
        if self.chunk_mb == 0:
            self.cancel("Disk %s has less than 1 GB of space" % self.disk)
        self.no_chunks = 1024 * gigabytes // self.chunk_mb
        if self.no_chunks == 0:
            self.cancel("Free disk space is lower than chunk size (%s, %s)"
                        % (1024 * gigabytes, self.chunk_mb))
        # NOTE: self.dirs is a single path string; the original logged
        # len(self.dirs), which counted characters, not directories.
        self.log.info(
            "Test will use %s chunks %sMB each in %sMB RAM using %sMB "
            "of disk space on dir %s.", self.no_chunks, self.chunk_mb,
            memory_mb, self.no_chunks * self.chunk_mb, self.dirs)
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.dirs)
            self.log.info("Unmounting the disk/dir if it is already mounted")
            self.part_obj.unmount()
            self.log.info("creating %s fs on %s", self.fstype, self.disk)
            self.part_obj.mkfs(self.fstype)
            self.log.info("mounting %s on %s", self.disk, self.dirs)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.dirs))

    def _compile_disktest(self):
        """
        Compiles the disktest
        """
        c_file = self.get_data("disktest.c")
        shutil.copy(c_file, self.teststmpdir)
        build.make(self.teststmpdir, extra_args="disktest",
                   env={"CFLAGS": "-O2 -Wall -D_FILE_OFFSET_BITS=64 "
                                  "-D _GNU_SOURCE"})

    def one_disk_chunk(self, disk, chunk):
        """
        Tests one part of the disk by spawning a disktest instance.

        :param disk: Directory (usually a mountpoint).
        :param chunk: Portion of the disk used.
        """
        cmd = ("%s/disktest -m %d -f %s/testfile.%d -i -S >> \"%s\" 2>&1" %
               (self.teststmpdir, self.chunk_mb, disk, chunk, self.disk_log))
        proc = process.get_sub_process_klass(cmd)(cmd, shell=True,
                                                  verbose=False)
        pid = proc.start()
        return pid, proc

    def test(self):
        """
        Runs one iteration of disktest.
        """
        procs = []
        errors = []
        for i in range(self.no_chunks):
            self.log.debug("Testing chunk %s...", i)
            procs.append(self.one_disk_chunk(self.dirs, i))
        for pid, proc in procs:
            if proc.wait():
                errors.append(str(pid))
        if errors:
            self.fail("The %s pid(s) failed, please check the logs and %s"
                      " for details." % (", ".join(errors), self.disk_log))

    def tearDown(self):
        """
        To clean all the testfiles generated
        """
        # BUG FIX: self.dirs is a single path string; iterating it walked
        # the path character by character, so test files were never removed.
        dirs = getattr(self, "dirs", None)
        if dirs:
            for filename in glob.glob("%s/testfile.*" % dirs):
                os.remove(filename)
        if getattr(self, "disk", None) is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.dirs)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: %-format instead of extra args (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class Thp_Swapping(Test):
    '''
    The test fills out the total avl memory and tries to swap the thp out.

    :avocado: tags=memory,privileged
    '''

    @skipIf(PAGESIZE, "No THP support for kernel with 4K PAGESIZE")
    def setUp(self):
        '''
        Sets the Required params for dd and mounts the tmpfs dir
        '''
        self.swap_free = []
        mem_free = memory.meminfo.MemFree.m
        mem = memory.meminfo.MemTotal.m
        swap = memory.meminfo.SwapTotal.m
        self.hugepage_size = memory.meminfo.Hugepagesize.m
        self.swap_free.append(memory.meminfo.SwapFree.m)
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.dd_timeout = 900
        # If swap is enough fill all memory with dd
        # BUG FIX: use floor division; true division yields a float and
        # range(self.count) in test() raised TypeError on Python 3.
        if self.swap_free[0] > (mem - mem_free):
            self.count = (mem // self.hugepage_size) // 2
            tmpfs_size = mem
        else:
            self.count = (mem_free // self.hugepage_size) // 2
            tmpfs_size = mem_free
        if swap <= 0:
            self.cancel("Swap is not enabled in the system")
        if not os.path.ismount(self.mem_path):
            if not os.path.isdir(self.mem_path):
                os.makedirs(self.mem_path)
            self.device = Partition(device="none", mountpoint=self.mem_path)
            self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                              args="-o size=%sM" % tmpfs_size)

    def test(self):
        '''
        Enables THP Runs dd, fills out the available memory and checks whether
        THP is swapped out.
        '''
        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed %s" % details)
        for iterator in range(self.count):
            swap_cmd = "dd if=/dev/zero of=%s/%d bs=%sM "\
                       "count=1" % (self.mem_path, iterator,
                                    self.hugepage_size * 2)
            if(process.system(swap_cmd, timeout=self.dd_timeout,
                              verbose=False, ignore_status=True, shell=True)):
                self.fail('Swap command Failed %s' % swap_cmd)
        self.swap_free.append(memory.meminfo.SwapFree.m)
        # Checks Swap is used or not
        if self.swap_free[1] - self.swap_free[0] >= 0:
            self.fail("Swap Space remains untouched")

    def tearDown(self):
        '''
        Removes directories in tmpfs and unmounts it.
        '''
        if self.mem_path:
            self.log.info('Cleaning Up!!!')
            # BUG FIX: self.device only exists when setUp performed the
            # mount; guard so tearDown cannot raise AttributeError.
            device = getattr(self, "device", None)
            if device is not None:
                device.unmount()
            process.system('rm -rf %s' % self.mem_path, ignore_status=True)
class Dbench(Test):
    """
    Dbench is a tool to generate I/O workloads to either a filesystem or to a
    networked CIFS or NFS server.
    Dbench is a utility to benchmark a system based on client workload
    profiles.
    """

    def setUp(self):
        '''
        Build Dbench
        Source:
        http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
        '''
        fstype = self.params.get('fs', default='')
        self.fs_create = False
        lv_needed = self.params.get('lv', default=False)
        self.lv_create = False
        raid_needed = self.params.get('raid', default=False)
        self.raid_create = False
        self.disk = self.params.get('disk', default=None)
        if not self.disk:
            self.cancel("Provide the test disks to proceed !")
        self.md_name = self.params.get('raid_name', default='md127')
        self.mountpoint = self.params.get('dir', default='/mnt')
        self.disk_obj = Partition(self.disk, mountpoint=self.mountpoint)
        self.pre_cleanup()
        self.clear_disk(self.disk_obj, self.disk)
        if not os.path.exists(self.mountpoint):
            os.mkdir(self.mountpoint)
        sm = SoftwareManager()
        pkgs = ["gcc", "patch"]
        if raid_needed:
            pkgs.append('mdadm')
        for pkg in pkgs:
            if not sm.check_installed(pkg) and not sm.install(pkg):
                # CONSISTENCY FIX: every other test in this file cancels
                # (not errors) when a dependency cannot be installed.
                self.cancel('%s is needed for the test to be run' % pkg)
        if fstype == 'btrfs':
            ver = int(distro.detect().version)
            rel = int(distro.detect().release)
            if distro.detect().name == 'rhel':
                # btrfs was dropped from RHEL starting with 7.4
                if (ver == 7 and rel >= 4) or ver > 7:
                    self.cancel("btrfs is not supported with RHEL 7.4 onwards")
            if distro.detect().name == 'Ubuntu':
                if not sm.check_installed("btrfs-tools") and not \
                        sm.install("btrfs-tools"):
                    self.cancel('btrfs-tools is needed for the test to be run')
        self.results = []
        tarball = self.fetch_asset(
            'http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz')
        archive.extract(tarball, self.teststmpdir)
        cb_version = os.path.basename(tarball.split('.tar.')[0])
        self.sourcedir = os.path.join(self.teststmpdir, cb_version)
        os.chdir(self.sourcedir)
        patch = self.params.get('patch', default='dbench_startup.patch')
        process.run('patch -p1 < %s' % self.get_data(patch), shell=True)
        process.run('./configure')
        build.make(self.sourcedir)
        if self.disk is not None:
            if self.disk in disk.get_disks():
                if raid_needed:
                    raid_name = '/dev/%s' % self.md_name
                    self.create_raid(self.disk, raid_name)
                    self.raid_create = True
                    self.disk = raid_name
                if lv_needed:
                    self.disk = self.create_lv(self.disk)
                    self.lv_create = True
                if fstype:
                    self.create_fs(self.disk, fstype)
                    self.fs_create = True

    def pre_cleanup(self):
        """
        Best-effort removal of leftovers (mounts, LV/VG, raid) from a
        previous run; all commands tolerate failure.
        """
        umount_dir = "umount -f %s" % self.mountpoint
        process.system(umount_dir, shell=True, ignore_status=True)
        delete_lv = "lvremove -f /dev/mapper/avocado_vg-avocado_lv"
        process.system(delete_lv, shell=True, ignore_status=True)
        delete_vg = "vgremove -f avocado_vg"
        process.system(delete_vg, shell=True, ignore_status=True)
        delete_rd = 'mdadm --stop /dev/%s' % self.md_name
        process.system(delete_rd, shell=True, ignore_status=True)

    def clear_disk(self, obj, disk):
        """
        Unmount and wipe the filesystem signature on the given disk.
        """
        obj.unmount()
        delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % disk
        # BUG FIX: with ignore_status=False a failing dd raised CmdError
        # before the check; use ignore_status=True so the fail branch is
        # reachable, and %-format the message (Test.fail takes one arg).
        if process.system(delete_fs, shell=True, ignore_status=True):
            self.fail("Failed to delete filesystem on %s" % disk)

    def create_raid(self, l_disk, l_raid_name):
        """Create a software RAID-0 array over the given disk(s)."""
        self.sraid = softwareraid.SoftwareRaid(l_raid_name, '0',
                                               l_disk.split(), '1.2')
        self.sraid.create()

    def delete_raid(self):
        """Stop the raid array and clear its superblock."""
        self.sraid.stop()
        self.sraid.clear_superblock()

    def create_lv(self, l_disk):
        """Create an LV covering roughly half the disk; return its path."""
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_size = lv_utils.get_device_total_space(l_disk) / 2330168
        lv_utils.vg_create(vgname, l_disk)
        lv_utils.lv_create(vgname, lvname, lv_size)
        return '/dev/mapper/%s-%s' % (vgname, lvname)

    def delete_lv(self):
        """Remove the LV and VG created by create_lv()."""
        vgname = 'avocado_vg'
        lvname = 'avocado_lv'
        lv_utils.lv_remove(vgname, lvname)
        lv_utils.vg_remove(vgname)

    def create_fs(self, l_disk, fstype):
        """Make a filesystem on l_disk and mount it at the mountpoint."""
        self.part_obj = Partition(l_disk, mountpoint=self.mountpoint)
        self.part_obj.unmount(force=True)
        self.part_obj.mkfs(fstype)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (l_disk, self.mountpoint))

    def test(self):
        '''
        Test Execution with necessary args
        '''
        nprocs = self.params.get('nprocs', default=None)
        seconds = self.params.get('seconds', default=60)
        args = self.params.get('args', default='')
        if not nprocs:
            nprocs = multiprocessing.cpu_count()
        loadfile = os.path.join(self.sourcedir, 'client.txt')
        cmd = '%s/dbench %s %s -D %s -c %s -t %d' % (self.sourcedir, nprocs,
                                                     args, self.mountpoint,
                                                     loadfile, seconds)
        # BUG FIX: the original executed the benchmark twice (process.run
        # followed by process.system_output); run it once and keep stdout.
        self.results = process.system_output(cmd).decode("utf-8")
        pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
        (throughput, procs) = pattern.findall(self.results)[0]
        self.whiteboard = json.dumps({'throughput': throughput,
                                      'procs': procs})

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            if self.fs_create:
                self.clear_disk(self.part_obj, self.disk)
            if self.lv_create:
                self.delete_lv()
            if self.raid_create:
                self.delete_raid()
class LtpFs(Test):
    '''
    Using LTP (Linux Test Project) testsuite to run Filesystem related tests
    '''

    def setUp(self):
        '''
        To check and install dependencies for the test
        '''
        smm = SoftwareManager()
        for package in ['gcc', 'make', 'automake', 'autoconf']:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel("%s is needed for the test to be run" % package)
        self.disk = self.params.get('disk', default=None)
        self.mount_point = self.params.get('dir', default=self.workdir)
        self.script = self.params.get('script')
        fstype = self.params.get('fs', default='ext4')
        self.fsstress_run = self.params.get('fsstress_loop', default='1')
        if self.disk is not None:
            self.part_obj = Partition(self.disk, mountpoint=self.mount_point)
            self.log.info("Unmounting the disk/dir if it is already mounted")
            self.part_obj.unmount()
            self.log.info("creating %s file system on %s", fstype, self.disk)
            self.part_obj.mkfs(fstype)
            self.log.info("mounting %s on %s", self.disk, self.mount_point)
            try:
                self.part_obj.mount()
            except PartitionError:
                self.fail("Mounting disk %s on directory %s failed"
                          % (self.disk, self.mount_point))
        url = "https://github.com/linux-test-project/ltp/"
        url += "archive/master.zip"
        tarball = self.fetch_asset("ltp-master.zip", locations=[url],
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
        os.chdir(ltp_dir)
        build.make(ltp_dir, extra_args='autotools')
        process.system('./configure', ignore_status=True)
        build.make(ltp_dir)
        build.make(ltp_dir, extra_args='install')
        # fsstress is run from its own build directory
        fsstress_dir = os.path.join(ltp_dir, 'testcases/kernel/fs/fsstress')
        os.chdir(fsstress_dir)

    def test_fsstress_run(self):
        '''
        Downloads LTP, compiles, installs and runs filesystem
        tests on a user specified disk
        '''
        if self.script == 'fsstress':
            arg = (" -d %s -n 500 -p 500 -r -l %s" %
                   (self.mount_point, self.fsstress_run))
            self.log.info("Args = %s", arg)
            # Clear the kernel ring buffer so only errors produced by this
            # run are considered below.
            cmd = "dmesg -C"
            process.system(cmd, shell=True, ignore_status=True, sudo=True)
            cmd = './%s %s' % (self.script, arg)
            process.run(cmd, ignore_status=True)
            cmd = "dmesg --level=err"
            if process.system_output(cmd, shell=True, ignore_status=True,
                                     sudo=False):
                self.fail("FSSTRESS test failed")

    def tearDown(self):
        '''
        Cleanup of disk used to perform this test
        '''
        if self.disk is not None:
            self.log.info("Unmounting disk %s on directory %s",
                          self.disk, self.mount_point)
            self.part_obj.unmount()
            self.log.info("Removing the filesystem created on %s", self.disk)
            delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
            if process.system(delete_fs, shell=True, ignore_status=True):
                # BUG FIX: Test.fail() takes a single message; %-format it
                # instead of passing extra positional args (TypeError).
                self.fail("Failed to delete filesystem on %s" % self.disk)
class Fsx(Test):

    '''
    The Fsx test is a file system exerciser test

    :avocado: tags=fs
    '''

    @staticmethod
    def mount_point(mount_dir):
        '''
        Return True if ``mount_dir`` is already a mount point according to
        /proc/mounts, False otherwise.
        '''
        lines = genio.read_file('/proc/mounts').rstrip('\t\r\0').splitlines()
        for substr in lines:
            # second whitespace-separated field of /proc/mounts is the
            # mount point
            mop = substr.split(" ")[1]
            if mop == mount_dir:
                return True
        return False

    def check_thp(self):
        '''
        Detect Transparent Huge Page page-cache support by looking for the
        'thp_file_alloc' counter in /proc/vmstat; sets and returns self.thp.
        '''
        if 'thp_file_alloc' in genio.read_file('/proc/vm'
                                               'stat').rstrip('\t\r\n\0'):
            self.thp = True
        return self.thp

    def setup_tmpfs_dir(self):
        '''
        Create (if needed) and mount a tmpfs on ``self.mount_dir``,
        requesting huge pages when the kernel supports THP page cache.
        Leaves ``self.device`` as None when the dir was already mounted.
        '''
        # check for THP page cache
        self.check_thp()
        if not os.path.isdir(self.mount_dir):
            os.makedirs(self.mount_dir)
        self.device = None
        if not self.mount_point(self.mount_dir):
            if self.thp:
                self.device = Partition(device="none",
                                        mountpoint=self.mount_dir,
                                        mount_options="huge=always")
            else:
                self.device = Partition(device="none",
                                        mountpoint=self.mount_dir)
            self.device.mount(mountpoint=self.mount_dir, fstype="tmpfs")

    def setUp(self):
        '''
        Setup fsx: install toolchain, fetch the LTP fsx-linux source and
        compile it into ./fsx in the work directory.
        '''
        smm = SoftwareManager()
        for package in ['gcc', 'make']:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel(package + ' is needed for the test to be run')
        fsx = self.fetch_asset(
            'https://raw.githubusercontent.com/linux-test-project/ltp/'
            'master/testcases/kernel/fs/fsx-linux/fsx-linux.c', expire='7d')
        os.chdir(self.workdir)
        # Fail early with a clear message if fsx does not compile, instead
        # of failing later with './fsx: not found' inside test()
        if process.system('gcc -o fsx %s' % fsx, shell=True,
                          ignore_status=True):
            self.fail('fsx compilation failed')
        self.thp = False
        # tearDown() reads these; initialise them so a failure before
        # test() runs does not raise AttributeError
        self.mount_dir = None
        self.device = None

    def test(self):
        '''
        Run Fsx test for exercising file system
        '''
        file_ub = self.params.get('file_ub', default='1000000')
        op_ub = self.params.get('op_ub', default='1000000')
        num_times = self.params.get('num_times', default='10000')
        self.mount_dir = self.params.get('tmpfs_mount_dir', default=None)
        thp_page_cache = self.params.get('thp_page_cache', default=None)
        if thp_page_cache:
            if self.mount_dir:
                self.setup_tmpfs_dir()
                output = os.path.join(self.mount_dir, 'result')
            else:
                self.cancel("tmpfs_mount_dir not specified")
        else:
            output = self.params.get('output_file', default='/tmp/result')
        results = process.system_output(
            './fsx -l %s -o %s -n -s 1 -N %s -d %s'
            % (file_ub, op_ub, num_times, output))
        # fsx prints 'All operations completed ...' as its last line on
        # success
        if b'All operations completed' not in results.splitlines()[-1]:
            self.fail('Fsx test failed')

    def tearDown(self):
        '''
        Unmount the tmpfs mounted by setup_tmpfs_dir(), if any.
        '''
        # device is only non-None when setup_tmpfs_dir() actually mounted
        # a tmpfs; guard with getattr in case test()/setUp() bailed early
        if getattr(self, 'mount_dir', None) and getattr(self, 'device', None):
            self.device.unmount()
class DiskInfo(Test):

    """
    DiskInfo test for different storage block device tools
    """

    def setUp(self):
        """
        Verifies if we have list of packages installed on OS
        and also skips the test if user gives the current OS boot disk as
        disk input it may erase the data

        :param disk: test disk where the disk operations can be done
        :param fs: type of filesystem to create
        :param dir: path of the directory to mount the disk device
        """
        smm = SoftwareManager()
        if 'ppc' not in platform.processor():
            self.cancel("Processor is not ppc64")
        self.disk = self.params.get('disk', default=None)
        self.dir = self.params.get('dir', default=self.srcdir)
        self.fstype = self.params.get('fs', default='ext4')
        self.log.info("disk: %s, dir: %s, fstype: %s",
                      self.disk, self.dir, self.fstype)
        if not self.disk:
            self.cancel("No disk input, please update yaml and re-run")
        # Refuse to run on the disk backing any mounted filesystem
        # (mkfs on the boot disk would destroy the OS)
        cmd = "df --output=source"
        if self.disk in process.system_output(cmd, ignore_status=True):
            self.cancel("Given disk is os boot disk, "
                        "it will be harmful to run this test")
        pkg_list = ["lshw"]
        self.distro = distro.detect().name
        if self.distro == 'Ubuntu':
            pkg_list.append("hwinfo")
        if self.fstype == 'ext4':
            pkg_list.append('e2fsprogs')
        if self.fstype == 'xfs':
            pkg_list.append('xfsprogs')
        if self.fstype == 'btrfs':
            if self.distro == 'Ubuntu':
                pkg_list.append("btrfs-tools")
        for pkg in pkg_list:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Package %s is missing and could not be installed"
                            % pkg)

    def run_command(self, cmd):
        """
        Run command and fail the test if any command fails
        """
        try:
            process.run(cmd, shell=True, sudo=True)
        except CmdError as details:
            self.fail("Command %s failed %s" % (cmd, details))

    def test_commands(self):
        """
        Test block device tools to list different disk devices
        """
        cmd_list = ["lsblk -l", "fdisk -l", "sfdisk -l", "parted -l",
                    "df -h", "blkid", "lshw -c disk", "grub2-probe /boot"]
        if self.distro == 'Ubuntu':
            cmd_list.append("hwinfo --block --short")
        for cmd in cmd_list:
            self.run_command(cmd)

    def test(self):
        """
        Test disk devices with different operations of creating filesystem
        and mount it on a directory and verify it with certain parameters
        name, size, UUID and IO sizes etc
        """
        # Collect all mismatches and report them together at the end
        msg = []
        disk = (self.disk.split("/dev/"))[1]
        if process.system("ls /dev/disk/by-id -l| grep -i %s" % disk,
                          ignore_status=True, shell=True, sudo=True) != 0:
            msg.append("Given disk %s is not present in /dev/disk/by-id"
                       % disk)
        if process.system("ls /dev/disk/by-path -l| grep -i %s" % disk,
                          ignore_status=True, shell=True, sudo=True) != 0:
            msg.append("Given disk %s is not present in /dev/disk/by-path"
                       % disk)

        # Verify disk listed in all tools
        cmd_list = ["fdisk -l ", "parted -l", "lsblk ", "lshw -c disk "]
        if self.distro == 'Ubuntu':
            cmd_list.append("hwinfo --short --block")
        for cmd in cmd_list:
            cmd = cmd + " | grep -i %s" % disk
            if process.system(cmd, ignore_status=True,
                              shell=True, sudo=True) != 0:
                msg.append("Given disk %s is not present in %s" % (disk, cmd))

        # Get the size and UUID of the disk ('sed -n 2p' selects the data
        # row beneath the lsblk SIZE header)
        cmd = "lsblk -l %s --output SIZE -b |sed -n 2p" % self.disk
        output = process.system_output(cmd, ignore_status=True,
                                       shell=True, sudo=True)
        # strip(), not [0]: indexing kept only the first character of the
        # size, which made the size comparisons below trivially pass
        self.size_bytes = output.strip()
        self.log.info("Disk: %s Size: %s", self.disk, self.size_bytes)

        # Get the physical/logical and minimal/optimal sector sizes
        pbs_sysfs = "/sys/block/%s/queue/physical_block_size" % disk
        pbs = genio.read_file(pbs_sysfs).rstrip("\n")
        lbs_sysfs = "/sys/block/%s/queue/logical_block_size" % disk
        lbs = genio.read_file(lbs_sysfs).rstrip("\n")
        mis_sysfs = "/sys/block/%s/queue/minimum_io_size" % disk
        mis = genio.read_file(mis_sysfs).rstrip("\n")
        ois_sysfs = "/sys/block/%s/queue/optimal_io_size" % disk
        ois = genio.read_file(ois_sysfs).rstrip("\n")
        self.log.info("pbs: %s, lbs: %s, mis: %s, ois: %s",
                      pbs, lbs, mis, ois)

        # Verify sector sizes reported by fdisk against sysfs
        sector_string = "Sector size (logical/physical): %s " \
                        "bytes / %s bytes" % (lbs, pbs)
        output = process.system_output("fdisk -l %s" % self.disk,
                                       ignore_status=True,
                                       shell=True, sudo=True)
        if sector_string not in output:
            msg.append("Mismatch in sector sizes of lbs,pbs in "
                       "fdisk o/p w.r.t sysfs paths")
        # minimum vs minimum and optimal vs optimal (the old code formatted
        # 'mis' twice, so the optimal I/O size was never actually checked)
        io_size_string = "I/O size (minimum/optimal): %s " \
                         "bytes / %s bytes" % (mis, ois)
        if io_size_string not in output:
            msg.append("Mismatch in IO sizes of mis and ois"
                       " in fdisk o/p w.r.t sysfs paths")

        # Verify disk size in other tools
        cmd = "fdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_bytes not in process.system_output(cmd,
                                                        ignore_status=True,
                                                        shell=True,
                                                        sudo=True):
            msg.append("Size of disk %s mismatch in fdisk o/p" % self.disk)
        cmd = "sfdisk -l %s | grep -i %s" % (self.disk, self.disk)
        if self.size_bytes not in process.system_output(cmd,
                                                        ignore_status=True,
                                                        shell=True,
                                                        sudo=True):
            msg.append("Size of disk %s mismatch in sfdisk o/p" % self.disk)

        # Create the filesystem and mount it
        self.part_obj = Partition(self.disk, mountpoint=self.dir)
        self.log.info("Unmounting disk/dir before creating file system")
        self.part_obj.unmount()
        self.log.info("creating file system")
        self.part_obj.mkfs(self.fstype)
        self.log.info("Mounting disk %s on directory %s",
                      self.disk, self.dir)
        try:
            self.part_obj.mount()
        except PartitionError:
            msg.append("failed to mount %s fs on %s to %s"
                       % (self.fstype, self.disk, self.dir))

        # Get UUID of the disk for each filesystem mount
        cmd = "blkid %s | cut -d '=' -f 2" % self.disk
        output = process.system_output(cmd, ignore_status=True,
                                       shell=True, sudo=True)
        self.uuid = output.split('"')[1]
        self.log.info("Disk: %s UUID: %s", self.disk, self.uuid)

        # Verify mount point, filesystem type and UUID for each test variant
        output = process.system_output("lsblk -l %s" % self.disk,
                                       ignore_status=True,
                                       shell=True, sudo=True)
        if self.dir in output:
            self.log.info("Mount point %s for disk %s updated in lsblk o/p",
                          self.dir, self.disk)
        output = process.system_output("df %s" % self.disk,
                                       ignore_status=True,
                                       shell=True, sudo=True)
        if self.dir in output:
            self.log.info("Mount point %s for disk %s updated in df o/p",
                          self.dir, self.disk)

        if process.system("ls /dev/disk/by-uuid -l| grep -i %s" % disk,
                          ignore_status=True, shell=True, sudo=True) != 0:
            msg.append("Given disk %s not having uuid" % disk)

        output = process.system_output("blkid %s" % self.disk,
                                       ignore_status=True,
                                       shell=True, sudo=True)
        if (self.disk in output and self.fstype in output and
                self.uuid in output):
            self.log.info("Disk %s of file system %s and "
                          "uuid %s is updated in blkid o/p",
                          self.disk, self.fstype, self.uuid)

        if process.system("grub2-probe %s" % self.dir, ignore_status=True):
            msg.append("Given disk %s's fs not detected by grub2" % disk)

        # Un-mount the directory
        self.log.info("Unmounting directory %s", self.dir)
        self.part_obj.unmount()

        # Locate this disk's section in 'lshw -c disk' output: 'middle' is
        # the line number of our disk, bracketed by the neighbouring
        # '-disk' entries, and check the sector sizes it reports
        cmd = 'lshw -c disk | grep -n "%s" | cut -d ":" -f 1' % self.disk
        middle = process.system_output(cmd, ignore_status=True,
                                       shell=True, sudo=True)
        if middle:
            cmd = r'lshw -c disk | grep -n "\-disk" | cut -d ":" -f 1'
            total = process.system_output(cmd, ignore_status=True,
                                          shell=True, sudo=True)
            lst = total.splitlines() + middle.splitlines()
            lst.sort()
            index = lst.index(middle.splitlines()[0])
            low = lst[index - 1]
            high = lst[index + 1]
            cmd = "lshw -c disk |sed -n '%s, %sp'" % (low, high)
            disk_details = process.system_output(cmd, ignore_status=True,
                                                 shell=True, sudo=True)
            ls_string = "logicalsectorsize=%s sectorsize=%s" % (lbs, pbs)
            if ls_string not in disk_details:
                msg.append("Mismatch in sector sizes of lbs,pbs"
                           " in lshw o/p w.r.t sysfs paths")

        if msg:
            self.fail("Some tests failed. Details below:\n%s"
                      % "\n".join(msg))

    def tearDown(self):
        '''
        Unmount the directory at the end if incase of test fails in between
        '''
        if hasattr(self, "part_obj"):
            if self.disk is not None:
                self.log.info("Unmounting directory %s", self.dir)
                self.part_obj.unmount()
                self.log.info("Removing the filesystem created on %s",
                              self.disk)
                delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" \
                            % self.disk
                if process.system(delete_fs, shell=True, ignore_status=True):
                    # fail() takes a single message; format the disk into it
                    # instead of passing it as a stray second argument
                    self.fail("Failed to delete filesystem on %s"
                              % self.disk)