def setUp(self):
    """
    Build interbench.

    Source:
    http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2
    """
    smm = SoftwareManager()
    for dependency in ('gcc', 'patch'):
        installed = smm.check_installed(dependency)
        if not installed and not smm.install(dependency):
            self.cancel("%s is needed for the test to be run" % dependency)

    # Require at least as much free disk (in MB) as total RAM.
    free_mb = (disk.freespace(self.teststmpdir) / 1024) / 1024
    if memory.memtotal() / 1024 > free_mb:
        self.cancel('Disk space is less than total memory. Skipping test')

    tarball = self.fetch_asset('http://slackware.cs.utah.edu/pub/kernel'
                               '.org/pub/linux/kernel/people/ck/apps/'
                               'interbench/interbench-0.31.tar.gz')
    archive.extract(tarball, self.srcdir)
    # e.g. 'interbench-0.31' derived from the tarball name.
    version = os.path.basename(tarball.split('.tar.')[0])
    self.sourcedir = os.path.join(self.srcdir, version)

    # Apply the makefile fix shipped in the data dir, then build.
    os.chdir(self.sourcedir)
    patch_file = os.path.join(os.path.abspath(self.datadir),
                              'makefile_fix.patch')
    process.run('patch -p1 < %s ' % patch_file, shell=True)
    build.make(self.sourcedir)
def setUp(self):
    """
    Read the test parameters and verify the work directory has enough
    free space to hold the dd file of the requested size.
    """
    self.ddfile = os.path.join(self.workdir, 'ddfile')
    self.iter = int(self.params.get('iterations', default='5'))
    # Default memory size: 90% of total RAM.
    default_size = memory.memtotal() * 0.9
    self.memsize = int(self.params.get('mem_size', default=default_size))
    free_kb = disk.freespace(self.workdir) / 1024
    if free_kb < self.memsize:
        self.cancel('%sM is needed for the test to be run' % self.memsize)
def setUp(self):
    '''
    Build Stutter Test
    Source:
    https://github.com/gaowanlong/stutter/archive/master.zip
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    if not smm.check_installed("gcc") and not smm.install("gcc"):
        self.error('Gcc is needed for the test to be run')
    locations = ["https://github.com/gaowanlong/stutter/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("stutter.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.srcdir = os.path.join(self.srcdir, 'stutter-master')
    mem_byte = str(memory.memtotal())
    # FIX: the bare "print mem_byte" statement is a SyntaxError on
    # Python 3; print() parses identically on Python 2 for one argument.
    print(mem_byte)
    self._memory = self.params.get('memory', default=mem_byte)
    self._iteration = self.params.get('iteration', default='10')
    self._logdir = self.params.get('logdir', default='/var/tmp/logdir')
    self._rundir = self.params.get('rundir', default='/tmp')
    # Create the log dir directly instead of shelling out to 'mkdir -p'
    # (matches the sibling stutter setUp variants in this file).
    if not os.path.exists(self._logdir):
        os.makedirs(self._logdir)
    # export env variable, used by test script
    os.environ['MEMTOTAL_BYTES'] = self._memory
    os.environ['ITERATIONS'] = self._iteration
    os.environ['LOGDIR_RESULTS'] = self._logdir
    os.environ['TESTDISK_DIR'] = self._rundir
    build.make(self.srcdir)
def setUp(self):
    '''
    Build interbench
    Source:
    http://ck.kolivas.org/apps/interbench/interbench-0.31.tar.bz2
    '''
    sm_manager = SoftwareManager()
    # Build dependencies: compiler plus 'patch' for the makefile fix below.
    for pkg in ['gcc', 'patch']:
        if (not sm_manager.check_installed(pkg) and
                not sm_manager.install(pkg)):
            self.cancel("%s is needed for the test to be run" % pkg)
    # disk.freespace() is in bytes; two /1024 steps give MB.
    disk_free_mb = (disk.freespace(self.teststmpdir) / 1024) / 1024
    # memtotal() is presumably in KB (the single /1024 pairs it with the
    # MB free-space figure) — TODO confirm against avocado.utils.memory.
    if memory.memtotal() / 1024 > disk_free_mb:
        self.cancel('Disk space is less than total memory. Skipping test')
    tarball = self.fetch_asset('http://slackware.cs.utah.edu/pub/kernel'
                               '.org/pub/linux/kernel/people/ck/apps/'
                               'interbench/interbench-0.31.tar.gz')
    data_dir = os.path.abspath(self.datadir)
    archive.extract(tarball, self.srcdir)
    # e.g. 'interbench-0.31' taken from the tarball file name.
    version = os.path.basename(tarball.split('.tar.')[0])
    # NOTE(review): this rebinds self.srcdir to the extracted
    # subdirectory, shadowing the original asset directory from here on.
    self.srcdir = os.path.join(self.srcdir, version)
    # Patch for make file
    os.chdir(self.srcdir)
    makefile_patch = 'patch -p1 < %s ' % (os.path.join(
        data_dir, 'makefile_fix.patch'))
    process.run(makefile_patch, shell=True)
    build.make(self.srcdir)
def test(self):
    """
    Enable THP with khugepaged defrag disabled, fragment memory with dd,
    then re-enable defrag and verify that more hugepages can be set
    afterwards (i.e. that defragmentation actually happened).
    """
    memory.set_thp_value("enabled", "always")        # enable THP
    memory.set_thp_value("khugepaged/defrag", "0")   # defrag off

    self.log.info("Fragmenting the memory Using dd command \n")
    for idx in range(self.count):
        cmd = ('dd if=/dev/urandom of=%s/%d bs=%dK count=1'
               % (self.mem_path, idx, self.block_size))
        failed = process.system(cmd, timeout=900, verbose=False,
                                ignore_status=True, shell=True)
        if failed:
            self.fail('Defrag command Failed %s' % cmd)

    total = memory.memtotal()
    hugepagesize = memory.get_huge_page_size()
    # Target 80% of the theoretical hugepage capacity.
    nr_full = int(0.8 * (total / hugepagesize))

    # Max hugepages achievable while memory is still fragmented.
    nr_hp_before = self.set_max_hugepages(nr_full)

    memory.set_thp_value("khugepaged/defrag", "1")   # defrag on
    self.log.info("Sleeping %d seconds to settle out things", 10)
    time.sleep(10)

    # Max hugepages achievable after khugepaged had a chance to defrag.
    nr_hp_after = self.set_max_hugepages(nr_full)

    if nr_hp_before >= nr_hp_after:
        self.fail("No Memory Defragmentation\n"
                  "%d hugepages before turning khugepaged on,\n"
                  "%d After it" % (nr_hp_before, nr_hp_after))
    self.log.info("Defrag test passed")
def setUp(self):
    """
    Fetch and build the 'eatmemory' tool and work out how much memory
    the test should try to consume.
    """
    sm = SoftwareManager()
    # FIX: removed 'detected_distro = distro.detect()' — the value was
    # assigned but never used.
    deps = ['gcc', 'make']
    for package in deps:
        if not sm.check_installed(package) and not sm.install(package):
            self.error(package + ' is needed for the test to be run')
    url = 'https://github.com/julman99/eatmemory/archive/master.zip'
    tarball = self.fetch_asset("eatmemory.zip", locations=[url],
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    self.sourcedir = os.path.join(self.srcdir, "eatmemory-master")
    build.make(self.sourcedir)
    # Default to total RAM when 'memory_to_test' is not given.
    mem = self.params.get('memory_to_test', default=memory.memtotal())
    self.mem_to_eat = self._mem_to_mbytes(mem)
    if self.mem_to_eat is None:
        self.error("Memory '%s' not valid." % mem)
def _init_params(self):
    """
    Retrieves and checks the test params.

    Reads 'disk', 'dir', 'fs', 'chunk_mb' and 'gigabytes' params,
    computes the number of chunks to use, and (when 'disk' is set)
    formats and mounts it on the test directory.
    """
    self.disk = self.params.get('disk', default=None)
    self.dirs = self.params.get('dir', default=self.workdir)
    self.fstype = self.params.get('fs', default='ext4')
    memory_mb = memory.memtotal() / 1024
    # FIX: int(params.get(..., default=None)) raised TypeError whenever
    # the param was absent; convert only after the None check.
    self.chunk_mb = self.params.get('chunk_mb', default=None)
    if self.chunk_mb is None:   # By default total RAM
        self.chunk_mb = memory_mb
    else:
        self.chunk_mb = int(self.chunk_mb)
    if self.chunk_mb == 0:   # Chunks must be at least 1MB
        self.chunk_mb = 1
    if memory_mb > self.chunk_mb:
        # FIX: message operands were swapped (printed chunk > RAM in the
        # branch where RAM > chunk holds).
        self.cancel("Chunk size has to be greater or equal to RAM size. "
                    "(%s > %s)" % (memory_mb, self.chunk_mb))
    gigabytes = self.params.get('gigabytes', default=None)
    if gigabytes is None:
        # FIX: the cap was 107374182400 (100GB in *bytes*) compared
        # against a value already converted to GB, so it never applied.
        free = 100    # cap it at 100GB by default
        free = min(utils_disk.freespace(self.dirs) / 1073741824, free)
        gigabytes = free
    else:
        gigabytes = int(gigabytes)
    self.no_chunks = 1024 * gigabytes / self.chunk_mb
    if self.no_chunks == 0:
        self.cancel("Free disk space is lower than chunk size (%s, %s)"
                    % (1024 * gigabytes, self.chunk_mb))
    # NOTE(review): self.dirs is a single path string here, so
    # len(self.dirs) logs its character count — verify intent.
    self.log.info("Test will use %s chunks %sMB each in %sMB RAM using %s "
                  "GB of disk space on %s dirs (%s).", self.no_chunks,
                  self.chunk_mb, memory_mb, self.no_chunks * self.chunk_mb,
                  len(self.dirs), self.dirs)
    if self.disk is not None:
        self.part_obj = Partition(self.disk, mountpoint=self.dirs)
        self.log.info("Unmounting the disk/dir if it is already mounted")
        self.part_obj.unmount()
        self.log.info("creating %s fs on %s", self.fstype, self.disk)
        self.part_obj.mkfs(self.fstype)
        self.log.info("mounting %s on %s", self.disk, self.dirs)
        try:
            self.part_obj.mount()
        except PartitionError:
            self.fail("Mounting disk %s on directory %s failed"
                      % (self.disk, self.dirs))
def test(self): ''' Enables THP, Turns off the defrag and fragments the memory. Once the memory gets fragmented turns on the defrag and checks whether defrag happened. ''' # Enables THP memory.set_thp_value("enabled", "always") # Turns off Defrag memory.set_thp_value("khugepaged/defrag", "0") # Fragments The memory self.log.info("Fragmenting the memory Using dd command \n") for iterator in range(self.count): defrag_cmd = 'dd if=/dev/urandom of=%s/%d bs=%dK count=1'\ % (self.mem_path, iterator, self.block_size) if(process.system(defrag_cmd, timeout=900, verbose=False, ignore_status=True, shell=True)): self.fail('Defrag command Failed %s' % defrag_cmd) total = memory.memtotal() hugepagesize = memory.get_huge_page_size() nr_full = int(0.8 * (total / hugepagesize)) # Sets max possible hugepages before defrag on nr_hp_before = self.set_max_hugepages(nr_full) # Turns Defrag ON memory.set_thp_value("khugepaged/defrag", "1") self.log.info("Sleeping %d seconds to settle out things", 10) time.sleep(10) # Sets max hugepages after defrag on nr_hp_after = self.set_max_hugepages(nr_full) # Check for memory defragmentation if nr_hp_before >= nr_hp_after: e_msg = "No Memory Defragmentation\n" e_msg += "%d hugepages before turning khugepaged on,\n"\ "%d After it" % (nr_hp_before, nr_hp_after) self.fail(e_msg) self.log.info("Defrag test passed")
def setUp(self):
    """
    Fetch and build the 'eatmemory' tool and work out how much memory
    the test should try to consume.
    """
    sm = SoftwareManager()
    # FIX: removed 'detected_distro = distro.detect()' — the value was
    # assigned but never used.
    deps = ['gcc', 'make']
    for package in deps:
        if not sm.check_installed(package) and not sm.install(package):
            self.error(package + ' is needed for the test to be run')
    url = 'https://github.com/julman99/eatmemory/archive/master.zip'
    tarball = self.fetch_asset("eatmemory.zip", locations=[url],
                               expire='7d')
    archive.extract(tarball, self.srcdir)
    # NOTE: rebinds self.srcdir to the extracted subdirectory.
    self.srcdir = os.path.join(self.srcdir, "eatmemory-master")
    build.make(self.srcdir)
    # Default to total RAM when 'memory_to_test' is not given.
    mem = self.params.get('memory_to_test', default=memory.memtotal())
    self.mem_to_eat = self._mem_to_mbytes(mem)
    if self.mem_to_eat is None:
        self.error("Memory '%s' not valid." % mem)
def setUp(self):
    '''
    Build Stutter Test
    Source:
    https://github.com/gaowanlong/stutter/archive/master.zip
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    for package in ['gcc', 'make']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel("Fail to install %s required for this test."
                        % package)
    locations = ["https://github.com/gaowanlong/stutter/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("stutter.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.workdir)
    self.sourcedir = os.path.join(self.workdir, 'stutter-master')
    mem_byte = str(memory.memtotal())
    # FIX: the bare "print mem_byte" statement is a SyntaxError on
    # Python 3; print() parses identically on Python 2 for one argument.
    print(mem_byte)
    self._memory = self.params.get('memory', default=mem_byte)
    self._iteration = self.params.get('iteration', default='10')
    self._logdir = self.params.get('logdir', default='/var/tmp/logdir')
    self._rundir = self.params.get('rundir', default='/tmp')
    if not os.path.exists(self._logdir):
        os.makedirs(self._logdir)
    # export env variable, used by test script
    os.environ['MEMTOTAL_BYTES'] = self._memory
    os.environ['ITERATIONS'] = self._iteration
    os.environ['LOGDIR_RESULTS'] = self._logdir
    os.environ['TESTDISK_DIR'] = self._rundir
    build.make(self.sourcedir)
def _init_params(self):
    """
    Retrieves and checks the test params
    """
    disks = self.params.get('disks', default=None)
    if disks is None:   # Avocado does not accept lists in params.get()
        disks = [self.workdir]
    elif isinstance(disks, basestring):   # Allow specifying disks as str
        # NOTE(review): 'basestring' exists only on Python 2 — this block
        # predates a Python 3 port; confirm interpreter version.
        disks = disks.split(',')  # it's string pylint: disable=E1101
    for disk in disks:   # Disks have to be mounted dirs
        if not os.path.isdir(disk):
            os.makedirs(disk)
    self.disks = disks
    # Total RAM in MB (memtotal() presumably returns KB — TODO confirm).
    memory_mb = memory.memtotal() / 1024
    self.chunk_mb = self.params.get('chunk_mb', default=None)
    if self.chunk_mb is None:   # By default total RAM
        self.chunk_mb = memory_mb
    if self.chunk_mb == 0:   # Guard against a zero-sized chunk
        self.chunk_mb = 1
    if memory_mb > self.chunk_mb:
        self.skip("Chunk size has to be greater or equal to RAM size. "
                  "(%s > %s)" % (self.chunk_mb, memory_mb))
    gigabytes = self.params.get('gigabytes', default=None)
    if gigabytes is None:
        free = 100    # cap it at 100GB by default
        # Use the smallest free space across all test disks (in GB).
        for disk in self.disks:
            free = min(utils_disk.freespace(disk) / 1073741824, free)
        gigabytes = free
    # Number of chunk_mb-sized chunks filling 'gigabytes' of disk.
    self.no_chunks = 1024 * gigabytes / self.chunk_mb
    if self.no_chunks == 0:
        self.skip("Free disk space is lower than chunk size (%s, %s)"
                  % (1024 * gigabytes, self.chunk_mb))
    self.log.info("Test will use %s chunks %sMB each in %sMB RAM using %s "
                  "GB of disk space on %s disks (%s).", self.no_chunks,
                  self.chunk_mb, memory_mb, self.no_chunks * self.chunk_mb,
                  len(self.disks), self.disks)
def setUp(self):
    '''
    Build Stutter Test
    Source:
    https://github.com/gaowanlong/stutter/archive/master.zip
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    for package in ['gcc', 'make']:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel(
                "Fail to install %s required for this test." % package)
    locations = ["https://github.com/gaowanlong/stutter/archive/"
                 "master.zip"]
    tarball = self.fetch_asset("stutter.zip", locations=locations,
                               expire='7d')
    archive.extract(tarball, self.workdir)
    self.sourcedir = os.path.join(self.workdir, 'stutter-master')
    mem_byte = str(memory.memtotal())
    # FIX: the bare "print mem_byte" statement is a SyntaxError on
    # Python 3; print() parses identically on Python 2 for one argument.
    print(mem_byte)
    self._memory = self.params.get('memory', default=mem_byte)
    self._iteration = self.params.get('iteration', default='10')
    self._logdir = self.params.get('logdir', default='/var/tmp/logdir')
    self._rundir = self.params.get('rundir', default='/tmp')
    if not os.path.exists(self._logdir):
        os.makedirs(self._logdir)
    # export env variable, used by test script
    os.environ['MEMTOTAL_BYTES'] = self._memory
    os.environ['ITERATIONS'] = self._iteration
    os.environ['LOGDIR_RESULTS'] = self._logdir
    os.environ['TESTDISK_DIR'] = self._rundir
    build.make(self.sourcedir)
def test(self):
    """
    Build a stress-ng command line from the test parameters, run the
    stressors (in parallel or sequentially), then scan dmesg for kernel
    error signatures and fail if any are found.
    """
    args = []
    cmdline = ''
    timeout = ''
    if not (self.stressors or self.v_stressors):
        # No explicit stressor lists: drive by class.
        if 'all' in self.class_type:
            args.append('--all %s ' % self.workers)
        elif 'cpu' in self.class_type:
            self.workers = 2 * multiprocessing.cpu_count()
            args.append('--cpu %s --cpu-method all ' % self.workers)
        else:
            args.append('--class %s --sequential %s '
                        % (self.class_type, self.workers))
    else:
        if self.parallel:
            # All requested stressors go on one command line.
            if self.stressors:
                for stressor in self.stressors.split(' '):
                    cmdline += '--%s %s ' % (stressor, self.workers)
            if self.v_stressors:
                for v_stressor in self.v_stressors.split(' '):
                    cmdline += '--%s %s ' % (v_stressor, self.workers)
            args.append(cmdline)
    # Generic stress-ng option flags derived from params.
    if self.class_type in ['memory', 'vm', 'all']:
        args.append('--vm-bytes 80% ')
    if self.aggressive and self.maximize:
        args.append('--aggressive --maximize --oomable ')
    if self.exclude:
        args.append('--exclude %s ' % self.exclude)
    if self.verify:
        args.append('--verify ')
    if self.syslog:
        args.append('--syslog ')
    if self.metrics:
        args.append('--metrics ')
    if self.times:
        args.append('--times ')
    cmd = 'stress-ng %s' % " ".join(args)
    if self.parallel:
        if self.ttimeout:
            cmd += ' --timeout %s ' % self.ttimeout
        process.run(cmd, ignore_status=True, sudo=True)
    else:
        # Sequential mode: run one stress-ng invocation per stressor.
        if self.ttimeout:
            timeout = ' --timeout %s ' % self.ttimeout
        if self.stressors:
            for stressor in self.stressors.split(' '):
                stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                              timeout)
                process.run("%s %s" % (cmd, stress_cmd),
                            ignore_status=True, sudo=True)
        # v_stressors get extra time proportional to RAM size (in GB).
        if self.ttimeout and self.v_stressors:
            timeout = ' --timeout %s ' % str(
                int(self.ttimeout) + int(memory.memtotal() / 1024 / 1024))
        if self.v_stressors:
            for stressor in self.v_stressors.split(' '):
                stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                              timeout)
                process.run("%s %s" % (cmd, stress_cmd),
                            ignore_status=True, sudo=True)
    collect_dmesg(self)
    errors = []
    pattern = ['WARNING: CPU:', 'Oops', 'Segfault', 'soft lockup',
               'Unable to handle', 'Hard LOCKUP']
    # NOTE(review): on Python 3 system_output() may return bytes;
    # 'str in bytes' would raise — confirm against the avocado version.
    logs = process.system_output('dmesg').splitlines()
    for fail_pattern in pattern:
        for log in logs:
            if fail_pattern in log:
                errors.append(log)
    if errors:
        # FIX: the failure message misspelled 'dmesg' as 'demsg'
        # (the sibling variant of this test spells it correctly).
        self.fail("Test failed with following errors in dmesg : %s "
                  % "\n".join(errors))
def run(test, params, env):
    """
    Qemu memory hotplug test:
    1) Boot guest with -m option.
    2) Hotplug memory with invalid params.
    3) Check qemu prompt message.
    4) Check vm is alive after hotplug.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    @error_context.context_aware
    def _hotplug_memory(vm, name):
        # Define the memory device(s) for 'name' and hotplug each one,
        # fixing the dimm address first when 'set_addr' requests it.
        hotplug_test = MemoryHotplugTest(test, params, env)
        devices = vm.devices.memory_define_by_params(params, name)
        for dev in devices:
            if isinstance(dev, qdevices.Dimm):
                if params["set_addr"] == "yes":
                    # Use the address given in the test params.
                    addr = params["addr_dimm_%s" % name]
                else:
                    # Otherwise query qemu for the device's address.
                    addr = hotplug_test.get_mem_addr(vm, dev.get_qid())
                dev.set_param("addr", addr)
            error_context.context("Hotplug %s '%s' to VM"
                                  % ("pc-dimm", dev.get_qid()),
                                  logging.info)
            vm.devices.simple_hotplug(dev, vm.monitor)
            hotplug_test.update_vm_after_hotplug(vm, dev)
        return devices

    def collect_hotplug_info():
        # Try each target; record either the raised error text or a
        # success marker, keyed by target name.
        details = {}
        for target_mem in params.objects("target_mems"):
            try:
                _hotplug_memory(vm, target_mem)
            except Exception as e:
                error_context.context("Error happen %s: %s"
                                      % (target_mem, e), logging.info)
                details.update({target_mem: str(e)})
            else:
                error_context.context("Hotplug memory successful",
                                      logging.info)
                details.update({target_mem: "Hotplug memory successful"})
        return details

    def check_msg(keywords, msg):
        # 'keywords' is treated as a regex searched within 'msg'.
        if not re.search(r"%s" % keywords, msg):
            test.fail(
                "No valid keywords were found in the qemu prompt message")

    if params["size_mem"] == "<overcommit>":
        # Request twice the host RAM (KB -> GB) to force overcommit.
        overcommit_mem = normalize_data_size(
            "%sK" % (memory.memtotal() * 2), "G")
        params["size_mem"] = "%sG" % round(float(overcommit_mem))
    if params["policy_mem"] == "bind":
        # Bind to a NUMA node one past the highest valid node id
        # (deliberately invalid host-nodes value).
        params["host-nodes"] = str(max(memory.numa_nodes()) + 1)
    params["start_vm"] = "yes"
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    vm.wait_for_login()
    msg = collect_hotplug_info()
    if len(params.objects("target_mems")) == 1:
        error_context.context("Check qemu prompt message.", logging.info)
        check_msg(params["keywords"], msg[params["target_mems"]])
    else:
        # Each target may carry its own expected keywords.
        for target_mem in params.objects("target_mems"):
            mem_params = params.object_params(target_mem)
            error_context.context(
                "Check %s qemu prompt "
                "message." % target_mem, logging.info)
            check_msg(mem_params["keywords"], msg[target_mem])
def test(self):
    """
    Build a stress-ng command line from the test parameters, run the
    stressors (in parallel or sequentially), then scan dmesg for kernel
    error signatures and fail if any are found.
    """
    args = []
    cmdline = ''
    timeout = ''
    if not (self.stressors or self.v_stressors):
        # No explicit stressor lists: drive stress-ng by class.
        if 'all' in self.class_type:
            args.append('--all %s ' % self.workers)
        elif 'cpu' in self.class_type:
            self.workers = 2 * multiprocessing.cpu_count()
            args.append('--cpu %s --cpu-method all ' % self.workers)
        else:
            args.append('--class %s --sequential %s '
                        % (self.class_type, self.workers))
    else:
        if self.parallel:
            # All requested stressors on one command line.
            if self.stressors:
                for stressor in self.stressors.split(' '):
                    cmdline += '--%s %s ' % (stressor, self.workers)
            if self.v_stressors:
                for v_stressor in self.v_stressors.split(' '):
                    cmdline += '--%s %s ' % (v_stressor, self.workers)
            args.append(cmdline)
    # Generic stress-ng option flags derived from params.
    if self.class_type in ['memory', 'vm', 'all']:
        args.append('--vm-bytes 80% ')
    if self.aggressive and self.maximize:
        args.append('--aggressive --maximize --oomable ')
    if self.exclude:
        args.append('--exclude %s ' % self.exclude)
    if self.verify:
        args.append('--verify ')
    if self.syslog:
        args.append('--syslog ')
    if self.metrics:
        args.append('--metrics ')
    if self.times:
        args.append('--times ')
    cmd = 'stress-ng %s' % " ".join(args)
    if self.parallel:
        if self.ttimeout:
            cmd += ' --timeout %s ' % self.ttimeout
        process.run(cmd, ignore_status=True, sudo=True)
    else:
        # Sequential mode: one stress-ng invocation per stressor.
        if self.ttimeout:
            timeout = ' --timeout %s ' % self.ttimeout
        if self.stressors:
            for stressor in self.stressors.split(' '):
                stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                              timeout)
                process.run("%s %s" % (cmd, stress_cmd),
                            ignore_status=True, sudo=True)
        # v_stressors get extra time proportional to RAM size (GB).
        if self.ttimeout and self.v_stressors:
            timeout = ' --timeout %s ' % str(
                int(self.ttimeout) + int(memory.memtotal() / 1024 / 1024))
        if self.v_stressors:
            for stressor in self.v_stressors.split(' '):
                stress_cmd = ' --%s %s %s' % (stressor, self.workers,
                                              timeout)
                process.run("%s %s" % (cmd, stress_cmd),
                            ignore_status=True, sudo=True)
    collect_dmesg(self)
    ERROR = []
    pattern = ['WARNING: CPU:', 'Oops', 'Segfault', 'soft lockup',
               'Unable to handle', 'Hard LOCKUP']
    # NOTE(review): on Python 3 system_output() may return bytes, and
    # 'str in bytes' raises TypeError — confirm the avocado version.
    logs = process.system_output('dmesg').splitlines()
    for fail_pattern in pattern:
        for log in logs:
            if fail_pattern in log:
                ERROR.append(log)
    if ERROR:
        self.fail("Test failed with following errors in dmesg : %s "
                  % "\n".join(ERROR))