Example #1
    def setUp(self):
        '''
        Sets the Required params for dd and mounts the tmpfs dir
        '''

        self.swap_free = []
        mem_free = memory.read_from_meminfo("MemFree") // 1024
        mem = memory.read_from_meminfo("MemTotal") // 1024
        swap = memory.read_from_meminfo("SwapTotal") // 1024
        self.hugepage_size = memory.read_from_meminfo("Hugepagesize") // 1024
        self.swap_free.append(memory.read_from_meminfo("SwapFree") // 1024)
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        self.dd_timeout = 900

        # If there is enough free swap, fill all memory with dd
        if self.swap_free[0] > (mem - mem_free):
            self.count = (mem // self.hugepage_size) // 2
            tmpfs_size = mem
        else:
            self.count = (mem_free // self.hugepage_size) // 2
            tmpfs_size = mem_free

        if swap <= 0:
            self.skip("Swap is not enabled in the system")

        if not os.path.ismount(self.mem_path):
            if not os.path.isdir(self.mem_path):
                os.makedirs(self.mem_path)
            self.device = Partition(device="none", mountpoint=self.mem_path)
            self.device.mount(mountpoint=self.mem_path,
                              fstype="tmpfs",
                              args="-o size=%sM" % tmpfs_size)
Example #2
    def test(self):
        '''
        Enables THP, runs dd to fill the available memory, and checks
        whether THP pages are swapped out.
        '''

        # Enables THP
        try:
            memory.set_thp_value("enabled", "always")
        except Exception as details:
            self.fail("Failed  %s" % details)

        for iterator in range(self.count):
            swap_cmd = "dd if=/dev/zero of=%s/%d bs=%sM "\
                       "count=1" % (self.mem_path, iterator,
                                    self.hugepage_size * 2)
            if (process.system(swap_cmd,
                               timeout=self.dd_timeout,
                               verbose=False,
                               ignore_status=True,
                               shell=True)):
                self.fail('Swap command Failed %s' % swap_cmd)

        self.swap_free.append(memory.read_from_meminfo("SwapFree") / 1024)

        # Check whether swap was actually used
        if self.swap_free[1] - self.swap_free[0] >= 0:
            self.fail("Swap Space remains untouched")
Example #3
    def test(self):
        """
        Execute 'stress' with proper arguments.
        """
        length = self.params.get('stress_lenght', default=60)
        threads = self.params.get('threads', default=None)
        memory_per_thread = self.params.get('memory_per_thread', default=None)
        file_size_per_thread = self.params.get('file_size_per_thread',
                                               default=None)
        if threads is None:
            # We will use 2 workers of each type for each CPU detected
            threads = 2 * multiprocessing.cpu_count()

        if memory_per_thread is None:
            # Sometimes the default memory used by each memory worker (256 MB)
            # might make the machine go OOM, and then funny things might start
            # to happen. Let's avoid that.
            mb = (memory.freememtotal() +
                  memory.read_from_meminfo('SwapFree') // 2)
            memory_per_thread = (mb * 1024) // threads
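            # For illustration only (hypothetical numbers): meminfo values are
            # in kB, so with 4 GB free (4194304 kB), 2 GB of free swap
            # (2097152 kB) and 8 workers this gives
            # mb = 4194304 + 2097152 // 2 = 5242880 kB and
            # memory_per_thread = 5242880 * 1024 // 8 = 671088640 bytes,
            # i.e. 640 MiB per worker.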

        if file_size_per_thread is None:
            # Even though unlikely, it's good to avoid allocating more disk
            # than this machine actually has under the test's source directory
            # (limit the amount of disk used to at most 90% of the free space).
            free_disk = disk.freespace(self.sourcedir)
            file_size_per_thread = 1024 ** 2
            if (0.9 * free_disk) < file_size_per_thread * threads:
                file_size_per_thread = (0.9 * free_disk) / threads
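            # For illustration only (hypothetical numbers): with 50 GB of free
            # disk and 8 workers, 0.9 * free_disk is roughly 45 GB, far more
            # than 8 * 1 MB, so the 1 MB default per worker is kept.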

        # Number of CPU workers spinning on sqrt()
        args = '--cpu %d ' % threads
        # Number of IO workers spinning on sync()
        args += '--io %d ' % threads
        # Number of Memory workers spinning on malloc()/free()
        args += '--vm %d ' % threads
        # Amount of memory used by each worker
        args += '--vm-bytes %d ' % memory_per_thread
        # Number of HD workers spinning on write()/unlink()
        args += '--hdd %d ' % threads
        # Size of the files created by each worker in bytes
        args += '--hdd-bytes %d ' % file_size_per_thread
        # Time for which the stress test will run
        args += '--timeout %d ' % length
        # Verbose flag
        args += '--verbose'

        os.chdir(self.sourcedir)
        cmd = ('./src/stress %s' % args)
        process.run(cmd)
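
For a hypothetical 4-CPU machine using the defaults above (threads = 8, a 60 s run), the assembled command line would look roughly like the following; the exact byte counts depend on the free memory and disk at runtime and are illustrative only:

    ./src/stress --cpu 8 --io 8 --vm 8 --vm-bytes 671088640 --hdd 8 --hdd-bytes 1048576 --timeout 60 --verbose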