Example #1
def get_memory_info(lvms):
    """
    Get memory information from host and guests in format:
    Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}

    @param lvms: List of VM objects
    @return: String with memory info report
    """
    if not isinstance(lvms, list):
        raise error.TestError("Invalid list passed to get_stat: %s " % lvms)

    try:
        meminfo = "Host: memfree = "
        meminfo += str(int(utils.freememtotal()) // 1024) + "M; "
        meminfo += "swapfree = "
        mf = int(utils.read_from_meminfo("SwapFree")) // 1024
        meminfo += str(mf) + "M; "
    except Exception as e:
        raise error.TestFail("Could not fetch host free memory info, "
                             "reason: %s" % e)
Example #2
    def run_once(self, args='', stress_length=60):
        if not args:
            # We will use 2 workers of each type for each CPU detected
            threads = 2 * utils.count_cpus()

            # Sometimes the default memory used by each memory worker (256 M)
            # might make our machine go OOM and then funny things might start to
            # happen. Let's avoid that.
            # Free memory plus half of the free swap, in KB.
            kb = utils.freememtotal() + utils.read_from_meminfo('SwapFree') // 2
            memory_per_thread = (kb * 1024) // threads

            # Even though unlikely, it's good to prevent from allocating more
            # disk than this machine actually has on its autotest directory
            # (limit the amount of disk used to max of 90 % of free space)
            free_disk = utils.freespace(self.srcdir)
            file_size_per_thread = 1024 ** 2
            if (0.9 * free_disk) < file_size_per_thread * threads:
                file_size_per_thread = (0.9 * free_disk) / threads

            # Number of CPU workers spinning on sqrt()
            args = '--cpu %d ' % threads
            # Number of IO workers spinning on sync()
            args += '--io %d ' % threads
            # Number of Memory workers spinning on malloc()/free()
            args += '--vm %d ' % threads
            # Amount of memory used per each worker
            args += '--vm-bytes %d ' % memory_per_thread
            # Number of HD workers spinning on write()/unlink()
            args += '--hdd %d ' % threads
            # Size of the files created by each worker in bytes
            args += '--hdd-bytes %d ' % file_size_per_thread
            # Time for which the stress test will run
            args += '--timeout %d ' % stress_length
            # Verbose flag
            args += '--verbose'

        utils.system(self.srcdir + '/src/stress ' + args)
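
For illustration, the arithmetic above on a hypothetical machine with 2 CPUs,
4 GB of free RAM and 2 GB of free swap (assumed numbers, not captured output)
works out as follows:

    # Sketch with assumed inputs; the utils values are reported in KB.
    threads = 2 * 2                                # 2 workers of each type per CPU
    kb = 4 * 1024 * 1024 + (2 * 1024 * 1024) // 2  # free RAM + half of free swap
    memory_per_thread = (kb * 1024) // threads     # 1342177280 bytes (~1.25 GB)
    print('--vm %d --vm-bytes %d' % (threads, memory_per_thread))
    # -> --vm 4 --vm-bytes 1342177280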
Example #3
    def run_once(self,
                 seconds=60,
                 free_memory_fraction=0.95,
                 wait_secs=0,
                 disk_thread=True):
        '''
        Args:
          seconds: number of seconds to run stressapptest.
          free_memory_fraction: fraction of free memory (as determined by
            utils.freememtotal()) to use.
          wait_secs: time to wait in seconds before executing stressapptest.
          disk_thread: also stress disk using the -f argument of stressapptest.
        '''
        assert 0 < free_memory_fraction < 1

        # Wait for other parallel tests' memory usage to settle to a stable
        # value, so stressapptest will not claim too much memory.
        if wait_secs:
            time.sleep(wait_secs)

        # Allow shmem access to all of memory. This is used for 32 bit
        # access to > 1.4G. Virtual address space limitation prevents
        # directly mapping the memory.
        utils.run('mount -o remount,size=100% /dev/shm')
        cpus = max(utils.count_cpus(), 1)
        mbytes = max(int(utils.freememtotal() * free_memory_fraction / 1024),
                     512)
        # Even though shared memory allows us to go past the 1.4G
        # limit, ftruncate still limits us to 2G max on 32 bit systems.
        if sys.maxsize < 2**32 and mbytes > 2047:
            mbytes = 2047
        # SAT should use as much memory as possible, while still
        # avoiding OOMs and allowing the kernel to run, so that
        # the maximum amount of memory can be tested.
        args = ' -M %d' % mbytes  # megabytes to test
        # The number of seconds under test can be chosen to fit into
        # manufacturing or test flow. 60 seconds gives several
        # passes and several patterns over each memory location
        # and should catch clearly faulty memory. 4 hours
        # is an effective run-in test, to catch lower-frequency errors.
        args += ' -s %d' % seconds  # seconds to run
        # One memory copy thread per CPU should keep the memory bus
        # as saturated as possible, while keeping each CPU busy as well.
        args += ' -m %d' % cpus  # memory copy threads.
        # SSE copy and checksum increases the rate at which the CPUs
        # can drive memory, as well as stressing the CPU.
        args += ' -W'  # Use SSE optimization in memory threads.
        # File IO threads allow stressful transactions over the
        # south bridge and SATA, as well as potentially finding SSD
        # or disk cache problems. Two threads ensure multiple
        # outstanding transactions to the disk, if supported.
        if disk_thread:
            args += ' -f sat.diskthread.a'  # disk thread
            args += ' -f sat.diskthread.b'

        if utils.get_board() == 'link':
            args += memory_channel_args_snb_bdw(
                [['U1', 'U2', 'U3', 'U4'],
                 ['U6', 'U5', 'U7', 'U8']])  # yes, U6 is actually before U5

        if utils.get_board() == 'samus':
            args += memory_channel_args_snb_bdw([['U11', 'U12'],
                                                 ['U13', 'U14']])

        # 'stressapptest' is provided by dev-util/stressapptest, pre-installed
        # in test images.
        sat = utils.run('stressapptest' + args)
        logging.debug(sat.stdout)
        if not re.search('Status: PASS', sat.stdout):
            raise error.TestFail(sat.stdout)
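
For illustration, with the default arguments on a hypothetical 4-CPU machine
reporting 8 GB free (assumed numbers, not captured output), the assembled
command line works out to:

    # Sketch with assumed inputs; utils.freememtotal() reports KB.
    cpus = 4
    free_kb = 8 * 1024 * 1024
    mbytes = max(int(free_kb * 0.95 / 1024), 512)  # 7782 MB under test
    args = ' -M %d -s %d -m %d -W' % (mbytes, 60, cpus)
    args += ' -f sat.diskthread.a -f sat.diskthread.b'
    print('stressapptest' + args)
    # -> stressapptest -M 7782 -s 60 -m 4 -W -f sat.diskthread.a -f sat.diskthread.b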