Example 1
    def execute(self, disks = None, gigabytes = None,
                chunk_mb = utils.memtotal() / 1024):
        os.chdir(self.srcdir)

        if not disks:
            disks = [self.tmpdir]
        if not gigabytes:
            free = 100       # cap it at 100GB by default
            for disk in disks:
                free = min(utils.freespace(disk) / 1024**3, free)
            gigabytes = free
            logging.info("resizing to %s GB", gigabytes)
            sys.stdout.flush()

        self.chunk_mb = chunk_mb
        self.memory_mb = utils.memtotal()/1024
        if self.memory_mb > chunk_mb:
            e_msg = "Too much RAM (%dMB) for this test to work" % self.memory_mb
            raise error.TestError(e_msg)

        chunks = (1024 * gigabytes) / chunk_mb

        for i in range(chunks):
            pids = []
            for disk in disks:
                pid = self.test_one_disk_chunk(disk, i)
                pids.append(pid)
            errors = []
            for pid in pids:
                (junk, retval) = os.waitpid(pid, 0)
                if (retval != 0):
                    errors.append(retval)
            if errors:
                raise error.TestError("Errors from children: %s" % errors)
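All of these examples lean on utils.memtotal(), which reports total system RAM in kilobytes; every /1024 above converts kB to MB. Below is a minimal stand-alone sketch of such a helper, assuming it parses the MemTotal field of /proc/meminfo (which matches the kB-based arithmetic used throughout). Note also that Example 1 computes the chunk_mb default in the def line, so Python fixes it once at definition time; Example 5 further down is the same test reworked with None sentinels to defer that lookup to each run.

def memtotal_kb():
    """Total RAM in kB, as utils.memtotal() returns it (sketch, Linux only)."""
    with open('/proc/meminfo') as f:
        for line in f:
            # A typical line looks like: 'MemTotal:       16318508 kB'
            if line.startswith('MemTotal:'):
                return int(line.split()[1])
    raise OSError('MemTotal not found in /proc/meminfo')

chunk_mb = memtotal_kb() // 1024   # kB -> MB, as in the examples above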
Example 2
 def test_mem(self):
     """Test the RAM configuration."""
     errors = ''
     warning = ''
     if self.min_memory_kb > utils.memtotal():
         errors += 'Expecting at least %dKB memory but found %dKB\n' % (
             self.min_memory_kb, utils.memtotal())
     return errors, warning
Example 3
    def run_once(self):
        mem_size = utils.memtotal()
        swap_size = utils.swaptotal()
        logging.info("MemTotal: %.0f KB", mem_size)
        logging.info("SwapTotal: %.0f KB", swap_size)

        # First run memory-eater with 60% of total memory size to measure the
        # page access throughput
        cmd = ("memory-eater --size %d --speed --repeat 4 --chunk 500 "
               "--wait 0" % long(mem_size * 0.60 / 1024))
        logging.debug('cmd: %s', cmd)
        out = utils.system_output(cmd)
        self._log_results("60_Percent_RAM", out)

        # Then run memory-eater with total memory + 30% swap size to measure the
        # page access throughput. On 32-bit system with 4GB of RAM, the memory
        # footprint needed to generate enough memory pressure is larger than
        # a single user-space process can own. So we divide the memory usage
        # by half and the test itself will fork a child process to double the
        # memory usage. Each process will take turns to access 500 pages
        # (via --chunk) until all pages are accessed 4 times (via --repeat).
        half_mem_pressure_size = long((mem_size + swap_size * 0.3) / 1024) / 2
        cmd = ("memory-eater --size %d --speed --fork --repeat 4 --chunk 500"
               "--wait 0" % half_mem_pressure_size)
        logging.debug('cmd: %s', cmd)
        out = utils.system_output(cmd)
        self._log_results("30_Percent_SWAP", out)
Example 4
    def run_once(self):
        errors = 0
        # The minimum available space we expect on temp filesystems.
        # TempFS allows 1/2 of Total Memory for each temp fs. Our threshold
        # allows for 50% usage of space allocated before this test is run.

        threshold = utils.memtotal()/4
        tempdirs = ['/dev', '/tmp', '/dev/shm', '/var/tmp', '/run',
                    '/run/lock']

        for dir in tempdirs:
            if os.path.isdir(dir):
                # utils.freespace is in bytes, so convert to kb.
                avail = utils.freespace(dir)/1024
                if avail < threshold:
                    logging.error('Not enough available space on %s', dir)
                    logging.error('%d KB is minimum, found %d KB',
                                  threshold, avail)
                    errors += 1
            else:
                logging.error('%s does not exist!', dir)
                errors += 1

        if errors:
            raise error.TestFail('There were %d temp directory errors' % errors)
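The threshold follows the comment's arithmetic: tmpfs defaults to half of RAM per mount, and the test tolerates half of that already being used, so the floor is memtotal/2 * 1/2 = memtotal/4, still in kB. With an assumed 8 GB machine:

memtotal_kb = 8 * 1024 * 1024            # 8 GB reported in kB
tmpfs_size_kb = memtotal_kb // 2         # default tmpfs size: half of RAM
threshold_kb = tmpfs_size_kb // 2        # tolerate 50% of that in use
assert threshold_kb == memtotal_kb // 4  # the utils.memtotal()/4 above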
Example 5
    def run_once(self, disks=None, gigabytes=None, chunk_mb=None):
        """
        Runs one iteration of disktest.

        @param disks: List of directories (usually mountpoints) to be passed
                to the test.
        @param gigabytes: Disk space that will be used for the test to run.
        @param chunk_mb: Size of the portion of the disk used to run the test.
                Cannot be larger than the total amount of free RAM.
        """
        os.chdir(self.srcdir)
        if chunk_mb is None:
            chunk_mb = utils.memtotal() / 1024
        if disks is None:
            disks = [self.tmpdir]
        if gigabytes is None:
            free = 100 # cap it at 100GB by default
            for disk in disks:
                free = min(utils.freespace(disk) / 1024**3, free)
            gigabytes = free
            logging.info("Resizing to %s GB", gigabytes)
            sys.stdout.flush()

        self.chunk_mb = chunk_mb
        self.memory_mb = utils.memtotal()/1024
        if self.memory_mb > chunk_mb:
            raise error.TestError("Too much RAM (%dMB) for this test to work" %
                                  self.memory_mb)

        chunks = (1024 * gigabytes) / chunk_mb

        logging.info("Total of disk chunks that will be used: %s", chunks)
        for i in range(chunks):
            pids = []
            for disk in disks:
                pid = self.test_one_disk_chunk(disk, i)
                pids.append(pid)
            errors = []
            for pid in pids:
                (junk, retval) = os.waitpid(pid, 0)
                if (retval != 0):
                    errors.append(retval)
            if errors:
                raise error.TestError("Errors from children: %s" % errors)
Example 6
def get_alloc_size_per_page():
    """Returns the default alloc size per page in MB."""
    ALLOC_MB_PER_PAGE_DEFAULT = 800
    ALLOC_MB_PER_PAGE_SUB_2GB = 400

    alloc_mb_per_page = ALLOC_MB_PER_PAGE_DEFAULT
    # Allocate less memory per page for devices with less than 2GB of memory.
    if utils.memtotal() * KB_TO_BYTE < 2 * GB_TO_BYTE:
        alloc_mb_per_page = ALLOC_MB_PER_PAGE_SUB_2GB
    return alloc_mb_per_page
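KB_TO_BYTE and GB_TO_BYTE are module constants defined outside this snippet; presumably 1024 and 1024**3. Under that assumption, the cutoff check reads:

KB_TO_BYTE = 1024        # assumed; these constants are defined elsewhere
GB_TO_BYTE = 1024 ** 3   # in the test module

# memtotal() reports kB; e.g. a nominally-2GB device that shows 1.9 GB
# after kernel reservations:
memtotal_kb = int(1.9 * 1024 * 1024)
assert memtotal_kb * KB_TO_BYTE < 2 * GB_TO_BYTE   # -> 400 MB per page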
Example 7
def create_pages_and_check_oom(create_page_func, size_mb, bindir):
    """Common code to create pages and to check OOM.

    Args:
        create_page_func: function to create page, it takes 3 arguments,
            cr: chrome wrapper, size_mb: alloc size per page in MB,
            bindir: path to the test directory.
        bindir: path to the test directory.
    Returns:
        Dictionary of test results.
    """
    kills_monitor = MemoryKillsMonitor()

    # The number of tabs that can trigger OOM consistently if the tabs are
    # not discarded properly.
    tab_count = 1 + (utils.memtotal() * KB_TO_BYTE * 4) / (size_mb *
                                                           MB_TO_BYTE)

    # The tab count at the first tab discard.
    first_discard = -1
    # The number of tabs actually created.
    tabs_created = tab_count

    # Opens a specific amount of tabs, breaks if the OOM killer is invoked.
    with chrome.Chrome(init_network_controller=True) as cr:
        cr.browser.platform.SetHTTPServerDirectories(bindir)
        for i in range(tab_count):
            create_page_func(cr, size_mb, bindir)
            time.sleep(3)
            kills_monitor.check_events()
            if first_discard == -1 and kills_monitor.discarded:
                first_discard = i + 1
            if kills_monitor.oom:
                tabs_created = i + 1
                break

    # Test is successful if at least one Chrome tab is killed by tab
    # discarder and the kernel OOM killer isn't invoked.
    if kills_monitor.oom:
        raise error.TestFail('OOM Killer invoked')

    if not kills_monitor.discarded:
        raise error.TestFail('No tab discarded')

    # TODO: report the page loading time.
    return {
        'NumberOfTabsAtFirstDiscard': first_discard,
        'NumberOfTabsCreated': tabs_created
    }
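The tab count is sized so that, absent discards, the pages would collectively allocate four times physical RAM, enough to force either the tab discarder or the kernel OOM killer into action. Assuming KB_TO_BYTE = 1024 and MB_TO_BYTE = 1024**2 (as above), with 4 GB of RAM and the 800 MB default page:

KB_TO_BYTE = 1024                # assumed, as in Example 6
MB_TO_BYTE = 1024 ** 2
memtotal_kb = 4 * 1024 * 1024    # 4 GB machine
size_mb = 800                    # ALLOC_MB_PER_PAGE_DEFAULT from Example 6

tab_count = 1 + (memtotal_kb * KB_TO_BYTE * 4) // (size_mb * MB_TO_BYTE)
# 16 GB of demand / 800 MB per tab -> 20, plus one -> 21 tabs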
Example 8
    def run_the_test(self, iterations):
        utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

        cmd = os.path.join(self.srcdir, 'linus_stress')
        args = "%d" % (utils.memtotal() / 32)

        profilers = self.job.profilers
        if profilers.present():
            profilers.start(self)

        for i in range(iterations):
            utils.system(cmd + ' ' + args)

        if profilers.present():
            profilers.stop(self)
            profilers.report(self)
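Dropping dirty_ratio to 4% and dirty_background_ratio to 2% forces early, frequent writeback while linus_stress runs with utils.memtotal()/32 as its size argument. utils.write_one_line is autotest's one-line file writer; below is a minimal equivalent plus a save/restore wrapper (the restore is an addition for this sketch; the test itself leaves the knobs modified):

def write_one_line(path, line):
    # Minimal stand-in for utils.write_one_line.
    with open(path, 'w') as f:
        f.write(line.rstrip('\n') + '\n')

def read_one_line(path):
    with open(path) as f:
        return f.readline().rstrip('\n')

old_ratio = read_one_line('/proc/sys/vm/dirty_ratio')
old_background = read_one_line('/proc/sys/vm/dirty_background_ratio')
try:
    write_one_line('/proc/sys/vm/dirty_ratio', '4')
    write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
    # ... run the stress command here ...
finally:
    write_one_line('/proc/sys/vm/dirty_ratio', old_ratio)
    write_one_line('/proc/sys/vm/dirty_background_ratio', old_background)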
Example 10
    def run_once(self):
        # TODO(zmo@): this may not get total physical memory size on ARM
        #             or some x86 machines.
        mem_size = utils.memtotal()
        gb = mem_size / 1024.0 / 1024.0
        self.write_perf_keyval({"gb_memory_total": gb})
        logging.info("MemTotal: %.3f GB" % gb)

        # x86 and ARM SDRAM configurations differ significantly from each other.
        # Use a value specific to the architecture.
        # On x86, I see 1.85GiB (2GiB - reserved memory).
        # On ARM, I see 0.72GiB (1GiB - 256MiB carveout).
        cpuType = utils.get_cpu_arch()
        limit = 1.65
        if cpuType == "arm":
            limit = 0.65

        if gb <= limit:
            raise error.TestFail("total system memory size < %.3f GB" % limit)
Example 11
def discover_container_style():
    global super_root_path, cpuset_prefix
    global mem_isolation_on, fake_numa_containers
    global node_mbytes, root_container_bytes
    if super_root_path != '':
        return  # already looked up
    if os.path.exists('/dev/cgroup/tasks'):
        # running on 2.6.26 or later kernel with containers on:
        super_root_path = '/dev/cgroup'
        cpuset_prefix = 'cpuset.'
        if get_boot_numa():
            mem_isolation_on = fake_numa_containers = True
        else:  # memcg containers IFF compiled-in & mounted & non-fakenuma boot
            fake_numa_containers = False
            mem_isolation_on = os.path.exists(
                    '/dev/cgroup/memory.limit_in_bytes')
            # TODO: handle possibility of where memcg is mounted as its own
            #       cgroup hierarchy, separate from cpuset??
    elif os.path.exists('/dev/cpuset/tasks'):
        # running on 2.6.18 kernel with containers on:
        super_root_path = '/dev/cpuset'
        cpuset_prefix = ''
        mem_isolation_on = fake_numa_containers = get_boot_numa() != ''
    else:
        # neither cpuset nor cgroup filesystem active:
        super_root_path = None
        cpuset_prefix = 'no_cpusets_or_cgroups_exist'
        mem_isolation_on = fake_numa_containers = False

    logging.debug('mem_isolation: %s', mem_isolation_on)
    logging.debug('fake_numa_containers: %s', fake_numa_containers)
    if fake_numa_containers:
        node_mbytes = int(mbytes_per_mem_node())
    elif mem_isolation_on:  # memcg-style containers
        # For now, limit total of all containers to using just 98% of system's
        #   visible total ram, to avoid oom events at system level, and avoid
        #   page reclaim overhead from going above kswapd highwater mark.
        system_visible_pages = utils.memtotal() >> 2
        usable_pages = int(system_visible_pages * 0.98)
        root_container_bytes = usable_pages << 12
        logging.debug('root_container_bytes: %s',
                      utils.human_format(root_container_bytes))
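The shifts encode 4 KB pages: memtotal() is in kB, so >> 2 divides by 4 to get a page count, and << 12 multiplies by 4096 to get bytes back. On an assumed 16 GB machine:

memtotal_kb = 16 * 1024 * 1024
pages = memtotal_kb >> 2                   # kB / 4 -> count of 4 kB pages
usable_pages = int(pages * 0.98)           # hold back 2% for the host
root_container_bytes = usable_pages << 12  # pages * 4096 -> bytes
# ~15.68 GiB left for all containers combined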
Example 13
    def run_once(self, size=0, loop=10):
        """
        Executes the test and logs the output.

        @param size: size to test in KB. 0 means all usable
        @param loop: number of iterations to test memory
        """
        if size == 0:
            size = utils.usable_memtotal()
        elif size > utils.memtotal():
            raise error.TestFail('Specified size is more than total memory.')

        if size <= 0:
            raise error.TestFail('Size must be more than zero.')

        logging.info('Memory test size: %dK', size)

        cmd = 'memtester %dK %d' % (size, loop)
        logging.info('cmd: %s', cmd)

        with open(os.path.join(self.resultsdir, 'memtester_stdout'), 'w') as f:
            utils.run(cmd, stdout_tee=f)
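utils.usable_memtotal() (freeable memory rather than raw MemTotal) keeps memtester from starving the host when size=0. memtester takes a size with a unit suffix and an iteration count, so the %dK format hands it the kB figure directly. Supposing usable_memtotal() returned 4 GB:

size_kb = 4 * 1024 * 1024      # hypothetical usable_memtotal() result, in kB
loop = 10
cmd = 'memtester %dK %d' % (size_kb, loop)
# -> 'memtester 4194304K 10': exercise 4 GB of RAM for 10 iterations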
Example 14
    def execute(self, testdir = None, iterations = 10000):
        if not testdir:
            testdir = self.tmpdir
        os.chdir(testdir)
        file = os.path.join(testdir, 'foo')
        # Want to use 3/4 of all memory for each of
        # bash-shared-mapping and usemem
        kilobytes = (3 * utils.memtotal()) / 4

        # Want two usemem -m megabytes in parallel in background.
        pid = [None, None]
        usemem = os.path.join(self.srcdir, 'usemem')
        args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
        # print_to_tty ('2 x ' + ' '.join(args))
        for i in (0,1):
            pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)

        cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
                        (self.srcdir, file, kilobytes,
                         utils.count_cpus(), iterations)
        os.system(cmd)

        for i in (0, 1):
            os.kill(pid[i], signal.SIGKILL)
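The overcommit is the point of the test: the two background usemem processes and the bash-shared-mapping run are each sized at 3/4 of RAM, about 2.25x RAM in total, which forces paging under the shared mapping. With an assumed 8 GB machine:

memtotal_kb = 8 * 1024 * 1024
kilobytes = (3 * memtotal_kb) // 4    # 6 GB per consumer
usemem_mb = kilobytes // 1024         # usemem's -m takes MB: 6144
# 2 x usemem + bash-shared-mapping -> ~18 GB of demand on an 8 GB box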
Example 15
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, e:
            raise error.TestFail("Failed to load KSM: %s" % e)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = mem currently in use + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid guest OS to kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
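The default host_reserve is the memory the host is already using (MemTotal minus MemFree, converted from kB to MB) plus 128 MB for the first minimal guest; per the comment, 64 MB more is added for each additional guest later on. With assumed figures:

memtotal_kb = 8 * 1024 * 1024     # 8 GB machine
memfree_kb = 6 * 1024 * 1024      # suppose 6 GB currently free
host_reserve_mb = (memtotal_kb - memfree_kb) // 1024 + 128
# 2048 MB in use by the host + 128 MB for one minimal guest -> 2176 MB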