def get_memory_keyvals(self):
        """
        Read graphics memory values from sysfs and return them as keyvals.

        Determines the GPU driver ("soc") from the CPU SoC family or, on
        x86, by inspecting the PCI VGA device, then reads every sysfs field
        listed for that driver in self.arch_fields.

        @return: dict mapping '<field>_<key>' to parsed sysfs values, plus
                 meminfo_MemUsed and meminfo_SwapUsed from /proc/meminfo.
        @raise error.TestFail: if the detected architecture has no entry in
                self.arch_fields.
        """
        keyvals = {}

        # Get architecture type and list of sysfs fields to read.
        soc = utils.get_cpu_soc_family()

        arch = utils.get_cpu_arch()
        if arch == 'x86_64' or arch == 'i386':
            pci_vga_device = utils.run("lspci | grep VGA").stdout.rstrip('\n')
            if "Advanced Micro Devices" in pci_vga_device:
                soc = 'amdgpu'
            elif "Intel Corporation" in pci_vga_device:
                soc = 'i915'
            elif "Cirrus Logic" in pci_vga_device:
                # Used on qemu with kernels 3.18 and lower. Limited to 800x600
                # resolution.
                soc = 'cirrus'
            else:
                # Fall back to lshw to detect a virtio GPU.
                pci_vga_device = utils.run('lshw -c video').stdout.rstrip()
                # Raw string avoids the invalid '\S' escape in newer Pythons.
                groups = re.search(r'configuration:.*driver=(\S*)',
                                   pci_vga_device)
                if groups and 'virtio' in groups.group(1):
                    soc = 'virtio'

        if soc not in self.arch_fields:
            raise error.TestFail('Error: Architecture "%s" not yet supported.' % soc)
        fields = self.arch_fields[soc]

        for field_name in fields:
            # Each field may live at one of several candidate paths; use the
            # first one that exists.
            field_value = None
            for path in fields[field_name]:
                # Nonzero 'ls' exit status means the path does not exist.
                if utils.system('ls %s' % path):
                    continue
                field_value = utils.system_output('cat %s' % path)
                break

            if not field_value:
                logging.error('Unable to find any sysfs paths for field "%s"',
                              field_name)
                self.num_errors += 1
                continue

            parsed_results = GraphicsKernelMemory._parse_sysfs(field_value)

            for key in parsed_results:
                keyvals['%s_%s' % (field_name, key)] = parsed_results[key]

            if 'bytes' in parsed_results and parsed_results['bytes'] == 0:
                logging.error('%s reported 0 bytes', field_name)
                self.num_errors += 1

        keyvals['meminfo_MemUsed'] = (utils.read_from_meminfo('MemTotal') -
                                      utils.read_from_meminfo('MemFree'))
        keyvals['meminfo_SwapUsed'] = (utils.read_from_meminfo('SwapTotal') -
                                       utils.read_from_meminfo('SwapFree'))
        return keyvals
    def run_once(self, compression_factor=3, num_procs=50, cycles=20,
                 selections=None, swap_targets=None, switch_delay=0.0):
        """
        Verify swap configuration, then run the workload once per requested
        swap-usage target.

        @param compression_factor: expected zram compression ratio, passed
            through to run_single_test.
        @param num_procs: number of worker processes per single test.
        @param cycles: number of cycles per single test.
        @param selections: page-selection strategies to exercise; defaults to
            sequential, uniform and exponential.
        @param swap_targets: target swap-usage fractions (0.0 - 1.0).
        @param switch_delay: delay between process switches.
        @raise error.TestFail: if the swap configuration is inconsistent.
        """
        if selections is None:
            selections = ['sequential', 'uniform', 'exponential']
        if swap_targets is None:
            swap_targets = [0.00, 0.25, 0.50, 0.75, 0.95]

        swaptotal = utils.read_from_meminfo('SwapTotal')

        # Check for proper swap space configuration.
        # If the swap enable file says "0", swap.conf does not create swap.
        if os.path.exists(self.swap_enable_file):
            enable_size = utils.read_one_line(self.swap_enable_file)
        else:
            enable_size = "nonexistent"  # implies nonzero
        if enable_size == "0":
            if swaptotal != 0:
                raise error.TestFail('The swap enable file said 0, but'
                                     ' swap was still enabled for %d.' %
                                     swaptotal)
            logging.info('Swap enable (0), swap disabled.')
        else:
            # Rather than parsing swap.conf logic to calculate a size,
            # use the value it writes to /sys/block/zram0/disksize.
            if not os.path.exists(self.swap_disksize_file):
                raise error.TestFail('The %s swap enable file should have'
                                     ' caused zram to load, but %s was'
                                     ' not found.' %
                                     (enable_size, self.swap_disksize_file))
            disksize = utils.read_one_line(self.swap_disksize_file)
            # disksize is in bytes; compare against SwapTotal with a 10%
            # tolerance.
            swaprequested = int(disksize) / 1000
            if (swaptotal < swaprequested * 0.9 or
                swaptotal > swaprequested * 1.1):
                raise error.TestFail('Our swap of %d K is not within 10%%'
                                     ' of the %d K we requested.' %
                                     (swaptotal, swaprequested))
            logging.info('Swap enable (%s), requested %d, total %d',
                         enable_size, swaprequested, swaptotal)

        # We should try to autodetect this if we add other swap methods.
        swap_method = 'zram'

        for swap_target in swap_targets:
            logging.info('swap_target is %f', swap_target)
            temp_dir = tempfile.mkdtemp()
            try:
                # Reset swap space to make sure nothing leaks between runs.
                swap_reset = swap_reset_funcs[swap_method]
                swap_reset()
                self.run_single_test(compression_factor, num_procs, cycles,
                                     swap_target, switch_delay, temp_dir,
                                     selections)
            except socket.error:
                logging.debug('swap target %f failed; oom killer?', swap_target)
            finally:
                # Always remove the scratch directory; the original code
                # leaked temp_dir when run_single_test raised anything other
                # than socket.error.
                shutil.rmtree(temp_dir)
    def sample_memory_state(self):
        """
        Samples memory info from /proc/meminfo and use that to calculate swap
        usage and total memory usage, adjusted for double-counting swap space.

        Sets mem_total, swap_total, mem_free, swap_free, swap_used,
        total_usage and usage_ratio on self (sizes in KiB, ratio in [0, 1]).
        """
        self.mem_total = utils.read_from_meminfo('MemTotal')
        self.swap_total = utils.read_from_meminfo('SwapTotal')
        self.mem_free = utils.read_from_meminfo('MemFree')
        self.swap_free = utils.read_from_meminfo('SwapFree')
        self.swap_used = self.swap_total - self.swap_free

        used_phys_memory = self.mem_total - self.mem_free

        # Get zram's actual compressed size and convert to KiB.
        swap_phys_size = utils.read_one_line('/sys/block/zram0/compr_data_size')
        swap_phys_size = int(swap_phys_size) / 1024

        # Swapped pages are counted once compressed (inside used_phys_memory)
        # and once uncompressed (swap_used); subtract the compressed copy so
        # they are not double-counted.
        self.total_usage = used_phys_memory - swap_phys_size + self.swap_used
        # Guard against ZeroDivisionError when swap is disabled entirely.
        if self.swap_total:
            self.usage_ratio = float(self.swap_used) / self.swap_total
        else:
            self.usage_ratio = 0.0
    def get_memory_keyvals(self):
        """
        Read graphics memory values from sysfs and return them as keyvals.

        @return: dict mapping '<field>_<key>' to parsed sysfs values, plus
                 meminfo_MemUsed and meminfo_SwapUsed from /proc/meminfo.
        @raise error.TestFail: if the SoC family has no entry in
                self.arch_fields.
        """
        keyvals = {}

        # Get architecture type and list of sysfs fields to read.
        arch = utils.get_cpu_soc_family()

        if arch not in self.arch_fields:
            raise error.TestFail('Architecture "%s" not yet supported.' % arch)
        fields = self.arch_fields[arch]

        for field_name in fields:
            # Each field may live at one of several candidate paths; use the
            # first one that exists.
            field_value = None
            for path in fields[field_name]:
                # Nonzero 'ls' exit status means the path does not exist.
                if utils.system('ls %s' % path):
                    continue
                field_value = utils.system_output('cat %s' % path)
                break

            if not field_value:
                logging.error('Unable to find any sysfs paths for field "%s"',
                              field_name)
                self.num_errors += 1
                continue

            parsed_results = GraphicsKernelMemory._parse_sysfs(field_value)

            for key in parsed_results:
                keyvals['%s_%s' % (field_name, key)] = parsed_results[key]

            if 'bytes' in parsed_results and parsed_results['bytes'] == 0:
                logging.error('%s reported 0 bytes', field_name)
                self.num_errors += 1

        keyvals['meminfo_MemUsed'] = (utils.read_from_meminfo('MemTotal') -
                                      utils.read_from_meminfo('MemFree'))
        keyvals['meminfo_SwapUsed'] = (utils.read_from_meminfo('SwapTotal') -
                                       utils.read_from_meminfo('SwapFree'))
        return keyvals
# Example #5
def get_memory_info(lvms):
    """
    Get memory information from host and guests in format:
    Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}

    @params lvms: List of VM objects
    @return: String with memory info report
    """
    # NOTE(review): Python 2 'except Exception, e' syntax below -- this
    # module predates Python 3.
    if not isinstance(lvms, list):
        raise error.TestError("Invalid list passed to get_stat: %s " % lvms)

    try:
        # /proc/meminfo values are in KB; convert to MB for the report.
        meminfo = "Host: memfree = "
        meminfo += str(int(utils.freememtotal()) / 1024) + "M; "
        meminfo += "swapfree = "
        mf = int(utils.read_from_meminfo("SwapFree")) / 1024
        meminfo += str(mf) + "M; "
    except Exception, e:
        raise error.TestFail("Could not fetch host free memory info, "
                             "reason: %s" % e)
    # NOTE(review): the guest portion of the report and the return of
    # 'meminfo' promised by the docstring appear truncated in this chunk --
    # confirm against the upstream source.
# Example #6
def consume_free_memory(memory_to_reserve_mb):
    """Consumes free memory until |memory_to_reserve_mb| is remained.

    Non-swappable memory is allocated to consume memory.
    memory_to_reserve_mb: Consume memory until this amount of free memory
        is remained.
    @return The MemoryEater() object on which memory is allocated. One can
        catch it in a context manager.
    """
    eater = MemoryEater()
    free_now_mb = utils.read_from_meminfo('MemFree') / 1024
    logging.info('Current Free Memory %d', free_now_mb)
    # Keep grabbing locked memory in chunks of at most 2047 MB until only
    # the reserve amount remains free.
    while free_now_mb > memory_to_reserve_mb:
        chunk_mb = min(2047, free_now_mb - memory_to_reserve_mb + 1)
        logging.info('Consuming %d MB locked memory', chunk_mb)
        eater.consume_locked_memory(chunk_mb)
        free_now_mb = utils.read_from_meminfo('MemFree') / 1024
        logging.info('Current Free Memory %d', free_now_mb)
    return eater
# Example #7
    def run_once(self, args = '', stress_length=60):
        """
        Run the 'stress' workload generator.

        @param args: raw command-line arguments for the stress binary; when
            empty, a default worker configuration is derived from this
            machine's CPUs, memory and free disk space.
        @param stress_length: seconds to run (used only when args is empty).
        """
        if not args:
            # We will use 2 workers of each type for each CPU detected
            threads = 2 * utils.count_cpus()

            # Sometimes the default memory used by each memory worker (256 M)
            # might make our machine go OOM and then funny things might start to
            # happen. Let's avoid that.
            # NOTE(review): due to operator precedence only SwapFree is halved
            # here, not the whole sum -- confirm whether
            # (freememtotal + SwapFree) / 2 was intended.
            mb = utils.freememtotal() + utils.read_from_meminfo('SwapFree') / 2
            memory_per_thread = (mb * 1024) / threads

            # Even though unlikely, it's good to prevent from allocating more
            # disk than this machine actually has on its autotest directory
            # (limit the amount of disk used to max of 90 % of free space)
            free_disk = utils.freespace(self.srcdir)
            file_size_per_thread = 1024 ** 2
            if (0.9 * free_disk) < file_size_per_thread * threads:
                file_size_per_thread = (0.9 * free_disk) / threads

            # Number of CPU workers spinning on sqrt()
            args = '--cpu %d ' % threads
            # Number of IO workers spinning on sync()
            args += '--io %d ' % threads
            # Number of Memory workers spinning on malloc()/free()
            args += '--vm %d ' % threads
            # Amount of memory used per each worker
            args += '--vm-bytes %d ' % memory_per_thread
            # Number of HD workers spinning on write()/ulink()
            args += '--hdd %d ' % threads
            # Size of the files created by each worker in bytes
            args += '--hdd-bytes %d ' % file_size_per_thread
            # Time for which the stress test will run
            args += '--timeout %d ' % stress_length
            # Verbose flag
            args += '--verbose'

        utils.system(self.srcdir + '/src/stress ' + args)
# Example #8
    def run_once(self, args='', stress_length=60):
        """
        Run the 'stress' workload generator.

        @param args: raw command-line arguments for the stress binary; when
            empty, defaults are derived from the machine's resources.
        @param stress_length: seconds to run (used only when args is empty).
        """
        if not args:
            # Two workers of each kind per detected CPU.
            workers = 2 * utils.count_cpus()

            # Cap per-worker memory so the default 256 M per vm worker cannot
            # push the machine into OOM.
            mb = utils.freememtotal() + utils.read_from_meminfo('SwapFree') / 2
            vm_bytes = (mb * 1024) / workers

            # Cap per-worker file size so the hdd workers never use more than
            # 90% of the free space under the autotest source directory.
            free_disk = utils.freespace(self.srcdir)
            hdd_bytes = 1024**2
            if (0.9 * free_disk) < hdd_bytes * workers:
                hdd_bytes = (0.9 * free_disk) / workers

            options = [
                '--cpu %d' % workers,            # workers spinning on sqrt()
                '--io %d' % workers,             # workers spinning on sync()
                '--vm %d' % workers,             # workers on malloc()/free()
                '--vm-bytes %d' % vm_bytes,      # memory per vm worker
                '--hdd %d' % workers,            # workers on write()/ulink()
                '--hdd-bytes %d' % hdd_bytes,    # file size per hdd worker
                '--timeout %d' % stress_length,  # total run time
                '--verbose',
            ]
            args = ' '.join(options)

        utils.system(self.srcdir + '/src/stress ' + args)
# Example #9
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit

        Fills each guest's memory with random data while watching host free
        memory; when the host runs low, already-filled guests are destroyed
        and the memory of the last filled guest is verified.
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                # NOTE(review): 'is_alive' is referenced without calling it,
                # so this check can never trigger -- confirm '()' is missing.
                if not lvms[j].is_alive:
                    e_msg = ("VM %d died while executing static_random_fill in "
                             "VM %d on allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            a_cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", a_cmd, vm.name)
            session.sendline(a_cmd)

            out = ""
            try:
                logging.debug("Watching host memory while filling vm %s memory",
                              vm.name)
                while not out.startswith("PASS") and not out.startswith("FAIL"):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing static_random_fill"
                                 " on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        # Pause the guest while the earlier guests are torn
                        # down, then resume it.
                        vm.monitor.cmd("stop")
                        for j in range(0, i):
                            lvms[j].destroy(gracefully = False)
                        time.sleep(20)
                        vm.monitor.cmd("c")
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                        break
                    out = session.read_nonblocking(0.1)
                    time.sleep(2)
            except OSError:
                # NOTE(review): free_mem may be unbound here if the OSError
                # fires before the first meminfo read above -- confirm.
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.monitor.cmd("stop")
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully = False)
                time.sleep(20)
                vm.monitor.cmd("c")
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Check if memory in max loading guest is right")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(virt_test_utils.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully = False)

        # Verify last machine with randomly generated memory
        a_cmd = "mem.static_random_verify()"
        _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(virt_test_utils.get_memory_info([lvms[last_vm]]))

        # NOTE(review): uses loop variable 'i' while the surrounding lines use
        # 'last_vm' -- confirm which session should receive die().
        lsessions[i].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully = False)
        logging.info("Phase 3b: PASS")
# Example #10
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, e:
            raise error.TestFail("Failed to load KSM: %s" % e)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = MemAvailable + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid guest OS to kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
# Example #11
    def run_once(self):
        """
        Validate physical memory size, free memory, swap size and DRAM speed
        against per-platform reference values.

        Reads DIMM geometry and timings via mosys, compares /proc/meminfo
        values against derived thresholds, and records all readings as perf
        keyvals.

        @raise error.TestFail: if any checked value is out of range.
        """
        errors = 0
        keyval = dict()
        # The total memory will shrink if the system bios grabs more of the
        # reserved memory. We derived the value below by giving a small
        # cushion to allow for more system BIOS usage of ram. The memref value
        # is driven by the supported netbook model with the least amount of
        # total memory.  ARM and x86 values differ considerably.
        cpu_type = utils.get_cpu_arch()
        memref = 986392
        vmemref = 102400
        if cpu_type == "arm":
            memref = 700000
            vmemref = 210000

        speedref = 1333
        os_reserve = 600000

        # size reported in /sys/block/zram0/disksize is in byte
        swapref = int(utils.read_one_line(self.swap_disksize_file)) / 1024

        # Fields that must be at least their reference value, and fields that
        # only need to be within 10% of it.
        less_refs = ['MemTotal', 'MemFree', 'VmallocTotal']
        approx_refs = ['SwapTotal']

        # read physical HW size from mosys and adjust memref if need
        cmd = 'mosys memory spd print geometry -s size_mb'
        phy_size_run = utils.run(cmd)
        phy_size = 0
        for line in phy_size_run.stdout.split():
            phy_size += int(line)
        # memref is in KB but phy_size is in MB
        phy_size *= 1024
        keyval['PhysicalSize'] = phy_size
        memref = max(memref, phy_size - os_reserve)
        freeref = memref / 2

        # Special rule for free memory size for parrot and butterfly
        board = utils.get_board()
        if board.startswith('parrot'):
            freeref = 100 * 1024
        elif board.startswith('butterfly'):
            freeref = freeref - 400 * 1024
        elif board.startswith('rambi') or board.startswith('expresso'):
            logging.info('Skipping test on rambi and expresso, '
                         'see crbug.com/411401')
            return

        ref = {
            'MemTotal': memref,
            'MemFree': freeref,
            'SwapTotal': swapref,
            'VmallocTotal': vmemref,
        }

        logging.info('board: %s, phy_size: %d memref: %d freeref: %d', board,
                     phy_size, memref, freeref)

        error_list = []

        for k in ref:
            value = utils.read_from_meminfo(k)
            keyval[k] = value
            if k in less_refs:
                if value < ref[k]:
                    logging.warning('%s is %d', k, value)
                    logging.warning('%s should be at least %d', k, ref[k])
                    errors += 1
                    error_list += [k]
            elif k in approx_refs:
                if value < ref[k] * 0.9 or ref[k] * 1.1 < value:
                    logging.warning('%s is %d', k, value)
                    logging.warning('%s should be within 10%% of %d', k,
                                    ref[k])
                    errors += 1
                    error_list += [k]

        # read spd timings
        cmd = 'mosys memory spd print timings -s speeds'
        # result example
        # DDR3-800, DDR3-1066, DDR3-1333, DDR3-1600
        # Raw string avoids invalid '\d' escape warnings in newer Pythons.
        pattern = r'[A-Z]*DDR([3-9]|[1-9]\d+)[A-Z]*-(?P<speed>\d+)'
        timing_run = utils.run(cmd)

        keyval['speedref'] = speedref
        for dimm, line in enumerate(timing_run.stdout.split('\n')):
            if not line:
                continue
            # The last entry on the line is the fastest supported timing.
            max_timing = line.split(', ')[-1]
            keyval['timing_dimm_%d' % dimm] = max_timing
            m = re.match(pattern, max_timing)
            if not m:
                logging.warning('Error parsing timings for dimm #%d (%s)',
                                dimm, max_timing)
                errors += 1
                continue
            logging.info('dimm #%d timings: %s', dimm, max_timing)
            max_speed = int(m.group('speed'))
            keyval['speed_dimm_%d' % dimm] = max_speed
            if max_speed < speedref:
                logging.warning('ram speed is %s', max_timing)
                logging.warning('ram speed should be at least %d', speedref)
                error_list += ['speed_dimm_%d' % dimm]
                errors += 1

        # If 'errors' is not zero, there were errors.
        if errors > 0:
            error_list_str = ', '.join(error_list)
            raise error.TestFail('Found incorrect values: %s' % error_list_str)

        self.write_perf_keyval(keyval)
# Example #12
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit

        Fills each guest's memory with random data while watching host free
        memory; when the host runs low, already-filled guests are destroyed
        and the memory of the last filled guest is verified.
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                # NOTE(review): 'is_alive' is referenced without calling it,
                # so this check can never trigger -- confirm '()' is missing.
                if not lvms[j].is_alive:
                    e_msg = (
                        "VM %d died while executing static_random_fill in "
                        "VM %d on allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            a_cmd = "mem.static_random_fill()"
            logging.debug(
                "Executing %s on ksm_overcommit_guest.py loop, "
                "vm: %s", a_cmd, vm.name)
            session.sendline(a_cmd)

            out = ""
            try:
                logging.debug(
                    "Watching host memory while filling vm %s memory", vm.name)
                while not out.startswith("PASS") and not out.startswith(
                        "FAIL"):
                    if not vm.is_alive():
                        e_msg = (
                            "VM %d died while executing static_random_fill"
                            " on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and free_mem <
                                              (450000 * perf_ratio)):
                        # Pause the guest while the earlier guests are torn
                        # down, then resume it.
                        vm.monitor.cmd("stop")
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.monitor.cmd("c")
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                        break
                    out = session.read_nonblocking(0.1)
                    time.sleep(2)
            except OSError:
                # NOTE(review): free_mem may be unbound here if the OSError
                # fires before the first meminfo read above -- confirm.
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.monitor.cmd("stop")
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.monitor.cmd("c")
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Check if memory in max loading guest is right")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(virt_test_utils.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        a_cmd = "mem.static_random_verify()"
        _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(virt_test_utils.get_memory_info([lvms[last_vm]]))

        # NOTE(review): uses loop variable 'i' while the surrounding lines use
        # 'last_vm' -- confirm which session should receive die().
        lsessions[i].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")
# Example #13
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, e:
            raise error.TestFail("Failed to load KSM: %s" % e)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = MemAvailable + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = (
            (utils.memtotal() - utils.read_from_meminfo("MemFree")) / 1024 +
            128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to avoid guest OS to kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
# Example #14
    def run_once(self, just_checking_lowmem=False, checking_for_oom=False):
        """
        Create memory hogs until free memory is nearly exhausted and verify
        low-memory notification / OOM-kill behavior.

        @param just_checking_lowmem: skip swap configuration checks; only
            exercise the low-memory notification path.
        @param checking_for_oom: keep creating hogs until an OOM kill is
            observed instead of stopping at the free-memory target.
        @raise error.TestFail: on bad swap configuration or unexpected OOM.
        """

        memtotal = utils.read_from_meminfo('MemTotal')
        swaptotal = utils.read_from_meminfo('SwapTotal')
        # Stop hogging when ~3% of (RAM + swap) remains free.
        free_target = (memtotal + swaptotal) * 0.03

        # Check for proper swap space configuration.
        # If the swap enable file says "0", swap.conf does not create swap.
        if not just_checking_lowmem and not checking_for_oom:
            if os.path.exists(self.swap_enable_file):
                enable_size = utils.read_one_line(self.swap_enable_file)
            else:
                enable_size = "nonexistent"  # implies nonzero
            if enable_size == "0":
                if swaptotal != 0:
                    raise error.TestFail('The swap enable file said 0, but'
                                         ' swap was still enabled for %d.' %
                                         swaptotal)
                logging.info('Swap enable (0), swap disabled.')
            else:
                # Rather than parsing swap.conf logic to calculate a size,
                # use the value it writes to /sys/block/zram0/disksize.
                if not os.path.exists(self.swap_disksize_file):
                    raise error.TestFail(
                        'The %s swap enable file should have'
                        ' caused zram to load, but %s was'
                        ' not found.' % (enable_size, self.swap_disksize_file))
                disksize = utils.read_one_line(self.swap_disksize_file)
                swaprequested = int(disksize) / 1000
                if (swaptotal < swaprequested * 0.9
                        or swaptotal > swaprequested * 1.1):
                    # NOTE(review): the single '%' in '10%' inside a
                    # %-formatted string raises ValueError when this branch
                    # fires; another copy of this message uses '10%%'.
                    # Confirm and fix upstream.
                    raise error.TestFail('Our swap of %d K is not within 10%'
                                         ' of the %d K we requested.' %
                                         (swaptotal, swaprequested))
                logging.info('Swap enable (%s), requested %d, total %d' %
                             (enable_size, swaprequested, swaptotal))

        first_oom = 0
        first_lowmem = 0
        cleared_low_mem_notification = False

        # Loop over hog creation until MemFree+SwapFree approaches 0.
        # Confirm we do not see any OOMs (procs killed due to Out Of Memory).
        hogs = []
        cmd = [self.srcdir + '/' + self.executable, '50']
        logging.debug('Memory hog command line is %s' % cmd)
        while len(hogs) < 200:
            memfree = utils.read_from_meminfo('MemFree')
            swapfree = utils.read_from_meminfo('SwapFree')
            total_free = memfree + swapfree
            logging.debug('nhogs %d: memfree %d, swapfree %d' %
                          (len(hogs), memfree, swapfree))
            if not checking_for_oom and total_free < free_target:
                break

            p = subprocess.Popen(cmd)
            # Make each hog maximally attractive to the OOM killer.
            utils.write_one_line('/proc/%d/oom_score_adj' % p.pid, '1000')
            hogs.append(p)

            time.sleep(2)

            if self.check_for_oom(hogs):
                first_oom = len(hogs)
                break

            # Check for low memory notification.
            if self.getting_low_mem_notification():
                if first_lowmem == 0:
                    first_lowmem = len(hogs)
                logging.info('Got low memory notification after hog %d' %
                             len(hogs))

        logging.info('Finished creating %d hogs, SwapFree %d, MemFree %d, '
                     'low mem at %d, oom at %d' %
                     (len(hogs), swapfree, memfree, first_lowmem, first_oom))

        if not checking_for_oom and first_oom > 0:
            utils.system("killall -TERM hog")
            raise error.TestFail('Oom detected after %d hogs created' %
                                 len(hogs))

        # Before cleaning up all the hogs, verify that killing hogs back to
        # our initial low memory notification causes notification to end.
        if first_lowmem > 0:
            hogs_killed = 0
            for p in hogs:
                if not self.getting_low_mem_notification():
                    cleared_low_mem_notification = True
                    logging.info('Cleared low memory notification after %d '
                                 'hogs were killed' % hogs_killed)
                    break
                try:
                    p.kill()
                except OSError, e:
                    # Python 2 'except ..., e' syntax; ESRCH means the hog
                    # already exited (assumed OOM-killed).
                    if e.errno == errno.ESRCH:
                        logging.info('Hog %d not found to kill, assume Oomed' %
                                     (hogs.index(p) + 1))
                    else:
                        logging.warning(
                            'Hog %d kill failed: %s' %
                            (hogs.index(p) + 1, os.strerror(e.errno)))
                else:
                    hogs_killed += 1
                time.sleep(2)
# Example #15
class platform_CompressedSwap(test.test):
    """
    Verify compressed swap is configured and basically works.

    Spawns memory-hog subprocesses until free memory (RAM + swap)
    approaches zero, watching for OOM kills and for the low-memory
    notification from /dev/chromeos-low-mem along the way.
    """
    version = 1
    # Name of the memory-hog helper binary built from srcdir in setup().
    executable = 'hog'
    # Swap configuration file; contains "0" when swap is disabled.
    swap_enable_file = '/home/chronos/.swap_enabled'
    # zram disk size in bytes, as written by the swap configuration.
    swap_disksize_file = '/sys/block/zram0/disksize'

    def setup(self):
        """Build the memory-hog helper binary in the test's source dir."""
        os.chdir(self.srcdir)
        utils.make(self.executable)

    def check_for_oom(self, hogs):
        """Return True if any hog process has exited (presumed OOM-killed)."""
        for idx, hog in enumerate(hogs):
            # poll() returns None while the process is still running.
            status = hog.poll()
            if status is None:
                continue
            logging.info('hog %d of %d is gone, assume oom: retcode %s' %
                         (idx + 1, len(hogs), status))
            return True
        return False

    def getting_low_mem_notification(self):
        """
        Check for a low memory notification by polling /dev/chromeos-low-mem.

        Returns True if the device reports POLLIN (kernel is signalling
        low memory), False otherwise.
        """
        # Use a context manager so the fd is closed even if register()
        # or poll() raises; the original leaked the fd on that path.
        with open('/dev/chromeos-low-mem', 'r') as lowmem_fd:
            lowmem_poller = select.poll()
            lowmem_poller.register(lowmem_fd, select.POLLIN)
            events = lowmem_poller.poll(0)
        return any(flag & select.POLLIN for _fd, flag in events)

    def run_once(self, just_checking_lowmem=False, checking_for_oom=False):
        """
        Exercise compressed swap by spawning memory hogs.

        @param just_checking_lowmem: skip the swap configuration checks
                and only exercise the low-memory notification path.
        @param checking_for_oom: keep creating hogs until an OOM kill is
                observed instead of stopping near the free-memory target.

        @raises error.TestFail on swap misconfiguration, unexpected OOM,
                or low-memory notification failures.
        """
        memtotal = utils.read_from_meminfo('MemTotal')
        swaptotal = utils.read_from_meminfo('SwapTotal')
        # Stop hog creation once free memory drops below ~3% of total.
        free_target = (memtotal + swaptotal) * 0.03

        # Check for proper swap space configuration.
        # If the swap enable file says "0", swap.conf does not create swap.
        if not just_checking_lowmem and not checking_for_oom:
            if os.path.exists(self.swap_enable_file):
                enable_size = utils.read_one_line(self.swap_enable_file)
            else:
                enable_size = "nonexistent"  # implies nonzero
            if enable_size == "0":
                if swaptotal != 0:
                    raise error.TestFail('The swap enable file said 0, but'
                                         ' swap was still enabled for %d.' %
                                         swaptotal)
                logging.info('Swap enable (0), swap disabled.')
            else:
                # Rather than parsing swap.conf logic to calculate a size,
                # use the value it writes to /sys/block/zram0/disksize.
                if not os.path.exists(self.swap_disksize_file):
                    raise error.TestFail(
                        'The %s swap enable file should have'
                        ' caused zram to load, but %s was'
                        ' not found.' % (enable_size, self.swap_disksize_file))
                disksize = utils.read_one_line(self.swap_disksize_file)
                # disksize is in bytes, swaptotal in K; use floor division
                # so the result stays an int on Python 3 as well.
                swaprequested = int(disksize) // 1000
                if (swaptotal < swaprequested * 0.9
                        or swaptotal > swaprequested * 1.1):
                    # NOTE: the literal percent sign must be escaped as
                    # '%%'; the previous bare '10% o...' parsed as a third
                    # (octal) conversion and raised TypeError instead of
                    # the intended TestFail.
                    raise error.TestFail('Our swap of %d K is not within 10%%'
                                         ' of the %d K we requested.' %
                                         (swaptotal, swaprequested))
                logging.info('Swap enable (%s), requested %d, total %d',
                             enable_size, swaprequested, swaptotal)

        first_oom = 0
        first_lowmem = 0
        hogs_killed = 0
        cleared_low_mem_notification = False

        # Loop over hog creation until MemFree+SwapFree approaches 0.
        # Confirm we do not see any OOMs (procs killed due to Out Of Memory).
        hogs = []
        cmd = [self.srcdir + '/' + self.executable, '50']
        logging.debug('Memory hog command line is %s', cmd)
        while len(hogs) < 200:
            memfree = utils.read_from_meminfo('MemFree')
            swapfree = utils.read_from_meminfo('SwapFree')
            total_free = memfree + swapfree
            logging.debug('nhogs %d: memfree %d, swapfree %d',
                          len(hogs), memfree, swapfree)
            if not checking_for_oom and total_free < free_target:
                break

            p = subprocess.Popen(cmd)
            # Make the hogs the preferred OOM-kill victims.
            utils.write_one_line('/proc/%d/oom_score_adj' % p.pid, '1000')
            hogs.append(p)

            time.sleep(2)

            if self.check_for_oom(hogs):
                first_oom = len(hogs)
                break

            # Check for low memory notification.
            if self.getting_low_mem_notification():
                if first_lowmem == 0:
                    first_lowmem = len(hogs)
                logging.info('Got low memory notification after hog %d',
                             len(hogs))

        logging.info('Finished creating %d hogs, SwapFree %d, MemFree %d, '
                     'low mem at %d, oom at %d',
                     len(hogs), swapfree, memfree, first_lowmem, first_oom)

        if not checking_for_oom and first_oom > 0:
            utils.system("killall -TERM hog")
            raise error.TestFail('Oom detected after %d hogs created' %
                                 len(hogs))

        # Before cleaning up all the hogs, verify that killing hogs back to
        # our initial low memory notification causes notification to end.
        if first_lowmem > 0:
            for p in hogs:
                if not self.getting_low_mem_notification():
                    cleared_low_mem_notification = True
                    logging.info('Cleared low memory notification after %d '
                                 'hogs were killed', hogs_killed)
                    break
                try:
                    p.kill()
                except OSError as e:  # 'as' form works on Python 2.6+ and 3.
                    if e.errno == errno.ESRCH:
                        logging.info('Hog %d not found to kill, assume Oomed',
                                     hogs.index(p) + 1)
                    else:
                        logging.warning(
                            'Hog %d kill failed: %s',
                            hogs.index(p) + 1, os.strerror(e.errno))
                else:
                    hogs_killed += 1
                time.sleep(2)

        # Clean up the rest of our hogs since they otherwise live forever.
        utils.system("killall -TERM hog")
        time.sleep(5)
        swapfree2 = utils.read_from_meminfo('SwapFree')
        logging.info('SwapFree was %d before cleanup, %d after.',
                     swapfree, swapfree2)

        # Raise exceptions due to low memory notification failures.
        if first_lowmem == 0:
            raise error.TestFail('We did not get low memory notification!')
        elif not cleared_low_mem_notification:
            raise error.TestFail('We did not clear low memory notification!')
        elif len(hogs) - hogs_killed < first_lowmem - 3:
            raise error.TestFail('We got low memory notification at hog %d, '
                                 'but we did not clear it until we dropped to '
                                 'hog %d' %
                                 (first_lowmem, len(hogs) - hogs_killed))
    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive:
                    e_msg = ("VM %d died while executing static_random_fill in "
                             "VM %d on allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            a_cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on allocator.py loop, vm: %s",
                          a_cmd, vm.name)
            session.sendline(a_cmd)

            out = ""
            try:
                logging.debug("Watching host memory while filling vm %s memory",
                              vm.name)
                while not out.startswith("PASS") and not out.startswith("FAIL"):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing static_random_fill"
                                 " on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d" % (free_mem))

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.monitor.cmd("stop")
                        for j in range(0, i):
                            lvms[j].destroy(gracefully = False)
                        time.sleep(20)
                        vm.monitor.cmd("c")
                        logging.debug("Only %s free memory, killing %d guests" %
                                      (free_mem, (i-1)))
                        last_vm = i
                        break
                    out = session.read_nonblocking(0.1)
                    time.sleep(2)
            except OSError, (err):
                logging.debug("Only %s host free memory, killing %d guests" %
                              (free_mem, (i - 1)))
                logging.debug("Stopping %s", vm.name)
                vm.monitor.cmd("stop")
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully = False)
                time.sleep(20)
                vm.monitor.cmd("c")
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s" % (vm.name))