Example #1
    def initialize(self, dir=None, pages_requested=20):
        self.dir = None

        self.job.require_gcc()

        utils.check_kernel_ver("2.6.16")

        # Check huge page number
        pages_available = 0
        if os.path.exists('/proc/sys/vm/nr_hugepages'):
            utils.write_one_line('/proc/sys/vm/nr_hugepages',
                                 str(pages_requested))
            nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages')
            pages_available = int(nr_hugepages)
        else:
            raise error.TestNAError('Kernel does not support hugepages')

        if pages_available < pages_requested:
            raise error.TestError('%d huge pages available, < %d pages '
                                  'requested' % (pages_available,
                                                 pages_requested))

        # Check if hugetlbfs has been mounted
        if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
            if not dir:
                dir = os.path.join(self.tmpdir, 'hugetlbfs')
                os.makedirs(dir)
            utils.system('mount -t hugetlbfs none %s' % dir)
            self.dir = dir
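
A mount made in initialize() implies a matching teardown. A minimal cleanup sketch, assuming autotest's usual test lifecycle (this body is not from the original test):

    def cleanup(self):
        # Unmount hugetlbfs only if initialize() mounted it; self.dir stays
        # None when the filesystem was already mounted.
        if self.dir:
            utils.system('umount %s' % self.dir, ignore_status=True)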
Example #2
def reset_zram():
    """
    Resets zram, clearing all swap space.
    """
    swapoff_timeout = 60
    zram_device = 'zram0'
    zram_device_path = os.path.join('/dev', zram_device)
    reset_path = os.path.join('/sys/block', zram_device, 'reset')
    disksize_path = os.path.join('/sys/block', zram_device, 'disksize')

    # Save the current disk size; resetting the device clears it.
    disksize = utils.read_one_line(disksize_path)

    # swapoff is prone to hanging, especially after heavy swap usage, so
    # time out swapoff if it takes too long.
    ret = utils.system('swapoff ' + zram_device_path,
                       timeout=swapoff_timeout, ignore_status=True)

    if ret != 0:
        raise error.TestFail('Could not reset zram - swapoff failed.')

    # Sleep to avoid "device busy" errors.
    time.sleep(1)
    utils.write_one_line(reset_path, '1')
    time.sleep(1)
    utils.write_one_line(disksize_path, disksize)
    utils.system('mkswap ' + zram_device_path)
    utils.system('swapon ' + zram_device_path)
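
Afterwards a caller might confirm the device re-registered as swap. A hedged usage sketch built only from helpers already used in these examples:

reset_zram()
# /proc/swaps lists active swap devices; zram0 should reappear.
if not utils.file_contains_pattern('/proc/swaps', 'zram0'):
    raise error.TestFail('zram0 missing from /proc/swaps after reset')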
Example #4
    def execute(self, iterations=1):
        dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
        dirty_background_ratio = utils.read_one_line(
            '/proc/sys/vm/dirty_background_ratio')
        try:
            self.run_the_test(iterations)
        finally:
            # Restore the saved writeback tunables even if the test fails.
            utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
            utils.write_one_line('/proc/sys/vm/dirty_background_ratio',
                                 dirty_background_ratio)
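
The save/restore pattern above generalizes to a context manager. A minimal sketch; the sysctl_override name is ours, not autotest's:

import contextlib

@contextlib.contextmanager
def sysctl_override(path, value):
    """Temporarily overwrite a one-line /proc tunable, restoring it on exit."""
    saved = utils.read_one_line(path)
    utils.write_one_line(path, value)
    try:
        yield
    finally:
        utils.write_one_line(path, saved)

# Usage:
# with sysctl_override('/proc/sys/vm/dirty_ratio', '4'):
#     run_the_test()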
Example #5
def move_tasks_into_container(name, tasks):
    task_file = tasks_path(name)
    for task in tasks:
        try:
            logging.debug('moving task %s into container "%s"', task, name)
            utils.write_one_line(task_file, task)
        except Exception:
            if utils.pid_is_alive(task):
                raise  # task exists but couldn't move it
Example #7
    def _cg_set_quota(self, quota=-1):
        """Set CPU quota that can be used for cgroup

        Default of -1 will disable throttling
        """
        utils.write_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.cfs_quota_us"), quota)
        rd_quota = utils.read_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.cfs_quota_us"))
        # read_one_line returns a string, so convert before comparing.
        if int(rd_quota) != quota:
            raise error.TestFail("Failed to set cpu quota to %d" % quota)
Example #8
    def initialize(self, outputsize=1048576, tracepoints=None, **dargs):
        self.job.require_gcc()

        self.tracepoints = tracepoints
        self.ltt_bindir = os.path.join(self.srcdir, 'lttctl')
        self.lttctl = os.path.join(self.ltt_bindir, 'lttctl')
        self.lttd = os.path.join(self.srcdir, 'lttd', 'lttd')
        self.armall = os.path.join(self.ltt_bindir, 'ltt-armall')
        self.disarmall = os.path.join(self.ltt_bindir, 'ltt-disarmall')
        self.mountpoint = '/mnt/debugfs'
        self.outputsize = outputsize

        os.putenv('LTT_DAEMON', self.lttd)

        if not os.path.exists(self.mountpoint):
            os.mkdir(self.mountpoint)

        utils.system('mount -t debugfs debugfs ' + self.mountpoint,
                     ignore_status=True)
        utils.system('modprobe ltt-control')
        utils.system('modprobe ltt-statedump')
        # clean up from any tracing we left running
        utils.system(self.lttctl + ' -n test -R', ignore_status=True)
        utils.system(self.disarmall, ignore_status=True)

        if tracepoints is None:
            utils.system(self.armall, ignore_status=True)
        else:
            for tracepoint in self.tracepoints:
                if tracepoint in ('list_process_state',
                                  'user_generic_thread_brand', 'fs_exec',
                                  'kernel_process_fork', 'kernel_process_free',
                                  'kernel_process_exit',
                                  'kernel_arch_kthread_create',
                                  'list_statedump_end', 'list_vm_map'):
                    channel = 'processes'
                elif tracepoint in ('list_interrupt', 'statedump_idt_table',
                                    'statedump_sys_call_table'):
                    channel = 'interrupts'
                elif tracepoint in ('list_network_ipv4_interface',
                                    'list_network_ip_interface'):
                    channel = 'network'
                elif tracepoint in ('kernel_module_load',
                                    'kernel_module_free'):
                    channel = 'modules'
                else:
                    channel = ''
                print 'Connecting ' + tracepoint
                utils.write_one_line(
                    '/proc/ltt',
                    'connect ' + tracepoint + ' default dynamic ' + channel)
Example #9
    def _cg_set_shares(self, shares=None):
        """Set CPU shares that can be used for cgroup

        Default of None reads total shares for cpu group and assigns that so
        there will be no throttling
        """
        if shares is None:
            shares = self._cg_total_shares()
        utils.write_one_line(os.path.join(self._CG_CRB_DIR, "cpu.shares"),
                             shares)
        rd_shares = utils.read_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.shares"))
        # read_one_line returns a string, so normalize both sides.
        if int(rd_shares) != int(shares):
            raise error.TestFail("Failed to set cpu shares to %s" % shares)
Example #11
    def flush(self):
        """Flush trace buffer.

        Raises:
            error.TestFail: If unable to flush
        """
        self.off()
        fname = os.path.join(self._TRACE_ROOT, 'free_buffer')
        utils.write_one_line(fname, 1)
        self._buffer_ptr = 0

        fname = os.path.join(self._TRACE_ROOT, 'buffer_size_kb')
        result = utils.read_one_line(fname).strip()
        # 'is' tests object identity; compare string values with '=='.
        return result == '0'
Example #12
    def _onoff(self, val):
        """Enable/Disable tracing.

        Arguments:
            val: integer, 1 for on, 0 for off

        Raises:
            error.TestFail: If unable to enable/disable tracing
        """
        utils.write_one_line(self._TRACE_EN_PATH, val)
        fname = os.path.join(self._TRACE_ROOT, 'tracing_on')
        result = int(utils.read_one_line(fname).strip())
        if result != val:
            # Parenthesize the conditional: '%' binds tighter than if/else.
            raise error.TestFail("Unable to %sable tracing" %
                                 ('en' if val == 1 else 'dis'))
Example #13
    def run_the_test(self, iterations):
        utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

        cmd = os.path.join(self.srcdir, 'linus_stress')
        args = "%d" % (utils.memtotal() / 32)

        profilers = self.job.profilers
        if profilers.present():
            profilers.start(self)

        for i in range(iterations):
            utils.system(cmd + ' ' + args)

        if profilers.present():
            profilers.stop(self)
            profilers.report(self)
Example #15
    def _cg_start_task(self, in_cgroup=True):
        """Start a CPU hogging task and add to cgroup.

        Args:
            in_cgroup: Boolean, if true add to cgroup otherwise just start.

        Returns:
            integer of pid of task started
        """
        null_fd = open("/dev/null", "w")
        # With a zero increment, 'seq 0 0 0' loops forever printing zeros
        # (on coreutils builds that accept it), making a simple CPU hog.
        cmd = ['seq', '0', '0', '0']
        task = subprocess.Popen(cmd, stdout=null_fd)
        self._tasks.append(task)

        if in_cgroup:
            utils.write_one_line(os.path.join(self._CG_CRB_DIR, "tasks"),
                                 task.pid)
        return task.pid
Example #16
    def setup_power_manager(self):
        # create directory for temporary settings
        self.tempdir = tempfile.mkdtemp(prefix='IdleSuspend.')
        logging.info('using temporary directory %s', self.tempdir)

        # override power manager settings
        for key, val in POWER_MANAGER_SETTINGS.iteritems():
            logging.info('overriding %s to %s', key, val)
            tmp_path = '%s/%s' % (self.tempdir, key)
            mount_path = '/usr/share/power_manager/%s' % key
            utils.write_one_line(tmp_path, str(val))
            utils.run('mount --bind %s %s' % (tmp_path, mount_path))
            self.mounts.append(mount_path)

        # override /sys/power/state with fifo
        fifo_path = '%s/sys_power_state' % self.tempdir
        os.mkfifo(fifo_path)
        utils.run('mount --bind %s /sys/power/state' % fifo_path)
        self.mounts.append('/sys/power/state')
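
The bind mounts above need an explicit teardown. A sketch of the cleanup counterpart, assuming autotest's usual cleanup hook (not from the original test):

    def cleanup(self):
        # Undo the bind mounts in reverse order, then drop the temp dir.
        for mount_path in reversed(self.mounts):
            utils.run('umount %s' % mount_path, ignore_status=True)
        shutil.rmtree(self.tempdir, ignore_errors=True)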
Example #17
def set_io_controls(container_name,
                    disks=[],
                    ioprio_classes=[PROPIO_NORMAL],
                    io_shares=[95],
                    io_limits=[0]):
    # set the propio controls for one container, for selected disks
    # writing directly to /dev/cgroup/container_name/io.io_service_level
    #    without using containerd or container.py
    # See wiki ProportionalIOScheduler for definitions
    # ioprio_classes: list of service classes, one per disk
    #    using numeric propio service classes as used by kernel API, namely
    #       1: RT, Real Time, aka PROPIO_PRIO
    #       2: BE, Best Effort, aka PROPIO_NORMAL
    #       3: PROPIO_IDLE
    # io_shares: list of disk-time-fractions, one per disk,
    #       as percentage integer 0..100
    # io_limits: list of limit on/off, one per disk
    #       0: no limit, shares use of other containers' unused disk time
    #       1: limited, container's use of disk time is capped to given DTF
    # ioprio_classes defaults to best-effort
    # io_limit defaults to no limit, use slack time
    if not disks:  # defaults to all drives
        disks = all_drive_names()
        io_shares = [io_shares[0]] * len(disks)
        ioprio_classes = [ioprio_classes[0]] * len(disks)
        io_limits = [io_limits[0]] * len(disks)
    if not (len(disks) == len(ioprio_classes) and len(disks) == len(io_shares)
            and len(disks) == len(io_limits)):
        raise error.AutotestError('Unequal number of values for io controls')
    service_level = io_attr(container_name, 'io_service_level')
    if not os.path.exists(service_level):
        return  # kernel predates propio features
        # or io cgroup is mounted separately from cpusets
    disk_infos = []
    for disk, ioclass, limit, share in zip(disks, ioprio_classes, io_limits,
                                           io_shares):
        parts = (disk, str(ioclass), str(limit), str(share))
        disk_info = ' '.join(parts)
        utils.write_one_line(service_level, disk_info)
        disk_infos.append(disk_info)
    logging.debug('set_io_controls of %s to %s', container_name,
                  ', '.join(disk_infos))
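
For reference, a call matching the comments above might look like this; the container and disk names are illustrative:

# Give one container 40% of disk time on sda, best-effort class, capped:
set_io_controls('mycontainer', disks=['sda'],
                ioprio_classes=[PROPIO_NORMAL],
                io_shares=[40], io_limits=[1])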
Example #18
    def _event_onoff(self, event, val):
        """Enable/Disable tracing event.

        TODO(tbroch) Consider allowing wild card enabling of trace events via
            /sys/kernel/debug/tracing/set_event although it makes filling buffer
            really easy

        Arguments:
            event: list of events.
                   See kernel(Documentation/trace/events.txt) for formatting.
            val: integer, 1 for on, 0 for off

        Returns:
            True if successful, False otherwise.
        """
        logging.debug("event_onoff: event:%s val:%d", event, val)
        event_path = event.replace(':', '/')
        fname = os.path.join(self._TRACE_ROOT, 'events', event_path, 'enable')

        if not os.path.exists(fname):
            logging.warning("Unable to locate tracing event %s", fname)
            return False
        utils.write_one_line(fname, val)

        fname = os.path.join(self._TRACE_ROOT, "set_event")
        found = False
        with open(fname) as fd:
            for ln in fd.readlines():
                logging.debug("set_event ln:%s", ln)
                if re.findall(event, ln):
                    found = True
                    break

        if val == 1 and not found:
            logging.warning("Event %s not enabled", event)
            return False

        if val == 0 and found:
            logging.warning("Event %s not disabled", event)
            return False

        return True
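
Events use the subsystem:event form from Documentation/trace/events.txt. A hypothetical call pattern (tracer and run_workload are illustrative names):

# Enable context-switch tracing, exercise the system, then disable it.
if tracer._event_onoff('sched:sched_switch', 1):
    run_workload()
    tracer._event_onoff('sched:sched_switch', 0)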
Example #20
def create_container_with_specific_mems_cpus(name, mems, cpus):
    need_fake_numa()
    os.mkdir(full_path(name))
    utils.write_one_line(cpuset_attr(name, 'mem_hardwall'), '1')
    utils.write_one_line(mems_path(name), ','.join(map(str, mems)))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('container %s has %d cpus and %d nodes totalling %s bytes',
                  name, len(cpus), len(get_mem_nodes(name)),
                  utils.human_format(container_bytes(name)))
Example #22
def create_container_via_memcg(name, parent, bytes, cpus):
    # create container via direct memcg cgroup writes
    os.mkdir(full_path(name))
    nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), nodes)  # inherit parent's nodes
    utils.write_one_line(memory_path(name) + '.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('Created container %s directly via memcg,'
                  ' has %d cpus and %s bytes',
                  name, len(cpus), utils.human_format(container_bytes(name)))
Example #24
    def run_single_test(self, compression_factor, num_procs, cycles,
                        swap_target, switch_delay, temp_dir, selections):
        """
        Runs the benchmark for a single swap target usage.

        @param compression_factor: Compression factor (int)
                                   example: compression_factor=3 is 1:3 ratio
        @param num_procs: Number of hog processes to use
        @param cycles: Number of iterations over hogs list for a given swap lvl
        @param swap_target: Floating point value of target swap usage
        @param switch_delay: Number of seconds to wait between poking hogs
        @param temp_dir: Path of the temporary directory to use
        @param selections: List of selection function names
        """
        # Get initial memory state.
        self.sample_memory_state()
        swap_target_usage = swap_target * self.swap_total

        # usage_target is our estimate on the amount of memory that needs to
        # be allocated to reach our target swap usage.
        swap_target_phys = swap_target_usage / compression_factor
        usage_target = self.mem_free - swap_target_phys + swap_target_usage
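        # Worked example with hypothetical numbers: 1000 MB free, a 0.5 swap
        # target on 600 MB of swap, and 3:1 compression give
        # swap_target_usage = 300 MB, swap_target_phys = 100 MB, and
        # usage_target = 1000 - 100 + 300 = 1200 MB.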

        hogs = []
        paths = []
        sockets = []
        cmd = [os.path.join(self.srcdir, self.executable)]

        # Launch hog processes.
        while len(hogs) < num_procs:
            socket_path = os.path.join(temp_dir, str(len(hogs)))
            paths.append(socket_path)
            launch_cmd = list(cmd)
            launch_cmd.append(socket_path)
            launch_cmd.append(str(compression_factor))
            p = subprocess.Popen(launch_cmd)
            utils.write_one_line('/proc/%d/oom_score_adj' % p.pid, '15')
            hogs.append(p)

        # Open sockets to hog processes, waiting for them to bind first.
        time.sleep(5)
        for socket_path in paths:
            hog_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sockets.append(hog_sock)
            hog_sock.connect(socket_path)

        # Allocate conservatively until we reach our target.
        while self.usage_ratio <= swap_target:
            free_per_hog = (usage_target - self.total_usage) / len(hogs)
            alloc_per_hog_mb = int(0.80 * free_per_hog) / 1024
            if alloc_per_hog_mb <= 0:
                alloc_per_hog_mb = 1

            # Send balloon command.
            for hog_sock in sockets:
                self.send_balloon(hog_sock, alloc_per_hog_mb)

            # Wait until all hogs report back.
            for hog_sock in sockets:
                self.recv_balloon_results(hog_sock, alloc_per_hog_mb)

            # We need to sample memory and swap usage again.
            self.sample_memory_state()

        # Once memory is allocated, report how close we got to the swap target.
        self.report_stat('percent', swap_target, None,
                         'usage', 'value', self.usage_ratio)

        # Run tests by sending "touch memory" command to hogs.
        for f_name, f in get_selection_funcs(selections).iteritems():
            result_list = []

            for count in range(cycles):
                for i in range(len(hogs)):
                    selection = f(i, len(hogs))
                    hog_sock = sockets[selection]
                    retcode = hogs[selection].poll()

                    # Ensure that the hog is not dead.
                    if retcode is None:
                        # Delay between switching "tabs".
                        if switch_delay > 0.0:
                            time.sleep(switch_delay)

                        self.send_poke(hog_sock)

                        result = self.recv_poke_results(hog_sock)
                        if result:
                            result_list.append(result)
                    else:
                        logging.info("Hog died unexpectedly; continuing")

            # Convert from list of tuples (rtime, utime, stime, faults) to
            # a list of rtimes, a list of utimes, etc.
            results_unzipped = [list(x) for x in zip(*result_list)]
            wall_times = results_unzipped[0]
            user_times = results_unzipped[1]
            sys_times = results_unzipped[2]
            fault_counts = results_unzipped[3]

            # Calculate average time to service a fault for each sample.
            us_per_fault_list = []
            for i in range(len(sys_times)):
                if fault_counts[i] == 0.0:
                    us_per_fault_list.append(0.0)
                else:
                    us_per_fault_list.append(sys_times[i] * 1000.0 /
                                             fault_counts[i])

            self.report_stats('ms', swap_target, f_name, 'rtime', wall_times)
            self.report_stats('ms', swap_target, f_name, 'utime', user_times)
            self.report_stats('ms', swap_target, f_name, 'stime', sys_times)
            self.report_stats('faults', swap_target, f_name, 'faults',
                              fault_counts)
            self.report_stats('us_fault', swap_target, f_name, 'fault_time',
                              us_per_fault_list)

        # Send exit message to all hogs.
        for hog_sock in sockets:
            self.send_exit(hog_sock)

        time.sleep(1)

        # If hogs didn't exit normally, kill them.
        for hog in hogs:
            retcode = hog.poll()
            if retcode is None:
                logging.debug("killing all remaining hogs")
                utils.system("killall -TERM hog")
                # Wait to ensure hogs have died before continuing.
                time.sleep(5)
                break
Example #25
    def initialize(self):
        # Save & disable console_suspend module param
        self.old_console_suspend = utils.read_file(SYSFS_CONSOLE_SUSPEND)
        utils.write_one_line(SYSFS_CONSOLE_SUSPEND, 'N')
Example #26
    def run_once(self, just_checking_lowmem=False, checking_for_oom=False):

        memtotal = utils.read_from_meminfo('MemTotal')
        swaptotal = utils.read_from_meminfo('SwapTotal')
        free_target = (memtotal + swaptotal) * 0.03

        # Check for proper swap space configuration.
        # If the swap enable file says "0", swap.conf does not create swap.
        if not just_checking_lowmem and not checking_for_oom:
            if os.path.exists(self.swap_enable_file):
                enable_size = utils.read_one_line(self.swap_enable_file)
            else:
                enable_size = "nonexistent"  # implies nonzero
            if enable_size == "0":
                if swaptotal != 0:
                    raise error.TestFail('The swap enable file said 0, but'
                                         ' swap was still enabled for %d.' %
                                         swaptotal)
                logging.info('Swap enable (0), swap disabled.')
            else:
                # Rather than parsing swap.conf logic to calculate a size,
                # use the value it writes to /sys/block/zram0/disksize.
                if not os.path.exists(self.swap_disksize_file):
                    raise error.TestFail(
                        'The %s swap enable file should have'
                        ' caused zram to load, but %s was'
                        ' not found.' % (enable_size, self.swap_disksize_file))
                disksize = utils.read_one_line(self.swap_disksize_file)
                swaprequested = int(disksize) / 1000
                if (swaptotal < swaprequested * 0.9
                        or swaptotal > swaprequested * 1.1):
                    # '%%' escapes the literal percent sign in the format.
                    raise error.TestFail('Our swap of %d K is not within 10%%'
                                         ' of the %d K we requested.' %
                                         (swaptotal, swaprequested))
                logging.info('Swap enable (%s), requested %d, total %d' %
                             (enable_size, swaprequested, swaptotal))

        first_oom = 0
        first_lowmem = 0
        cleared_low_mem_notification = False

        # Loop over hog creation until MemFree+SwapFree approaches 0.
        # Confirm we do not see any OOMs (procs killed due to Out Of Memory).
        hogs = []
        cmd = [self.srcdir + '/' + self.executable, '50']
        logging.debug('Memory hog command line is %s' % cmd)
        while len(hogs) < 200:
            memfree = utils.read_from_meminfo('MemFree')
            swapfree = utils.read_from_meminfo('SwapFree')
            total_free = memfree + swapfree
            logging.debug('nhogs %d: memfree %d, swapfree %d' %
                          (len(hogs), memfree, swapfree))
            if not checking_for_oom and total_free < free_target:
                break

            p = subprocess.Popen(cmd)
            utils.write_one_line('/proc/%d/oom_score_adj' % p.pid, '1000')
            hogs.append(p)

            time.sleep(2)

            if self.check_for_oom(hogs):
                first_oom = len(hogs)
                break

            # Check for low memory notification.
            if self.getting_low_mem_notification():
                if first_lowmem == 0:
                    first_lowmem = len(hogs)
                logging.info('Got low memory notification after hog %d' %
                             len(hogs))

        logging.info('Finished creating %d hogs, SwapFree %d, MemFree %d, '
                     'low mem at %d, oom at %d' %
                     (len(hogs), swapfree, memfree, first_lowmem, first_oom))

        if not checking_for_oom and first_oom > 0:
            utils.system("killall -TERM hog")
            raise error.TestFail('Oom detected after %d hogs created' %
                                 len(hogs))

        # Before cleaning up all the hogs, verify that killing hogs back to
        # our initial low memory notification causes notification to end.
        if first_lowmem > 0:
            hogs_killed = 0
            for p in hogs:
                if not self.getting_low_mem_notification():
                    cleared_low_mem_notification = True
                    logging.info('Cleared low memory notification after %d '
                                 'hogs were killed' % hogs_killed)
                    break
                try:
                    p.kill()
                except OSError, e:
                    if e.errno == errno.ESRCH:
                        logging.info('Hog %d not found to kill, assume Oomed' %
                                     (hogs.index(p) + 1))
                    else:
                        logging.warning(
                            'Hog %d kill failed: %s' %
                            (hogs.index(p) + 1, os.strerror(e.errno)))
                else:
                    hogs_killed += 1
                time.sleep(2)
Example #27
    def __init__(self, prefs):
        shutil.copytree(self._PREFDIR, self._TEMPDIR)
        for name, value in prefs.iteritems():
            utils.write_one_line('%s/%s' % (self._TEMPDIR, name), value)
        utils.system('mount --bind %s %s' % (self._TEMPDIR, self._PREFDIR))
        utils.restart_job('powerd')
Example #28
    def initialize(self):
        # Store the setting if the system has the CPUQuiet feature
        if os.path.exists(SYSFS_CPUQUIET_ENABLE):
            self.is_cpuquiet_enabled = utils.read_file(SYSFS_CPUQUIET_ENABLE)
            utils.write_one_line(SYSFS_CPUQUIET_ENABLE, '0')