def initialize(self, hugetlbfs_dir=None, pages_requested=20):
    self.hugetlbfs_dir = None

    # check if basic utilities are present
    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")
    os_dep.library('libpthread.a')

    # Check huge page number
    pages_available = 0
    if os.path.exists('/proc/sys/vm/nr_hugepages'):
        utils.write_one_line('/proc/sys/vm/nr_hugepages',
                             str(pages_requested))
        nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages')
        pages_available = int(nr_hugepages)
    else:
        raise error.TestNAError('Kernel does not support hugepages')

    if pages_available < pages_requested:
        raise error.TestError('%d pages available, < %d pages requested'
                              % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not hugetlbfs_dir:
            hugetlbfs_dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(hugetlbfs_dir)
        utils.system('mount -t hugetlbfs none %s' % hugetlbfs_dir)
    self.hugetlbfs_dir = hugetlbfs_dir

def initialize(self, hugetlbfs_dir=None, pages_requested=20):
    self.install_required_pkgs()
    self.hugetlbfs_dir = None

    # check if basic utilities are present
    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")
    os_dep.library('libpthread.a')

    # Check huge page number
    pages_available = 0
    if os.path.exists('/proc/sys/vm/nr_hugepages'):
        utils.write_one_line('/proc/sys/vm/nr_hugepages',
                             str(pages_requested))
        nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages')
        pages_available = int(nr_hugepages)
    else:
        raise error.TestNAError('Kernel does not support hugepages')

    if pages_available < pages_requested:
        raise error.TestError('%d pages available, < %d pages requested'
                              % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not hugetlbfs_dir:
            hugetlbfs_dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(hugetlbfs_dir)
        utils.system('mount -t hugetlbfs none %s' % hugetlbfs_dir)
    self.hugetlbfs_dir = hugetlbfs_dir

def initialize(self, dir=None, pages_requested=20):
    self.dir = None

    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")

    # Check huge page number
    pages_available = 0
    if os.path.exists('/proc/sys/vm/nr_hugepages'):
        utils.write_one_line('/proc/sys/vm/nr_hugepages',
                             str(pages_requested))
        nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages')
        pages_available = int(nr_hugepages)
    else:
        raise error.TestNAError('Kernel does not support hugepages')

    if pages_available < pages_requested:
        raise error.TestError('%d huge pages available, < %d pages requested'
                              % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not dir:
            dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(dir)
        utils.system('mount -t hugetlbfs none %s' % dir)
    self.dir = dir

def execute(self, iterations=1):
    dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
    dirty_background_ratio = utils.read_one_line(
        '/proc/sys/vm/dirty_background_ratio')
    try:
        self.run_the_test(iterations)
    finally:
        utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio',
                             dirty_background_ratio)

def move_tasks_into_container(name, tasks):
    task_file = tasks_path(name)
    for task in tasks:
        try:
            logging.debug('moving task %s into container "%s"', task, name)
            utils.write_one_line(task_file, task)
        except Exception:
            if utils.pid_is_alive(task):
                raise  # task exists but couldn't move it

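# Usage sketch (illustrative, not from the source): after a container has been
# created, an arbitrary set of task pids can be moved into it. The container
# name and the pid strings below are hypothetical.
#
#     move_tasks_into_container('test_container', ['1234', '1235'])
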
def initialize(self, outputsize=1048576, tracepoints=None, **dargs):
    self.job.require_gcc()

    self.tracepoints = tracepoints
    self.ltt_bindir = os.path.join(self.srcdir, 'lttctl')
    self.lttctl = os.path.join(self.ltt_bindir, 'lttctl')
    self.lttd = os.path.join(self.srcdir, 'lttd', 'lttd')
    self.armall = os.path.join(self.ltt_bindir, 'ltt-armall')
    self.disarmall = os.path.join(self.ltt_bindir, 'ltt-disarmall')
    self.mountpoint = '/mnt/debugfs'
    self.outputsize = outputsize

    os.putenv('LTT_DAEMON', self.lttd)

    if not os.path.exists(self.mountpoint):
        os.mkdir(self.mountpoint)

    utils.system('mount -t debugfs debugfs ' + self.mountpoint,
                 ignore_status=True)
    utils.system('modprobe ltt-control')
    utils.system('modprobe ltt-statedump')

    # clean up from any tracing we left running
    utils.system(self.lttctl + ' -n test -R', ignore_status=True)
    utils.system(self.disarmall, ignore_status=True)

    if tracepoints is None:
        utils.system(self.armall, ignore_status=True)
    else:
        for tracepoint in self.tracepoints:
            if tracepoint in ('list_process_state',
                              'user_generic_thread_brand', 'fs_exec',
                              'kernel_process_fork', 'kernel_process_free',
                              'kernel_process_exit',
                              'kernel_arch_kthread_create',
                              'list_statedump_end', 'list_vm_map'):
                channel = 'processes'
            elif tracepoint in ('list_interrupt',
                                'statedump_idt_table',
                                'statedump_sys_call_table'):
                channel = 'interrupts'
            elif tracepoint in ('list_network_ipv4_interface',
                                'list_network_ip_interface'):
                channel = 'network'
            elif tracepoint in ('kernel_module_load', 'kernel_module_free'):
                channel = 'modules'
            else:
                channel = ''
            print 'Connecting ' + tracepoint
            utils.write_one_line('/proc/ltt', 'connect ' + tracepoint +
                                 ' default dynamic ' + channel)

def run_the_test(self, iterations):
    utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
    utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

    cmd = os.path.join(self.srcdir, 'linus_stress')
    args = "%d" % (utils_memory.memtotal() / 32)

    profilers = self.job.profilers
    if profilers.present():
        profilers.start(self)

    for i in range(iterations):
        utils.system(cmd + ' ' + args)

    if profilers.present():
        profilers.stop(self)
        profilers.report(self)

def run_the_test(self, iterations):
    utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
    utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

    cmd = os.path.join(self.srcdir, 'linus_stress')
    args = "%d" % (utils.memtotal() / 32)

    profilers = self.job.profilers
    if profilers.present():
        profilers.start(self)

    for i in range(iterations):
        utils.system(cmd + ' ' + args)

    if profilers.present():
        profilers.stop(self)
        profilers.report(self)

def set_io_controls(container_name, disks=[], ioprio_classes=[PROPIO_NORMAL],
                    io_shares=[95], io_limits=[0]):
    # set the propio controls for one container, for selected disks:
    # writing directly to /dev/cgroup/container_name/io.io_service_level
    #    without using containerd or container.py
    # See wiki ProportionalIOScheduler for definitions
    # ioprio_classes: list of service classes, one per disk
    #    using numeric propio service classes as used by kernel API, namely
    #       1: RT, Real Time, aka PROPIO_PRIO
    #       2: BE, Best Effort, aka PROPIO_NORMAL
    #       3: PROPIO_IDLE
    # io_shares: list of disk-time-fractions, one per disk,
    #       as percentage integer 0..100
    # io_limits: list of limit on/off, one per disk
    #       0: no limit, shares use of other containers' unused disk time
    #       1: limited, container's use of disk time is capped to given DTF
    # ioprio_classes defaults to best-effort
    # io_limit defaults to no limit, use slack time
    if not disks:  # defaults to all drives
        disks = all_drive_names()
        io_shares = [io_shares[0]] * len(disks)
        ioprio_classes = [ioprio_classes[0]] * len(disks)
        io_limits = [io_limits[0]] * len(disks)
    if not (len(disks) == len(ioprio_classes) and
            len(disks) == len(io_shares) and
            len(disks) == len(io_limits)):
        raise error.AutotestError('Unequal number of values for io controls')
    service_level = io_attr(container_name, 'io_service_level')
    if not os.path.exists(service_level):
        return  # kernel predates propio features
                # or io cgroup is mounted separately from cpusets
    disk_infos = []
    for disk, ioclass, limit, share in zip(disks, ioprio_classes,
                                           io_limits, io_shares):
        parts = (disk, str(ioclass), str(limit), str(share))
        disk_info = ' '.join(parts)
        utils.write_one_line(service_level, disk_info)
        disk_infos.append(disk_info)
    logging.debug('set_io_controls of %s to %s',
                  container_name, ', '.join(disk_infos))

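# Usage sketch (illustrative, not from the source): cap a container's disk time
# on one drive to 40% best-effort service, per the parameter semantics spelled
# out in the comments above. The container name and disk name are hypothetical.
#
#     set_io_controls('mycontainer', disks=['sda'],
#                     ioprio_classes=[PROPIO_NORMAL],
#                     io_shares=[40], io_limits=[1])
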
def create_container_with_specific_mems_cpus(name, mems, cpus):
    need_fake_numa()
    os.mkdir(full_path(name))
    utils.write_one_line(cpuset_attr(name, 'mem_hardwall'), '1')
    utils.write_one_line(mems_path(name), ','.join(map(str, mems)))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('container %s has %d cpus and %d nodes totalling %s bytes',
                  name, len(cpus), len(get_mem_nodes(name)),
                  utils.human_format(container_bytes(name)))

def create_container_via_memcg(name, parent, bytes, cpus):
    # create container via direct memcg cgroup writes
    os.mkdir(full_path(name))
    nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), nodes)  # inherit parent's nodes
    utils.write_one_line(memory_path(name) + '.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug('Created container %s directly via memcg,'
                  ' has %d cpus and %s bytes',
                  name, len(cpus), utils.human_format(container_bytes(name)))

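# Usage sketch (illustrative, not from the source): create a memcg-backed child
# of the root container with a 2 GB memory limit, pinned to cpus 0 and 1. The
# container name, the empty-string parent (assumed here to denote the root),
# and the size are hypothetical.
#
#     create_container_via_memcg('worker0', '', 2 * 1024 ** 3, [0, 1])
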
def initialize(self):
    # Store the setting if the system has CPUQuiet feature
    if os.path.exists(SYSFS_CPUQUIET_ENABLE):
        self.is_cpuquiet_enabled = utils.read_file(SYSFS_CPUQUIET_ENABLE)
        utils.write_one_line(SYSFS_CPUQUIET_ENABLE, '0')
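
# Sketch of a matching cleanup (assumed; not shown in the source): restore the
# CPUQuiet setting captured in initialize(). The method name follows the usual
# autotest test lifecycle, but this exact code is an illustration only.
#
#     def cleanup(self):
#         if hasattr(self, 'is_cpuquiet_enabled'):
#             utils.write_one_line(SYSFS_CPUQUIET_ENABLE,
#                                  self.is_cpuquiet_enabled.strip())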