Exemplo n.º 1
0
 def execute(self, iterations = 1):
     """Run the test while preserving the host's VM dirty-ratio settings.

     Snapshots /proc/sys/vm/dirty_ratio and dirty_background_ratio up
     front, runs the test body, and restores both values even when the
     test raises.

     @param iterations: number of iterations handed to run_the_test.
     """
     saved = [(knob, utils.read_one_line('/proc/sys/vm/' + knob))
              for knob in ('dirty_ratio', 'dirty_background_ratio')]
     try:
         self.run_the_test(iterations)
     finally:
         # Restore in the original capture order.
         for knob, value in saved:
             utils.write_one_line('/proc/sys/vm/' + knob, value)
Exemplo n.º 2
0
    def verify_graphics_dvfs(self):
        """ On systems which support DVFS, check that we get into the lowest
        clock frequency; idle before doing so, and retry every 0.25 seconds
        for up to 20 seconds.

        @returns '' on success, or a short error description string.
        """
        logging.info('Running verify_graphics_dvfs')
        if self._gpu_type == 'mali':
            # Mali DVFS sysfs layout and enable semantics differ per SoC.
            if self._cpu_type == 'exynos5':
                node = '/sys/devices/11800000.mali/'
                enable_node = 'dvfs'
                enable_value = 'on'
            elif self._cpu_type == 'rockchip':
                node = '/sys/devices/ffa30000.gpu/'
                enable_node = 'dvfs_enable'
                enable_value = '1'
            else:
                logging.error('Error: Unknown CPU type (%s) for mali GPU.',
                              self._cpu_type)
                return 'Unknown CPU type for mali GPU. '

            # Locate the actual sysfs files beneath the device node.
            clock_path = utils.locate_file('clock', node)
            enable_path = utils.locate_file(enable_node, node)
            freqs_path = utils.locate_file('available_frequencies', node)

            enable = utils.read_one_line(enable_path)
            logging.info('DVFS enable = %s', enable)
            if not enable == enable_value:
                logging.error('Error: DVFS is not enabled')
                return 'DVFS is not enabled. '

            # available_frequencies are always sorted in ascending order
            lowest_freq = int(utils.read_one_line(freqs_path))

            # daisy_* (exynos5250) boards set idle frequency to 266000000
            # See: crbug.com/467401 and crosbug.com/p/19710
            if self._board.startswith('daisy'):
                lowest_freq = 266000000

            logging.info('Expecting idle DVFS clock = %u', lowest_freq)

            # Poll every 0.25 s, up to 80 tries (~20 s), for the GPU clock
            # to drop to (or below) the expected idle frequency.
            tries = 0
            found = False
            while not found and tries < 80:
                time.sleep(0.25)
                clock = int(utils.read_one_line(clock_path))
                if clock <= lowest_freq:
                    logging.info('Found idle DVFS clock = %u', clock)
                    found = True
                    break

                tries += 1

            if not found:
                # Dump process activity to help diagnose what kept the GPU busy.
                utils.log_process_activity()
                logging.error('Error: DVFS clock (%u) > min (%u)', clock,
                              lowest_freq)
                return 'Did not see the min DVFS clock. '

        return ''
    def run_once(self, compression_factor=3, num_procs=50, cycles=20,
                 selections=None, swap_targets=None, switch_delay=0.0):
        """Run the swap stress workload over a range of swap-usage targets.

        @param compression_factor: compressibility of the generated data.
        @param num_procs: number of worker processes per run.
        @param cycles: iterations within a single test run.
        @param selections: memory access patterns to exercise.
        @param swap_targets: fractions of swap space to drive usage to.
        @param switch_delay: delay between process switches, in seconds.

        Raises:
            error.TestFail: if swap is misconfigured on the device.
        """
        if selections is None:
            selections = ['sequential', 'uniform', 'exponential']
        if swap_targets is None:
            swap_targets = [0.00, 0.25, 0.50, 0.75, 0.95]

        swaptotal = utils.read_from_meminfo('SwapTotal')

        # Check for proper swap space configuration.
        # If the swap enable file says "0", swap.conf does not create swap.
        if os.path.exists(self.swap_enable_file):
            enable_size = utils.read_one_line(self.swap_enable_file)
        else:
            enable_size = "nonexistent" # implies nonzero
        if enable_size == "0":
            if swaptotal != 0:
                raise error.TestFail('The swap enable file said 0, but'
                                     ' swap was still enabled for %d.' %
                                     swaptotal)
            logging.info('Swap enable (0), swap disabled.')
        else:
            # Rather than parsing swap.conf logic to calculate a size,
            # use the value it writes to /sys/block/zram0/disksize.
            if not os.path.exists(self.swap_disksize_file):
                raise error.TestFail('The %s swap enable file should have'
                                     ' caused zram to load, but %s was'
                                     ' not found.' %
                                     (enable_size, self.swap_disksize_file))
            disksize = utils.read_one_line(self.swap_disksize_file)
            swaprequested = int(disksize) / 1000
            # Allow a 10% tolerance between the configured and actual sizes.
            if (swaptotal < swaprequested * 0.9 or
                swaptotal > swaprequested * 1.1):
                raise error.TestFail('Our swap of %d K is not within 10%%'
                                     ' of the %d K we requested.' %
                                     (swaptotal, swaprequested))
            logging.info('Swap enable (%s), requested %d, total %d',
                         enable_size, swaprequested, swaptotal)

        # We should try to autodetect this if we add other swap methods.
        swap_method = 'zram'

        for swap_target in swap_targets:
            logging.info('swap_target is %f', swap_target)
            temp_dir = tempfile.mkdtemp()
            try:
                # Reset swap space to make sure nothing leaks between runs.
                swap_reset = swap_reset_funcs[swap_method]
                swap_reset()
                self.run_single_test(compression_factor, num_procs, cycles,
                                     swap_target, switch_delay, temp_dir,
                                     selections)
            except socket.error:
                logging.debug('swap target %f failed; oom killer?', swap_target)
            finally:
                # Bug fix: cleanup used to be skipped when run_single_test
                # raised anything other than socket.error, leaking temp_dir.
                # 'finally' removes it unconditionally while still letting
                # unexpected exceptions propagate.
                shutil.rmtree(temp_dir)
Exemplo n.º 4
0
    def stop(self):
        """Restore the backed-up DNS settings and stop the mock DNS server.

        Raises:
            error.TestError: if the resolv.conf backup no longer exists
                (e.g. the connection manager restarted mid-test and removed
                it).
        """
        try:
            # Follow resolv.conf symlink.
            resolv = os.path.realpath(constants.RESOLV_CONF_FILE)
            # Grab path to the real file, do following work in that directory.
            resolv_dir = os.path.dirname(resolv)
            resolv_bak = os.path.join(resolv_dir, self._resolv_bak_file)
            # Restore the directory mode that was saved before we locked
            # the directory down, so the rename below is permitted.
            os.chmod(resolv_dir, self._resolv_dir_mode)
            if os.path.exists(resolv_bak):
                os.rename(resolv_bak, resolv)
            else:
                # This probably means shill restarted during the execution
                # of our test, and has cleaned up the .bak file we created.
                raise error.TestError('Backup file %s no longer exists!  '
                                      'Connection manager probably crashed '
                                      'during the test run.' %
                                      resolv_bak)

            # Block until DNS queries stop resolving to our local server.
            utils.poll_for_condition(
                lambda: self.__attempt_resolve('www.google.com.',
                                               '127.0.0.1',
                                               expected=False),
                utils.TimeoutError('Timed out waiting to revert DNS.  '
                                   'resolv.conf contents are: ' +
                                   utils.read_one_line(resolv)),
                timeout=10)
        finally:
            # Stop the DNS server.
            self._stopper.set()
            self._thread.join()
Exemplo n.º 5
0
    def run_once(self):
        """Verify that a readable ambient light sensor value is present.

        Searches every known sensor location/file combination and checks
        that the first reading found is a non-negative integer.

        Raises:
            error.TestNAError: the device is waived from having a sensor.
            error.TestFail: no sensor file found, or the reading is negative.
        """
        if self._waiver():
            raise error.TestNAError(
                "Light sensor not required for this device")

        # Use the path itself (None = not found) instead of a 0/1 flag.
        path = None
        for location in glob.glob(LIGHT_SENSOR_LOCATION):
            for fname in LIGHT_SENSOR_FILES:
                candidate = location + fname
                if os.path.exists(candidate):
                    path = candidate
                    break
                # Lazy %-formatting instead of string concatenation.
                logging.info("Did not find light sensor reading at %s",
                             candidate)

            if path is not None:
                break

        if path is None:
            raise error.TestFail("No light sensor reading found.")
        logging.info("Found light sensor at %s", path)

        val = utils.read_one_line(path)
        reading = int(val)
        if reading < 0:
            raise error.TestFail("Invalid light sensor reading (%s)" % val)
        logging.debug("light sensor reading is %d", reading)
Exemplo n.º 6
0
    def _parse_pid_stats(pid):
        """Parse process id stats to determine CPU utilization.

           from: https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt

           /proc/<pid>/schedstat
           ----------------
           schedstats also adds a new /proc/<pid>/schedstat file to include some
           of the same information on a per-process level.  There are three
           fields in this file correlating for that process to:
                1) time spent on the cpu
                2) time spent waiting on a runqueue
                3) # of timeslices run on this cpu

        Args:
            pid: integer, process id to gather stats for.

        Returns:
            tuple of (total_slices, idle_slices): schedstat running/waiting
            times scaled by the CFS bandwidth slice length.
            NOTE(review): schedstat times are in nanoseconds while the slice
            is converted from us to ms below — confirm the intended units.
        """
        idle_slices = 0
        total_slices = 0

        # CFS bandwidth slice is exposed in microseconds; convert to ms.
        fname = "/proc/sys/kernel/sched_cfs_bandwidth_slice_us"
        timeslice_ms = int(utils.read_one_line(fname).strip()) / 1000.

        with open(os.path.join('/proc', str(pid), 'schedstat')) as fd:
            # fields: [0] time on cpu, [1] time waiting on a runqueue.
            values = list(int(val) for val in fd.readline().strip().split())
            running_slices = values[0] / timeslice_ms
            idle_slices = values[1] / timeslice_ms
            total_slices = running_slices + idle_slices
        return (total_slices, idle_slices)
def reset_zram():
    """
    Resets zram, clearing all swap space.
    """
    swapoff_timeout = 60
    device = 'zram0'
    device_path = os.path.join('/dev', device)
    sysfs_dir = os.path.join('/sys/block', device)
    reset_file = os.path.join(sysfs_dir, 'reset')
    disksize_file = os.path.join(sysfs_dir, 'disksize')

    # Remember the configured size so it can be restored after the reset.
    original_disksize = utils.read_one_line(disksize_file)

    # swapoff is prone to hanging, especially after heavy swap usage, so
    # time out swapoff if it takes too long.
    status = utils.system('swapoff ' + device_path,
                          timeout=swapoff_timeout, ignore_status=True)
    if status != 0:
        raise error.TestFail('Could not reset zram - swapoff failed.')

    # Short pauses avoid "device busy" errors from the kernel.
    time.sleep(1)
    utils.write_one_line(reset_file, '1')
    time.sleep(1)
    utils.write_one_line(disksize_file, original_disksize)
    utils.system('mkswap ' + device_path)
    utils.system('swapon ' + device_path)
Exemplo n.º 8
0
    def initialize(self, dir = None, pages_requested = 20):
        """Reserve huge pages and make sure hugetlbfs is mounted.

        @param dir: optional hugetlbfs mount point; a directory under
                    self.tmpdir is created when omitted and hugetlbfs is
                    not already mounted.
        @param pages_requested: number of huge pages to reserve.
        """
        self.dir = None

        self.job.require_gcc()

        utils.check_kernel_ver("2.6.16")

        # Check huge page number
        nr_hugepages_file = '/proc/sys/vm/nr_hugepages'
        if not os.path.exists(nr_hugepages_file):
            raise error.TestNAError('Kernel does not support hugepages')

        # Ask for the requested count, then read back how many the kernel
        # actually managed to reserve.
        utils.write_one_line(nr_hugepages_file, str(pages_requested))
        pages_available = int(utils.read_one_line(nr_hugepages_file))

        if pages_available < pages_requested:
            raise error.TestError(
                '%d huge pages available, < %d pages requested' %
                (pages_available, pages_requested))

        # Check if hugetlbfs has been mounted
        if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
            if not dir:
                dir = os.path.join(self.tmpdir, 'hugetlbfs')
                os.makedirs(dir)
            utils.system('mount -t hugetlbfs none %s' % dir)
            self.dir = dir
    def _verify_storage_power_settings(self):
        """Check SATA link power management is configured for min_power.

        @returns 0 on success (or when on AC power), 1 otherwise.
        """
        # Power-saving policy only matters on battery.
        if self._on_ac:
            return 0

        expected_state = 'min_power'

        hosts = glob.glob('/sys/class/scsi_host/host*')
        if not hosts:
            logging.info('scsi_host paths not found')
            return 1

        for host in hosts:
            policy_file = os.path.join(host,
                                       'link_power_management_policy')
            if not os.path.exists(policy_file):
                logging.debug('path does not exist: %s', policy_file)
                continue

            policy = utils.read_one_line(policy_file)
            logging.debug('storage: path set to %s for %s', policy,
                          policy_file)
            # One host at the expected policy is sufficient.
            if policy == expected_state:
                return 0

        return 1
Exemplo n.º 10
0
def get_mem_nodes(container_name):
    """Return the set of memory nodes currently available to a container.

    Includes both exclusive and shared nodes; returns an empty set when
    the cpuset mems file does not exist.
    """
    mems_file = mems_path(container_name)
    if not os.path.exists(mems_file):
        return set()
    return rangelist_to_set(utils.read_one_line(mems_file))
Exemplo n.º 11
0
    def initialize(self, dir = None, pages_requested = 20):
        """Reserve huge pages and mount hugetlbfs if it is not mounted.

        @param dir: optional hugetlbfs mount point; when None and hugetlbfs
            is not already mounted, a directory is created under self.tmpdir.
        @param pages_requested: number of huge pages to request from the
            kernel.

        Raises:
            error.TestNAError: kernel lacks hugepage support.
            error.TestError: fewer huge pages available than requested.
        """
        self.dir = None

        self.job.require_gcc()

        utils.check_kernel_ver("2.6.16")

        # Check huge page number
        pages_available = 0
        if os.path.exists('/proc/sys/vm/nr_hugepages'):
            # Write the request, then read back how many pages the kernel
            # could actually reserve.
            utils.write_one_line('/proc/sys/vm/nr_hugepages',
                                          str(pages_requested))
            nr_hugepages = utils.read_one_line('/proc/sys/vm/nr_hugepages')
            pages_available = int(nr_hugepages)
        else:
            raise error.TestNAError('Kernel does not support hugepages')

        if pages_available < pages_requested:
            raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested))

        # Check if hugetlbfs has been mounted
        if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
            if not dir:
                dir = os.path.join(self.tmpdir, 'hugetlbfs')
                os.makedirs(dir)
            utils.system('mount -t hugetlbfs none %s' % dir)
            self.dir = dir
Exemplo n.º 12
0
def get_mem_nodes(container_name):
    """Return the set of memory nodes available to a container.

    Covers both exclusive and shared nodes; empty set when the cpuset
    mems file is absent.
    """
    # all mem nodes now available to a container, both exclusive & shared
    file_name = mems_path(container_name)
    if os.path.exists(file_name):
        return rangelist_to_set(utils.read_one_line(file_name))
    else:
        return set()
    def set_scaling_governor_mode(self, index, mode):
        """Set mode of CPU scaling governor on one CPU.

        @param index: CPU index starting from 0.

        @param mode: Mode of scaling governor, accept 'interactive' or
                     'performance'.

        @returns: The original mode.

        """
        # Reject anything outside the supported governor set up front.
        if mode not in self.SCALING_GOVERNOR_MODES:
            raise SystemFacadeNativeError('mode %s is invalid' % mode)

        governor_path = os.path.join(
                '/sys/devices/system/cpu/cpu%d' % index,
                'cpufreq/scaling_governor')
        if not os.path.exists(governor_path):
            raise SystemFacadeNativeError(
                    'scaling governor of CPU %d is not available' % index)

        # Capture the current governor before overwriting it so the caller
        # can restore it later.
        previous_mode = utils.read_one_line(governor_path)
        utils.open_write_close(governor_path, mode)

        return previous_mode
Exemplo n.º 14
0
def get_boot_numa():
    """Return the boot-time numa=fake= kernel option value, or ''.

    e.g. for numa=fake=nnn or numa=fake=nnnM this returns 'nnn' or
    'nnnM'; returns the empty string when the option is absent.
    """
    prefix = 'numa=fake='
    cmdline_args = utils.read_one_line('/proc/cmdline').split()
    matches = (arg[len(prefix):] for arg in cmdline_args
               if arg.startswith(prefix))
    return next(matches, '')
Exemplo n.º 15
0
def get_boot_numa():
    """Return the value of the boot-time numa=fake= kernel argument.

    Returns 'nnn' / 'nnnM' style values, or '' when the option is absent.
    """
    # get boot-time numa=fake=xyz option for current boot
    #   eg  numa=fake=nnn,  numa=fake=nnnM, or nothing
    label = 'numa=fake='
    for arg in utils.read_one_line('/proc/cmdline').split():
        if arg.startswith(label):
            return arg[len(label):]
    return ''
Exemplo n.º 16
0
    def __init__(self, logdir, method=sys_power.do_suspend,
                 throw=False, device_times=False, suspend_state=''):
        """
        Prepare environment for suspending.

        @param logdir: directory in which suspend logs are collected.
        @param method: callable that performs the actual suspend.
        @param throw: stored on self._throw; presumably makes failures
                      raise instead of being recorded — confirm against
                      the suspend loop elsewhere in this class.
        @param device_times: if True, collect per-device suspend timings
                             (enables pm_print_times below).
        @param suspend_state: Suspend state to enter into. It can be
                              'mem' or 'freeze' or an empty string. If
                              the suspend state is an empty string,
                              system suspends to the default pref.
        """
        self.disconnect_3G_time = 0
        self.successes = []
        self.failures = []
        self._logdir = logdir
        self._suspend = method
        self._throw = throw
        self._reset_pm_print_times = False
        self._restart_tlsdated = False
        self._log_file = None
        self._suspend_state = suspend_state
        # Only create the attribute when requested; its existence is used
        # as the flag below (hasattr check).
        if device_times:
            self.device_times = []

        # stop tlsdated, make sure we/hwclock have /dev/rtc for ourselves
        if utils.system_output('initctl status tlsdated').find('start') != -1:
            utils.system('initctl stop tlsdated')
            self._restart_tlsdated = True
            # give process's file descriptors time to asynchronously tear down
            time.sleep(0.1)

        # prime powerd_suspend RTC timestamp saving and make sure hwclock works
        utils.open_write_close(self.HWCLOCK_FILE, '')
        hwclock_output = utils.system_output('hwclock -r --debug --utc',
                                             ignore_status=True)
        if not re.search('Using.*/dev interface to.*clock', hwclock_output):
            raise error.TestError('hwclock cannot find rtc: ' + hwclock_output)

        # activate device suspend timing debug output
        if hasattr(self, 'device_times'):
            if not int(utils.read_one_line('/sys/power/pm_print_times')):
                self._set_pm_print_times(True)
                # Remember to turn pm_print_times back off at teardown.
                self._reset_pm_print_times = True

        # Shut down 3G to remove its variability from suspend time measurements
        flim = flimflam.FlimFlam()
        service = flim.FindCellularService(0)
        if service:
            logging.info('Found 3G interface, disconnecting.')
            start_time = time.time()
            (success, status) = flim.DisconnectService(
                    service=service, wait_timeout=60)
            if success:
                logging.info('3G disconnected successfully.')
                self.disconnect_3G_time = time.time() - start_time
            else:
                logging.error('Could not disconnect: %s.', status)
                # -1 marks a failed disconnect for downstream reporting.
                self.disconnect_3G_time = -1

        self._configure_suspend_state()
Exemplo n.º 17
0
    def get(self):
        """Get current keyboard brightness setting.

        Returns:
            float, percentage of keyboard brightness.
        """
        current = int(
            utils.read_one_line(os.path.join(self._path, 'brightness')))
        # Multiply by 100.0 (float) so the division yields the promised
        # float percentage even under Python 2 integer-division semantics.
        return (current * 100.0) / self._get_max()
        def product_matched(path):
            """Checks if the product field matches expected product name.

            @returns: True if the product name matches, False otherwise.

            """
            actual_product = utils.read_one_line(path)
            logging.debug('Read product at %s = %s', path, actual_product)
            return actual_product == product_name
Exemplo n.º 19
0
    def query_devices(self):
        """Enumerate USB devices in sysfs and record their power state.

        Appends a USBDevicePower entry to self.devices for every device
        that exposes vendor and product ids.

        @returns 1 when no USB power paths exist, None otherwise.
        """
        dirs_path = '/sys/bus/usb/devices/*/power'
        dirs = glob.glob(dirs_path)
        if not dirs:
            logging.info('USB power path not found')
            return 1

        for dirpath in dirs:
            vid_path = os.path.join(dirpath, '..', 'idVendor')
            pid_path = os.path.join(dirpath, '..', 'idProduct')
            if not os.path.exists(vid_path):
                logging.debug("No vid for USB @ %s", vid_path)
                continue
            # Bug fix: idProduct was read unguarded; a device exposing
            # idVendor but not idProduct would crash read_one_line below.
            if not os.path.exists(pid_path):
                logging.debug("No pid for USB @ %s", pid_path)
                continue
            vid = utils.read_one_line(vid_path)
            pid = utils.read_one_line(pid_path)
            whitelisted = self._is_whitelisted(vid, pid)
            self.devices.append(USBDevicePower(vid, pid, whitelisted, dirpath))
Exemplo n.º 20
0
    def run(self):
        """Start the mock DNS server and redirect all queries to it.

        Backs up the real resolv.conf, points it at 127.0.0.1, and locks
        down its directory so the connection manager cannot rewrite it.
        """
        self._thread.start()
        # Redirect all DNS queries to the mock DNS server.
        try:
            # Follow resolv.conf symlink.
            resolv = os.path.realpath(constants.RESOLV_CONF_FILE)
            # Grab path to the real file, do following work in that directory.
            resolv_dir = os.path.dirname(resolv)
            resolv_bak = os.path.join(resolv_dir, self._resolv_bak_file)
            resolv_contents = 'nameserver 127.0.0.1'
            # Test to make sure the current resolv.conf isn't already our
            # specially modified version.  If this is the case, we have
            # probably been interrupted while in the middle of this test
            # in a previous run.  The last thing we want to do at this point
            # is to overwrite a legitimate backup.
            if (utils.read_one_line(resolv) == resolv_contents and
                os.path.exists(resolv_bak)):
                logging.error('Current resolv.conf is setup for our local '
                              'server, and a backup already exists!  '
                              'Skipping the backup step.')
            else:
                # Back up the current resolv.conf.
                os.rename(resolv, resolv_bak)
            # To stop flimflam from editing resolv.conf while we're working
            # with it, we want to make the directory -r-xr-xr-x.  Open an
            # fd to the file first, so that we'll retain the ability to
            # alter it.  'with' guarantees the fd is closed even if a write
            # fails (the original leaked it in that case).
            with open(resolv, 'w') as resolv_fd:
                self._resolv_dir_mode = os.stat(resolv_dir).st_mode
                os.chmod(resolv_dir, (stat.S_IRUSR | stat.S_IXUSR |
                                      stat.S_IRGRP | stat.S_IXGRP |
                                      stat.S_IROTH | stat.S_IXOTH))
                resolv_fd.write(resolv_contents)
            assert utils.read_one_line(resolv) == resolv_contents
        except Exception:
            # Bug fix: 'raise e' discarded the original traceback on
            # Python 2; logging.exception + bare raise preserves it.
            logging.exception('Failed to redirect DNS to the mock server')
            raise

        utils.poll_for_condition(
            lambda: self.__attempt_resolve('www.google.com.', '127.0.0.1'),
            utils.TimeoutError('Timed out waiting for DNS changes.'),
            timeout=10)
Exemplo n.º 21
0
    def autosuspend(self):
        """Determine current value of USB autosuspend for device.

        @returns True when the power control file reads 'auto', False
            otherwise (including when the file is missing).
        """
        control_file = os.path.join(self._path, 'control')
        if not os.path.exists(control_file):
            # Bug fix: the original logged the builtin 'dir' function here
            # instead of the device's sysfs path.
            logging.info('USB: power control file not found for %s',
                         self._path)
            return False

        out = utils.read_one_line(control_file)
        logging.debug('USB: control set to %s for %s', out, control_file)
        return (out == 'auto')
Exemplo n.º 22
0
def create_container_via_memcg(name, parent, bytes, cpus):
    """Create a container by writing memcg cgroup control files directly.

    @param name: container name; a cgroup directory is created for it.
    @param parent: parent container whose memory nodes are inherited.
    @param bytes: memory limit in bytes.
    @param cpus: iterable of CPU ids assigned to the container.
    """
    os.mkdir(full_path(name))
    # Inherit the parent's memory nodes before applying our own limits.
    parent_nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), parent_nodes)
    utils.write_one_line(memory_path(name) + '.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name),
                         ','.join(str(cpu) for cpu in cpus))
    logging.debug('Created container %s directly via memcg,'
                  ' has %d cpus and %s bytes',
                  name, len(cpus), utils.human_format(container_bytes(name)))
    def is_tracing(self):
        """Is tracing on?

        Returns:
            True if tracing enabled and at least one event is enabled.
        """
        fname = os.path.join(self._TRACE_ROOT, 'tracing_on')
        enabled = int(utils.read_one_line(fname).strip())
        # Idiom: return the boolean expression directly instead of the
        # if/return True/return False ladder; empty _events is falsy.
        return enabled == 1 and bool(self._events)
    def _verify_graphics_power_settings(self):
        """Verify that power-saving for graphics are configured properly.

        Returns:
            0 if no errors, otherwise the number of errors that occurred.
        """
        errors = 0

        # Compare static i915 module parameters against expected values.
        if self._cpu_type in GFX_CHECKS:
            checks = GFX_CHECKS[self._cpu_type]
            for param_name in checks:
                param_path = '/sys/module/i915/parameters/%s' % param_name
                if not os.path.exists(param_path):
                    errors += 1
                    logging.error('Error(%d), %s not found', errors,
                                  param_path)
                else:
                    out = utils.read_one_line(param_path)
                    logging.debug('Graphics: %s = %s', param_path, out)
                    value = int(out)
                    if value != checks[param_name]:
                        errors += 1
                        logging.error('Error(%d), %s = %d but should be %d',
                                      errors, param_path, value,
                                      checks[param_name])
        errors += self._verify_lvds_downclock_mode_added()

        # On systems which support RC6 (non atom), check that we get into rc6;
        # idle before doing so, and retry every second for 20 seconds.
        if self._cpu_type == 'Non-Atom':
            # Loop-invariant path hoisted out of the retry loop.
            param_path = "/sys/kernel/debug/dri/0/i915_drpc_info"
            tries = 0
            found = False
            while not found and tries < 20:
                time.sleep(1)
                if not os.path.exists(param_path):
                    logging.error('Error(%d), %s not found', errors,
                                  param_path)
                    break
                # Bug fix: 'with' guarantees the debugfs handle is closed
                # even if reading raises (the original leaked it then).
                with open(param_path, "r") as drpc_info_file:
                    for line in drpc_info_file:
                        match = re.search(r'Current RC state: (.*)', line)
                        if match:
                            found = match.group(1) != 'on'
                            break

                tries += 1

            if not found:
                errors += 1
                logging.error('Error(%d), did not see the GPU in RC6', errors)

        return errors
Exemplo n.º 25
0
def create_container_via_memcg(name, parent, bytes, cpus):
    """Create a container by writing memcg cgroup files directly.

    @param name: container name; a cgroup directory is created for it.
    @param parent: parent container whose memory nodes are inherited.
    @param bytes: memory limit in bytes.
    @param cpus: iterable of CPU ids assigned to the container.
    """
    # create container via direct memcg cgroup writes
    os.mkdir(full_path(name))
    nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), nodes)  # inherit parent's nodes
    utils.write_one_line(memory_path(name) + '.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name), ','.join(map(str, cpus)))
    logging.debug(
        'Created container %s directly via memcg,'
        ' has %d cpus and %s bytes', name, len(cpus),
        utils.human_format(container_bytes(name)))
Exemplo n.º 26
0
    def _cg_set_quota(self, quota=-1):
        """Set CPU quota that can be used for cgroup

        Default of -1 will disable throttling

        @param quota: cfs quota in microseconds; -1 disables throttling.

        Raises:
            error.TestFail: if the read-back quota does not match.
        """
        utils.write_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.cfs_quota_us"), quota)
        rd_quota = utils.read_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.cfs_quota_us"))
        # Bug fixes: read_one_line returns a string, so the old
        # 'rd_quota != quota' (str vs int) was always True; and the
        # TestFail was constructed but never raised.
        if int(rd_quota) != int(quota):
            raise error.TestFail("Setting cpu quota to %d" % int(quota))
Exemplo n.º 27
0
    def _get_max(self):
        """Get maximum absolute value of keyboard brightness.

        Returns:
            integer, maximum value of keyboard brightness
        """
        # Lazily read the sysfs value once and cache it on the instance.
        if self._max is None:
            max_path = os.path.join(self._path, 'max_brightness')
            self._max = int(utils.read_one_line(max_path))
        return self._max
Exemplo n.º 28
0
def container_bytes(name):
    """Return the memory capacity of a container in bytes.

    Fake-NUMA containers derive capacity from their memory nodes; memcg
    containers walk up the hierarchy to the nearest explicit
    limit_in_bytes, falling back to root_container_bytes at SUPER_ROOT.
    """
    if fake_numa_containers:
        return nodes_avail_mbytes(get_mem_nodes(name)) << 20
    else:
        while True:
            file = memory_path(name) + '.limit_in_bytes'
            limit = int(utils.read_one_line(file))
            # Limits >= NO_LIMIT mean "unlimited here"; consult the parent.
            if limit < NO_LIMIT:
                return limit
            if name == SUPER_ROOT:
                return root_container_bytes
            name = os.path.dirname(name)
Exemplo n.º 29
0
def container_bytes(name):
    """Effective byte limit of container |name|.

    Searches upward through ancestor containers when the container itself
    carries no explicit limit.
    """
    if fake_numa_containers:
        mbytes = nodes_avail_mbytes(get_mem_nodes(name))
        return mbytes << 20
    current = name
    while True:
        limit_path = memory_path(current) + '.limit_in_bytes'
        current_limit = int(utils.read_one_line(limit_path))
        if current_limit < NO_LIMIT:
            return current_limit
        if current == SUPER_ROOT:
            return root_container_bytes
        current = os.path.dirname(current)
Exemplo n.º 30
0
    def run_once(self, test_quota=True, test_shares=True):
        """Exercise cgroup CPU bandwidth throttling.

        @param test_quota: if True, run the cfs_quota_us throttling test.
        @param test_shares: if True, run the cpu.shares throttling test.

        @raises error.TestError: if the cgroup directory does not exist.
        @raises error.TestFail: if throttling statistics are out of bounds.
        """
        errors = 0
        if not os.path.exists(self._CG_CRB_DIR):
            raise error.TestError("Locating cgroup dir %s" % self._CG_CRB_DIR)

        # Save the original settings so they can be restored afterwards.
        self._quota = utils.read_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.cfs_quota_us"))
        self._shares = utils.read_one_line(
            os.path.join(self._CG_CRB_DIR, "cpu.shares"))
        if test_quota:
            self._cg_disable_throttling()
            quota_stats = self._cg_test_quota()
            errors += self._check_stats('quota', quota_stats, 0.9)

        if test_shares:
            self._cg_disable_throttling()
            shares_stats = self._cg_test_shares()
            errors += self._check_stats('shares', shares_stats, 0.6)

        if errors:
            # Bug fix: the exception was previously constructed but never
            # raised, so a failing test reported success.
            raise error.TestFail("Cgroup bandwidth throttling not working")
    def readline(self, logdir):
        """Reads one line from the log.

        @param logdir: The log directory.
        @return A line from the log, or the empty string if the log doesn't
            exist.
        """
        log_path = os.path.join(logdir, self.logf)
        if not os.path.exists(log_path):
            return ""
        return utils.read_one_line(log_path)
Exemplo n.º 32
0
    def end_reboot_and_verify(self,
                              expected_when,
                              expected_id,
                              subdir,
                              type='src',
                              patches=None):
        """ Check the passed kernel identifier against the command line
            and the running kernel, abort the job on missmatch.

        @param expected_when: boot mark (the IDENT=<n> value) expected on the
            kernel command line.
        @param expected_id: expected kernel identity string.
        @param subdir: results subdirectory for status records.
        @param type: 'src' requires an exact identity match; 'rpm' requires
            the running identity to start with expected_id + '::'.
        @param patches: optional list of patches, forwarded to end_reboot().

        @raises error.JobError: if the booted kernel is not the expected one.
        """
        # Bug fix: avoid a shared mutable default argument.
        if patches is None:
            patches = []

        logging.info(
            "POST BOOT: checking booted kernel "
            "mark=%d identity='%s' type='%s'", expected_when, expected_id,
            type)

        running_id = utils.running_os_ident()

        cmdline = utils.read_one_line("/proc/cmdline")

        # The boot mark is embedded on the command line as IDENT=<number>.
        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or type == 'rpm'
                and not running_id.startswith(expected_id + '::')):
            logging.error("Kernel identifier mismatch")
            bad = True
        if expected_when != cmdline_when:
            logging.error("Kernel command line mismatch")
            bad = True

        if bad:
            logging.error("   Expected Ident: " + expected_id)
            logging.error("    Running Ident: " + running_id)
            logging.error("    Expected Mark: %d", expected_when)
            logging.error("Command Line Mark: %d", cmdline_when)
            logging.error("     Command Line: " + cmdline)

            self._record_reboot_failure(subdir,
                                        "reboot.verify",
                                        "boot failure",
                                        running_id=running_id)
            raise error.JobError("Reboot returned with the wrong kernel")

        self.record('GOOD', subdir, 'reboot.verify',
                    utils.running_os_full_version())
        self.end_reboot(subdir, expected_id, patches, running_id=running_id)
Exemplo n.º 33
0
    def _cg_set_shares(self, shares=None):
        """Set CPU shares that can be used for cgroup

        @param shares: number of CPU shares to assign.  Default of None reads
            total shares for cpu group and assigns that so there will be no
            throttling.

        @raises error.TestFail: if the value read back does not match the
            value written.
        """
        if shares is None:
            shares = self._cg_total_shares()
        shares_path = os.path.join(self._CG_CRB_DIR, "cpu.shares")
        utils.write_one_line(shares_path, shares)
        # The file reads back as a string; compare numerically so a str
        # |shares| (e.g. one obtained via read_one_line) still matches.
        rd_shares = utils.read_one_line(shares_path)
        if int(rd_shares) != int(shares):
            # Bug fix: the exception was previously constructed but never
            # raised; also "%d" would raise TypeError for a str |shares|.
            raise error.TestFail("Setting cpu shares to %d" % int(shares))
    def isRemovable(self, device):
        """
        Check if the block device is removable.

        Args:
            @param device: string, name of the block device.

        Returns:
            bool, True if device is removable.
        """

        # sysfs exposes a per-device 'removable' flag containing 0 or 1.
        flag_path = os.path.join('/sys', 'block', device, 'removable')
        flag = int(utils.read_one_line(flag_path))
        return flag == 1
Exemplo n.º 35
0
 def _configure_suspend_state(self):
     """Configure the suspend state as requested.

     If self._suspend_state is set, verify that the kernel advertises it
     in /sys/power/state and, when it differs from the currently
     configured sleep state, install a powerd pref change to select it.

     @raises error.TestNAError: if the requested state is unavailable.
     """
     if self._suspend_state:
         available_suspend_states = utils.read_one_line('/sys/power/state')
         # NOTE(review): this is a substring check against the whole
         # space-separated state line, not an exact token match.
         if self._suspend_state not in available_suspend_states:
             raise error.TestNAError('Invalid suspend state: ' +
                                     self._suspend_state)
         # Check the current state. If it is same as the one requested,
         # we don't want to call PowerPrefChanger(restarts powerd).
         if self._suspend_state == power_utils.get_sleep_state():
             return
         should_freeze = '1' if self._suspend_state == 'freeze' else '0'
         new_prefs = {self._SUSPEND_STATE_PREF_FILE: should_freeze}
         self._power_pref_changer = power_utils.PowerPrefChanger(new_prefs)
Exemplo n.º 36
0
    def _init_cmdline(self, extra_copy_cmdline):
        """
        Initialize default cmdline for booted kernels in this job.

        @param extra_copy_cmdline: extra /proc/cmdline parameter names to
            carry over, in addition to 'console'.
        """
        # Parameter names whose values are copied to installed kernels.
        params_to_copy = set(["console"])
        if extra_copy_cmdline is not None:
            params_to_copy.update(extra_copy_cmdline)

        # extract console= and other args from cmdline and add them into the
        # base args that we use for all kernels we install
        cmdline = utils.read_one_line("/proc/cmdline")
        kernel_args = []
        for karg in cmdline.split():
            for param in params_to_copy:
                if not karg.startswith(param):
                    continue
                # Accept 'param' alone or 'param=value', never a prefix.
                if len(param) == len(karg) or karg[len(param)] == "=":
                    kernel_args.append(karg)
        self.config_set("boot.default_args", " ".join(kernel_args))
Exemplo n.º 37
0
    def end_reboot_and_verify(self, expected_when, expected_id, subdir,
                              type='src', patches=None):
        """ Check the passed kernel identifier against the command line
            and the running kernel, abort the job on missmatch.

        @param expected_when: boot mark (the IDENT=<n> value) expected on the
            kernel command line.
        @param expected_id: expected kernel identity string.
        @param subdir: results subdirectory for status records.
        @param type: 'src' requires an exact identity match; 'rpm' requires
            the running identity to start with expected_id + '::'.
        @param patches: optional list of patches, forwarded to end_reboot().

        @raises error.JobError: if the booted kernel is not the expected one.
        """
        # Bug fix: avoid a shared mutable default argument.
        if patches is None:
            patches = []

        logging.info("POST BOOT: checking booted kernel "
                     "mark=%d identity='%s' type='%s'",
                     expected_when, expected_id, type)

        running_id = utils.running_os_ident()

        cmdline = utils.read_one_line("/proc/cmdline")

        # The boot mark is embedded on the command line as IDENT=<number>.
        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or
            type == 'rpm' and
            not running_id.startswith(expected_id + '::')):
            logging.error("Kernel identifier mismatch")
            bad = True
        if expected_when != cmdline_when:
            logging.error("Kernel command line mismatch")
            bad = True

        if bad:
            logging.error("   Expected Ident: " + expected_id)
            logging.error("    Running Ident: " + running_id)
            logging.error("    Expected Mark: %d", expected_when)
            logging.error("Command Line Mark: %d", cmdline_when)
            logging.error("     Command Line: " + cmdline)

            self._record_reboot_failure(subdir, "reboot.verify", "boot failure",
                                        running_id=running_id)
            raise error.JobError("Reboot returned with the wrong kernel")

        self.record('GOOD', subdir, 'reboot.verify',
                    utils.running_os_full_version())
        self.end_reboot(subdir, expected_id, patches, running_id=running_id)
Exemplo n.º 38
0
    def __init__(self, control, options, drop_caches=True,
                 extra_copy_cmdline=None):
        """
        Prepare a client side job object.

        @param control: The control file (pathname of).
        @param options: an object which includes:
                jobtag: The job tag string (eg "default").
                cont: If this is the continuation of this job.
                harness_type: An alternative server harness.  [None]
                use_external_logging: If true, the enable_external_logging
                          method will be called during construction.  [False]
        @param drop_caches: If true, utils.drop_caches() is called before and
                between all tests.  [True]
        @param extra_copy_cmdline: list of additional /proc/cmdline arguments to
                copy from the running kernel to all the installed kernels with
                this job
        """
        # Standard autotest directory layout, rooted at $AUTODIR (which must
        # be set in the environment).
        self.autodir = os.environ['AUTODIR']
        self.bindir = os.path.join(self.autodir, 'bin')
        self.libdir = os.path.join(self.autodir, 'lib')
        self.testdir = os.path.join(self.autodir, 'tests')
        self.configdir = os.path.join(self.autodir, 'config')
        self.site_testdir = os.path.join(self.autodir, 'site_tests')
        self.profdir = os.path.join(self.autodir, 'profilers')
        self.tmpdir = os.path.join(self.autodir, 'tmp')
        self.toolsdir = os.path.join(self.autodir, 'tools')
        self.resultdir = os.path.join(self.autodir, 'results', options.tag)

        if not os.path.exists(self.resultdir):
            os.makedirs(self.resultdir)

        # A fresh (non-continuation) run starts from a clean results dir.
        if not options.cont:
            self._cleanup_results_dir()

        logging_manager.configure_logging(
                client_logging_config.ClientLoggingConfig(),
                results_dir=self.resultdir,
                verbose=options.verbose)
        logging.info('Writing results to %s', self.resultdir)

        # Caches are only dropped between jobs/tests, never between
        # iterations, by default.
        self.drop_caches_between_iterations = False
        self.drop_caches = drop_caches
        if self.drop_caches:
            logging.debug("Dropping caches")
            utils.drop_caches()

        # Job state is persisted next to the control file so continuations
        # can pick up where a previous run left off.
        self.control = os.path.realpath(control)
        self._is_continuation = options.cont
        self.state_file = self.control + '.state'
        self.current_step_ancestry = []
        self.next_step_index = 0
        self.testtag = ''
        self._test_tag_prefix = ''

        self._load_state()
        self.pkgmgr = packages.PackageManager(
            self.autodir, run_function_dargs={'timeout':3600})
        self.pkgdir = os.path.join(self.autodir, 'packages')
        self.run_test_cleanup = self.get_state("__run_test_cleanup",
                                                default=True)

        self.sysinfo = sysinfo.sysinfo(self.resultdir)
        self._load_sysinfo_state()

        self.last_boot_tag = self.get_state("__last_boot_tag", default=None)
        self.tag = self.get_state("__job_tag", default=None)

        if not options.cont:
            """
            Don't cleanup the tmp dir (which contains the lockfile)
            in the constructor, this would be a problem for multiple
            jobs starting at the same time on the same client. Instead
            do the delete at the server side. We simply create the tmp
            directory here if it does not already exist.
            """
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

            if not os.path.exists(self.pkgdir):
                os.mkdir(self.pkgdir)

            results = os.path.join(self.autodir, 'results')
            if not os.path.exists(results):
                os.mkdir(results)

            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            os.makedirs(os.path.join(self.resultdir, 'analysis'))

            # Keep a copy of the control file alongside the results.
            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))


        self.control = control
        self.jobtag = options.tag
        self.log_filename = self.DEFAULT_LOG_FILENAME

        # Redirect stdout/stderr through the logging manager for the rest of
        # the job's lifetime.
        self.logging = logging_manager.get_logging_manager(
                manage_stdout_and_stderr=True, redirect_fds=True)
        self.logging.start_logging()

        self._init_group_level()

        self.config = config.config(self)
        self.harness = harness.select(options.harness, self)
        self.profilers = profilers.profilers(self)

        # NOTE(review): bare except silently ignores any boottool setup
        # failure, leaving self.bootloader unset.
        try:
            tool = self.config_get('boottool.executable')
            self.bootloader = boottool.boottool(tool)
        except:
            pass

        self.sysinfo.log_per_reboot_data()

        if not options.cont:
            self.record('START', None, None)
            self._increment_group_level()

        self.harness.run_start()

        if options.log:
            self.enable_external_logging()

        # load the max disk usage rate - default to no monitoring
        self.max_disk_usage_rate = self.get_state('__monitor_disk', default=0.0)

        copy_cmdline = set(['console'])
        if extra_copy_cmdline is not None:
            copy_cmdline.update(extra_copy_cmdline)

        # extract console= and other args from cmdline and add them into the
        # base args that we use for all kernels we install
        cmdline = utils.read_one_line('/proc/cmdline')
        kernel_args = []
        for karg in cmdline.split():
            for param in copy_cmdline:
                if karg.startswith(param) and \
                    (len(param) == len(karg) or karg[len(param)] == '='):
                    kernel_args.append(karg)
        self.config_set('boot.default_args', ' '.join(kernel_args))
Exemplo n.º 39
0
 def readline(self, logdir):
     """Return one line from this log, or '' when the log file is absent.

     @param logdir: directory that contains the log file.
     """
     target = os.path.join(logdir, self.logf)
     if not os.path.exists(target):
         return ""
     return utils.read_one_line(target)
Exemplo n.º 40
0
def get_cpus(container_name):
    """Return the set of cpu ids assigned to |container_name|.

    Returns an empty set when the container has no cpus file.
    """
    path = cpus_path(container_name)
    if not os.path.exists(path):
        return set()
    return rangelist_to_set(utils.read_one_line(path))
Exemplo n.º 41
0
def my_container_name():
    """Name of the current process's container under /dev/cpuset|/dev/cgroup.

    Returns '' for the root container.
    """
    cpuset_line = utils.read_one_line('/proc/%i/cpuset' % os.getpid())
    # The kernel reports an absolute path; drop the leading '/'.
    return cpuset_line[1:]