def __init__(self, job):
    """
    job
            The job object for this job
    """
    # Root of the autotest installation, provided by the environment.
    self.autodir = os.path.abspath(os.environ['AUTODIR'])
    self.setup(job)

    # Copy the control file into autodir unless it already lives there.
    src = job.control_get()
    dest = os.path.join(self.autodir, 'control')
    if os.path.abspath(src) != os.path.abspath(dest):
        shutil.copyfile(src, dest)
        job.control_set(dest)

    logging.info('Symlinking init scripts')
    rc = os.path.join(self.autodir, 'tools/autotest')
    # see if system supports event.d versus inittab
    if os.path.exists('/etc/event.d'):
        # NB: assuming current runlevel is default
        initdefault = utils.system_output('/sbin/runlevel').split()[1]
    elif os.path.exists('/etc/inittab'):
        initdefault = utils.system_output('grep :initdefault: /etc/inittab')
        initdefault = initdefault.split(':')[1]
    else:
        initdefault = '2'

    try:
        utils.system('ln -sf %s /etc/init.d/autotest' % rc)
        utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % (rc, initdefault))
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        logging.warning("Linking init scripts failed")
def __init__(self, job):
    """
    job
            The job object for this job
    """
    # Root of the autotest installation, provided by the environment.
    self.autodir = os.path.abspath(os.environ['AUTODIR'])
    self.setup(job)

    # Copy the control file into autodir unless it already lives there.
    src = job.control_get()
    dest = os.path.join(self.autodir, 'control')
    if os.path.abspath(src) != os.path.abspath(dest):
        shutil.copyfile(src, dest)
        job.control_set(dest)

    logging.info('Symlinking init scripts')
    rc = os.path.join(self.autodir, 'tools/autotest')
    # see if system supports event.d versus inittab
    if os.path.exists('/etc/event.d'):
        # NB: assuming current runlevel is default
        initdefault = utils.system_output('/sbin/runlevel').split()[1]
    elif os.path.exists('/etc/inittab'):
        initdefault = utils.system_output(
                'grep :initdefault: /etc/inittab')
        initdefault = initdefault.split(':')[1]
    else:
        initdefault = '2'

    try:
        utils.system('ln -sf %s /etc/init.d/autotest' % rc)
        utils.system('ln -sf %s /etc/rc%s.d/S99autotest' %
                     (rc, initdefault))
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        logging.warning("Linking init scripts failed")
def check_audio_stream_at_selected_device(device_name, device_type):
    """Checks the audio output at expected node

    @param device_name: Audio output device name, Ex: kbl_r5514_5663_max: :0,1
    @param device_type: Audio output device type, Ex: INTERNAL_SPEAKER

    @raises error.TestFail: if audio is not routed through the expected node.
    @raises error.TestError: if the device name is malformed or the device
            state is indeterminate.
    """
    if device_type == 'BLUETOOTH':
        output_device_output = utils.system_output(OUTPUT_DEVICE_CMD).strip()
        bt_device = output_device_output.split('Output dev:')[1].strip()
        if bt_device != device_name:
            raise error.TestFail("Audio is not routing through expected node")
        logging.info('Audio is routing through %s', bt_device)
    else:
        # Fix: pre-initialize so a failed regex match falls through to the
        # TestError below instead of raising NameError.
        card_num = None
        device_num = None
        card_device_search = re.search(r':(\d),(\d)', device_name)
        if card_device_search:
            card_num = card_device_search.group(1)
            device_num = card_device_search.group(2)
            logging.debug("Sound card number is %s", card_num)
            logging.debug("Device number is %s", device_num)
        if card_num is None or device_num is None:
            raise error.TestError(
                    "Audio device name is not in expected format")
        device_status_output = utils.system_output(AUDIO_DEVICE_STATUS_CMD %
                                                   (card_num, device_num))
        logging.debug("Selected output device status is %s",
                      device_status_output)
        if 'RUNNING' in device_status_output:
            logging.info("Audio is routing through expected node!")
        elif 'closed' in device_status_output:
            raise error.TestFail("Audio is not routing through expected audio "
                                 "node!")
        else:
            raise error.TestError("Audio routing error! Device may be "
                                  "preparing")
def _test_server_unavailable(self):
    """
    metrics_daemon should not crash when the server is unavailable.
    """
    self._create_one_sample()
    daemon_cmd = ('metrics_daemon -uploader_test '
                  '-server="http://localhost:12345"')
    utils.system_output(daemon_cmd, retain_output=True)
def xfs_tunables(dev): """Call xfs_grow -n to get filesystem tunables.""" # Have to mount the filesystem to call xfs_grow. tmp_mount_dir = tempfile.mkdtemp() cmd = 'mount %s %s' % (dev, tmp_mount_dir) utils.system_output(cmd) xfs_growfs = os.path.join(os.environ['AUTODIR'], 'tools', 'xfs_growfs') cmd = '%s -n %s' % (xfs_growfs, dev) try: out = utils.system_output(cmd) finally: # Clean. cmd = 'umount %s' % dev utils.system_output(cmd, ignore_status=True) os.rmdir(tmp_mount_dir) ## The output format is given in report_info (xfs_growfs.c) ## "meta-data=%-22s isize=%-6u agcount=%u, agsize=%u blks\n" ## " =%-22s sectsz=%-5u attr=%u\n" ## "data =%-22s bsize=%-6u blocks=%llu, imaxpct=%u\n" ## " =%-22s sunit=%-6u swidth=%u blks\n" ## "naming =version %-14u bsize=%-6u\n" ## "log =%-22s bsize=%-6u blocks=%u, version=%u\n" ## " =%-22s sectsz=%-5u sunit=%u blks, lazy-count=%u\n" ## "realtime =%-22s extsz=%-6u blocks=%llu, rtextents=%llu\n" tune2fs_dict = {} # Flag for extracting naming version number keep_version = False for line in out.splitlines(): m = re.search('^([-\w]+)', line) if m: main_tag = m.group(1) pairs = line.split() for pair in pairs: # naming: version needs special treatment if pair == '=version': # 1 means the next pair is the version number we want keep_version = True continue if keep_version: tune2fs_dict['naming: version'] = pair # Resets the flag since we have logged the version keep_version = False continue # Ignores the strings without '=', such as 'blks' if '=' not in pair: continue key, value = pair.split('=') tagged_key = '%s: %s' % (main_tag, key) if re.match('[0-9]+', value): tune2fs_dict[tagged_key] = int(value.rstrip(',')) else: tune2fs_dict[tagged_key] = value.rstrip(',') return tune2fs_dict
def log_test_keyvals(self, test_sysinfodir):
    """Generate keyval for the sysinfo.

    Collects keyval entries to be written in the test keyval.

    @param test_sysinfodir: The test's system info directory.
    @return: the updated keyval dict.
    """
    keyval = super(site_sysinfo, self).log_test_keyvals(test_sysinfodir)

    lsb_lines = utils.system_output(
            "cat /etc/lsb-release",
            ignore_status=True).splitlines()
    # Fix: split on the FIRST '=' only; values such as
    # CHROMEOS_RELEASE_DESCRIPTION may themselves contain '=' and would
    # make dict() raise on a 3-element sequence.
    lsb_dict = dict(item.split("=", 1) for item in lsb_lines)

    for lsb_key in lsb_dict.keys():
        # Special handling for build number
        if lsb_key == "CHROMEOS_RELEASE_DESCRIPTION":
            keyval["CHROMEOS_BUILD"] = (
                    lsb_dict[lsb_key].rstrip(")").split(" ")[3])
        keyval[lsb_key] = lsb_dict[lsb_key]

    # Get the hwid (hardware ID), if applicable.
    try:
        keyval["hwid"] = utils.system_output('crossystem hwid')
    except error.CmdError:
        # The hwid may not be available (e.g, when running on a VM).
        # If the output of 'crossystem mainfw_type' is 'nonchrome', then
        # we expect the hwid to not be avilable, and we can proceed in this
        # case.  Otherwise, the hwid is missing unexpectedly.
        mainfw_type = utils.system_output('crossystem mainfw_type')
        if mainfw_type == 'nonchrome':
            logging.info(
                'HWID not available; not logging it as a test keyval.')
        else:
            logging.exception('HWID expected but could not be identified; '
                              'output of "crossystem mainfw_type" is "%s"',
                              mainfw_type)
            raise

    # Get the chrome version and milestone numbers.
    keyval["CHROME_VERSION"], keyval["MILESTONE"] = (
            self._get_chrome_version())

    # TODO(kinaba): crbug.com/707448 Import at the head of this file.
    # Currently a server-side script server/server_job.py is indirectly
    # importing this file, so we cannot globaly import cryptohome that
    # has dependency to a client-only library.
    from autotest_lib.client.cros import cryptohome
    # Get the dictionary attack counter.
    keyval["TPM_DICTIONARY_ATTACK_COUNTER"] = (
            cryptohome.get_tpm_more_status().get(
                'dictionary_attack_counter',
                'Failed to query cryptohome'))

    # Return the updated keyvals.
    return keyval
def get_vmlinux():
    """Return the full path to vmlinux

    Ahem. This is crap. Pray harder. Bad Martin.

    @return: path to vmlinux for the running kernel, or None if not found.
    """
    # Hoist the 'uname -r' call: the original spawned the subprocess once
    # per candidate path.
    release = utils.system_output('uname -r')
    for vmlinux in ('/boot/vmlinux-%s' % release,
                    '/lib/modules/%s/build/vmlinux' % release):
        if os.path.isfile(vmlinux):
            return vmlinux
    return None
def get_systemmap():
    """Return the full path to System.map

    Ahem. This is crap. Pray harder. Bad Martin.

    @return: path to System.map for the running kernel, or None if not found.
    """
    # Hoist 'uname -r' (was called once per candidate) and avoid naming a
    # local 'map', which shadows the builtin.
    release = utils.system_output('uname -r')
    for system_map in ('/boot/System.map-%s' % release,
                       '/lib/modules/%s/build/System.map' % release):
        if os.path.isfile(system_map):
            return system_map
    return None
def get_vmlinux():
    """Return the full path to vmlinux

    Ahem. This is crap. Pray harder. Bad Martin.

    @return: path to vmlinux for the running kernel, or None if not found.
    """
    # Hoist the "uname -r" call: the original spawned the subprocess once
    # per candidate path.
    release = utils.system_output("uname -r")
    for vmlinux in ("/boot/vmlinux-%s" % release,
                    "/lib/modules/%s/build/vmlinux" % release):
        if os.path.isfile(vmlinux):
            return vmlinux
    return None
def restart_job(service_name):
    """
    Restarts an upstart job if it's running.
    If it's not running, start it.

    @param service_name: name of service
    """
    action = 'restart' if is_running(service_name) else 'start'
    utils.system_output('%s %s' % (action, service_name))
def get_systemmap():
    """Return the full path to System.map

    Ahem. This is crap. Pray harder. Bad Martin.

    @return: path to System.map for the running kernel, or None if not found.
    """
    # Hoist "uname -r" (was called once per candidate) and avoid naming a
    # local "map", which shadows the builtin.
    release = utils.system_output("uname -r")
    for system_map in ("/boot/System.map-%s" % release,
                       "/lib/modules/%s/build/System.map" % release):
        if os.path.isfile(system_map):
            return system_map
    return None
def _test_simple_upload(self):
    """One sample should produce exactly one message at the fake server."""
    self._create_one_sample()
    self.server = FakeServer()
    self.server.Start()

    daemon_cmd = ('metrics_daemon -uploader_test '
                  '-server="%s"' % SERVER_ADDRESS)
    utils.system_output(daemon_cmd, timeout=10, retain_output=True)

    self.server.Stop()

    if len(self.server.messages) != 1:
        raise error.TestFail('no messages received by the server')
def _device_release(self, cmd, device):
    """Run |cmd| to release |device|, retrying once with diagnostics.

    @param cmd: shell command expected to release the device.
    @param device: block device path, e.g. /dev/sdb.
    @raises error.TestFail: if the command still fails after the retry,
            including fuser/lsblk output to show who holds the device.
    """
    if utils.system(cmd, ignore_status=True) == 0:
        return

    # Lazy %-args instead of eager interpolation in the logging call.
    logging.warning("Could not release %s. Retrying...", device)

    # Other things (like cros-disks) may have the device open briefly,
    # so if we initially fail, try again and attempt to gather details
    # on who else is using the device.
    fuser = utils.system_output('fuser -v %s' % (device),
                                retain_output=True)
    lsblk = utils.system_output('lsblk %s' % (device),
                                retain_output=True)
    time.sleep(1)
    if utils.system(cmd, ignore_status=True) == 0:
        return

    raise error.TestFail('"%s" failed: %s\n%s' % (cmd, fuser, lsblk))
def get_running_processes(self):
    """Returns a list of running processes as PsOutput objects."""
    # Width of the widest user name, so ps columns do not truncate.
    widest_user = utils.system_output("cut -d: -f1 /etc/passwd | wc -L",
                                      ignore_status=True)
    user_width = max(int(widest_user), 8)

    ps_cmd = "ps --no-headers -ww -eo " + (PS_FIELDS %
                                           (user_width, user_width))
    field_count = len(PS_FIELDS.split(','))
    ps_output = utils.system_output(ps_cmd)

    # crbug.com/422700: Filter out zombie processes.
    return [PsOutput(*line.split(None, field_count - 1))
            for line in ps_output.splitlines()
            if "<defunct>" not in line]
def run_once(self):
    """Entry point of this test."""

    # ID of the kiosk app to start.
    kId = 'afhcomalholahplbjhnmahkoekoijban'

    account = {'account_id': kId,
               'kiosk_app': {'app_id': kId},
               'type': 1}
    self.DEVICE_POLICIES = {'DeviceLocalAccounts': [account],
                            'DeviceLocalAccountAutoLoginId': kId}

    self.setup_case(device_policies=self.DEVICE_POLICIES,
                    enroll=True,
                    kiosk_mode=True,
                    auto_login=False)

    running_apps = utils.system_output('cat /var/log/messages | grep kiosk')

    # Currently this is the best way I can think of to check if DUT entered
    # kiosk mode. This isn't ideal but it's better than what we have now.
    # TODO(rzakarian): Find another way to verify that kiosk mode is up.
    # crbug.com/934500.
    if KIOSK_MODE not in running_apps:
        raise error.TestFail(
            'DUT did not enter kiosk mode. and it should have.')
def probe_cpus():
    """
    This routine returns a list of cpu devices found under
    /sys/devices/system/cpu.
    """
    find_cmd = "find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*"
    output = utils.system_output(find_cmd)
    return output.splitlines()
def get_oldest_pid_by_name(name):
    """
    Return the oldest pid of a process whose name perfectly matches |name|.

    name is an egrep expression, which will be matched against the entire
    name of processes on the system.  For example:

        get_oldest_pid_by_name('chrome')

    on a system running
        8600 ?        00:00:04 chrome
        8601 ?        00:00:00 chrome
        8602 ?        00:00:00 chrome-sandbox

    would return 8600, as that's the oldest process that matches.
    chrome-sandbox would not be matched.

    Arguments:
      name: egrep expression to match.  Will be anchored at the beginning and
            end of the match string.

    Returns:
      pid as an integer, or None if one cannot be found.

    Raises:
      ValueError if pgrep returns something odd.
    """
    # 'pgrep -o' already reports only the oldest matching pid.
    pid_text = utils.system_output('pgrep -o ^%s$' % name,
                                   ignore_status=True).rstrip()
    return int(pid_text) if pid_text else None
def module_is_loaded(module_name):
    """Return True if |module_name| shows up in /sbin/lsmod output.

    Dashes are normalized to underscores to match the kernel's naming of
    loaded modules.
    """
    module_name = module_name.replace('-', '_')
    modules = utils.system_output('/sbin/lsmod').splitlines()
    for module in modules:
        # Compare the first whitespace-delimited field.  The original
        # indexed module[len(module_name)], which raises IndexError when a
        # line is exactly the module name with nothing after it.
        fields = module.split()
        if fields and fields[0] == module_name:
            return True
    return False
def get_process_list(name, command_line=None): """ Return the list of pid for matching process |name command_line|. on a system running 31475 ? 0:06 /opt/google/chrome/chrome --allow-webui-compositing - 31478 ? 0:00 /opt/google/chrome/chrome-sandbox /opt/google/chrome/ 31485 ? 0:00 /opt/google/chrome/chrome --type=zygote --log-level=1 31532 ? 1:05 /opt/google/chrome/chrome --type=renderer get_process_list('chrome') would return ['31475', '31485', '31532'] get_process_list('chrome', '--type=renderer') would return ['31532'] Arguments: name: process name to search for. If command_line is provided, name is matched against full command line. If command_line is not provided, name is only matched against the process name. command line: when command line is passed, the full process command line is used for matching. Returns: list of PIDs of the matching processes. """ # TODO(rohitbm) crbug.com/268861 flag = '-x' if not command_line else '-f' name = '\'%s.*%s\'' % (name, command_line) if command_line else name str_pid = utils.system_output('pgrep %s %s' % (flag, name), ignore_status=True).rstrip() return str_pid.split()
def get_root_device():
    """
    Return root device.
    Will return correct disk device even system boot from /dev/dm-0
    Example: return /dev/sdb for falco booted from usb
    """
    rootdev_cmd = 'rootdev -s -d'
    return utils.system_output(rootdev_cmd)
def module_is_loaded(module_name):
    """Return True if |module_name| shows up in /sbin/lsmod output.

    Dashes are normalized to underscores to match the kernel's naming of
    loaded modules.
    """
    module_name = module_name.replace("-", "_")
    modules = utils.system_output("/sbin/lsmod").splitlines()
    for module in modules:
        # Compare the first whitespace-delimited field.  The original
        # indexed module[len(module_name)], which raises IndexError when a
        # line is exactly the module name with nothing after it.
        fields = module.split()
        if fields and fields[0] == module_name:
            return True
    return False
def check_kernel_ver(ver):
    """Raise TestError unless the running kernel is at least version |ver|."""
    kernel_ver = utils.system_output('uname -r')
    # Only the numeric portion before the first '-' matters for comparison.
    base_ver = kernel_ver.split('-')[0]
    # In compare_versions, if v1 < v2, return value == -1
    if utils.compare_versions(base_ver, ver) == -1:
        raise error.TestError("Kernel too old (%s). Kernel > %s is needed." %
                              (kernel_ver, ver))
def apply_patch(self, patch):
    """
    Apply a patch to the code base. Patches are expected to be made using
    level -p1, and taken according to the code base top level.

    @param patch: Path to the patch file.
    """
    try:
        utils.system_output("patch -p1 < %s" % patch)
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        logging.error("Patch applied incorrectly. Possible causes: ")
        logging.error("1 - Patch might not be -p1")
        logging.error("2 - You are not at the top of the autotest tree")
        logging.error("3 - Patch was made using an older tree")
        logging.error("4 - Mailer might have messed the patch")
        sys.exit(1)
def _prepare_test(self):
    '''Prepare test: check initial conditions and set variables.'''
    # Locate the single external temperature sensor file under /sys; more
    # or fewer than one match means the platform is not what we expect.
    ext_temp_path = utils.system_output('find /sys -name %s' %
                                        EXT_TEMP_SENSOR_FILE).splitlines()
    if len(ext_temp_path) != 1:
        raise error.TestError('found %d sensor files' % len(ext_temp_path))
    self.temperature_data_path = os.path.dirname(ext_temp_path[0])

    # Flag used to signal heater threads to terminate.
    self.stop_all_workers = False
    self.pl_desc = self._get_platform_descriptor()

    # Verify CPU frequency is in range.
    self._check_freq()

    # Make sure we are not yet throttling.
    if self._throttle_count():
        raise error.TestError('Throttling active before test started')

    # Remember throttling level setting before test started.
    self.preserved_throttle_limit = self._throttle_limit()

    # Require headroom of at least 4*DELTA below the throttle limit so the
    # test has room to heat the CPU without immediate throttling.
    if self.preserved_throttle_limit - self._cpu_temp() < 4 * DELTA:
        raise error.TestError('Target is too hot: %s C' %
                              str(self._cpu_temp()))

    # list to keep track of threads started to heat up CPU.
    self.worker_threads = []

    # Dictionary of saved cores' scaling governor settings.
    self.saved_governors = {}

    self.register_after_iteration_hook(clean_up)
def run_once(self, test_which='Mains'):
    """Probe power-supply sysfs nodes and dispatch the selected sub-test."""
    # This test doesn't apply to systems that run on AC only.
    psu_type_out = utils.system_output("mosys psu type", ignore_status=True)
    if psu_type_out.strip() == "AC_only":
        return

    ac_paths = []
    bat_paths = []
    # Gather power supplies
    for path in glob.glob(power_ProbeDriver.power_supply_path):
        type_path = os.path.join(path, 'type')
        if not os.path.exists(type_path):
            continue
        # With the advent of USB Type-C, mains power might show up as
        # one of several variants of USB.
        psu_type = utils.read_one_line(type_path)
        if psu_type in ('Mains', 'USB', 'USB_DCP', 'USB_CDP',
                        'USB_TYPE_C', 'USB_PD', 'USB_PD_DRP'):
            ac_paths.append(path)
        elif psu_type == 'Battery':
            bat_paths.append(path)

    run_dict = {'Mains': self.run_ac, 'Battery': self.run_bat}
    if test_which not in run_dict:
        raise error.TestNAError('Unknown test type: %s' % test_which)
    run_dict[test_which](ac_paths, bat_paths)
def probe_cpus():
    """
    This routine returns a list of cpu devices found under
    /sys/devices/system/cpu.
    """
    find_cmd = 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*'
    output = utils.system_output(find_cmd)
    return output.splitlines()
def get_usbdevice_type_and_serial(device):
    """Get USB device type and Serial number

    @param device: USB device mount point Example: /dev/sda or /dev/sdb
    @return: Returns the information about USB type and the serial number
            of the device, or (None, None) if no match is found.
    """
    usb_info_list = []
    # Fix: initialize before the loop so a bcdUSB/iSerial line appearing
    # before the first 'Device Descriptor' line cannot raise NameError.
    usb_info = {}

    # Getting the USB type and Serial number info using 'lsusb -v'. Sample
    # output is shown in below
    # Device Descriptor:
    #      bcdUSB               2.00
    #      iSerial                 3 131BC7
    #      bcdUSB               2.00
    # Device Descriptor:
    #      bcdUSB               2.10
    #      iSerial                 3 001A4D5E8634B03169273995

    lsusb_output = utils.system_output(LSUSB_CMD)
    # we are parsing each line and getting the usb info
    for line in lsusb_output.splitlines():
        desc_matched = re.search(DESC_PATTERN, line)
        bcdusb_matched = re.search(BCDUSB_PATTERN, line)
        iserial_matched = re.search(ISERIAL_PATTERN, line)
        if desc_matched:
            usb_info = {}
        elif bcdusb_matched:
            # bcdUSB may appear multiple time. Drop the remaining.
            usb_info['bcdUSB'] = bcdusb_matched.group(1)
        elif iserial_matched:
            usb_info['iSerial'] = iserial_matched.group(1)
            usb_info_list.append(usb_info)

    logging.debug('lsusb output is %s', usb_info_list)

    # Comparing the lsusb serial number with udev output serial number
    # Both serial numbers should be same. Sample udev command output is
    # shown in below.
    # ATTRS{serial}=="001A4D5E8634B03169273995"
    udev_serial_output = utils.system_output(UDEV_CMD_FOR_SERIAL_NUMBER %
                                             device)
    udev_serial_matched = re.search(UDEV_SERIAL_PATTERN, udev_serial_output)
    if udev_serial_matched:
        udev_serial = udev_serial_matched.group(1)
        logging.debug("udev serial number is %s", udev_serial)
        for usb_details in usb_info_list:
            # Fix: use .get() so an entry missing 'iSerial' cannot raise
            # KeyError.
            if usb_details.get('iSerial') == udev_serial:
                return usb_details.get('bcdUSB'), udev_serial
    return None, None
def get_modified_files(self):
    """Return paths reported modified or added by 'git status --porcelain'.

    Fixes the original condition 'line and flag == "M" or flag == "A"',
    which groups as '(line and flag == "M") or (flag == "A")', and guards
    against empty lines before indexing line[0] (IndexError).
    """
    status = utils.system_output("git status --porcelain")
    modified_files = []
    for line in status.split("\n"):
        if not line:
            continue
        status_flag = line[0]
        if status_flag == "M" or status_flag == "A":
            modified_files.append(line[1:].strip())
    return modified_files
def get_modified_files(self):
    """Return paths reported modified or added by 'svn status'.

    Fixes the original condition 'line and flag == "M" or flag == "A"',
    which groups as '(line and flag == "M") or (flag == "A")', and guards
    against empty lines before indexing line[0] (IndexError).
    """
    status = utils.system_output("svn status --ignore-externals")
    modified_files = []
    for line in status.split("\n"):
        if not line:
            continue
        status_flag = line[0]
        if status_flag == "M" or status_flag == "A":
            modified_files.append(line[1:].strip())
    return modified_files
def get_cpu_family():
    """Return the 'cpu family' value from /proc/cpuinfo as an int."""
    procinfo = utils.system_output('cat /proc/cpuinfo')
    family_matches = re.findall(r'^cpu family\s+:\s+(\S+)', procinfo, re.M)
    if not family_matches:
        raise error.TestError('Could not get valid cpu family data')
    return int(family_matches[0])
def _test_metrics_disabled(self):
    """
    When metrics are disabled, nothing should get uploaded.
    """
    self._create_one_sample()
    self.server = FakeServer()
    self.server.Start()

    daemon_cmd = ('metrics_daemon -uploader_test '
                  '-server="%s"' % SERVER_ADDRESS)
    utils.system_output(daemon_cmd, timeout=10, retain_output=True)

    self.server.Stop()

    if len(self.server.messages) != 0:
        raise error.TestFail('message received by the server')
def get_cpu_family():
    """Return the "cpu family" value from /proc/cpuinfo as an int."""
    procinfo = utils.system_output("cat /proc/cpuinfo")
    family_matches = re.findall(r"^cpu family\s+:\s+(\S+)", procinfo, re.M)
    if not family_matches:
        raise error.TestError("Could not get valid cpu family data")
    return int(family_matches[0])
def is_running(service_name):
    """
    Returns true if |service_name| is running.

    @param service_name: name of service
    """
    status_output = utils.system_output('status %s' % service_name)
    return 'start/running' in status_output
def running_config():
    """
    Return path of config file of the currently running kernel
    """
    version = utils.system_output("uname -r")
    candidates = ("/proc/config.gz",
                  "/boot/config-%s" % version,
                  "/lib/modules/%s/build/.config" % version)
    for config in candidates:
        if os.path.isfile(config):
            return config
    return None
def get_mem_status(params, role):
    # Return the value of a /proc/meminfo field, read locally for the host
    # or over a remote shell for the guest.
    # 'params' is the meminfo field-name prefix to look for, e.g. "MemFree".
    # NOTE(review): 'session' is not defined in this scope -- presumably a
    # module-level remote session object; confirm against callers.
    if role == "host":
        info = utils.system_output("cat /proc/meminfo")
    else:
        info = session.cmd("cat /proc/meminfo")
    for h in re.split("\n+", info):
        if h.startswith("%s" % params):
            # Second whitespace-delimited field is the numeric value
            # (returned as a string; the unit, e.g. "kB", is dropped).
            output = re.split('\s+', h)[1]
            return output
    # Implicitly returns None when no field matches.
def _hash_image(self):
    """runs verity over the image and saves the device mapper table"""
    self.table = utils.system_output(
        self.verity_cmd % (self.alg, self.file, self.blocks, self.hash_file))
    # The verity tool doesn't include a templated error value.
    # For now, we add one.
    self.table += " error_behavior=ERROR_BEHAVIOR"
    # Lazy %-args instead of eager interpolation in the logging call.
    logging.info("table is %s", self.table)
def get_unknown_files(self):
    """Return untracked paths from 'svn status --ignore-externals'.

    Fixes two defects in the original loop: line[0] raised IndexError on
    empty lines, and a file was appended once for EVERY ignored extension
    it did NOT end with -- producing duplicates and still including files
    that matched an ignored extension.
    """
    status = utils.system_output("svn status --ignore-externals")
    unknown_files = []
    for line in status.split("\n"):
        if not line or line[0] != "?":
            continue
        # Keep the file only when it matches none of the ignored extensions.
        if not any(line.endswith(extension)
                   for extension in self.ignored_extension_list):
            unknown_files.append(line[1:].strip())
    return unknown_files
def get_new_files(self):
    """
    Implement source.get_new_files by using rsync listing feature.
    """
    files = {}
    for src, prefix in self.sources:
        # NOTE(review): the command is formatted with self.prefix while the
        # per-source 'prefix' from the tuple is only passed to
        # _parse_output -- confirm this asymmetry is intentional.
        output = utils.system_output(self._cmd_template %
                                     (self.exclude, self.prefix, src))
        files.update(self._parse_output(output, prefix))

    return self._get_new_files(files)
def get_unknown_files(self):
    """Return untracked paths from 'git status --porcelain'.

    Fixes three defects in the original: line[0] (a single character) was
    compared against "??" and could never match, so nothing was ever
    returned; empty lines raised IndexError; and the per-extension loop
    appended a file once for every ignored extension it did not end with.
    """
    status = utils.system_output("git status --porcelain")
    unknown_files = []
    for line in status.split("\n"):
        # Untracked entries carry the two-character "??" porcelain code.
        if not line.startswith("??"):
            continue
        # Keep the file only when it matches none of the ignored extensions.
        if not any(line.endswith(extension)
                   for extension in self.ignored_extension_list):
            unknown_files.append(line[2:].strip())
    return unknown_files
def running_config():
    """
    Return path of config file of the currently running kernel
    """
    version = utils.system_output('uname -r')
    candidates = ('/proc/config.gz',
                  '/boot/config-%s' % version,
                  '/lib/modules/%s/build/.config' % version)
    for config in candidates:
        if os.path.isfile(config):
            return config
    return None
def __init__(self, job, harness_args):
    """
    job
            The job object for this job

    harness_args
            Harness arguments; accepted but unused here.
    """
    # Root of the autotest installation, provided by the environment.
    self.autodir = os.path.abspath(os.environ['AUTODIR'])
    self.setup(job)

    # Copy the control file into autodir unless it already lives there.
    src = job.control_get()
    dest = os.path.join(self.autodir, 'control')
    if os.path.abspath(src) != os.path.abspath(dest):
        shutil.copyfile(src, dest)
        job.control_set(dest)

    logging.debug("Symlinking init scripts")
    rc = os.path.join(self.autodir, 'tools/autotest')
    # see if system supports event.d versus systemd versus inittab
    supports_eventd = os.path.exists('/etc/event.d')
    supports_systemd = os.path.exists('/etc/systemd')
    supports_inittab = os.path.exists('/etc/inittab')
    if supports_eventd or supports_systemd:
        # NB: assuming current runlevel is default
        initdefault = utils.system_output('/sbin/runlevel').split()[1]
    elif supports_inittab:
        initdefault = utils.system_output('grep :initdefault: /etc/inittab')
        initdefault = initdefault.split(':')[1]
    else:
        # Fall back to the conventional default multi-user runlevel.
        initdefault = '2'

    try:
        service = '/etc/init.d/autotest'
        service_link = '/etc/rc%s.d/S99autotest' % initdefault
        # Remove stale symlinks first so os.symlink does not fail with
        # EEXIST.  NOTE(review): a pre-existing regular (non-link) file at
        # either path would still make os.symlink raise -- presumably never
        # the case on targeted systems; confirm if that matters.
        if os.path.islink(service):
            os.remove(service)
        if os.path.islink(service_link):
            os.remove(service_link)
        os.symlink(rc, service)
        os.symlink(rc, service_link)
    except Exception, e:
        logging.error("Symlink init scripts failed with %s", e)
def stop(self, test):
    """Stop the iostat profiler: SIGTERM first, SIGKILL if it survives."""
    try:
        # send SIGTERM to iostat and give it a 5-sec timeout
        utils.system("kill -15 %d" % self.pid, timeout=5)
    except error.CmdError:
        # probably times out
        pass

    # do a ps again to see if iostat is still there
    ps_out = utils.system_output("ps -p %d | grep iostat" % self.pid,
                                 ignore_status=True)
    if ps_out != '':
        utils.system('kill -9 %d' % self.pid, ignore_status=True)
def ext_tunables(dev):
    """Call tune2fs -l and parse the result."""
    try:
        out = utils.system_output('tune2fs -l %s' % dev)
    except error.CmdError:
        # Fall back to the bundled ext4dev variant of the tool.
        tools_dir = os.path.join(os.environ['AUTODIR'], 'tools')
        out = utils.system_output('%s/tune2fs.ext4dev -l %s' % (tools_dir,
                                                                dev))
    # Load option mappings
    tune2fs_dict = {}
    for line in out.splitlines():
        pieces = line.split(':', 1)
        if len(pieces) != 2:
            continue
        option = pieces[0]
        value = pieces[1].strip()
        tune2fs_dict[option] = int(value) if value.isdigit() else value

    return tune2fs_dict
def check_for_kernel_feature(feature):
    """Raise ValueError unless CONFIG_<feature> is set in the kernel config."""
    config = running_config()

    if not config:
        raise TypeError("Can't find kernel config file")

    # Use zgrep for a gzip-compressed config, plain grep otherwise.
    if magic.guess_type(config) == 'application/x-gzip':
        grep_bin = 'zgrep'
    else:
        grep_bin = 'grep'
    grep_cmd = grep_bin + ' ^CONFIG_%s= %s' % (feature, config)

    if not utils.system_output(grep_cmd, ignore_status=True):
        raise ValueError("Kernel doesn't have a %s feature" % (feature))
def check_for_kernel_feature(feature):
    """Raise ValueError unless CONFIG_<feature> is set in the kernel config."""
    config = running_config()

    if not config:
        raise TypeError("Can't find kernel config file")

    # Use zgrep for a gzip-compressed config, plain grep otherwise.
    grep_bin = "zgrep" if config.endswith(".gz") else "grep"
    grep_cmd = grep_bin + " ^CONFIG_%s= %s" % (feature, config)

    if not utils.system_output(grep_cmd, ignore_status=True):
        raise ValueError("Kernel doesn't have a %s feature" % (feature))
def main():
    """Run the unit-test suite under coverage.py and report per-file stats."""
    coverage = os.path.join(root, "contrib/coverage.py")
    unittest_suite = os.path.join(root, "unittest_suite.py")

    # remove preceeding coverage data
    utils.system_output("%s -e" % (coverage))

    # run unittest_suite through coverage analysis
    utils.system_output("%s -x %s" % (coverage, unittest_suite))

    # now walk through directory grabbing lits of files
    module_strings = []
    for dirpath, dirnames, files in os.walk(root):
        if not is_valid_directory(dirpath):
            continue
        module_strings.extend(os.path.join(dirpath, f)
                              for f in files if is_valid_filename(f))

    # analyze files
    utils.system("%s -r -m %s" % (coverage, " ".join(module_strings)))
def rebase_test(cmd):
    """
    Subcommand 'qemu-img rebase' test

    Change the backing file of a snapshot image in "unsafe mode":
    Assume the previous backing file had missed and we just have to change
    reference of snapshot to new one. After change the backing file of a
    snapshot image in unsafe mode, the snapshot should work still.

    @param cmd: qemu-img base command.
    """
    if 'rebase' not in utils.system_output(cmd + ' --help',
                                           ignore_status=True):
        raise error.TestNAError("Current kvm user space version does not"
                                " support 'rebase' subcommand")
    sn_fmt = params.get("snapshot_format", "qcow2")
    sn1 = params.get("image_name_snapshot1")
    sn1 = kvm_utils.get_path(test.bindir, sn1) + ".%s" % sn_fmt
    base_img = kvm_vm.get_image_filename(params, test.bindir)
    _create(cmd, sn1, sn_fmt, base_img=base_img, base_img_fmt=image_format)

    # Create snapshot2 based on snapshot1
    sn2 = params.get("image_name_snapshot2")
    sn2 = kvm_utils.get_path(test.bindir, sn2) + ".%s" % sn_fmt
    _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt)

    rebase_mode = params.get("rebase_mode")
    if rebase_mode == "unsafe":
        # Simulate a lost backing file before the unsafe rebase.
        os.remove(sn1)

    _rebase(cmd, sn2, base_img, image_format, mode=rebase_mode)

    # Check sn2's format and backing_file
    actual_base_img = _info(cmd, sn2, "backing file")
    base_img_name = os.path.basename(params.get("image_name"))
    if not base_img_name in actual_base_img:
        raise error.TestFail("After rebase the backing_file of 'sn2' is "
                             "'%s' which is not expected as '%s'"
                             % (actual_base_img, base_img_name))
    s, o = _check(cmd, sn2)
    if not s:
        raise error.TestFail("Check image '%s' failed after rebase;"
                             "got error: %s" % (sn2, o))
    try:
        os.remove(sn2)
        os.remove(sn1)
    except OSError:
        # Was a bare 'except:'; best-effort cleanup should only swallow
        # filesystem errors (e.g. sn1 already removed in unsafe mode).
        pass
def _check(cmd, img):
    """
    Simple 'qemu-img check' function implementation.

    @param cmd: qemu-img base command.
    @param img: image to be checked
    @return: (status, message) tuple on failure paths; formats without
             check support are treated as passing.
    """
    cmd += " check %s" % img
    logging.info("Checking image '%s'...", img)
    try:
        output = utils.system_output(cmd)
    except error.CmdError, e:
        # Images in formats that cannot be checked are considered OK.
        if "does not support checks" in str(e):
            return (True, "")
        else:
            return (False, str(e))
    # NOTE(review): no return on the success path -- callers unpack a
    # (status, output) tuple, so this block looks truncated; confirm a
    # 'return (True, output)' follows in the full source.
def get_hwclock_seconds(utc=True):
    """
    Return the hardware clock in seconds as a floating point value.
    Use Coordinated Universal Time if utc is True, local time otherwise.
    Raise a ValueError if unable to read the hardware clock.
    """
    cmd = '/sbin/hwclock --debug'
    if utc:
        cmd += ' --utc'
    hwclock_output = utils.system_output(cmd, ignore_status=True)
    # Combine the whole-second epoch value with the fractional drift that
    # hwclock --debug reports on its final line.
    match = re.search('= ([0-9]+) seconds since .+ (-?[0-9.]+) seconds$',
                      hwclock_output, re.DOTALL)
    if match:
        seconds = int(match.group(1)) + float(match.group(2))
        # Lazy %-args instead of eager interpolation in the logging call.
        logging.debug('hwclock seconds = %f', seconds)
        return seconds

    raise ValueError('Unable to read the hardware clock -- ' +
                     hwclock_output)
def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If even then it's not possible
    to remove one of the modules, it will throw an error.CmdError exception.

    @param module_name: Name of the module we want to remove.
    """
    l_raw = utils.system_output("/sbin/lsmod").splitlines()
    # Guard x.split() so a blank lsmod line cannot raise IndexError.
    lsmod = [x for x in l_raw if x.split() and x.split()[0] == module_name]
    if len(lsmod) > 0:
        line_parts = lsmod[0].split()
        if len(line_parts) == 4:
            # Fourth column lists dependent modules; unload them first.
            submodules = line_parts[3].split(",")
            for submodule in submodules:
                unload_module(submodule)
        utils.system("/sbin/modprobe -r %s" % module_name)
        # Lazy %-args instead of eager interpolation in the logging calls.
        logging.info("Module %s unloaded", module_name)
    else:
        logging.info("Module %s is already unloaded", module_name)
def _info(cmd, img, sub_info=None, fmt=None):
    """
    Simple wrapper of 'qemu-img info'.

    @param cmd: qemu-img base command.
    @param img: image file
    @param sub_info: sub info, say 'backing file'
    @param fmt: image format
    @return: None on command failure.
    """
    cmd += " info"
    if fmt:
        cmd += " -f %s" % fmt
    cmd += " %s" % img

    try:
        output = utils.system_output(cmd)
    except error.CmdError, e:
        logging.error("Get info of image '%s' failed: %s", img, str(e))
        return None
    # NOTE(review): 'sub_info' is never consumed and the success path falls
    # off the end (returns None) -- callers use the result, so this block
    # looks truncated; confirm the sub_info parsing / 'return output' logic
    # in the full source.
def _check_indent(self):
    """
    Verifies the file with reindent.py. This tool performs the following
    checks on python files:

      * Trailing whitespaces
      * Tabs
      * End of line
      * Incorrect indentation

    For the purposes of checking, the dry run mode is used and no changes
    are made. It is up to the user to decide if he wants to run reindent
    to correct the issues.
    """
    raw_output = utils.system_output('reindent.py -v -d %s | head -1' %
                                     self.path)
    verdict = raw_output.split(" ")[-1].strip(".")
    if verdict == "changed" and self.basename not in self.indentation_exceptions:
        self.corrective_actions.append("reindent.py -v %s" % self.path)
def match_xfs_options(dev, needed_options):
    """Compare the current XFS filesystem tunables with needed ones.

    Mounts the device temporarily to read its geometry with 'xfs_growfs -n',
    then compares that against a dry-run 'mkfs.xfs -N' with the requested
    options.

    @param dev: block device holding the XFS filesystem.
    @param needed_options: mkfs.xfs option string to compare against.
    @return: True when the current geometry matches, False otherwise.
    """
    tmp_mount_dir = tempfile.mkdtemp()
    cmd = 'mount %s %s' % (dev, tmp_mount_dir)
    utils.system_output(cmd)
    xfs_growfs = os.path.join(os.environ['AUTODIR'], 'tools', 'xfs_growfs')
    cmd = '%s -n %s' % (xfs_growfs, dev)
    try:
        current_option = utils.system_output(cmd)
    finally:
        # Clean.
        cmd = 'umount %s' % dev
        utils.system_output(cmd, ignore_status=True)
        os.rmdir(tmp_mount_dir)

    # '-N' has the same effect as '-n' in mkfs.ext*. Man mkfs.xfs for details.
    cmd = 'mkfs.xfs %s -N -f %s' % (needed_options, dev)
    needed_out = utils.system_output(cmd)
    # 'mkfs.xfs -N' produces slightly different result than 'xfs_growfs -n'
    needed_out = re.sub('internal log', 'internal    ', needed_out)
    if current_option == needed_out:
        return True
    else:
        return False