def run_once(self):
    """Verify no root partition is held mounted by the update-engine process.

    Raises TestNAError when a precondition fails (not root, helper
    binaries missing, no root device/partitions, no matching process)
    and TestFail when a root partition appears in a process mount table.
    """
    # Reading /proc/<pid>/mounts of other processes requires root.
    if os.geteuid() != 0:
        raise error.TestNAError('This test needs to be run under root')

    # Both helper binaries must exist before we can proceed.
    for tool in (self._CGPT_PATH, self._ROOTDEV_PATH):
        if not os.path.isfile(tool):
            raise error.TestNAError('%s not found' % tool)

    rootdev = bin_utils.get_root_device()
    if not rootdev:
        raise error.TestNAError('Could not find the root device')
    logging.debug('Root device: %s' % rootdev)

    partitions = self.get_root_partitions(rootdev)
    if not partitions:
        raise error.TestNAError('Could not find any root partition')
    logging.debug('Root partitions: %s' % ', '.join(partitions))

    pids = self.get_process_list([self._UPDATE_ENGINE_PATH])
    if not pids:
        raise error.TestNAError('Could not find any process')
    logging.debug('Active processes: %s' % ', '.join(pids))

    # Fail as soon as any inspected process has a root partition mounted.
    for pid in pids:
        exe = self.get_process_executable(pid)
        mounted = self.get_mounted_devices('/proc/%s/mounts' % pid)
        for part in partitions:
            if part in mounted:
                raise error.TestFail(
                        'Root partition "%s" is mounted by process %s (%s)'
                        % (part, pid, exe))
def run_once(self, dev='/dev/sda'):
    """
    Measure write performance before and after trim.

    This test use an entire disk so we need to boot from usb.

    @param dev: block device to test
    """
    logging.info('Target device: %s', dev)

    # The target block device must exist ...
    if not os.path.exists(dev):
        msg = 'Test failed with error: %s not exist' % dev
        raise error.TestFail(msg)

    # ... and must not be the device we booted from.
    if utils.get_root_device() == dev:
        raise error.TestFail('Can not test on root device')

    # Pre-fill the whole device so subsequent writes hit used flash blocks.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      dev=dev,
                      filesize=0,
                      requirements=[('disk_fill', [])],
                      tag='disk_fill')

    # Use 4k random write with queue depth = 32 because manufacture usually
    # uses this use case in the SSD specification.
    # Also, print result every minute to look at the performance drop trend
    # over time. Result reported by autotest will be the last minute one.
    requirements = [('4k_write_qd32', ['--status-interval=60'])]

    # Baseline write performance on the filled device.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      dev=dev,
                      filesize=0,
                      requirements=requirements,
                      tag='before_trim')

    # Unmount anything on the device so it can be formatted.
    utils.run('umount %s*' % dev, ignore_status=True)
    # mkfs.ext4 discards (trims) the whole device before formatting.
    utils.run('mkfs.ext4 -F %s' % dev, ignore_status=True)

    # Write performance again, now on freshly trimmed flash.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      dev=dev,
                      filesize=0,
                      requirements=requirements,
                      tag='after_trim')
def _create_dut_info_dict(self, power_rails):
    """Create a dictionary that contain information of the DUT.

    Args:
        power_rails: list of measured power rails

    Returns:
        DUT info dictionary
    """
    board = utils.get_board()
    platform = utils.get_platform()
    # Fold the platform name into the board string when it adds detail.
    if not platform.startswith(board):
        board += '_' + platform
    if power_utils.has_hammer():
        board += '_hammer'

    version = {
        'hw': utils.get_hardware_revision(),
        'milestone': lsbrelease_utils.get_chromeos_release_milestone(),
        'os': lsbrelease_utils.get_chromeos_release_version(),
        'channel': lsbrelease_utils.get_chromeos_channel(),
        'firmware': utils.get_firmware_version(),
        'ec': utils.get_ec_version(),
        'kernel': utils.get_kernel_version(),
    }
    sku = {
        'cpu': utils.get_cpu_name(),
        'memory_size': utils.get_mem_total_gb(),
        'storage_size': utils.get_disk_size_gb(utils.get_root_device()),
        'display_resolution': utils.get_screen_resolution(),
    }
    dut_info_dict = {
        'board': board,
        'version': version,
        'sku': sku,
        'ina': {
            'version': 0,
            'ina': power_rails,
        },
        'note': self._note,
    }

    if power_utils.has_battery():
        status = power_status.get_status()
        if status.battery:
            # Round the battery size to nearest tenth because it is
            # fluctuated for platform without battery nominal voltage data.
            sku['battery_size'] = round(
                    status.battery[0].energy_full_design, 1)
            sku['battery_shutdown_percent'] = \
                    power_utils.get_low_battery_shutdown_percent()
    return dut_info_dict
def run_once(self, iters=1, tmout=60 * 60):
    """
    Executes test.

    @param iters: Number of times to run badblocks.
    @param tmout: Time allowed badblocks to run before killing it.
                  (Default time is 60 minutes.)
    """
    # Log starting message.  (Typo 'Statring' fixed.)
    logging.info('Starting hardware_Badblocks Test.')
    logging.info('Iterations: %d', iters)
    logging.info('badblocks Timeout (sec): %d', tmout)

    # Determine which device and partition to use.
    logging.info('Determine unused root partition to test on:')
    dev = utils.get_free_root_partition()
    logging.info('Testing on %s', dev)

    # Get block device's sector size.
    logging.info('Determine block device sector size:')
    sector_size = self._get_sector_size(utils.get_root_device())
    logging.info('Sector size (bytes): %s', sector_size)

    # Get partition size.
    logging.info('Determine partition size:')
    part_size = utils.get_disk_size(dev)
    logging.info('Partition size (bytes): %s', part_size)

    # Run badblocks; each run updates the pass/fail counters.
    for i in range(iters):
        logging.info('Starting iteration %d', i)
        self._run_badblocks(dev, sector_size, tmout)

    # Report statistics.
    logging.info('Total pass: %d', self._pass_count)
    logging.info('Total fail: %d', self._fail_count)
    stats = {}
    stats['ea_badblocks_runs'] = iters
    stats['ea_passed_count'] = self._pass_count
    stats['ea_failed_count'] = self._fail_count
    stats['sec_longest_run'] = self._longest_runtime
    # TODO: change write_perf_keyval() to output_perf_value() as soon as
    # autotest is ready for it.
    self.write_perf_keyval(stats)

    # Report test pass/fail: every iteration must have passed.
    if self._pass_count != iters:
        raise error.TestFail('One or more runs found bad blocks on'
                             ' storage device.')
def _create_powerlog_dict(self, raw_measurement):
    """Create powerlog dictionary from raw measurement data

    Data format in go/power-dashboard-data

    Args:
        raw_measurement: dictionary contains raw measurement data.

    Returns:
        A dictionary of powerlog.
    """
    powerlog_dict = {
        'format_version': 2,
        'timestamp': time.time(),
        'test': self._testname,
        'dut': {
            'board': utils.get_board(),
            'version': {
                'hw': utils.get_hardware_revision(),
                'milestone':
                    lsbrelease_utils.get_chromeos_release_milestone(),
                'os': lsbrelease_utils.get_chromeos_release_version(),
                'channel': lsbrelease_utils.get_chromeos_channel(),
                'firmware': utils.get_firmware_version(),
                'ec': utils.get_ec_version(),
                'kernel': utils.get_kernel_version(),
            },
            'sku': {
                'cpu': utils.get_cpu_name(),
                'memory_size': utils.get_mem_total_gb(),
                'storage_size':
                    utils.get_disk_size_gb(utils.get_root_device()),
                'display_resolution': utils.get_screen_resolution(),
            },
            'ina': {
                'version': 0,
                'ina': raw_measurement['data'].keys()
            },
            'note': ''
        },
        'power': raw_measurement
    }
    if power_utils.has_battery():
        status = power_status.get_status()
        # Guard against a reported battery with no readable data, matching
        # the check done in _create_dut_info_dict.
        if status.battery:
            # Round the battery size to nearest tenth because it is
            # fluctuated for platform without battery nominal voltage data.
            powerlog_dict['dut']['sku']['battery_size'] = round(
                    status.battery[0].energy_full_design, 1)
            powerlog_dict['dut']['sku']['battery_shutdown_percent'] = \
                    power_utils.get_low_battery_shutdown_percent()
    return powerlog_dict
def initialize(self, dev='', filesize=DEFAULT_FILE_SIZE):
    """
    Set up local variables.

    @param dev: block device / file to test.
            Spare partition on root device by default
    @param filesize: size of the file. 0 means whole partition.
            by default, 1GB.
    """
    # A plain file (or a path that does not exist yet) is tested directly;
    # this path needs an explicit nonzero size since there is no partition
    # to measure.
    if dev != '' and (os.path.isfile(dev) or not os.path.exists(dev)):
        if filesize == 0:
            raise error.TestError(
                'Nonzero file size is required to test file systems')
        self.__filename = dev
        self.__filesize = filesize
        self.__description = ''
        return

    # No device given: fall back to the fixed internal drive.
    if not dev:
        dev = utils.get_fixed_dst_drive()

    if dev == utils.get_root_device():
        # Never consume the whole boot device; use the spare root partition.
        if filesize == 0:
            raise error.TestError(
                'Using the root device as a whole is not allowed')
        else:
            self.__filename = utils.get_free_root_partition()
    elif filesize != 0:
        # Use the first partition of the external drive
        # (/dev/sdX takes a bare digit; other names, e.g. mmcblk/nvme,
        # use a 'p' separator).
        if dev[5:7] == 'sd':
            self.__filename = dev + '1'
        else:
            self.__filename = dev + 'p1'
    else:
        # filesize == 0: test the given device as a whole.
        self.__filename = dev
    self.__get_disk_size()
    self.__get_device_description()

    # Restrict test to use a given file size, default 1GiB
    if filesize != 0:
        self.__filesize = min(self.__filesize, filesize)

    self.__verify_only = False

    logging.info('filename: %s', self.__filename)
    logging.info('filesize: %d', self.__filesize)
def run_once(self):
    """Record the main disk size and fail if it is below the minimum."""
    root_dev = utils.get_root_device()
    self._device = os.path.basename(root_dev)
    disk_size = utils.get_disk_size(root_dev)
    if not disk_size:
        raise error.TestError('Unable to determine main disk size')

    # Capacity of a hard disk is quoted with SI prefixes, incrementing by
    # powers of 1000, instead of powers of 1024.
    main_gb = float(disk_size) / (10 ** 9)
    self.write_perf_keyval({"gb_main_disk_size": main_gb})

    minimum_gb = self._compute_min_gb()
    logging.info("DiskSize: %.3f GB MinDiskSize: %.3f GB",
                 main_gb, minimum_gb)
    if main_gb < minimum_gb:
        raise error.TestError("DiskSize %.3f GB below minimum (%.3f GB)" \
                              % (main_gb, minimum_gb))
def run_once(self, check_link_speed=()):
    """Verify the main storage device is a fixed SSD.

    Use rootdev to find the underlying block device even if the system
    booted to /dev/dm-0.

    @param check_link_speed: tuple of SATA signaling speeds (e.g.
            ('1.5Gb/s', '3.0Gb/s')) that must all be supported; empty
            (the default) skips the link-speed check.
    """
    device = utils.get_root_device()

    def is_fixed(dev):
        """
        Check the device is fixed.

        @param dev: device to check, i.e. 'sda'.
        """
        sysfs_path = '/sys/block/%s/removable' % dev
        return (os.path.exists(sysfs_path) and
                open(sysfs_path).read().strip() == '0')

    # Catch device name like sda, mmcblk0, nvme0n1.
    device_re = re.compile(r'^/dev/([a-zA-Z0-9]+)$')
    dev = device_re.findall(device)
    if len(dev) != 1 or not is_fixed(dev[0]):
        raise error.TestFail('The main disk %s is not fixed' % dev)

    # If it is an mmcblk or nvme device, then it is SSD.
    # Else run hdparm to check for SSD.
    if re.search("nvme", device):
        return
    if re.search("mmcblk", device):
        return

    hdparm = utils.run('/sbin/hdparm -I %s' % device)

    # Check if device is a SSD
    match = re.search(r'Nominal Media Rotation Rate: (.+)$',
                      hdparm.stdout, re.MULTILINE)
    if match and match.group(1):
        if match.group(1) != 'Solid State Device':
            if utils.get_board() in self.boards_with_hdd:
                return
            raise error.TestFail('The main disk is not a SSD, '
                                 'Rotation Rate: %s' % match.group(1))
    else:
        raise error.TestFail('Rotation Rate not reported from the device, '
                             'unable to ensure it is a SSD')

    # Check if SSD is > 8GB in size
    match = re.search(r"device size with M = 1000\*1000: (.+) MBytes",
                      hdparm.stdout, re.MULTILINE)
    if match and match.group(1):
        size = int(match.group(1))
        self.write_perf_keyval({"mb_ssd_device_size": size})
    else:
        raise error.TestFail('Device size info missing from the device')

    # Check supported link speed.
    #
    # check_link_speed is an empty tuple by default, which does not perform
    # link speed checking.  You can run the test while specifying
    # check_link_speed=('1.5Gb/s', '3.0Gb/s') to check the 2 signaling
    # speeds are both supported.
    for link_speed in check_link_speed:
        # Regex reconstructed: the literal was broken across lines in the
        # damaged source.
        if not re.search(r'Gen. signaling speed \(%s\)' % link_speed,
                         hdparm.stdout, re.MULTILINE):
            raise error.TestFail('Link speed %s not supported' % link_speed)
def run_once(self, filename=None, file_size=FILE_SIZE,
             chunk_size=CHUNK_SIZE, trim_ratio=TRIM_RATIO):
    """
    Executes the test and logs the output.

    Writes random data, trims progressively larger fractions of it, and
    verifies per-chunk hashes: untrimmed chunks must keep their data and
    trimmed chunks should read back as zeros.

    @param file_name: file/disk name to test
                      default: spare partition of internal disk
    @param file_size: size of data to test. default: 1GB
    @param chunk_size: size of chunk to calculate hash/trim. default: 64KB
    @param trim_ratio: list of ratio of file size to trim data
                       default: [0, 0.25, 0.5, 0.75, 1]

    NOTE(review): assumes Python 2 semantics (integer division for
    chunk_count, octal literal 0666).
    """
    # Pick the target: spare root partition when testing the boot disk,
    # otherwise the given file/device.
    if not filename:
        self._diskname = utils.get_fixed_dst_drive()
        if self._diskname == utils.get_root_device():
            self._filename = utils.get_free_root_partition()
        else:
            self._filename = self._diskname
    else:
        self._filename = filename
        self._diskname = utils.get_disk_from_filename(filename)

    # file_size == 0 means "use the whole device".
    if file_size == 0:
        fulldisk = True
        file_size = utils.get_disk_size(self._filename)
        if file_size == 0:
            cmd = ('%s seem to have 0 storage block. Is the media present?'
                   % filename)
            raise error.TestError(cmd)
    else:
        fulldisk = False

    # Make file size multiple of 4 * chunk size
    file_size -= file_size % (4 * chunk_size)

    # fio interprets filesize 0 as "whole device" as well.
    if fulldisk:
        fio_file_size = 0
    else:
        fio_file_size = file_size

    logging.info('filename: %s, filesize: %d', self._filename, file_size)

    self._verify_trim_support(chunk_size)

    # Calculate hash value for zero'ed and one'ed data
    cmd = str('dd if=/dev/zero bs=%d count=1 | %s' %
              (chunk_size, self.HASH_CMD))
    zero_hash = utils.run(cmd).stdout.strip()

    cmd = str("dd if=/dev/zero bs=%d count=1 | tr '\\0' '\\xff' | %s" %
              (chunk_size, self.HASH_CMD))
    one_hash = utils.run(cmd).stdout.strip()

    # Hash of the first trimmed chunk; used to check trims are
    # deterministic (all trimmed chunks read back identically).
    trim_hash = ""

    # Write random data to disk
    chunk_count = file_size / chunk_size
    cmd = str('dd if=/dev/urandom of=%s bs=%d count=%d oflag=direct' %
              (self._filename, chunk_size, chunk_count))
    utils.run(cmd)

    # Reference hash of every chunk before any trimming.
    ref_hash = self._get_hash(chunk_count, chunk_size)

    # Check read speed/latency when reading real data.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      filesize=fio_file_size,
                      requirements=[('4k_read_qd32', [])],
                      tag='before_trim')

    # Generate random order of chunk to trim
    trim_order = list(range(0, chunk_count))
    random.shuffle(trim_order)
    trim_status = [False] * chunk_count

    # Init stat variable
    data_verify_count = 0
    data_verify_match = 0
    trim_verify_count = 0
    trim_verify_zero = 0
    trim_verify_one = 0
    trim_verify_non_delete = 0
    trim_deterministic = True

    last_ratio = 0
    for ratio in trim_ratio:

        # Do trim: extend the trimmed region from last_ratio to ratio of
        # the randomized chunk order.
        begin_trim_chunk = int(last_ratio * chunk_count)
        end_trim_chunk = int(ratio * chunk_count)
        fd = os.open(self._filename, os.O_RDWR, 0666)
        for chunk in trim_order[begin_trim_chunk:end_trim_chunk]:
            self._do_trim(fd, chunk * chunk_size, chunk_size)
            trim_status[chunk] = True
        os.close(fd)

        last_ratio = ratio

        cur_hash = self._get_hash(chunk_count, chunk_size)

        trim_verify_count += int(ratio * chunk_count)
        data_verify_count += chunk_count - int(ratio * chunk_count)

        # Verify hash: trimmed chunks are classified (zeroed / one'ed /
        # unchanged); untrimmed chunks must still match the reference.
        for cur, ref, trim in zip(cur_hash, ref_hash, trim_status):
            if trim:
                if not trim_hash:
                    trim_hash = cur
                elif cur != trim_hash:
                    trim_deterministic = False

                if cur == zero_hash:
                    trim_verify_zero += 1
                elif cur == one_hash:
                    trim_verify_one += 1
                elif cur == ref:
                    trim_verify_non_delete += 1
            else:
                if cur == ref:
                    data_verify_match += 1

    keyval = dict()
    keyval['data_verify_count'] = data_verify_count
    keyval['data_verify_match'] = data_verify_match
    keyval['trim_verify_count'] = trim_verify_count
    keyval['trim_verify_zero'] = trim_verify_zero
    keyval['trim_verify_one'] = trim_verify_one
    keyval['trim_verify_non_delete'] = trim_verify_non_delete
    keyval['trim_deterministic'] = trim_deterministic
    self.write_perf_keyval(keyval)

    # Check read speed/latency when reading from trimmed data.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      filesize=fio_file_size,
                      requirements=[('4k_read_qd32', [])],
                      tag='after_trim')

    # Any untrimmed chunk that changed is a hard failure.
    if data_verify_match < data_verify_count:
        reason = 'Fail to verify untrimmed data.'
        msg = utils.get_storage_error_msg(self._diskname, reason)
        raise error.TestFail(msg)

    # Non-zero reads after trim: a failure only if the disk claims RZAT
    # (Read Zero After Trim); otherwise the result is N/A.
    if trim_verify_zero < trim_verify_count:
        reason = 'Trimmed data are not zeroed.'
        msg = utils.get_storage_error_msg(self._diskname, reason)
        if utils.is_disk_scsi(self._diskname):
            if utils.verify_hdparm_feature(self._diskname,
                                           self.hdparm_rzat):
                msg += ' Disk claim deterministic read zero after trim.'
                raise error.TestFail(msg)
        raise error.TestNAError(msg)
def run_once(self, iteration=1, dev=''):
    """
    Read S.M.A.R.T attribute from target device

    @param iteration: number of times to run smartctl; repeated runs
                      exercise the firmware path that retrieves SMART
                      values.
    @param dev: target device
    """
    # Default to the boot device when none is given.
    if dev == '':
        logging.info('Run rootdev to determine boot device')
        dev = utils.get_root_device()

    logging.info(str('dev: %s' % dev))

    # Skip this test if dev is an eMMC device without raising an error
    if re.match('.*mmc.*', dev):
        logging.info('Target device is an eMMC device. Skip testing')
        self.write_perf_keyval({'device_model': 'eMMC'})
        return

    last_result = ''

    # run multiple time to test the firmware part that retrieve SMART value
    for loop in range(1, iteration + 1):
        cmd = 'smartctl -a -f brief %s' % dev
        result = utils.run(cmd, ignore_status=True)
        exit_status = result.exit_status
        result_text = result.stdout
        result_lines = result_text.split('\n')

        # log all line if line count is different
        # otherwise log only changed line
        if result_text != last_result:
            logging.info(str('Iteration #%d' % loop))
            last_result_lines = last_result.split('\n')
            if len(last_result_lines) != len(result_lines):
                for line in result_lines:
                    logging.info(line)
            else:
                for i, line in enumerate(result_lines):
                    if line != last_result_lines[i]:
                        logging.info(line)
            last_result = result_text

        # Ignore error other than first two bits
        # (smartctl encodes command-line parse / device-open failures in
        # the two low exit-status bits).
        if exit_status & 0x3:
            # Error message should be in 4th line of the output
            msg = 'Test failed with error: %s' % result_lines[3]
            raise error.TestFail(msg)

    logging.info(str('smartctl exit status: 0x%x' % exit_status))

    # find drive model; the for/else raises only when no line matched.
    lookup_table = {}
    pattern = re.compile(self._SMARTCTL_DEVICE_MODEL_PATTERN)
    for line in result_lines:
        if pattern.match(line):
            model = pattern.match(line).group('model')
            for known_model in self._SMARTCTL_LOOKUP_TABLE:
                if model.startswith(known_model):
                    lookup_table = self._SMARTCTL_LOOKUP_TABLE[known_model]
                    break
            break
    else:
        raise error.TestFail('Can not find drive model')

    # Example of smart ctl result
    # ID# ATTRIBUTE_NAME          FLAGS    VALUE WORST THRESH FAIL RAW_VALUE
    # 12 Power_Cycle_Count       -O----   100   100   000    -    204
    # use flag field to find a valid line
    pattern = re.compile(self._SMARTCTL_RESULT_PATTERN)
    keyval = {}
    fail = []
    for line in result_lines:
        if not pattern.match(line):
            continue
        field = line.split()

        # NOTE: 'id' shadows the builtin; kept as-is for byte-identity.
        id = int(field[0])
        if id in lookup_table:
            # look up table overwrite smartctl name
            key = lookup_table[id]
        else:
            key = field[1] # ATTRIBUTE_NAME

        if key == 'Unknown_Attribute':
            key = "Smart_Attribute_ID_%d" % id

        keyval[key] = field[7] # RAW_VALUE

        # check for failing attribute
        if field[6] != '-':
            fail += [key]

    if len(keyval) == 0:
        raise error.TestFail(
            'Test failed with error: Can not parse smartctl keyval')

    if len(fail) > 0:
        keyval['fail'] = fail

    keyval['exit_status'] = exit_status
    keyval['device_model'] = model
    self.write_perf_keyval(keyval)
def run_once(self, dev='', quicktest=False, requirements=None,
             integrity=False, wait=60 * 60 * 72):
    """
    Runs several fio jobs and reports results.

    @param dev: block device to test
    @param quicktest: short test
    @param requirements: list of jobs for fio to run
    @param integrity: test to check data integrity
    @param wait: seconds to wait between a write and subsequent verify
    """
    # Select a canned job list only when the caller did not supply one.
    if requirements is None:
        if quicktest:
            requirements = [
                ('1m_write', []),
                ('16k_read', [])
            ]
        elif integrity:
            requirements = [
                ('8k_async_randwrite', []),
                ('8k_async_randwrite', [self.VERIFY_OPTION])
            ]
        elif dev in ['', utils.get_root_device()]:
            # Full suite for the internal/boot disk.
            requirements = [
                ('surfing', []),
                ('boot', []),
                ('login', []),
                ('seq_read', []),
                ('seq_write', []),
                ('16k_read', []),
                ('16k_write', []),
                ('1m_stress', []),
            ]
        else:
            # TODO(waihong@): Add more test cases for external storage
            requirements = [
                ('seq_read', []),
                ('seq_write', []),
                ('16k_read', []),
                ('16k_write', []),
                ('1m_stress', []),
            ]

    results = {}
    for job, options in requirements:
        # Keys are labeled according to the test case name, which is
        # unique per run, so they cannot clash
        self.__verify_only = self.VERIFY_OPTION in options
        if self.__verify_only:
            # Let writes age before re-reading them for verification.
            time.sleep(wait)

        env_vars = ' '.join(
            ['FILENAME=' + self.__filename,
             'FILESIZE=' + str(self.__filesize),
             'VERIFY_ONLY=' + str(int(self.__verify_only))
            ])
        job_file = os.path.join(self.bindir, job)
        results.update(fio_util.fio_runner(self, job_file, env_vars))

    # Output keys relevant to the performance, larger filesize will run
    # slower, and sda5 should be slightly slower than sda3 on a rotational
    # disk
    self.write_test_keyval({'filesize': self.__filesize,
                            'filename': self.__filename,
                            'device': self.__description})
    logging.info('Device Description: %s', self.__description)
    self.write_perf_keyval(results)

    # Any '*_error' keyval reports fio verification failures.
    for k, v in results.iteritems():
        if k.endswith('_error'):
            self._fail_count += int(v)
    if self._fail_count > 0:
        raise error.TestFail('%s failed verifications'
                             % str(self._fail_count))