def _get_partition_layout(self):
    """Determine the partition layout of the root storage device.

    @return dev     - name of the device hosting the partition
            kernelA - partition used to boot kernel
            rootfsA - partition of current root file system
            kernelB - backup copy of kernel
            rootfsB - backup copy of root file system
    """
    # Ask the DUT which partition it is currently rooted on.
    # TODO(crbug.com/226082)
    rootdev_result = self.client.run('rootdev -s')
    logging.info('Root partition %s', rootdev_result.stdout)
    root_part = rootdev_result.stdout.strip()

    # MMC devices name partitions like /dev/mmcblk0p3 (strip the
    # trailing 'p3'); other devices look like /dev/sda3 (strip '3').
    suffix_len = 2 if os.path.basename(root_part).startswith('mmc') else 1
    base_dev = root_part[:-suffix_len]

    return (base_dev,
            utils.get_kernel_partition(root_part),
            root_part,
            utils.get_free_kernel_partition(root_part),
            utils.get_free_root_partition(root_part))
def run_once(self, iters=1, tmout=60 * 60):
    """Run badblocks repeatedly on the spare root partition.

    @param iters: Number of times to run badblocks.
    @param tmout: Time allowed badblocks to run before killing it.
                  (Default time is 60 minutes.)

    @raise error.TestFail: if any badblocks run reported bad blocks
           (i.e. fewer passing runs than iterations).
    """
    # Log starting message.  (Fixed typo: 'Statring' -> 'Starting'.)
    logging.info('Starting hardware_Badblocks Test.')
    logging.info('Iterations: %d', iters)
    logging.info('badblocks Timeout (sec): %d', tmout)

    # Determine which device and partition to use.
    logging.info('Determine unused root partition to test on:')
    dev = utils.get_free_root_partition()
    logging.info('Testing on %s', dev)

    # Get block device's sector size.
    logging.info('Determine block device sector size:')
    sector_size = self._get_sector_size(utils.get_root_device())
    # NOTE(review): sector_size is formatted as a string here and passed
    # straight through to _run_badblocks — presumably a string; confirm.
    logging.info('Sector size (bytes): %s', sector_size)

    # Get partition size.
    logging.info('Determine partition size:')
    part_size = utils.get_disk_size(dev)
    logging.info('Partition size (bytes): %s', part_size)

    # Run badblocks the requested number of times; pass/fail counters
    # are accumulated by _run_badblocks.
    for i in range(iters):
        logging.info('Starting iteration %d', i)
        self._run_badblocks(dev, sector_size, tmout)

    # Report statistics.
    logging.info('Total pass: %d', self._pass_count)
    logging.info('Total fail: %d', self._fail_count)
    stats = {}
    stats['ea_badblocks_runs'] = iters
    stats['ea_passed_count'] = self._pass_count
    stats['ea_failed_count'] = self._fail_count
    stats['sec_longest_run'] = self._longest_runtime
    # TODO: change write_perf_keyval() to output_perf_value() as soon as
    # autotest is ready for it.
    self.write_perf_keyval(stats)

    # Report test pass/fail: every iteration must have passed.
    if self._pass_count != iters:
        raise error.TestFail('One or more runs found bad blocks on'
                             ' storage device.')
def initialize(self, dev='', filesize=DEFAULT_FILE_SIZE):
    """Set up local variables describing the target to test.

    @param dev: block device / file to test.
                Spare partition on root device by default
    @param filesize: size of the file. 0 means whole partition.
                     by default, 1GB.

    @raise error.TestError: if filesize is 0 for a plain file target,
           or if the whole root device is requested.
    """
    # Case 1: dev names an existing plain file, or a path that does not
    # exist yet (to be created) — test a file, not a block device.
    if dev != '' and (os.path.isfile(dev) or not os.path.exists(dev)):
        if filesize == 0:
            raise error.TestError(
                'Nonzero file size is required to test file systems')
        self.__filename = dev
        self.__filesize = filesize
        self.__description = ''
        # NOTE(review): this early return skips __get_disk_size(),
        # __get_device_description() and the __verify_only assignment
        # below — confirm callers tolerate __verify_only being unset.
        return

    # Case 2: no device given — fall back to the fixed destination drive.
    if not dev:
        dev = utils.get_fixed_dst_drive()

    if dev == utils.get_root_device():
        # Never scribble over the whole root device; use its spare
        # root partition instead.
        if filesize == 0:
            raise error.TestError(
                'Using the root device as a whole is not allowed')
        else:
            self.__filename = utils.get_free_root_partition()
    elif filesize != 0:
        # Use the first partition of the external drive
        # (dev[5:7] checks the name after '/dev/': 'sdX' vs e.g.
        # 'mmcblk0', which needs a 'p' before the partition number).
        if dev[5:7] == 'sd':
            self.__filename = dev + '1'
        else:
            self.__filename = dev + 'p1'
    else:
        # filesize == 0: test the given device as a whole.
        self.__filename = dev
    # These helpers populate __filesize / __description from the device.
    self.__get_disk_size()
    self.__get_device_description()

    # Restrict test to use a given file size, default 1GiB
    if filesize != 0:
        self.__filesize = min(self.__filesize, filesize)

    self.__verify_only = False
    logging.info('filename: %s', self.__filename)
    logging.info('filesize: %d', self.__filesize)
def run_once(self, filename=None, file_size=FILE_SIZE,
             chunk_size=CHUNK_SIZE, trim_ratio=TRIM_RATIO):
    """Executes the trim-integrity test and logs the output.

    @param filename: file/disk name to test
                     default: spare partition of internal disk
    @param file_size: size of data to test, in bytes. default: 1GB
                      0 means the whole device/partition.
    @param chunk_size: size of chunk to calculate hash/trim. default: 64KB
    @param trim_ratio: list of ratio of file size to trim data
                       default: [0, 0.25, 0.5, 0.75, 1]

    @raise error.TestError: if the target media reports zero size.
    @raise error.TestFail: if untrimmed data fails to verify, or trimmed
           data is not zeroed on a disk claiming RZAT.
    @raise error.TestNAError: if trimmed data is not zeroed and the disk
           makes no deterministic-read-zero claim.
    """
    # Resolve the file/partition to test and its backing disk.
    if not filename:
        self._diskname = utils.get_fixed_dst_drive()
        if self._diskname == utils.get_root_device():
            self._filename = utils.get_free_root_partition()
        else:
            self._filename = self._diskname
    else:
        self._filename = filename
        self._diskname = utils.get_disk_from_filename(filename)

    if file_size == 0:
        fulldisk = True
        file_size = utils.get_disk_size(self._filename)
        if file_size == 0:
            cmd = ('%s seem to have 0 storage block. Is the media present?'
                   % filename)
            raise error.TestError(cmd)
    else:
        fulldisk = False

    # Make file size multiple of 4 * chunk size
    file_size -= file_size % (4 * chunk_size)

    # fio uses 0 to mean "whole device".
    if fulldisk:
        fio_file_size = 0
    else:
        fio_file_size = file_size

    logging.info('filename: %s, filesize: %d', self._filename, file_size)

    self._verify_trim_support(chunk_size)

    # Calculate hash value for zero'ed and one'ed data
    cmd = ('dd if=/dev/zero bs=%d count=1 | %s'
           % (chunk_size, self.HASH_CMD))
    zero_hash = utils.run(cmd).stdout.strip()

    cmd = ("dd if=/dev/zero bs=%d count=1 | tr '\\0' '\\xff' | %s"
           % (chunk_size, self.HASH_CMD))
    one_hash = utils.run(cmd).stdout.strip()

    trim_hash = ""

    # Write random data to disk.  Floor division keeps chunk_count an
    # int on both Python 2 and 3 (plain '/' yields a float on Python 3,
    # which would break range() and the %d format below).
    chunk_count = file_size // chunk_size
    cmd = ('dd if=/dev/urandom of=%s bs=%d count=%d oflag=direct'
           % (self._filename, chunk_size, chunk_count))
    utils.run(cmd)
    ref_hash = self._get_hash(chunk_count, chunk_size)

    # Check read speed/latency when reading real data.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      filesize=fio_file_size,
                      requirements=[('4k_read_qd32', [])],
                      tag='before_trim')

    # Generate random order of chunk to trim
    trim_order = list(range(0, chunk_count))
    random.shuffle(trim_order)
    trim_status = [False] * chunk_count

    # Init stat variable
    data_verify_count = 0
    data_verify_match = 0
    trim_verify_count = 0
    trim_verify_zero = 0
    trim_verify_one = 0
    trim_verify_non_delete = 0
    trim_deterministic = True

    last_ratio = 0
    for ratio in trim_ratio:
        # Trim the chunks between the previous cumulative ratio and
        # this one, in the pre-shuffled random order.
        begin_trim_chunk = int(last_ratio * chunk_count)
        end_trim_chunk = int(ratio * chunk_count)
        # 0o666 is the portable octal literal; the original '0666' is a
        # syntax error on Python 3.
        fd = os.open(self._filename, os.O_RDWR, 0o666)
        for chunk in trim_order[begin_trim_chunk:end_trim_chunk]:
            self._do_trim(fd, chunk * chunk_size, chunk_size)
            trim_status[chunk] = True
        os.close(fd)

        last_ratio = ratio
        cur_hash = self._get_hash(chunk_count, chunk_size)

        trim_verify_count += int(ratio * chunk_count)
        data_verify_count += chunk_count - int(ratio * chunk_count)

        # Verify hash: trimmed chunks should read back all-zero (or at
        # least deterministically); untrimmed chunks must be unchanged.
        for cur, ref, trim in zip(cur_hash, ref_hash, trim_status):
            if trim:
                if not trim_hash:
                    trim_hash = cur
                elif cur != trim_hash:
                    trim_deterministic = False

                if cur == zero_hash:
                    trim_verify_zero += 1
                elif cur == one_hash:
                    trim_verify_one += 1
                elif cur == ref:
                    trim_verify_non_delete += 1
            else:
                if cur == ref:
                    data_verify_match += 1

    keyval = dict()
    keyval['data_verify_count'] = data_verify_count
    keyval['data_verify_match'] = data_verify_match
    keyval['trim_verify_count'] = trim_verify_count
    keyval['trim_verify_zero'] = trim_verify_zero
    keyval['trim_verify_one'] = trim_verify_one
    keyval['trim_verify_non_delete'] = trim_verify_non_delete
    keyval['trim_deterministic'] = trim_deterministic
    self.write_perf_keyval(keyval)

    # Check read speed/latency when reading from trimmed data.
    self.job.run_test('hardware_StorageFio',
                      disable_sysinfo=True,
                      filesize=fio_file_size,
                      requirements=[('4k_read_qd32', [])],
                      tag='after_trim')

    if data_verify_match < data_verify_count:
        reason = 'Fail to verify untrimmed data.'
        msg = utils.get_storage_error_msg(self._diskname, reason)
        raise error.TestFail(msg)

    if trim_verify_zero < trim_verify_count:
        reason = 'Trimmed data are not zeroed.'
        msg = utils.get_storage_error_msg(self._diskname, reason)
        if utils.is_disk_scsi(self._diskname):
            if utils.verify_hdparm_feature(self._diskname,
                                           self.hdparm_rzat):
                msg += ' Disk claim deterministic read zero after trim.'
                raise error.TestFail(msg)
        # No deterministic-zero claim: not applicable rather than a fail.
        raise error.TestNAError(msg)