Example #1
    def __get_disk_size(self):
        """Return the size in bytes of the device pointed to by __filename"""
        self.__filesize = utils.get_disk_size(self.__filename)

        if not self.__filesize:
            raise error.TestNAError(
                'Unable to find the partition %s, please plug in a USB '
                'flash drive and a SD card for testing external storage' %
                self.__filename)
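
These examples all call utils.get_disk_size(). As a rough illustration only, a helper with the same contract might look like the sketch below, assuming 'blockdev --getsize64' is available; the real autotest utility may be implemented differently.

import subprocess

def get_disk_size(device):
    """Return the size in bytes of a block device, or 0 on failure.

    Hypothetical stand-in for autotest's utils.get_disk_size(); assumes
    'blockdev --getsize64' exists and the caller may read the device.
    """
    try:
        out = subprocess.check_output(['blockdev', '--getsize64', device])
        return int(out.strip())
    except (OSError, subprocess.CalledProcessError, ValueError):
        return 0
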
    def run_once(self, iters=1, tmout=60 * 60):
        """
        Executes test.

        @param iters: Number of times to run badblocks.
        @param tmout: Seconds badblocks is allowed to run before it is
                      killed (default: 60 * 60 seconds, i.e. 60 minutes).

        """

        # Log starting message.
        logging.info('Starting hardware_Badblocks Test.')
        logging.info('Iterations: %d', iters)
        logging.info('badblocks Timeout (sec): %d', tmout)

        # Determine which device and partition to use.
        logging.info('Determine unused root partition to test on:')
        dev = site_utils.get_free_root_partition()
        logging.info('Testing on %s', dev)

        # Get block device's sector size.
        logging.info('Determine block device sector size:')
        sector_size = self._get_sector_size(site_utils.get_root_device())
        logging.info('Sector size (bytes): %s', sector_size)

        # Get partition size.
        logging.info('Determine partition size:')
        part_size = utils.get_disk_size(dev)
        logging.info('Partition size (bytes): %s', part_size)

        # Run badblocks.
        for i in range(iters):
            logging.info('Starting iteration %d', i)
            self._run_badblocks(dev, sector_size, tmout)

        # Report statistics.
        logging.info('Total pass: %d', self._pass_count)
        logging.info('Total fail: %d', self._fail_count)
        stats = {}
        stats['ea_badblocks_runs'] = iters
        stats['ea_passed_count'] = self._pass_count
        stats['ea_failed_count'] = self._fail_count
        stats['sec_longest_run'] = self._longest_runtime
        # TODO: change write_perf_keyval() to output_perf_value() as soon as
        # autotest is ready for it.
        self.write_perf_keyval(stats)

        # Report test pass/fail.
        if self._pass_count != iters:
            raise error.TestFail('One or more runs found bad blocks on'
                                 ' storage device.')
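
_run_badblocks() is not shown in this excerpt. A minimal sketch of the kind of work it has to do, running read-only badblocks(8) with the given block size and killing it after tmout seconds, might look like the following; the function name, flags, and pass criterion here are assumptions, not the test's actual code.

import subprocess
import time

def run_badblocks_once(dev, sector_size, tmout):
    """Run read-only badblocks(8) on dev, killing it after tmout seconds.

    Illustrative sketch only. Returns (passed, runtime_sec); badblocks
    prints each bad block it finds, so empty output plus exit code 0 is
    treated as a pass.
    """
    cmd = ['badblocks', '-b', str(sector_size), dev]
    start = time.time()
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    while proc.poll() is None:
        if time.time() - start > tmout:
            proc.kill()
            proc.wait()
            return False, time.time() - start
        time.sleep(1)
    out, _ = proc.communicate()
    runtime = time.time() - start
    passed = (proc.returncode == 0 and not out.strip())
    return passed, runtime
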
    def run_once(self):
        root_dev = site_utils.get_root_device()
        self._device = os.path.basename(root_dev)
        disk_size = utils.get_disk_size(root_dev)
        if not disk_size:
            raise error.TestError('Unable to determine main disk size')

        # Capacity of a hard disk is quoted with SI prefixes, incrementing by
        # powers of 1000, instead of powers of 1024.
        gb = float(disk_size) / (10 ** 9)

        self.write_perf_keyval({"gb_main_disk_size": gb})
        min_gb = self._compute_min_gb()
        logging.info("DiskSize: %.3f GB MinDiskSize: %.3f GB", gb, min_gb)
        if gb < min_gb:
            raise error.TestError(
                    "DiskSize %.3f GB below minimum (%.3f GB)" % (gb, min_gb))
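
The SI comment above is why a drive marketed as, say, 32 GB reports roughly 32 * 10^9 bytes rather than 32 * 2^30. A quick check of the conversion used above; the 16 GB minimum here is a made-up value, not what _compute_min_gb() returns.

disk_size_bytes = 32 * 10 ** 9                # nominal "32 GB" drive
gb = float(disk_size_bytes) / (10 ** 9)       # 32.000 GB (SI, powers of 1000)
gib = float(disk_size_bytes) / (2 ** 30)      # ~29.802 GiB (powers of 1024)
assert gb >= 16.0                             # hypothetical min_gb of 16
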
Example #4
    def run_once(self,
                 filename=None,
                 file_size=FILE_SIZE,
                 chunk_size=CHUNK_SIZE,
                 trim_ratio=TRIM_RATIO):
        """
        Executes the test and logs the output.
        @param filename:   file/disk name to test
                           default: spare partition of internal disk
        @param file_size:  size of data to test. default: 1GB
        @param chunk_size: size of chunk to calculate hash/trim. default: 64KB
        @param trim_ratio: list of ratio of file size to trim data
                           default: [0, 0.25, 0.5, 0.75, 1]
        """

        if not filename:
            self._diskname = utils.get_fixed_dst_drive()
            if self._diskname == utils.get_root_device():
                self._filename = utils.get_free_root_partition()
            else:
                self._filename = self._diskname
        else:
            self._filename = filename
            self._diskname = utils.get_disk_from_filename(filename)

        if file_size == 0:
            fulldisk = True
            file_size = utils.get_disk_size(self._filename)
            if file_size == 0:
                msg = ('%s seems to have 0 storage blocks. '
                       'Is the media present?' % filename)
                raise error.TestError(msg)
        else:
            fulldisk = False

        # Make file size multiple of 4 * chunk size
        file_size -= file_size % (4 * chunk_size)

        if fulldisk:
            fio_file_size = 0
        else:
            fio_file_size = file_size

        logging.info('filename: %s, filesize: %d', self._filename, file_size)

        self._verify_trim_support(chunk_size)

        # Calculate hash values for an all-zero chunk and an all-one chunk
        cmd = str('dd if=/dev/zero bs=%d count=1 | %s' %
                  (chunk_size, self.HASH_CMD))
        zero_hash = utils.run(cmd).stdout.strip()

        cmd = str("dd if=/dev/zero bs=%d count=1 | tr '\\0' '\\xff' | %s" %
                  (chunk_size, self.HASH_CMD))
        one_hash = utils.run(cmd).stdout.strip()

        trim_hash = ""

        # Write random data to disk
        chunk_count = file_size // chunk_size  # integer count for range()/dd
        cmd = str('dd if=/dev/urandom of=%s bs=%d count=%d oflag=direct' %
                  (self._filename, chunk_size, chunk_count))
        utils.run(cmd)

        ref_hash = self._get_hash(chunk_count, chunk_size)

        # Check read speed/latency when reading real data.
        self.job.run_test('hardware_StorageFio',
                          disable_sysinfo=True,
                          filesize=fio_file_size,
                          requirements=[('4k_read_qd32', [])],
                          tag='before_trim')

        # Generate random order of chunk to trim
        trim_order = list(range(0, chunk_count))
        random.shuffle(trim_order)
        trim_status = [False] * chunk_count

        # Init stat variable
        data_verify_count = 0
        data_verify_match = 0
        trim_verify_count = 0
        trim_verify_zero = 0
        trim_verify_one = 0
        trim_verify_non_delete = 0
        trim_deterministic = True

        last_ratio = 0
        for ratio in trim_ratio:

            # Do trim
            begin_trim_chunk = int(last_ratio * chunk_count)
            end_trim_chunk = int(ratio * chunk_count)
            fd = os.open(self._filename, os.O_RDWR, 0o666)
            for chunk in trim_order[begin_trim_chunk:end_trim_chunk]:
                self._do_trim(fd, chunk * chunk_size, chunk_size)
                trim_status[chunk] = True
            os.close(fd)
            last_ratio = ratio

            cur_hash = self._get_hash(chunk_count, chunk_size)

            trim_verify_count += int(ratio * chunk_count)
            data_verify_count += chunk_count - int(ratio * chunk_count)

            # Verify hash
            for cur, ref, trim in zip(cur_hash, ref_hash, trim_status):
                if trim:
                    if not trim_hash:
                        trim_hash = cur
                    elif cur != trim_hash:
                        trim_deterministic = False

                    if cur == zero_hash:
                        trim_verify_zero += 1
                    elif cur == one_hash:
                        trim_verify_one += 1
                    elif cur == ref:
                        trim_verify_non_delete += 1
                else:
                    if cur == ref:
                        data_verify_match += 1

        keyval = dict()
        keyval['data_verify_count'] = data_verify_count
        keyval['data_verify_match'] = data_verify_match
        keyval['trim_verify_count'] = trim_verify_count
        keyval['trim_verify_zero'] = trim_verify_zero
        keyval['trim_verify_one'] = trim_verify_one
        keyval['trim_verify_non_delete'] = trim_verify_non_delete
        keyval['trim_deterministic'] = trim_deterministic
        self.write_perf_keyval(keyval)

        # Check read speed/latency when reading from trimmed data.
        self.job.run_test('hardware_StorageFio',
                          disable_sysinfo=True,
                          filesize=fio_file_size,
                          requirements=[('4k_read_qd32', [])],
                          tag='after_trim')

        if data_verify_match < data_verify_count:
            reason = 'Failed to verify untrimmed data.'
            msg = utils.get_storage_error_msg(self._diskname, reason)
            raise error.TestFail(msg)

        if trim_verify_zero < trim_verify_count:
            reason = 'Trimmed data are not zeroed.'
            msg = utils.get_storage_error_msg(self._diskname, reason)
            if utils.is_disk_scsi(self._diskname):
                if utils.verify_hdparm_feature(self._diskname,
                                               self.hdparm_rzat):
                    msg += ' Disk claims deterministic read zero after trim (RZAT).'
                    raise error.TestFail(msg)
            raise error.TestNAError(msg)
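
_do_trim() itself is not part of this excerpt. On Linux, a chunk of an open block device can be discarded from user space with the BLKDISCARD ioctl; below is a minimal sketch, assuming that is roughly what the test does. The real _do_trim() may instead use a different ioctl such as BLKSECDISCARD or BLKZEROOUT depending on what the device supports.

import fcntl
import struct

BLKDISCARD = 0x1277  # _IO(0x12, 119): discard a byte range on a block device

def do_trim(fd, offset, size):
    """Discard (trim) size bytes starting at offset of an open block device.

    fd must refer to the raw device node opened read/write (see the
    os.open() call in the example above); it will not work on a plain file.
    """
    fcntl.ioctl(fd, BLKDISCARD, struct.pack('QQ', offset, size))
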