def compare_capabilities(cache_device, core_device, cache, core, msg):
    """Check that the CAS device exposes queue capabilities consistent with
    its backing cache and core devices.

    When ``core`` is None the core could not be attached; in that case only
    the expected sector-size-mismatch message in ``msg`` is validated.
    The cache is stopped before returning on every path.
    """
    if core is None:
        # Attach failed as expected - confirm the mismatch message and bail out.
        cli_messages.check_stderr_msg(msg,
                                      cli_messages.try_add_core_sector_size_mismatch)
        cache.stop()
        return

    def _sectors(device):
        # Number of sectors = device size divided by its block size.
        dev_id = device.get_device_id()
        return disk_utils.get_size(dev_id) / disk_utils.get_block_size(dev_id)

    if _sectors(core_device) != _sectors(core):
        TestRun.LOGGER.error(
            "Number of sectors in CAS device and attached core device is different.")
        cache.stop()
        return

    cas_caps = measure_capabilities(core)
    cache_caps = measure_capabilities(cache_device)
    core_caps = measure_capabilities(core_device)

    for capability, combine in capabilities.items():
        cas_val = cas_caps[capability]
        cache_val = cache_caps[capability]
        core_val = core_caps[capability]

        # Some capabilities are derived from both devices via a combining
        # method; the rest are inherited directly from the core device.
        expected = core_val if combine is None else combine(core_val, cache_val)

        if expected != cas_val:
            TestRun.LOGGER.error(f"Cas device {capability} is not set properly. Is: {cas_val}, "
                                 f"should be {expected} (cache: {cache_val}, "
                                 f"core: {core_val})")
        else:
            TestRun.LOGGER.info(f"Cas device {capability} has proper value: {cas_val} "
                                f"(cache: {cache_val}, core: {core_val})")
    cache.stop()
# Example no. 2 (score: 0)
def compare_capabilities(cache_device, core_device, cache, core, msg):
    """Check that the CAS device exposes queue capabilities consistent with
    its backing cache and core devices.

    When ``core`` is None the core could not be attached; in that case only
    the expected sector-size-mismatch message in ``msg`` is validated.
    The cache is stopped before returning on every path.
    """
    # 1 << SECTOR_SHIFT == 512, the kernel's base sector size; loop-invariant,
    # so defined once here instead of inside the capability loop.
    SECTOR_SHIFT = 9
    if core is None:
        cli_messages.check_stderr_msg(
            msg, cli_messages.try_add_core_sector_size_mismatch)
    else:
        core_dev_sectors_num = \
            disk_utils.get_size(core_device.get_device_id()) / disk_utils.get_block_size(
                core_device.get_device_id())
        core_sectors_num = disk_utils.get_size(
            core.get_device_id()) / disk_utils.get_block_size(
                core.get_device_id())
        if core_dev_sectors_num != core_sectors_num:
            TestRun.LOGGER.error(
                "Number of sectors in CAS device and attached core device is different."
            )
            cache.stop()
            return
        cas_capabilities = measure_capabilities(core)
        cache_dev_capabilities = measure_capabilities(cache_device)
        core_dev_capabilities = measure_capabilities(core_device)

        for (capability, method) in capabilities.items():
            cas_val = cas_capabilities[capability]
            cache_val = cache_dev_capabilities[capability]
            core_val = core_dev_capabilities[capability]

            # Some capabilities are derived from both devices via a combining
            # method; the rest are inherited directly from the core device.
            expected_val = method(
                core_val, cache_val) if method is not None else core_val

            if capability in ["max_sectors_kb", "max_hw_sectors_kb"
                              ] and expected_val != cas_val:
                # On the newer kernels this trait is rounded. Instead of checking for
                # the current kernel version, assume that both values are acceptable.
                # Reuse the CAS capabilities measured above rather than querying
                # the device again for every mismatching capability.
                lbs = cas_capabilities["logical_block_size"]
                # The original unit is kb, but number of sectors is needed
                new_expected_val = expected_val * 2
                round_val = lbs >> SECTOR_SHIFT
                new_expected_val -= new_expected_val % round_val
                # Restore the original unit
                expected_val = new_expected_val // 2

            if expected_val != cas_val:
                TestRun.LOGGER.error(
                    f"Cas device {capability} is not set properly. Is: {cas_val}, "
                    f"should be {expected_val} (cache: {cache_val}, "
                    f"core: {core_val})")
                continue
            TestRun.LOGGER.info(
                f"Cas device {capability} has proper value: {cas_val} "
                f"(cache: {cache_val}, core: {core_val})")
    cache.stop()
# Example no. 3 (score: 0)
def discover_ssd_devices(block_devices, devices_res):
    """Detect Intel SSDs via the isdct tool and append their metadata to
    ``devices_res``.

    Each matched device is removed from ``block_devices`` so subsequent
    discovery stages do not classify it again.
    """
    count_output = TestRun.executor.run_expect_success(
        'isdct show -intelssd | grep DevicePath | wc -l').stdout
    for index in range(int(count_output)):
        device_path = TestRun.executor.run_expect_success(
            f"isdct show -intelssd {index} | grep DevicePath").stdout.split()[2]
        dev = device_path.replace("/dev/", "")
        if dev not in block_devices:
            continue
        serial_number = TestRun.executor.run_expect_success(
            f"isdct show -intelssd {index} | grep SerialNumber").stdout.split()[2].strip()
        if 'nvme' in device_path:
            # NVMe device: distinguish Optane from regular NAND media.
            optane_check = TestRun.executor.run(
                f"isdct show -intelssd {index} | grep Optane")
            disk_type = 'optane' if optane_check.exit_code == 0 else 'nand'
        else:
            disk_type = 'sata'
            # SATA devices are located by serial number, not by isdct path.
            dev = find_sata_ssd_device_path(serial_number, block_devices)
            if dev is None:
                continue
            if "sg" in device_path:
                device_path = f"{dev}"

        devices_res.append({
            "type": disk_type,
            "path": resolve_to_by_id_link(device_path),
            "serial": serial_number,
            "blocksize": disk_utils.get_block_size(dev),
            "size": disk_utils.get_size(dev),
        })
        block_devices.remove(dev)
# Example no. 4 (score: 0)
def discover_ssd_devices(block_devices, devices_res):
    """Detect Intel SSDs via the isdct tool and append their metadata to
    ``devices_res``.

    Matched devices are removed from ``block_devices`` so subsequent
    discovery stages do not classify them again.
    """
    ssd_count = int(get_command_output('isdct show -intelssd | grep DevicePath | wc -l'))
    for i in range(0, ssd_count):
        device_path = get_command_output(f"isdct show -intelssd {i} | grep DevicePath").split()[2]
        dev = device_path.replace('/dev/', '')
        serial_number = get_command_output(
            f"isdct show -intelssd {i} | grep SerialNumber").split()[2].strip()
        if 'nvme' not in device_path:
            disk_type = 'sata'
            # SATA devices are located by serial number, not by isdct path.
            dev = find_sata_ssd_device_path(serial_number, block_devices)
            if dev is None:
                continue
            if "sg" in device_path:
                device_path = f"/dev/{dev}"
        elif TestProperties.executor.execute(
                f"isdct show -intelssd {i} | grep Optane").exit_code == 0:
            disk_type = 'optane'
        else:
            disk_type = 'nand'

        devices_res.append({
            "type": disk_type,
            "path": device_path,
            "serial": serial_number,
            "blocksize": disk_utils.get_block_size(dev),
            "size": disk_utils.get_size(dev)})
        # An NVMe device name may not be present in block_devices (e.g. already
        # consumed by an earlier discovery pass); an unguarded remove() would
        # raise ValueError and abort the whole discovery loop.
        if dev in block_devices:
            block_devices.remove(dev)
def prepare_cas_device(cache_device, core_device):
    """Start a cache on ``cache_device`` and try to add ``core_device`` as core.

    Returns a ``(cache, core, output)`` triple:
      * on success: ``(cache, core, None)``;
      * on an expected add-core failure caused by a logical sector size
        mismatch: ``(cache, None, <command output>)``.

    Fails the test (via TestRun.fail) when add-core fails even though the
    cache device's block size does not exceed the core device's.
    """
    cache = casadm.start_cache(cache_device, cache_line_size=CacheLineSize.LINE_64KiB, force=True)
    # Query block sizes before entering the try block: they are referenced in
    # the except handler, where they would be unbound (NameError) if the
    # query itself were the statement that raised CmdException.
    cache_dev_bs = disk_utils.get_block_size(cache_device.get_device_id())
    core_dev_bs = disk_utils.get_block_size(core_device.get_device_id())
    try:
        core = cache.add_core(core_device)
        if cache_dev_bs > core_dev_bs:
            TestRun.LOGGER.error(
                f"CAS device started with cache device logical block size ({cache_dev_bs}) "
                f"greater than core device logical block size ({core_dev_bs})")
        return cache, core, None
    except CmdException as e:
        if cache_dev_bs <= core_dev_bs:
            TestRun.fail("Failed to create CAS device.")
        TestRun.LOGGER.info("Cannot add core device with mismatching logical sector size. "
                            "Check output instead of capabilities.")
        return cache, None, e.output
# Example no. 6 (score: 0)
def discover_hdd_devices(block_devices, devices_res):
    """Record every remaining block device as an HDD in ``devices_res``.

    Devices reporting a 4096-byte block size are labelled 'hdd4k', the rest
    'hdd'. All entries are consumed, so ``block_devices`` is cleared at the end.
    """
    for dev in block_devices:
        bs = disk_utils.get_block_size(dev)
        serial_line = get_command_output(
            f"sg_inq /dev/{dev} | grep 'Unit serial number'")
        entry = {
            "type": 'hdd4k' if int(bs) == 4096 else 'hdd',
            "path": f"/dev/{dev}",
            "serial": serial_line.split(': ')[1].strip(),
            "blocksize": bs,
            "size": disk_utils.get_size(dev),
        }
        devices_res.append(entry)
    block_devices.clear()
# Example no. 7 (score: 0)
def discover_hdd_devices(block_devices, devices_res):
    """Record every remaining block device as an HDD in ``devices_res``.

    Devices reporting a 4096-byte block size are labelled 'hdd4k', the rest
    'hdd'. All entries are consumed, so ``block_devices`` is cleared at the end.
    """
    for dev in block_devices:
        bs = disk_utils.get_block_size(dev)
        disk_type = 'hdd4k' if int(bs) == 4096 else 'hdd'
        serial = TestRun.executor.run_expect_success(
            f"sg_inq /dev/{dev} | grep -i 'serial number'").stdout.split(': ')[1].strip()
        devices_res.append({
            "type": disk_type,
            "path": f"{resolve_to_by_id_link(dev)}",
            "serial": serial,
            "blocksize": bs,
            "size": disk_utils.get_size(dev),
        })
    block_devices.clear()
# Example no. 8 (score: 0)
def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
    """
        title: Test verifying if trim requests do not cause eviction on CAS device.
        description: |
          When trim requests enabled and files are being added and removed from CAS device,
          there is no eviction (no reads from cache).
        pass_criteria:
          - Reads from cache device are the same before and after removing test file.
    """
    mount_point = "/mnt"
    test_file_path = os.path.join(mount_point, "test_file")

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

        # Block size of the whole cache disk; used later to scale the iostat
        # sector-read deltas for comparison against io-class statistics.
        cache_block_size = disk_utils.get_block_size(cache_disk)

    with TestRun.step("Start cache on device supporting trim and add core."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode,
                                   cache_line_size,
                                   force=True)
        cache.set_cleaning_policy(cleaning)
        # NOTE(review): udev is disabled before adding the core, presumably to
        # keep background device events from generating I/O that would skew
        # the read counters measured below - confirm against sibling tests.
        Udev.disable()
        core = cache.add_core(core_dev)

    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(filesystem)
        # Mount with "discard" so removing files sends trim requests to CAS.
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create ioclass config."):
        # Io class 1 matches filesystem metadata with no cache allocation
        # ("0.00"); this lets metadata reads be counted separately from data
        # reads (which stay in the default class, id 0).
        ioclass_config.create_ioclass_config()
        ioclass_config.add_ioclass(ioclass_id=1,
                                   eviction_priority=1,
                                   allocation="0.00",
                                   rule=f"metadata")
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config.default_config_file_path)

    with TestRun.step("Create random file using ddrescue."):
        test_file = create_file_with_ddrescue(core_dev, test_file_path)
        os_utils.sync()
        os_utils.drop_caches()
        # Give the io classifier time to account the freshly written blocks.
        time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)

    with TestRun.step("Remove file and create a new one."):
        # Snapshot all read counters before the remove/recreate cycle.
        cache_iostats_before = cache_dev.get_io_stats()
        data_reads_before = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_before = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        test_file.remove()
        os_utils.sync()
        os_utils.drop_caches()
        create_file_with_ddrescue(core_dev, test_file_path)
        os_utils.sync()
        os_utils.drop_caches()
        time.sleep(ioclass_config.MAX_CLASSIFICATION_DELAY.seconds)

    with TestRun.step(
            "Check using iostat that reads from cache did not occur."):
        cache_iostats_after = cache_dev.get_io_stats()
        data_reads_after = cache.get_io_class_statistics(
            io_class_id=0).block_stats.cache.reads
        metadata_reads_after = cache.get_io_class_statistics(
            io_class_id=1).block_stats.cache.reads
        reads_before = cache_iostats_before.sectors_read
        reads_after = cache_iostats_after.sectors_read

        metadata_reads_diff = metadata_reads_after - metadata_reads_before
        data_reads_diff = data_reads_after - data_reads_before
        # NOTE(review): scales the sector delta by the disk block size -
        # assumes sectors_read is reported in device-block units rather than
        # the traditional 512-byte iostat sectors; confirm against get_io_stats.
        iostat_diff = (reads_after - reads_before) * cache_block_size

        # Metadata reads (filesystem bookkeeping) are tolerated; any data
        # reads from cache, or raw device reads beyond the metadata amount,
        # indicate eviction happened and the test fails.
        if iostat_diff > int(metadata_reads_diff) or int(data_reads_diff) > 0:
            TestRun.fail(
                f"Number of reads from cache before and after removing test file "
                f"differs. Sectors read before: {reads_before}, sectors read after: {reads_after}."
                f"Data read from cache before {data_reads_before}, after {data_reads_after}."
                f"Metadata read from cache before {metadata_reads_before}, "
                f"after {metadata_reads_after}.")
        else:
            TestRun.LOGGER.info(
                "Number of reads from cache before and after removing test file is the same."
            )