def test_remove_detached_cores():
    """
        title: Validate removing core devices from core pool.
        description: Validate that it is possible to remove core devices from core pool.
        pass_criteria:
          - No kernel error
          - All core devices are correctly added after plugging core disk.
          - All cores are successfully removed.
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 4)])
        cache_dev = devices["cache"].partitions[0]
        core_devs = devices["core"].partitions
        plug_device = devices["core"]
    with TestRun.step("Start cache and add four cores."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode=CacheMode.WB,
                                   force=True)
        cores = []
        for d in core_devs:
            cores.append(cache.add_core(d))
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Run random writes to all CAS devices."):
        run_fio([c.system_path for c in cores])
    with TestRun.step(
            "Flush dirty data from two CAS devices and verify than other two contain "
            "dirty data."):
        for core in cores:
            if core.core_id % 2 == 0:
                core.flush_core()
                if core.get_dirty_blocks() != Size.zero():
                    TestRun.fail("Failed to flush CAS device.")
            elif core.get_dirty_blocks() == Size.zero():
                TestRun.fail("There should be dirty data on CAS device.")
    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Unplug core device from system and plug it back."):
        plug_device.unplug()
        time.sleep(2)
        plug_device.plug()
    with TestRun.step(
            "Verify that all cores from plugged core device are listed with "
            "proper status."):
        for core in cores:
            if core.get_status() != CoreStatus.detached:
                TestRun.fail(f"Each core should be in detached state. "
                             f"Actual states: {casadm.list_caches().stdout}")
    with TestRun.step("Remove CAS devices from core pool."):
        casadm.remove_all_detached_cores()
    with TestRun.step("Verify that cores are no longer listed."):
        output = casadm.list_caches().stdout
        for dev in core_devs:
            if dev.system_path in output:
                TestRun.fail(
                    f"CAS device is still listed in casadm list output:\n{output}"
                )
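
The prepare_devices and run_fio helpers used above are defined elsewhere in the
test module; a minimal sketch of what they might look like (the 400 MiB
partition size and job names are assumptions, not values from the suite):

def prepare_devices(spec):
    # spec is a list of (disk_label, partition_count) tuples,
    # e.g. [("cache", 1), ("core", 4)]
    devices = {}
    for label, partition_count in spec:
        disk = TestRun.disks[label]
        disk.create_partitions([Size(400, Unit.MebiByte)] * partition_count)
        devices[label] = disk
    return devices


def run_fio(targets):
    fio = Fio().create_command() \
        .io_engine(IoEngine.libaio) \
        .read_write(ReadWrite.randwrite) \
        .direct() \
        .size(Size(100, Unit.MebiByte)) \
        .block_size(Size(1, Unit.Blocks4096))
    for i, target in enumerate(targets):
        fio.add_job(f"job{i}").target(target)
    fio.run()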

def check_inactive_usage_stats(stats_before, stats_after, stat_name,
                               should_be_zero):
    if should_be_zero and stats_before == Size.zero() \
            and stats_after == Size.zero():
        TestRun.LOGGER.info(
            f"{stat_name} value before and after equals 0 as expected.")
    elif not should_be_zero and stats_after < stats_before:
        TestRun.LOGGER.info(f"{stat_name} is lower than before as expected.")
    else:
        TestRun.fail(
            f"{stat_name} has unexpected value: {stats_after} "
            f"(before: {stats_before}, should be zero: {should_be_zero}).")
Example #3
    def create_partitions(self,
                          sizes: list,
                          partition_table_type=disk_utils.PartitionTable.gpt):
        if disk_utils.create_partition_table(self, partition_table_type):
            self.partition_table = partition_table_type
            partition_type = disk_utils.PartitionType.primary

            partition_number_offset = 0
            for s in sizes:
                size = Size(
                    s.get_value(self.block_size) - self.block_size.value,
                    self.block_size)
                if partition_table_type == disk_utils.PartitionTable.msdos and \
                        len(sizes) > 4 and len(self.partitions) == 3:
                    disk_utils.create_partition(
                        self, Size.zero(), 4,
                        disk_utils.PartitionType.extended, Unit.MebiByte, True)
                    partition_type = disk_utils.PartitionType.logical
                    partition_number_offset = 1

                partition_number = len(
                    self.partitions) + 1 + partition_number_offset
                if disk_utils.create_partition(self, size, partition_number,
                                               partition_type, Unit.MebiByte,
                                               True):
                    new_part = Partition(self, partition_type,
                                         partition_number)
                    self.partitions.append(new_part)
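
A quick usage sketch for this method; the disk label and partition sizes are
placeholders rather than values taken from the examples above:

disk = TestRun.disks['cache']
disk.create_partitions([Size(1, Unit.GibiByte), Size(2, Unit.GibiByte)])
first_partition = disk.partitions[0]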
Example #4
def create_partitions(device,
                      sizes: list,
                      partition_table_type=PartitionTable.gpt):
    from storage_devices.partition import Partition
    if create_partition_table(device, partition_table_type):
        device.partition_table = partition_table_type
        partition_type = PartitionType.primary

        partition_number_offset = 0
        for s in sizes:
            size = Size(
                s.get_value(device.block_size) - device.block_size.value,
                device.block_size)
            if partition_table_type == PartitionTable.msdos and \
                    len(sizes) > 4 and len(device.partitions) == 3:
                create_partition(device, Size.zero(), 4,
                                 PartitionType.extended, Unit.MebiByte, True)
                partition_type = PartitionType.logical
                partition_number_offset = 1

            partition_number = len(
                device.partitions) + 1 + partition_number_offset
            if create_partition(device, size, partition_number, partition_type,
                                Unit.MebiByte, True):
                new_part = Partition(device, partition_type, partition_number)
                dd = Dd().input("/dev/zero") \
                    .output(new_part.system_path) \
                    .count(1) \
                    .block_size(Size(1, Unit.Blocks4096)) \
                    .oflag("direct")
                dd.run()
                device.partitions.append(new_part)
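
Note the Dd call in this variant: it zeroes the first 4 KiB of every new
partition, which wipes any stale filesystem or metadata signature left on the
device by previous test runs.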
Example #5
def test_dirty_load():
    """
        title: Loading cache after dirty shutdown.
        description: Test for loading cache containing dirty data after DUT hard restart.
        pass_criteria:
          - DUT should reboot successfully.
          - Cache should load successfully.
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)] * 2)
        core_devices = core_disk.partitions

    with TestRun.step("Start cache in Write-Back mode and add cores."):
        cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
        cores = []
        for dev in core_devices:
            cores.append(cache.add_core(dev))

    with TestRun.step("Set cleaning policy to nop."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Populate cache with dirty data."):
        fio = Fio().create_command()\
            .size(Size(1, Unit.GibiByte))\
            .read_write(ReadWrite.randwrite)\
            .io_engine(IoEngine.libaio)\
            .block_size(Size(1, Unit.Blocks4096))
        for i, core in enumerate(cores):
            fio.add_job(f"core{i}").target(core.path)
        fio.run()

        if cache.get_dirty_blocks() <= Size.zero():
            TestRun.fail("Cache does not contain dirty data.")

    with TestRun.step("Remove one core without flushing dirty data."):
        casadm.remove_core_with_script_command(cache.cache_id, core.core_id,
                                               True)

    with TestRun.step("Reset platform."):
        power_control = TestRun.plugin_manager.get_plugin('power_control')
        power_control.power_cycle()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

        caches_num = len(casadm_parser.get_caches())
        if caches_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of caches. Expected 1, actual {caches_num}.")

        cores_num = len(casadm_parser.get_cores(cache.cache_id))
        if cores_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of cores. Expected 1, actual {cores_num}.")
Example #6

def check_amount_of_dirty_data(devices_dirty_lines_before):
    for dev in devices_dirty_lines_before:
        if dev.get_status() == CoreStatus.active \
                and dev.get_dirty_blocks() != Size.zero():
            TestRun.fail("Amount of dirty data on active core is not 0.")
        if dev.get_status() == CoreStatus.inactive and \
                dev.get_dirty_blocks() != devices_dirty_lines_before[dev]:
            TestRun.fail("Data from inactive core was flushed.")
Example #7
def create_partition(
        device,
        part_size,
        part_number,
        part_type: PartitionType = PartitionType.primary,
        unit=Unit.MebiByte,
        aligned: bool = True):
    TestRun.LOGGER.info(
        f"Creating {part_type.name} partition on device: {device.path}")

    begin = get_first_partition_offset(device, aligned)
    for part in device.partitions:
        begin += part.size
        if part.type == PartitionType.logical:
            begin += Size(1, Unit.MebiByte if not aligned else device.block_size)

    if part_type == PartitionType.logical:
        begin += Size(1, Unit.MebiByte if not aligned else device.block_size)

    end = (begin + part_size) if part_size != Size.zero() else '100%'

    cmd = f'parted --script {device.path} mkpart ' \
          f'{part_type.name} ' \
          f'{begin.get_value(unit)}{unit_to_string(unit)} ' \
          f'{end.get_value(unit)}{unit_to_string(unit)}'
    output = TestRun.executor.run(cmd)

    if output.exit_code != 0:
        TestRun.executor.run_expect_success("partprobe")

    TestRun.executor.run_expect_success("udevadm settle")
    if not check_partition_after_create(
            part_size,
            part_number,
            device.path,
            part_type,
            aligned):
        raise Exception("Could not create partition!")

    if part_type != PartitionType.extended:
        from storage_devices.partition import Partition
        new_part = Partition(device,
                             part_type,
                             part_number,
                             begin,
                             end if type(end) is Size else device.size)
        dd = Dd().input("/dev/zero") \
                 .output(new_part.path) \
                 .count(1) \
                 .block_size(Size(1, Unit.Blocks4096)) \
                 .oflag("direct")
        dd.run()
        device.partitions.append(new_part)

    TestRun.LOGGER.info(f"Successfully created {part_type.name} partition on {device.path}")
Example #8
def create_partition(device,
                     part_size,
                     part_number,
                     part_type: PartitionType = PartitionType.primary,
                     unit=Unit.MebiByte,
                     aligned: bool = True):
    TestRun.LOGGER.info(
        f"Creating {part_type.name} partition on device: {device.system_path}")

    begin = get_first_partition_offset(device, aligned).get_value(unit)
    for part in device.partitions:
        begin += part.size.get_value(unit)
        if part.type == PartitionType.logical:
            begin += Size(1, Unit.MebiByte if not aligned else
                          device.block_size).get_value(unit)

    if part_type == PartitionType.logical:
        begin += Size(1, Unit.MebiByte
                      if not aligned else device.block_size).get_value(unit)

    end = f'{begin + part_size.get_value(unit)}{unit_to_string(unit)}' \
        if part_size != Size.zero() else '100%'

    cmd = f'parted --script {device.system_path} mkpart ' \
          f'{part_type.name} {begin}{unit_to_string(unit)} {end}'
    output = TestRun.executor.run(cmd)

    if output.exit_code == 0:
        TestRun.executor.run("udevadm settle")
        if check_partition_after_create(part_size, part_number,
                                        device.system_path, part_type,
                                        aligned):
            TestRun.LOGGER.info(
                f"Successfully created partition on {device.system_path}")
            return True

    output = TestRun.executor.run("partprobe")
    if output.exit_code == 0:
        TestRun.executor.run("udevadm settle")
        if check_partition_after_create(part_size, part_number,
                                        device.system_path, part_type,
                                        aligned):
            TestRun.LOGGER.info(
                f"Successfully created partition on {device.system_path}")
            return True

    raise Exception(
        f"Could not create partition: {output.stderr}\n{output.stdout}")
Example #9
def create_partitions(device,
                      sizes: list,
                      partition_table_type=PartitionTable.gpt):
    create_partition_table(device, partition_table_type)
    partition_type = PartitionType.primary
    partition_number_offset = 0

    for s in sizes:
        size = Size(
            s.get_value(device.block_size) - device.block_size.value,
            device.block_size)
        if partition_table_type == PartitionTable.msdos and \
                len(sizes) > 4 and len(device.partitions) == 3:
            create_partition(device, Size.zero(), 4, PartitionType.extended)
            partition_type = PartitionType.logical
            partition_number_offset = 1

        partition_number = len(device.partitions) + 1 + partition_number_offset
        create_partition(device, size, partition_number, partition_type,
                         Unit.MebiByte, True)
Example #10
def test_recovery_flush_reset_raw(cache_mode):
    """
        title: Recovery after reset during cache flushing - test on raw device.
        description: |
          Verify that unflushed data can be safely recovered, when reset was pressed during
          data flushing on raw device.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk.create_partitions([Size(16, Unit.GibiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]
        core_device_link = core_device.get_device_link("/dev/disk/by-id")
        cache_device_link = cache_device.get_device_link("/dev/disk/by-id")

    with TestRun.step("Create test files."):
        source_file, target_file = create_test_files(test_file_size)

    with TestRun.step("Setup cache and add core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        core = cache.add_core(core_device)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Copy file to CAS."):
        copy_file(source=source_file.full_path,
                  target=core.path,
                  size=test_file_size,
                  direct="oflag")

    with TestRun.step("Sync and flush buffers."):
        os_utils.sync()
        output = TestRun.executor.run(f"hdparm -f {core.path}")
        if output.exit_code != 0:
            raise CmdException("Error during hdparm", output)

    with TestRun.step("Trigger flush."):
        TestRun.executor.run_in_background(
            cli.flush_cache_cmd(f"{cache.cache_id}"))

    with TestRun.step("Hard reset DUT during data flushing."):
        power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
        cache_device.path = cache_device_link.get_target()
        core_device.path = core_device_link.get_target()

    with TestRun.step(
            "Copy file from core and check if current md5sum is different than "
            "before restart."):
        copy_file(source=core_device_link.get_target(),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file, should_differ=True)

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_device)
        if cache.get_dirty_blocks() == Size.zero():
            TestRun.fail("There are no dirty blocks on cache device.")

    with TestRun.step("Stop cache with dirty data flush."):
        core_writes_before = core_device.get_io_stats().sectors_written
        cache.stop()
        if core_writes_before >= core_device.get_io_stats().sectors_written:
            TestRun.fail(
                "No data was flushed after stopping cache started with load option."
            )

    with TestRun.step(
            "Copy test file from core device to temporary location. "
            "Compare it with the first version – they should be the same."):
        copy_file(source=core_device_link.get_target(),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file)

    with TestRun.step("Cleanup core device and remove test files."):
        target_file.remove()
        source_file.remove()
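
The power_cycle_dut helper is defined elsewhere in the suite; a rough sketch,
assuming that "waiting for flush begin" means polling the core device's write
counters until the flush starts landing on it:

def power_cycle_dut(wait_for_flush_begin=False, core_device=None):
    if wait_for_flush_begin:
        sectors_written_before = core_device.get_io_stats().sectors_written
        while core_device.get_io_stats().sectors_written \
                == sectors_written_before:
            time.sleep(1)
    power_control = TestRun.plugin_manager.get_plugin('power_control')
    power_control.power_cycle()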
Example #11
def test_recovery_flush_reset_fs(cache_mode, fs):
    """
        title: Recovery after reset during cache flushing - test on filesystem.
        description: |
          Verify that unflushed data can be safely recovered, when reset was pressed during
          data flushing on filesystem.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk.create_partitions([Size(16, Unit.GibiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]
        core_device_link = core_device.get_device_link("/dev/disk/by-id")
        cache_device_link = cache_device.get_device_link("/dev/disk/by-id")

    with TestRun.step(f"Create {fs} filesystem on core."):
        core_device.create_filesystem(fs)

    with TestRun.step("Create test files."):
        source_file, target_file = create_test_files(test_file_size)

    with TestRun.step("Setup cache and add core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        Udev.disable()
        core = cache.add_core(core_device)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Mount CAS device."):
        core.mount(mount_point)

    with TestRun.step("Copy file to CAS."):
        copy_file(source=source_file.full_path,
                  target=os.path.join(mount_point, "source_test_file"),
                  size=test_file_size,
                  direct="oflag")

    with TestRun.step("Unmount CAS device."):
        core.unmount()

    with TestRun.step("Trigger flush."):
        TestRun.executor.run_in_background(
            cli.flush_cache_cmd(f"{cache.cache_id}"))

    with TestRun.step("Hard reset DUT during data flushing."):
        power_cycle_dut(True, core_device)
        cache_device.path = cache_device_link.get_target()
        core_device.path = core_device_link.get_target()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_device)
        if cache.get_dirty_blocks() == Size.zero():
            TestRun.fail("There are no dirty blocks on cache device.")

    with TestRun.step("Stop cache with dirty data flush."):
        core_writes_before = core_device.get_io_stats().sectors_written
        cache.stop()
        if core_writes_before >= core_device.get_io_stats().sectors_written:
            TestRun.fail(
                "No data was flushed after stopping cache started with load option."
            )

    with TestRun.step("Mount core device."):
        core_device.mount(mount_point)

    with TestRun.step(
            "Copy test file from core device to temporary location. "
            "Compare it with the first version – they should be the same."):
        copy_file(source=os.path.join(mount_point, "source_test_file"),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file)

    with TestRun.step("Unmount core device and remove test files."):
        core_device.unmount()
        target_file.remove()
        source_file.remove()
        Udev.enable()
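
A note on the Udev.disable() call in this test: Open CAS ships udev rules that
can automatically attach CAS devices when their underlying disks appear, so
the test suspends udev event handling to keep device state fully under its own
control, then re-enables it during cleanup.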
Example #12
def test_acp_functional(cache_mode):
    """
        title: Validate ACP behavior.
        description: |
          Validate that ACP is cleaning dirty data from chunks bucket - sorted by number of
          dirty pages.
        pass_criteria:
          - All chunks are cleaned in proper order
    """
    chunks_count = 8
    chunk_size = Size(100, Unit.MebiByte)
    chunk_list = []

    def sector_in_chunk(chunk, blktrace_header):
        sector_to_size = Size(blktrace_header.sector_number, Unit.Blocks512)
        return chunk.offset <= sector_to_size < chunk.offset + chunk_size

    def get_header_chunk(bucket_chunks, blktrace_header):
        return next(
            (c for c in bucket_chunks if sector_in_chunk(c, blktrace_header)),
            None)

    def sector_in_tested_region(blktrace_header, list_of_chunks):
        return any(
            [sector_in_chunk(c, blktrace_header) for c in list_of_chunks])

    with TestRun.step("Prepare devices."):
        cache_device = TestRun.disks['cache']
        core_device = TestRun.disks['core']
        cache_device.create_partitions([chunk_size * chunks_count])
        cache_device = cache_device.partitions[0]

    with TestRun.step("Start cache in WB mode, set cleaning policy to NOP "
                      "and add whole disk as core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        core = cache.add_core(core_device)

    with TestRun.step(
            "Run separate random writes with random amount of data on every "
            "100 MiB part of CAS device."):
        Chunk = namedtuple('Chunk', 'offset writes_size')
        random_chunk_writes = random.sample(range(1, 101), chunks_count)
        for i in range(chunks_count):
            c = Chunk(chunk_size * i,
                      Size(random_chunk_writes[i], Unit.MebiByte))
            chunk_list.append(c)

        fio = (Fio().create_command().io_engine(IoEngine.sync).read_write(
            ReadWrite.randwrite).direct().size(chunk_size).block_size(
                Size(1, Unit.Blocks4096)).target(f"{core.path}"))
        for chunk in chunk_list:
            fio.add_job().offset(chunk.offset).io_size(chunk.writes_size)
        fio.run()

        dirty_blocks = cache.get_dirty_blocks()
        if dirty_blocks == Size.zero():
            TestRun.fail("No dirty data on cache after IO.")
        TestRun.LOGGER.info(str(cache.get_statistics()))

    with TestRun.step(
            "Switch cleaning policy to ACP and start blktrace monitoring."):
        trace = BlkTrace(core.core_device, BlkTraceMask.write)
        trace.start_monitoring()

        initial_dirty_blocks = cache.get_dirty_blocks()
        cache.set_cleaning_policy(CleaningPolicy.acp)
        while cache.get_dirty_blocks() > Size.zero():
            time.sleep(10)
            if cache.get_dirty_blocks() == initial_dirty_blocks:
                TestRun.fail(
                    f"No data flushed in 10s.\n{str(cache.get_statistics())}")
            initial_dirty_blocks = cache.get_dirty_blocks()

        TestRun.LOGGER.info(str(cache.get_statistics()))

        action_kind = ActionKind.IoHandled
        output = trace.stop_monitoring()
        blktrace_output = [
            h for h in output
            if h.action == action_kind and RwbsKind.F not in h.rwbs
        ]

        if not blktrace_output:
            TestRun.fail(f"No {action_kind.name} entries in blktrace output!")
        TestRun.LOGGER.debug(
            f"Blktrace headers count: {len(blktrace_output)}.")

    with TestRun.step(
            "Using blktrace verify that cleaning thread cleans data from "
            "all CAS device parts in proper order."):
        all_writes_ok = True
        last_sector = None
        max_percent = 100
        bucket_chunks = []
        current_chunk = None
        write_counter = 0

        for header in blktrace_output:
            # Sector not in current chunk - search for the next chunk
            if current_chunk is None or \
                    not sector_in_chunk(current_chunk, header):
                # Search for bucket with chunks that contain most dirty data
                while not bucket_chunks and max_percent > 0:
                    bucket_chunks = [
                        chunk for chunk in chunk_list
                        if max_percent >= chunk.writes_size.get_value(
                            Unit.MebiByte) > max_percent - 10
                    ]
                    max_percent -= 10

                if not bucket_chunks:
                    TestRun.fail(
                        f"No chunks left for sector {header.sector_number} "
                        f"({Size(header.sector_number, Unit.Blocks512)}).")

                # Get chunk within current bucket where current header sector is expected
                chunk = get_header_chunk(bucket_chunks, header)
                if not chunk:
                    TestRun.LOGGER.error(
                        f"Sector {header.sector_number} "
                        f"({Size(header.sector_number, Unit.Blocks512)}) "
                        f"not in current bucket.")
                    all_writes_ok = False
                    if not sector_in_tested_region(header, chunk_list):
                        TestRun.LOGGER.error(
                            f"Sector {header.sector_number} "
                            f"({Size(header.sector_number, Unit.Blocks512)}) "
                            f"outside of any tested chunk.")
                    continue

                # Set new chunk as current
                if current_chunk:
                    TestRun.LOGGER.info(f"Writes to chunk: {write_counter}")
                current_chunk = chunk
                write_counter = 1
                bucket_chunks.remove(chunk)
                last_sector = header.sector_number
                TestRun.LOGGER.debug(
                    f"First written sector in new chunk: {header.sector_number} "
                    f"({Size(header.sector_number, Unit.Blocks512)})")
                continue

            # Sector in current chunk - check sequential order
            if last_sector is None or header.sector_number >= last_sector:
                last_sector = header.sector_number
            else:
                TestRun.LOGGER.error(
                    f"Sectors in chunk <{current_chunk.offset}, "
                    f"{str(current_chunk.offset + chunk_size)}) written in bad "
                    f"order - sector {header.sector_number} ("
                    f"{Size(header.sector_number, Unit.Blocks512)}) after sector "
                    f"{last_sector} ({Size(last_sector, Unit.Blocks512)})")
                all_writes_ok = False
            write_counter += 1
        TestRun.LOGGER.info(f"Writes to chunk: {write_counter}")

        if all_writes_ok:
            TestRun.LOGGER.info("All sectors written in proper order.")
Example #13
def test_multistream_seq_cutoff_functional(threshold, streams_number):
    """
    title: Functional test for multistream sequential cutoff
    description: |
        Testing if amount of data written to cache and core is correct after running sequential
        writes from multiple streams with different sequential cut-off thresholds.
    pass_criteria:
        - Amount of data written to cache is equal to amount set with sequential cutoff threshold
        - Amount of data written in pass-through is equal to io size run after reaching the
        sequential cutoff threshold
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']

        cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(
            f"Set seq-cutoff policy to always, threshold to {threshold} "
            f"and reset statistics counters."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(threshold)
        core.reset_counters()

    with TestRun.step(
            f"Run {streams_number} I/O streams with amount of sequential writes equal to "
            f"seq-cutoff threshold value minus one 4k block."):
        kib_between_streams = 100
        range_step = int(threshold.get_value(
            Unit.KibiByte)) + kib_between_streams
        max_range_offset = streams_number * range_step

        offsets = [o for o in range(0, max_range_offset, range_step)]
        core_statistics_before = core.get_statistics()

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(
                f"Statistics before I/O:\n{core_statistics_before}")

            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(core.system_path,
                   count=int(threshold.get_value(Unit.Blocks4096) - 1),
                   seek=int(offset.get_value(Unit.Blocks4096)))

            core_statistics_after = core.get_statistics()
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=0,
                             expected_writes_to_cache=threshold -
                             Size(1, Unit.Blocks4096))
            core_statistics_before = core_statistics_after

    with TestRun.step(
            "Write random number of 4k block requests to each stream and check if all "
            "writes were sent in pass-through mode."):
        core_statistics_before = core.get_statistics()
        random.shuffle(offsets)

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(
                f"Statistics before second I/O:\n{core_statistics_before}")
            additional_4k_blocks_writes = random.randint(
                1, kib_between_streams // 4)
            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(core.system_path,
                   count=additional_4k_blocks_writes,
                   seek=int(
                       offset.get_value(Unit.Blocks4096) +
                       threshold.get_value(Unit.Blocks4096) - 1))

            core_statistics_after = core.get_statistics()
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=additional_4k_blocks_writes,
                             expected_writes_to_cache=Size.zero())
            core_statistics_before = core_statistics_after
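
The run_dd and check_statistics helpers above are defined elsewhere in the
test module; a minimal sketch of run_dd, assuming the framework's Dd wrapper
exposes a seek parameter like dd itself:

def run_dd(target_path, count, seek):
    dd = Dd().input("/dev/zero") \
        .output(target_path) \
        .count(count) \
        .block_size(Size(1, Unit.Blocks4096)) \
        .seek(seek) \
        .oflag("direct")
    dd.run()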
def test_stop_cache_with_inactive_devices():
    """
        title: Validate stopping cache with inactive CAS devices.
        description: |
          Validate that cache with inactive CAS devices cannot be stopped
          unless ‘force’ option is used.
        pass_criteria:
          - No kernel error
          - Stopping cache with inactive CAS devices without ‘force’ option is blocked.
          - Stopping cache with inactive CAS devices with ‘force’ option is successful.
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
        cache_dev = devices["cache"].partitions[0]
        core_dev = devices["core"].partitions[0]
        plug_device = devices["core"]
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode=CacheMode.WB,
                                   force=True)
        core = cache.add_core(core_dev)
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step(
            "Run random writes and verify that CAS device contains dirty data."
    ):
        run_fio([core.system_path])
        if core.get_dirty_blocks() == Size.zero():
            TestRun.fail("There is no dirty data on core device.")
    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Unplug core disk."):
        plug_device.unplug()
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
    with TestRun.step(
            "Verify that previously created CAS device is listed with proper status."
    ):
        core_status = core.get_status()
        if core_status != CoreStatus.inactive:
            TestRun.fail(
                f"CAS device should be in inactive state. Actual status: {core_status}."
            )
    with TestRun.step(
            "Try stopping cache without ‘no data flush’ option, verify that operation "
            "was blocked and proper message is displayed."):
        try_stop_incomplete_cache(cache)
    with TestRun.step("Stop cache with force option."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Plug missing core device."):
        plug_device.plug()
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
    with TestRun.step("Stop cache with flushing dirty data."):
        cache.stop()
    with TestRun.step("Unplug core device."):
        plug_device.unplug()
    with TestRun.step("Load cache and verify core status is inactive."):
        cache = casadm.load_cache(cache_dev)
        core_status = core.get_status()
        if core_status != CoreStatus.inactive:
            TestRun.fail(
                f"CAS device should be in inactive state. Actual state: {core_status}."
            )
    with TestRun.step(
            "Try stopping cache without ‘no data flush’ option, verify that "
            "operation was blocked and proper message is displayed."):
        try_stop_incomplete_cache(cache)
    with TestRun.step(
            "Stop cache with 'no data flush' option and plug missing core device."
    ):
        cache.stop(no_data_flush=True)
        plug_device.plug()
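
Both inactive-device tests call try_stop_incomplete_cache; a plausible sketch,
where stop_cache_incomplete stands in for whatever message constant
cli_messages actually defines:

def try_stop_incomplete_cache(cache):
    try:
        cache.stop()
        TestRun.fail("Stopping cache with inactive CAS devices should be "
                     "blocked without the 'no data flush' option.")
    except CmdException as e:
        # hypothetical message constant
        cli_messages.check_stderr_msg(e.output,
                                      cli_messages.stop_cache_incomplete)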
def test_remove_inactive_devices():
    """
        title: Validate removing inactive CAS devices.
        description: |
          Validate that it is possible to remove inactive CAS devices when there are no dirty
          cache lines associated with them and that removing CAS devices is prevented otherwise
          (unless ‘force’ option is used).
        pass_criteria:
          - No kernel error
          - Removing CAS devices without dirty data is successful.
          - Removing CAS devices with dirty data without ‘force’ option is blocked.
          - Removing CAS devices with dirty data with ‘force’ option is successful.
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 4)])
        cache_dev = devices["cache"].partitions[0]
        core_devs = devices["core"].partitions
        plug_device = devices["core"]
    with TestRun.step("Start cache and add four cores."):
        cache = casadm.start_cache(cache_dev,
                                   cache_mode=CacheMode.WB,
                                   force=True)
        cores = []
        for d in core_devs:
            cores.append(cache.add_core(d))
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Run random writes to all CAS devices."):
        run_fio([c.system_path for c in cores])
    with TestRun.step(
            "Flush dirty data from two CAS devices and verify than other two "
            "contain dirty data."):
        for core in cores:
            if core.core_id % 2 == 0:
                core.flush_core()
                if core.get_dirty_blocks() != Size.zero():
                    TestRun.fail("Failed to flush CAS device.")
            elif core.get_dirty_blocks() == Size.zero():
                TestRun.fail("There should be dirty data on CAS device.")
    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)
    with TestRun.step("Unplug core disk."):
        plug_device.unplug()
    with TestRun.step("Load cache."):
        casadm.load_cache(cache_dev)
    with TestRun.step(
            "Verify that all previously created CAS devices are listed with "
            "proper status."):
        for core in cores:
            if core.get_status() != CoreStatus.inactive:
                TestRun.fail(f"Each core should be in inactive state. "
                             f"Actual states:\n{casadm.list_caches().stdout}")
    with TestRun.step(
            "Try removing CAS device without ‘force’ option. Verify that for "
            "dirty CAS devices operation is blocked, proper message is displayed "
            "and device is still listed."):
        shuffle(cores)
        for core in cores:
            try:
                dirty_blocks = core.get_dirty_blocks()
                core.remove_core()
                if dirty_blocks != Size.zero():
                    TestRun.fail(
                        "Removing dirty CAS device should be impossible but remove "
                        "command executed without any error.")
                TestRun.LOGGER.info(
                    "Core removed without force option, as expected for a "
                    "clean CAS device.")
            except CmdException as e:
                if dirty_blocks == Size.zero():
                    TestRun.fail(
                        "Removing clean CAS device should be possible but remove "
                        "command returned an error.")
                TestRun.LOGGER.info(
                    "Remove operation without force option is blocked for "
                    "dirty CAS device as expected.")
                cli_messages.check_stderr_msg(
                    e.output, cli_messages.remove_inactive_core)
                output = casadm.list_caches().stdout
                if core.system_path not in output:
                    TestRun.fail(
                        f"CAS device is not listed in casadm list output but it should be."
                        f"\n{output}")
                core.remove_core(force=True)
    with TestRun.step("Plug missing disk and stop cache."):
        plug_device.plug()
        casadm.stop_all_caches()