Example #1
def test_udev_core_partition():
    """
        title: |
          CAS udev rule execution after re-attaching partitions existing in configuration file as
          cores.
        description: |
          Verify if CAS udev rule is executed when partitions existing in CAS configuration file
          as cores are being attached.
        pass_criteria:
          - No kernel error
          - Created partitions are added to core pool after attaching core drive.
    """
    cores_count = 4
    with TestRun.step(
            "Create four partitions on core device and one on cache device."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
        core_devices = core_disk.partitions
    with TestRun.step("Start cache and add created partitions as cores."):
        cache = casadm.start_cache(cache_dev, force=True)
        for dev in core_devices:
            cache.add_core(dev)
    with TestRun.step("Create init config from running CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Stop cache."):
        cache.stop()
    with TestRun.step("Detach core disk."):
        core_disk.unplug()
    with TestRun.step("Plug missing core disk."):
        core_disk.plug()
    with TestRun.step(
            "List cache devices and check that created partitions are present "
            "in core pool."):
        for dev in core_devices:
            check_if_dev_in_core_pool(dev)
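
The check_if_dev_in_core_pool() helper is not shown in this listing. A minimal sketch, assuming casadm_parser.get_cas_devices_dict() exposes a "core_pool" list of dicts with a "device" key (the structure example #19 below relies on) and that partitions carry a .path attribute:

def check_if_dev_in_core_pool(dev, should_be_in_core_pool=True):
    cas_devices_dict = casadm_parser.get_cas_devices_dict()
    is_in_core_pool = any(core["device"] == dev.path
                          for core in cas_devices_dict["core_pool"])
    if is_in_core_pool != should_be_in_core_pool:
        TestRun.fail(f"Device {dev.path} is "
                     f"{'not ' if should_be_in_core_pool else ''}in core pool.")
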
def test_stress_start(cache_mode):
    """
        title: Stress test for starting and stopping cache.
        description: Validate the ability of CAS to start and stop cache in the loop.
        pass_criteria:
          - No system crash while starting and stopping cache in the loop.
          - Cache starts and stops successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Start and stop CAS {iterations_per_config} times."):
        with TestRun.step("Start cache."):
            cache = casadm.start_cache(cache_dev, cache_mode, force=True)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Expected caches count: 1; Actual caches count: {caches_count}."
                )
        with TestRun.step("Add core."):
            cache.add_core(core_dev)
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(
                    f"Expected cores count: 1; Actual cores count: {cores_count}."
                )
        with TestRun.step("Stop cache."):
            cache.stop()
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 0:
                TestRun.fail(
                    f"Expected caches count: 0; Actual caches count: {caches_count}."
                )

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
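
test_stress_start() depends on a module-level iterations_per_config and receives cache_mode from pytest parametrization. A hedged sketch of that wiring; the iteration count and the require_disk/DiskTypeSet/DiskTypeLowerThan marker names are assumptions consistent with how the examples obtain TestRun.disks:

iterations_per_config = 10  # assumed value; the original constant is not shown

@pytest.mark.parametrize("cache_mode", CacheMode)  # one run per cache mode
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_stress_start(cache_mode):
    ...
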
Example #3
def test_load_cache_with_inactive_core():
    """
        title: Load cache with unavailable core devices.
        description: Check if it is possible to load cache with unavailable core devices.
        pass_criteria:
          - No kernel error
          - It is possible to perform cache load operation with unavailable devices.
          - Warning message about not available core device should appear.
          - Cache status should change to running after plugging missing core device.
    """
    with TestRun.step("Prepare devices."):
        devices = prepare_devices([("cache", 1), ("core", 1)])
        cache_dev = devices["cache"].partitions[0]
        core_dev = devices["core"].partitions[0]
        plug_device = devices["core"]
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, force=True)
        core = cache.add_core(core_dev)
    with TestRun.step(
            "Create init config file using current CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()
    with TestRun.step("Stop cache."):
        cache.stop()
    with TestRun.step("Unplug core device."):
        plug_device.unplug()
    with TestRun.step("Load cache."):
        output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
        cli_messages.check_stderr_msg(output,
                                      cli_messages.load_inactive_core_missing)
    with TestRun.step("Plug missing device and stop cache."):
        plug_device.plug()
        core.wait_for_status_change(CoreStatus.active)
        cache_status = cache.get_status()
        if cache_status != CacheStatus.running:
            TestRun.fail(
                f"Cache did not change status to 'running' after plugging core device. "
                f"Actual state: {cache_status}.")
        cache.stop()
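
A sketch of the prepare_devices() helper called above, assuming each (name, count) pair selects a TestRun disk and creates that many partitions on it; the partition size is an assumption:

def prepare_devices(devices_with_counts):
    devices = {}
    for disk_name, partition_count in devices_with_counts:
        disk = TestRun.disks[disk_name]
        disk.create_partitions([Size(1, Unit.GibiByte)] * partition_count)
        devices[disk_name] = disk
    return devices
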
Example #4
def test_cas_init_with_changed_mode(cache_mode_pair):
    """
    title: Check starting cache in other cache mode by initializing OpenCAS service from config.
    description: |
      Start cache, create config based on running configuration but with another cache mode,
      reinitialize OpenCAS service with '--force' option and check if cache defined
      in config file starts properly.
      Check all cache modes.
    pass_criteria:
      - Cache starts with attached core
      - Cache starts in mode saved in configuration file.
    """
    with TestRun.step("Prepare partitions for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(200, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(400, Unit.MebiByte)])
        core_part = core_dev.partitions[0]

    with TestRun.step(f"Start cache in the {cache_mode_pair[0]} mode and add core."):
        cache = casadm.start_cache(cache_part, cache_mode_pair[0], force=True)
        core = cache.add_core(core_part)

    with TestRun.step(
            f"Create the configuration file with a different cache mode ({cache_mode_pair[1]})"
    ):
        init_conf = InitConfig()
        init_conf.add_cache(cache.cache_id, cache.cache_device, cache_mode_pair[1])
        init_conf.add_core(cache.cache_id, core.core_id, core.core_device)
        init_conf.save_config_file()

    with TestRun.step("Reinitialize OpenCAS service with '--force' option."):
        casadm.stop_all_caches()
        casctl.init(True)

    with TestRun.step("Check if cache started in correct mode with core attached."):
        validate_cache(cache_mode_pair[1])
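
A possible validate_cache() implementation for the final check, using only casadm_parser calls seen in the other examples:

def validate_cache(cache_mode):
    caches = casadm_parser.get_caches()
    if len(caches) != 1:
        TestRun.fail(f"Expected caches count: 1; Actual caches count: {len(caches)}.")
    cache = caches[0]
    if cache.get_cache_mode() != cache_mode:
        TestRun.fail(f"Cache started in {cache.get_cache_mode()} mode "
                     f"instead of {cache_mode}.")
    cores = casadm_parser.get_cores(cache.cache_id)
    if len(cores) != 1:
        TestRun.fail(f"Expected cores count: 1; Actual cores count: {len(cores)}.")
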
Example #5
def test_another_core_with_same_id():
    """
        title: Test for creating another core device with the same ID.
        description: |
          Checking if adding another core device and setting
          the same core ID as the previous one fails.
        pass_criteria:
          - No additional core device added.
    """
    with TestRun.step("Start cache device"):
        cache_dev = TestRun.disks["cache"]
        cache_dev.create_partitions([Size(2, Unit.GibiByte)])
        cache = casadm.start_cache(cache_dev.partitions[0], force=True)

    with TestRun.step("Add core with ID = 1"):
        core_dev_1 = TestRun.disks["core_1"]
        core_dev_1.create_partitions([Size(1, Unit.GibiByte)])
        TestRun.executor.run_expect_success(
            cli.add_core_cmd(
                cache_id=f"{cache.cache_id}",
                core_dev=f"{core_dev_1.partitions[0].system_path}",
                core_id="1",
            )
        )

    with TestRun.step("Try to add another core with the same ID = 1"):
        core_dev_2 = TestRun.disks["core_2"]
        core_dev_2.create_partitions([Size(1, Unit.GibiByte)])
        TestRun.executor.run_expect_fail(
            cli.add_core_cmd(
                cache_id=f"{cache.cache_id}",
                core_dev=f"{core_dev_2.partitions[0].system_path}",
                core_id="1",
            )
        )

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
Example #6
def test_write_fetch_full_misses(cache_mode, cache_line_size):
    """
        title: No caching of full write miss operations with block size smaller than cache line size
        description: |
          Validate CAS ability to not cache entire cache line size for full write miss operations
          when block size is smaller than cache line size – no fetch for writes
        pass_criteria:
          - Appropriate number of write full misses and writes to cache in cache statistics
          - Appropriate number of writes to cache in iostat
    """
    io_size = Size(300, Unit.MebiByte)

    with TestRun.step("Start cache and add core."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
        Udev.disable()
        core = cache.add_core(core_disk)
    with TestRun.step("Run writes to CAS device using fio."):
        io_stats_before_io = cache_disk.get_io_stats()
        blocksize = cache_line_size.value / 2
        skip_size = cache_line_size.value / 2
        run_fio(target=core.system_path,
                operation_type=ReadWrite.write,
                skip=skip_size,
                blocksize=blocksize,
                io_size=io_size)
    with TestRun.step("Verify CAS statistics for write full misses and writes to cache."):
        check_statistics(cache=cache, blocksize=blocksize, skip_size=skip_size, io_size=io_size)
    with TestRun.step("Verify number of writes to cache device using iostat. Shall be half of "
                      f"io size ({str(io_size / 2)}) + metadata for WB."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before_io,
                       io_size=io_size,
                       blocksize=blocksize,
                       skip_size=skip_size)
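
The iostat check reduces to a proportion: each write of blocksize bytes is followed by a hole of skip_size bytes, so only blocksize / (blocksize + skip_size) of io_size should reach the cache device (1/2 here, 3/4 in example #21). A sketch of check_io_stats() under that arithmetic; the metadata margin is an assumption:

def check_io_stats(cache_disk, cache, io_stats_before, io_size, blocksize, skip_size):
    io_stats_after = cache_disk.get_io_stats()
    written = Size(io_stats_after.sectors_written - io_stats_before.sectors_written,
                   Unit.Blocks512)
    # fraction of each (blocksize + skip_size) stride that is actually written
    ratio = blocksize.get_value(Unit.Byte) / (blocksize + skip_size).get_value(Unit.Byte)
    expected = io_size * ratio
    margin = Size(10, Unit.MebiByte)  # assumed allowance for metadata writes (WB)
    if written < expected or written > expected + margin:
        TestRun.LOGGER.error(f"Writes to cache device: {written}, "
                             f"expected about {expected}.")
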
def test_stress_load(cache_mode):
    """
        title: Stress test for stopping and loading CAS device.
        description: |
          Validate the ability of the CAS to load and stop cache in the loop
          using different cache modes.
        pass_criteria:
          - No system crash while stop and load cache in the loop.
          - CAS device loads successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        casadm.add_core(cache, core_dev)

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Stop cache and load it {iterations_per_config} times."):
        with TestRun.step("Stop cache."):
            casadm.stop_cache(cache.cache_id)
            if len(casadm_parser.get_caches()) != 0:
                TestRun.fail("Cache did not stop successfully.")
        with TestRun.step("Load cache."):
            casadm.load_cache(cache_dev)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Cache did not load successfully - wrong number of caches: {caches_count}."
                )
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.LOGGER.error(
                    f"Cache loaded with wrong cores count: {cores_count}.")

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
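
Both stress tests share a prepare() helper that only partitions the disks; a minimal sketch with illustrative sizes:

def prepare():
    cache_disk = TestRun.disks["cache"]
    core_disk = TestRun.disks["core"]
    cache_disk.create_partitions([Size(500, Unit.MebiByte)])
    core_disk.create_partitions([Size(1, Unit.GibiByte)])
    return cache_disk.partitions[0], core_disk.partitions[0]
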
def prepare_configuration(cache_mode, cache_line_size):
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    cache_device.create_partitions([Size(70, Unit.MebiByte)])
    core_device.create_partitions(
        [Size(70, Unit.MebiByte), Size(70, Unit.MebiByte)]
    )
    core1 = core_device.partitions[0]
    core2 = core_device.partitions[1]

    error_device = ErrorDevice(
        "error",
        core1,
        DmTable.uniform_error_table(
            start_lba=0,
            stop_lba=int(core1.size.get_value(Unit.Blocks512)),
            num_error_zones=100,
            error_zone_size=Size(5, Unit.Blocks512),
        ).fill_gaps(core1),
    )

    cache = casadm.start_cache(
        cache_device.partitions[0],
        cache_mode=cache_mode,
        cache_line_size=cache_line_size,
        force=True,
    )
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    cache.set_cleaning_policy(CleaningPolicy.nop)

    Udev.disable()
    error_core = cache.add_core(core_dev=error_device)
    good_core = cache.add_core(core_dev=core2)

    return cache, error_core, good_core
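
prepare_configuration() hands back the cache, a core backed by the error-injecting dm device and a healthy core. Hypothetical usage in a test body (parameter values are illustrative):

cache, error_core, good_core = prepare_configuration(CacheMode.WB,
                                                     CacheLineSize.LINE_4KiB)
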
Example #9
def prepare(cache_mode: CacheMode):
    base_prepare()
    ioclass_config.remove_ioclass_config()
    cache_device = next(disk for disk in TestProperties.dut.disks
                        if disk.disk_type in [DiskType.optane, DiskType.nand])
    core_device = next(disk for disk in TestProperties.dut.disks
                       if (disk.disk_type.value > cache_device.disk_type.value
                           and disk != cache_device))

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte)
    ])

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]
    core_device_2 = core_device.partitions[1]
    core_device_3 = core_device.partitions[2]

    Udev.disable()

    TestProperties.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
    TestProperties.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache_id, policy=CleaningPolicy.nop)
    TestProperties.LOGGER.info(f"Adding core devices")
    core_1 = cache.add_core(core_dev=core_device_1)
    core_2 = cache.add_core(core_dev=core_device_2)
    core_3 = cache.add_core(core_dev=core_device_3)

    output = TestProperties.executor.execute(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, [core_1, core_2, core_3]
Example #10
def test_stress_attach_detach():
    """
        title: Stress test for attaching and detaching multiple core devices.
        description: |
          Validate the ability of CAS to attach and detach core devices using script commands.
        pass_criteria:
          - No system crash.
          - Core devices are successfully attached and detached.
    """
    iterations_number = 50

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(100, Unit.MebiByte)])
        cache_part = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(5, Unit.GibiByte)] * 8)
        core_devices = core_disk.partitions

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_part)
        cores = []
        for dev in core_devices:
            cores.append(cache.add_core(dev))

    with TestRun.step("Attach and detach core devices in a loop."):
        for _ in TestRun.iteration(range(0, iterations_number)):
            TestRun.LOGGER.info("Detaching all core devices.")
            for core in cores:
                casadm.detach_core(cache.cache_id, core.core_id)

            random.shuffle(cores)

            TestRun.LOGGER.info("Attaching all core devices.")
            for core in cores:
                casadm.try_add(core.core_device, cache.cache_id, core.core_id)
Example #11
def prepare(random_cls, cache_count=1, cores_per_cache=1):
    cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
    ioclass_config.remove_ioclass_config()

    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']

    cache_device.create_partitions([Size(500, Unit.MebiByte)] * cache_count)
    core_device.create_partitions([Size(2, Unit.GibiByte)] * cache_count * cores_per_cache)

    cache_devices = cache_device.partitions
    core_devices = core_device.partitions
    for core_device in core_devices:
        core_device.create_filesystem(Filesystem.ext4)

    Udev.disable()
    caches, cores = [], []
    for i, cache_device in enumerate(cache_devices):
        TestRun.LOGGER.info(f"Starting cache on {cache_device.path}")
        cache = casadm.start_cache(cache_device,
                                   force=True,
                                   cache_mode=cache_modes[i],
                                   cache_line_size=random_cls)
        caches.append(cache)
        TestRun.LOGGER.info("Setting cleaning policy to NOP")
        cache.set_cleaning_policy(CleaningPolicy.nop)
        for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]:
            TestRun.LOGGER.info(
                f"Adding core device {core_device.path} to cache {cache.cache_id}")
            core = cache.add_core(core_dev=core_device)
            core.reset_counters()
            cores.append(core)

    TestRun.executor.run_expect_success(f"mkdir -p {mountpoint}")

    return caches, cores
def prepare():
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte)
    ])

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]
    core_device_2 = core_device.partitions[1]
    core_device_3 = core_device.partitions[2]

    TestRun.LOGGER.info("Staring cache")
    cache = casadm.start_cache(cache_device, force=True)
    TestRun.LOGGER.info("Adding core device")
    core_1 = cache.add_core(core_dev=core_device_1)
    core_2 = cache.add_core(core_dev=core_device_2)
    core_3 = cache.add_core(core_dev=core_device_3)

    return cache, core_device
def test_stop_no_flush_load_cache(cache_mode, filesystem):
    """
        title: Test to check that 'stop --no-data-flush' command works correctly.
        description: |
          Negative test of the ability of CAS to load unflushed cache on core device
          with filesystem. Test uses lazy flush cache modes.
        pass_criteria:
          - No system crash while load cache.
          - Starting cache without loading metadata fails.
          - Starting cache with loading metadata finishes with success.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_part, core_part = prepare()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Change cleaning policy to NOP."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step(
            f"Add core with {filesystem.name} filesystem to cache and mount it."
    ):
        core_part.create_filesystem(filesystem)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step(
            f"Create test file in mount point of exported object and check its md5 sum."
    ):
        test_file = fs_utils.create_random_test_file(test_file_path,
                                                     Size(48, Unit.MebiByte))
        test_file_md5_before = test_file.md5sum()

    with TestRun.step("Unmount exported object."):
        core.unmount()

    with TestRun.step("Count dirty blocks on exported object."):
        dirty_blocks_before = core.get_dirty_blocks()

    with TestRun.step("Stop cache with option '--no-data-flush'."):
        cache.stop(no_data_flush=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(
                f"Expected caches count: 0; Actual caches count: {caches_count}."
            )

    with TestRun.step("Try to start cache without loading metadata."):
        output = TestRun.executor.run_expect_fail(
            cli.start_cmd(cache_dev=str(cache_part.path),
                          cache_mode=str(cache_mode.name.lower()),
                          force=False,
                          load=False))
        cli_messages.check_stderr_msg(
            output, cli_messages.start_cache_with_existing_metadata)

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(
                f"Expected caches count: 1 Actual caches count: {caches_count}."
            )
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 1:
            TestRun.fail(
                f"Expected cores count: 1; Actual cores count: {cores_count}.")

    with TestRun.step(
            "Compare dirty blocks number before and after loading cache."):
        if dirty_blocks_before != core.get_dirty_blocks():
            TestRun.LOGGER.error(
                "Dirty blocks number is different than before loading cache.")

    with TestRun.step("Mount exported object."):
        core.mount(mount_point)

    with TestRun.step(
            "Compare md5 sum of test file before and after loading cache."):
        if test_file_md5_before != test_file.md5sum():
            TestRun.LOGGER.error(
                "Test file's md5 sum is different than before loading cache.")

    with TestRun.step("Unmount exported object."):
        core.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
def test_stop_no_flush_load_cache_no_fs(cache_mode):
    """
        title: Test to check that 'stop --no-data-flush' command works correctly.
        description: |
          Negative test of the ability of CAS to load unflushed cache on core device
          without filesystem. Test uses lazy flush cache modes.
        pass_criteria:
          - No system crash while load cache.
          - Starting cache without loading metadata fails.
          - Starting cache with loading metadata finishes with success.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_part, core_part = prepare()

    with TestRun.step("Start cache with --force option."):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Change cleaning policy to NOP."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Add core device without filesystem."):
        core_part.wipe_filesystem()
        core = cache.add_core(core_part)

    with TestRun.step("Fill exported object with data."):
        dd = (Dd().input("/dev/zero").output(core.path).block_size(
            Size(1, Unit.Blocks4096)).oflag("direct"))
        dd.run()

    with TestRun.step("Count dirty blocks on exported object."):
        dirty_blocks_before = core.get_dirty_blocks()

    with TestRun.step("Stop cache with option '--no-data-flush'."):
        cache.stop(no_data_flush=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(
                f"Expected caches count: 0; Actual caches count: {caches_count}."
            )

    with TestRun.step("Try to start cache without loading metadata."):
        output = TestRun.executor.run_expect_fail(
            cli.start_cmd(cache_dev=str(cache_part.path),
                          cache_mode=str(cache_mode.name.lower()),
                          force=False,
                          load=False))
        cli_messages.check_stderr_msg(
            output, cli_messages.start_cache_with_existing_metadata)

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(
                f"Expected caches count: 1 Actual caches count: {caches_count}."
            )
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 1:
            TestRun.fail(
                f"Expected cores count: 1; Actual cores count: {cores_count}.")

    with TestRun.step(
            "Compare dirty blocks number before and after loading cache."):
        if dirty_blocks_before != core.get_dirty_blocks():
            TestRun.LOGGER.error(
                "Dirty blocks number is different than before loading cache.")

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example #15
def test_recovery_flush_reset_raw(cache_mode):
    """
        title: Recovery after reset during cache flushing - test on raw device.
        description: |
          Verify that unflushed data can be safely recovered, when reset was pressed during
          data flushing on raw device.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk.create_partitions([Size(16, Unit.GibiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]
        core_device_link = core_device.get_device_link("/dev/disk/by-id")
        cache_device_link = cache_device.get_device_link("/dev/disk/by-id")

    with TestRun.step("Create test files."):
        source_file, target_file = create_test_files(test_file_size)

    with TestRun.step("Setup cache and add core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        core = cache.add_core(core_device)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Copy file to CAS."):
        copy_file(source=source_file.full_path,
                  target=core.path,
                  size=test_file_size,
                  direct="oflag")

    with TestRun.step("Sync and flush buffers."):
        os_utils.sync()
        output = TestRun.executor.run(f"hdparm -f {core.path}")
        if output.exit_code != 0:
            raise CmdException("Error during hdparm", output)

    with TestRun.step("Trigger flush."):
        TestRun.executor.run_in_background(
            cli.flush_cache_cmd(f"{cache.cache_id}"))

    with TestRun.step("Hard reset DUT during data flushing."):
        power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
        cache_device.path = cache_device_link.get_target()
        core_device.path = core_device_link.get_target()

    with TestRun.step(
            "Copy file from core and check if current md5sum is different than "
            "before restart."):
        copy_file(source=core_device_link.get_target(),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file, should_differ=True)

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_device)
        if cache.get_dirty_blocks() == Size.zero():
            TestRun.fail("There are no dirty blocks on cache device.")

    with TestRun.step("Stop cache with dirty data flush."):
        core_writes_before = core_device.get_io_stats().sectors_written
        cache.stop()
        if core_writes_before >= core_device.get_io_stats().sectors_written:
            TestRun.fail(
                "No data was flushed after stopping cache started with load option."
            )

    with TestRun.step(
            "Copy test file from core device to temporary location. "
            "Compare it with the first version – they should be the same."):
        copy_file(source=core_device_link.get_target(),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file)

    with TestRun.step("Cleanup core device and remove test files."):
        target_file.remove()
        source_file.remove()
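
The recovery tests lean on a few shared helpers. Sketches of copy_file() and power_cycle_dut(): the Dd-based copy follows example #14, while the block size, the iflag support, the flush-begin detection and the power-control plugin name are assumptions:

def copy_file(source, target, size, direct=None):
    dd = (Dd()
          .input(source)
          .output(target)
          .block_size(Size(1, Unit.MebiByte))  # block size assumed
          .count(int(size / Size(1, Unit.MebiByte))))
    if direct == "oflag":
        dd.oflag("direct")
    elif direct == "iflag":
        dd.iflag("direct")
    dd.run()


def power_cycle_dut(wait_for_flush_begin=True, core_device=None):
    if wait_for_flush_begin:
        # wait until the background flush shows up as writes on the core device
        writes_before = core_device.get_io_stats().sectors_written
        while core_device.get_io_stats().sectors_written == writes_before:
            time.sleep(0.1)
    power_control = TestRun.plugin_manager.get_plugin("power_control")  # assumed plugin
    power_control.power_cycle()
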
Example #16
def test_recovery_flush_reset_fs(cache_mode, fs):
    """
        title: Recovery after reset during cache flushing - test on filesystem.
        description: |
          Verify that unflushed data can be safely recovered, when reset was pressed during
          data flushing on filesystem.
        pass_criteria:
          - CAS recovers successfully after reboot
          - No data corruption
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk.create_partitions([Size(16, Unit.GibiByte)] * 2)
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]
        core_device_link = core_device.get_device_link("/dev/disk/by-id")
        cache_device_link = cache_device.get_device_link("/dev/disk/by-id")

    with TestRun.step(f"Create {fs} filesystem on core."):
        core_device.create_filesystem(fs)

    with TestRun.step("Create test files."):
        source_file, target_file = create_test_files(test_file_size)

    with TestRun.step("Setup cache and add core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        Udev.disable()
        core = cache.add_core(core_device)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Mount CAS device."):
        core.mount(mount_point)

    with TestRun.step("Copy file to CAS."):
        copy_file(source=source_file.full_path,
                  target=os.path.join(mount_point, "source_test_file"),
                  size=test_file_size,
                  direct="oflag")

    with TestRun.step("Unmount CAS device."):
        core.unmount()

    with TestRun.step("Trigger flush."):
        TestRun.executor.run_in_background(
            cli.flush_cache_cmd(f"{cache.cache_id}"))

    with TestRun.step("Hard reset DUT during data flushing."):
        power_cycle_dut(True, core_device)
        cache_device.path = cache_device_link.get_target()
        core_device.path = core_device_link.get_target()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_device)
        if cache.get_dirty_blocks() == Size.zero():
            TestRun.fail("There are no dirty blocks on cache device.")

    with TestRun.step("Stop cache with dirty data flush."):
        core_writes_before = core_device.get_io_stats().sectors_written
        cache.stop()
        if core_writes_before >= core_device.get_io_stats().sectors_written:
            TestRun.fail(
                "No data was flushed after stopping cache started with load option."
            )

    with TestRun.step("Mount core device."):
        core_device.mount(mount_point)

    with TestRun.step(
            "Copy test file from core device to temporary location. "
            "Compare it with the first version – they should be the same."):
        copy_file(source=os.path.join(mount_point, "source_test_file"),
                  target=target_file.full_path,
                  size=test_file_size,
                  direct="iflag")
        compare_files(source_file, target_file)

    with TestRun.step("Unmount core device and remove test files."):
        core_device.unmount()
        target_file.remove()
        source_file.remove()
        Udev.enable()
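
Sketches of the remaining file helpers used by both recovery tests; fs_utils.create_random_test_file() and File.create_file() appear in other examples, the paths are assumptions:

def create_test_files(test_file_size):
    source_file = fs_utils.create_random_test_file("/tmp/source_test_file",
                                                   test_file_size)
    target_file = File.create_file("/tmp/target_test_file")
    return source_file, target_file


def compare_files(source_file, target_file, should_differ=False):
    files_differ = source_file.md5sum() != target_file.md5sum()
    if files_differ != should_differ:
        TestRun.fail("Unexpected md5 sum comparison result "
                     f"(should_differ={should_differ}).")
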
Example #17
def test_acp_param_wake_up_time(cache_line_size, cache_mode):
    """
        title: Functional test for ACP wake-up parameter.
        description: |
          Verify if interval between ACP cleaning iterations is not longer than
          wake-up time parameter value.
        pass_criteria:
          - ACP flush iterations are triggered with defined frequency.
    """
    with TestRun.step("Test prepare."):
        error_threshold_ms = 50
        generated_vals = get_random_list(
            min_val=FlushParametersAcp.acp_params_range().wake_up_time[0],
            max_val=FlushParametersAcp.acp_params_range().wake_up_time[1],
            n=10,
        )
        acp_configs = []
        for config in generated_vals:
            acp_configs.append(
                FlushParametersAcp(wake_up_time=Time(milliseconds=config)))
        acp_configs.append(FlushParametersAcp.default_acp_params())

    with TestRun.step("Prepare partitions."):
        core_size = Size(5, Unit.GibiByte)
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]
        cache_device.create_partitions([Size(10, Unit.GibiByte)])
        core_device.create_partitions([core_size])

    with TestRun.step(
            f"Start cache in {cache_mode} with {cache_line_size} and add core."
    ):
        cache = casadm.start_cache(cache_device.partitions[0], cache_mode,
                                   cache_line_size)
        core = cache.add_core(core_device.partitions[0])

    with TestRun.step("Set cleaning policy to NOP."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Start IO in background."):
        fio = get_fio_cmd(core, core_size)
        fio.run_in_background()
        time.sleep(10)

    with TestRun.step("Set cleaning policy to ACP."):
        cache.set_cleaning_policy(CleaningPolicy.acp)

    with TestRun.group("Verify IO number for different wake_up_time values."):
        for acp_config in acp_configs:
            with TestRun.step(f"Setting {acp_config}"):
                cache.set_params_acp(acp_config)
                accepted_interval_threshold = (
                    acp_config.wake_up_time.total_milliseconds() +
                    error_threshold_ms)
            with TestRun.step(
                    "Using blktrace verify if interval between ACP cleaning iterations "
                    f"is shorter or equal than wake-up parameter value "
                    f"(including {error_threshold_ms}ms error threshold)"):
                blktrace = BlkTrace(core.core_device, BlkTraceMask.write)
                blktrace.start_monitoring()
                time.sleep(15)
                blktrace_output = blktrace.stop_monitoring()

                for (prev, curr) in zip(blktrace_output, blktrace_output[1:]):
                    if not new_acp_iteration(prev, curr):
                        continue

                    interval_ms = (curr.timestamp - prev.timestamp) / 10**6

                    if interval_ms > accepted_interval_threshold:
                        TestRun.LOGGER.error(
                            f"{interval_ms} is not within accepted range for "
                            f"{acp_config.wake_up_time.total_milliseconds()} "
                            f"wake_up_time param value.")

    with TestRun.step("Stop all caches"):
        kill_all_io()
        casadm.stop_all_caches()
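
A sketch of get_fio_cmd(), assembled from the Fio wrapper calls used in example #18; the real helper presumably also makes the job time-based so that dirty data keeps flowing for the whole measurement, which is omitted here:

def get_fio_cmd(core, size):
    return (Fio().create_command()
            .io_engine(IoEngine.sync)
            .read_write(ReadWrite.write)
            .direct()
            .size(size)
            .block_size(Size(1, Unit.Blocks4096))
            .target(core.path))
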
Example #18
def test_acp_functional(cache_mode):
    """
        title: Validate ACP behavior.
        description: |
          Validate that ACP is cleaning dirty data from chunks bucket - sorted by number of
          dirty pages.
        pass_criteria:
          - All chunks are cleaned in proper order
    """
    chunks_count = 8
    chunk_size = Size(100, Unit.MebiByte)
    chunk_list = []

    def sector_in_chunk(chunk, blktrace_header):
        sector_to_size = Size(blktrace_header.sector_number, Unit.Blocks512)
        return chunk.offset <= sector_to_size < chunk.offset + chunk_size

    def get_header_chunk(bucket_chunks, blktrace_header):
        return next(
            (c for c in bucket_chunks if sector_in_chunk(c, blktrace_header)),
            None)

    def sector_in_tested_region(blktrace_header, list_of_chunks):
        return any(
            [sector_in_chunk(c, blktrace_header) for c in list_of_chunks])

    with TestRun.step("Prepare devices."):
        cache_device = TestRun.disks['cache']
        core_device = TestRun.disks['core']
        cache_device.create_partitions([chunk_size * chunks_count])
        cache_device = cache_device.partitions[0]

    with TestRun.step("Start cache in WB mode, set cleaning policy to NOP "
                      "and add whole disk as core."):
        cache = casadm.start_cache(cache_device, cache_mode)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        core = cache.add_core(core_device)

    with TestRun.step(
            "Run separate random writes with random amount of data on every "
            "100 MiB part of CAS device."):
        Chunk = namedtuple('Chunk', 'offset writes_size')
        random_chunk_writes = random.sample(range(1, 101), chunks_count)
        for i in range(chunks_count):
            c = Chunk(chunk_size * i,
                      Size(random_chunk_writes[i], Unit.MebiByte))
            chunk_list.append(c)

        fio = (Fio().create_command().io_engine(IoEngine.sync).read_write(
            ReadWrite.randwrite).direct().size(chunk_size).block_size(
                Size(1, Unit.Blocks4096)).target(f"{core.path}"))
        for chunk in chunk_list:
            fio.add_job().offset(chunk.offset).io_size(chunk.writes_size)
        fio.run()

        dirty_blocks = cache.get_dirty_blocks()
        if dirty_blocks == Size.zero():
            TestRun.fail("No dirty data on cache after IO.")
        TestRun.LOGGER.info(str(cache.get_statistics()))

    with TestRun.step(
            "Switch cleaning policy to ACP and start blktrace monitoring."):
        trace = BlkTrace(core.core_device, BlkTraceMask.write)
        trace.start_monitoring()

        initial_dirty_blocks = cache.get_dirty_blocks()
        cache.set_cleaning_policy(CleaningPolicy.acp)
        while cache.get_dirty_blocks() > Size.zero():
            time.sleep(10)
            if cache.get_dirty_blocks() == initial_dirty_blocks:
                TestRun.fail(
                    f"No data flushed in 10s.\n{str(cache.get_statistics())}")
            initial_dirty_blocks = cache.get_dirty_blocks()

        TestRun.LOGGER.info(str(cache.get_statistics()))

        action_kind = ActionKind.IoHandled
        output = trace.stop_monitoring()
        blktrace_output = [
            h for h in output
            if h.action == action_kind and RwbsKind.F not in h.rwbs
        ]

        if not blktrace_output:
            TestRun.fail(f"No {action_kind.name} entries in blktrace output!")
        TestRun.LOGGER.debug(
            f"Blktrace headers count: {len(blktrace_output)}.")

    with TestRun.step(
            "Using blktrace verify that cleaning thread cleans data from "
            "all CAS device parts in proper order."):
        all_writes_ok = True
        last_sector = None
        max_percent = 100
        bucket_chunks = []
        current_chunk = None

        for header in blktrace_output:
            # Sector not in current chunk - search for the next chunk
            if current_chunk is None or \
                    not sector_in_chunk(current_chunk, header):
                # Search for bucket with chunks that contain most dirty data
                while not bucket_chunks and max_percent > 0:
                    bucket_chunks = [
                        chunk for chunk in chunk_list
                        if max_percent >= chunk.writes_size.get_value(
                            Unit.MebiByte) > max_percent - 10
                    ]
                    max_percent -= 10

                if not bucket_chunks:
                    TestRun.fail(
                        f"No chunks left for sector {header.sector_number} "
                        f"({Size(header.sector_number, Unit.Blocks512)}).")

                # Get chunk within current bucket where current header sector is expected
                chunk = get_header_chunk(bucket_chunks, header)
                if not chunk:
                    TestRun.LOGGER.error(
                        f"Sector {header.sector_number} "
                        f"({Size(header.sector_number, Unit.Blocks512)}) "
                        f"not in current bucket.")
                    all_writes_ok = False
                    if not sector_in_tested_region(header, chunk_list):
                        TestRun.LOGGER.error(
                            f"Sector {header.sector_number} "
                            f"({Size(header.sector_number, Unit.Blocks512)}) "
                            f"outside of any tested chunk.")
                    continue

                # Set new chunk as current
                if current_chunk:
                    TestRun.LOGGER.info(f"Writes to chunk: {write_counter}")
                current_chunk = chunk
                write_counter = 1
                bucket_chunks.remove(chunk)
                last_sector = header.sector_number
                TestRun.LOGGER.debug(
                    f"First written sector in new chunk: {header.sector_number} "
                    f"({Size(header.sector_number, Unit.Blocks512)})")
                continue

            # Sector in current chunk - check sequential order
            if last_sector is None or header.sector_number >= last_sector:
                last_sector = header.sector_number
            else:
                TestRun.LOGGER.error(
                    f"Sectors in chunk <{current_chunk.offset}, "
                    f"{str(current_chunk.offset + chunk_size)}) written in bad "
                    f"order - sector {header.sector_number} ("
                    f"{Size(header.sector_number, Unit.Blocks512)}) after sector "
                    f"{last_sector} ({Size(last_sector, Unit.Blocks512)})")
                all_writes_ok = False
            write_counter += 1
        TestRun.LOGGER.info(f"Writes to chunk: {write_counter}")

        if all_writes_ok:
            TestRun.LOGGER.info("All sectors written in proper order.")
Example #19
def test_neg_udev_cache_load():
    """
        title: CAS udev rule for cache negative test.
        description: |
          Verify if CAS udev rule is executed properly for a cache with valid metadata
          and does not load a cache without metadata.
        pass_criteria:
          - No kernel error
          - Cache with metadata is properly loaded
          - Cache without metadata is not loaded
          - Cores assigned to the not loaded cache are not inserted into the core pool
            after plugging the cache disk
          - Cores assigned to the not loaded cache are inserted into the core pool
            after plugging the core disk
    """
    caches_count = 2
    cores_count = 4

    with TestRun.step(
            "Create init config file with two caches and two cores per each cache."
    ):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)] * caches_count)
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
        first_cache_core_numbers = random.sample(range(0, cores_count), 2)
        init_conf = InitConfig()
        for i in range(0, caches_count):
            init_conf.add_cache(i + 1, cache_disk.partitions[i])
        for j in range(0, cores_count):
            init_conf.add_core(1 if j in first_cache_core_numbers else 2,
                               j + 1, core_disk.partitions[j])
        init_conf.save_config_file()

    with TestRun.step(
            "Start one cache and add two cores as defined in init config."):
        cache = casadm.start_cache(cache_disk.partitions[0])
        for i in first_cache_core_numbers:
            cache.add_core(core_disk.partitions[i])

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Unplug and plug cache disk."):
        cache_disk.unplug()
        cache_disk.plug()
        time.sleep(1)

    with TestRun.step("Check if CAS is loaded correctly."):
        cas_devices = casadm_parser.get_cas_devices_dict()
        if len(cas_devices["core_pool"]) != 0:
            TestRun.LOGGER.error(
                f"There is wrong number of core devices in core pool. Expected: 0,"
                f" actual: {len(cas_devices['core_pool'])}")
        if len(cas_devices["caches"]) != 1:
            TestRun.LOGGER.error(
                f"There is wrong number of caches. Expected: 1, actual: "
                f"{len(cas_devices['caches'])}")
        elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].path or \
                CacheStatus[(cas_devices["caches"][1]["status"]).lower()] != CacheStatus.running:
            TestRun.LOGGER.error(
                f"Cache did not load properly: {cas_devices['caches'][1]}")
        if len(cas_devices["cores"]) != 2:
            TestRun.LOGGER.error(
                f"There is wrong number of cores. Expected: 2, actual: "
                f"{len(cas_devices['caches'])}")

        correct_core_devices = []
        for i in first_cache_core_numbers:
            correct_core_devices.append(core_disk.partitions[i].path)
        for core in cas_devices["cores"].values():
            if core["device"] not in correct_core_devices or \
                    CoreStatus[core["status"].lower()] != CoreStatus.active or \
                    core["cache_id"] != 1:
                TestRun.LOGGER.error(f"Core did not load correctly: {core}.")

    with TestRun.step("Unplug and plug core disk."):
        core_disk.unplug()
        core_disk.plug()
        time.sleep(1)

    with TestRun.step(
            "Check if two cores assigned to not loaded cache are inserted to core pool."
    ):
        cas_devices = casadm_parser.get_cas_devices_dict()
        if len(cas_devices["core_pool"]) != 2:
            TestRun.LOGGER.error(
                f"There is wrong number of cores in core pool. Expected: 2, "
                f"actual: {len(cas_devices['core_pool'])}")
        core_pool_expected_devices = []
        for i in range(0, cores_count):
            if i not in first_cache_core_numbers:
                core_pool_expected_devices.append(core_disk.partitions[i].path)
        for c in cas_devices["core_pool"]:
            if c["device"] not in core_pool_expected_devices:
                TestRun.LOGGER.error(
                    f"Wrong core device added to core pool: {c}.")
Example #20
def test_clean_stop_cache(cache_mode):
    """
        title: Test of the ability to stop cache in modes with lazy writes.
        description: |
          Test if OpenCAS stops cache in modes with lazy writes without data loss.
        pass_criteria:
          - Cache stopping works properly.
          - Writes to the exported object and the core device during OpenCAS operation are equal
          - Data on core device is correct after cache is stopped.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step("Add core to cache."):
        core = cache.add_core(core_part)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Read IO stats before test"):
        core_disk_writes_initial = check_device_write_stats(core_part)
        exp_obj_writes_initial = check_device_write_stats(core)

    with TestRun.step("Write data to the exported object."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        dd = Dd().output(core.system_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Read IO stats after write to the exported object."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_initial
        )
        exp_obj_writes_increase = (
            check_device_write_stats(core) - exp_obj_writes_initial
        )

    with TestRun.step("Validate IO stats after write to the exported object."):
        if core_disk_writes_increase > 0:
            TestRun.LOGGER.error("Writes should occur only on the exported object.")
        if exp_obj_writes_increase != test_file_main.size.value:
            TestRun.LOGGER.error("Not all writes reached the exported object.")

    with TestRun.step("Read data from the exported object."):
        test_file_1 = File.create_file("/tmp/test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(core.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main == test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be different.")

    with TestRun.step("Read IO stats before stopping cache."):
        core_disk_writes_before_stop = check_device_write_stats(core_part)

    with TestRun.step("Stop cache."):
        cache.stop()

    with TestRun.step("Read IO stats after stopping cache."):
        core_disk_writes_increase = (
            check_device_write_stats(core_part) - core_disk_writes_before_stop
        )

    with TestRun.step("Validate IO stats after stopping cache."):
        if core_disk_writes_increase == 0:
            TestRun.LOGGER.error("Writes should occur on the core device after stopping cache.")
        if core_disk_writes_increase != exp_obj_writes_increase:
            TestRun.LOGGER.error("Write statistics for the core device should be equal "
                                 "to those from the exported object.")

    with TestRun.step("Read data from the core device."):
        test_file_3 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_3.full_path) \
            .input(core_part.system_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_3.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_3.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)
        test_file_3.remove(True)
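
A sketch of check_device_write_stats() consistent with the comparisons above: the increase is matched against a file size in bytes, so the sector count is converted (the conversion is an assumption):

def check_device_write_stats(device):
    # return bytes written so the value is comparable with File.size.value
    return Size(device.get_io_stats().sectors_written,
                Unit.Blocks512).get_value(Unit.Byte)
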
Example #21
def test_write_fetch_partial_misses(cache_mode, cache_line_size):
    """
        title: No caching of partial write miss operations
        description: |
          Validate CAS ability to not cache entire cache line size for
          partial write miss operations
        pass_criteria:
          - Appropriate number of write partial misses, write hits and writes to cache
            in cache statistics
          - Appropriate number of writes to cache in iostat
    """
    pattern = f"0x{uuid.uuid4().hex}"
    io_size = Size(600, Unit.MebiByte)

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([io_size + Size(1, Unit.MebiByte)])
        core_part = core_disk.partitions[0]

    with TestRun.step("Fill core partition with pattern."):
        cache_mode_traits = CacheMode.get_traits(cache_mode)
        if CacheModeTrait.InsertRead in cache_mode_traits:
            run_fio(target=core_part.path,
                    operation_type=ReadWrite.write,
                    blocksize=Size(4, Unit.KibiByte),
                    io_size=io_size,
                    verify=True,
                    pattern=pattern)
        else:
            TestRun.LOGGER.info(f"Skipped for {cache_mode} cache mode.")

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
        Udev.disable()
        core = cache.add_core(core_part)
    with TestRun.step("Cache half of file."):
        operation_type = ReadWrite.read if CacheModeTrait.InsertRead in cache_mode_traits \
            else ReadWrite.write
        run_fio(target=core.path,
                operation_type=operation_type,
                skip=cache_line_size.value,
                blocksize=cache_line_size.value,
                io_size=io_size,
                verify=True,
                pattern=pattern)
        if CacheModeTrait.InsertRead not in cache_mode_traits:
            cache.flush_cache()
        casadm.reset_counters(cache.cache_id, core.core_id)
    with TestRun.step("Run writes to CAS device using fio."):
        io_stats_before_io = cache_disk.get_io_stats()
        blocksize = cache_line_size.value / 2 * 3
        skip_size = cache_line_size.value / 2
        run_fio(target=core.path,
                operation_type=ReadWrite.write,
                skip=skip_size,
                blocksize=blocksize,
                io_size=io_size)
    with TestRun.step(
            "Verify CAS statistics for partial misses, write hits and writes to cache."
    ):
        check_statistics(cache=cache,
                         blocksize=blocksize,
                         skip_size=skip_size,
                         io_size=io_size,
                         partial_misses=True)
    with TestRun.step(
            "Verify number of writes to cache device using iostat. Shall be 0.75 of "
            f"io size ({str(io_size * 0.75)}) + metadata for cache mode with write "
            f"insert feature."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before_io,
                       io_size=io_size,
                       blocksize=blocksize,
                       skip_size=skip_size)
def test_cache_stop_and_load(cache_mode):
    """
        title: Test for stopping and loading cache back with dynamic cache mode switching.
        description: |
          Validate the ability of the CAS to switch cache modes at runtime and
          check if all of them are working properly after switching and
          after stopping and reloading cache back.
          Check also other parameters consistency after reload.
        pass_criteria:
          - In all cache modes data reads and writes are handled properly before and after reload.
          - All cache parameters preserve their values after reload.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()

    with TestRun.step(f"Start cache in {cache_mode[0]} mode"):
        cache = casadm.start_cache(cache_dev, cache_mode[0], force=True)
        Udev.disable()

    with TestRun.step("Add core to the cache"):
        core = cache.add_core(core_dev)

    with TestRun.step(f"Change cache mode to {cache_mode[1]}"):
        cache.set_cache_mode(cache_mode[1], flush=True)
        check_cache_config = cache.get_cache_config()

    with TestRun.step(f"Check if {cache_mode[1]} cache mode works properly"):
        check_cache_mode_operation(cache, core, cache_mode[1])

    with TestRun.step("Stop and load cache back"):
        cache.stop()
        cache = casadm.load_cache(cache_dev)

    with TestRun.step("Check parameters consistency"):
        if check_cache_config != cache.get_cache_config():
            failed_params = ""
            if check_cache_config.cache_mode != cache.get_cache_mode():
                failed_params += (
                    f"Cache mode is: {cache.get_cache_mode()}, "
                    f"should be: {check_cache_config.cache_mode}\n")
            if check_cache_config.cleaning_policy != cache.get_cleaning_policy():
                failed_params += (
                    f"Cleaning policy is: {cache.get_cleaning_policy()}, "
                    f"should be: {check_cache_config.cleaning_policy}\n")
            if check_cache_config.cache_line_size != cache.get_cache_line_size():
                failed_params += (
                    f"Cache line size is: {cache.get_cache_line_size()}, "
                    f"should be: {check_cache_config.cache_line_size}\n")
            TestRun.fail(
                f"Parameters do not match after reload:\n{failed_params}")

    with TestRun.step(
            f"Check if {cache_mode[1]} cache mode works properly after reload"
    ):
        if cache_mode[1] in (CacheMode.WA, CacheMode.WO):
            check_separated_read_write_after_reload(cache, core, cache_mode[1],
                                                    io_size)
        else:
            check_cache_mode_operation(cache, core, cache_mode[1])

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
        Udev.enable()
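# storage_prepare() above is defined elsewhere in the suite. A plausible
# sketch, following the partitioning pattern used by the other tests in this
# collection; the partition sizes are assumptions:
def storage_prepare():
    cache_disk = TestRun.disks['cache']
    cache_disk.create_partitions([Size(1, Unit.GibiByte)])  # assumed size
    core_disk = TestRun.disks['core']
    core_disk.create_partitions([Size(2, Unit.GibiByte)])  # assumed size
    return cache_disk.partitions[0], core_disk.partitions[0]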
def test_recovery_unplug_cache_fs(cache_mode, cls, filesystem, direct):
    """
            title: Test for recovery after cache drive removal - test with filesystem.
            description: |
              Verify that unflushed data can be safely recovered after, when SSD drive is removed
              after write completion - test with filesystem.
            pass_criteria:
              - CAS recovers successfully after cache drive unplug
              - No data corruption
    """
    with TestRun.step("Prepare devices"):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk.create_partitions([Size(16, Unit.GibiByte)])
        cache_device = cache_disk.partitions[0]
        core_device = core_disk.partitions[0]

    with TestRun.step("Create test files."):
        source_file, target_file = create_test_files(test_file_size)
        source_file_md5 = source_file.md5sum()

    with TestRun.step("Create filesystem on core device."):
        core_device.create_filesystem(filesystem)

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_device, cache_mode, cls)
        core = cache.add_core(core_device)

    with TestRun.step("Mount CAS device."):
        core.mount(mount_point)

    with TestRun.step("Copy file to CAS."):
        copy_file(source=source_file.full_path, target=test_file_path,
                  size=test_file_size, direct="oflag" if direct else None)
        TestRun.LOGGER.info(str(core.get_statistics()))

    with TestRun.step("Unmount CAS device."):
        core.unmount()

    with TestRun.step("Unplug cache device."):
        cache_disk.unplug()
        TestRun.LOGGER.info(f"List caches:\n{casadm.list_caches().stdout}")
        TestRun.LOGGER.info(f"Dirty blocks on cache: "
                            f"{cache.get_dirty_blocks().get_value(Unit.Blocks4096)}")

    with TestRun.step("Stop cache."):
        try:
            cache.stop()
            TestRun.fail("Stopping the cache should be aborted without --no-flush flag.")
        except CmdException as e:
            TestRun.LOGGER.info(str(e.output))
            try:
                cache.stop(no_data_flush=True)
                TestRun.LOGGER.warning("Expected stopping cache with errors with --no-flush flag.")
            except CmdException as e1:
                cli_messages.check_stderr_msg(e1.output, cli_messages.stop_cache_errors)

    with TestRun.step("Plug missing cache device."):
        TestRun.LOGGER.info(str(casadm.list_caches(by_id_path=False)))
        cache_disk.plug()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_device)
        TestRun.LOGGER.info(f"Dirty blocks on cache: "
                            f"{cache.get_dirty_blocks().get_value(Unit.Blocks4096)}")

    with TestRun.step("Stop cache with data flush."):
        cache.stop()

    with TestRun.step("Mount core device."):
        core_device.mount(mount_point)

    with TestRun.step("Copy file from core device and check md5sum."):
        copy_file(source=test_file_path, target=target_file.full_path,
                  size=test_file_size, direct="iflag" if direct else None)
        target_file_md5 = target_file.md5sum()
        compare_files(source_file_md5, target_file_md5)

    with TestRun.step("Unmount core device and remove files."):
        core_device.unmount()
        try:
            target_file.remove()
            source_file.remove()
        except Exception:
            # On some OSes files at /tmp location are automatically removed after DUT hard reset
            pass
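# copy_file() used above is another suite helper not shown here. A sketch of
# what it plausibly does with the Dd builder; the 1 MiB block size is an
# assumption. `direct` selects which side of the copy bypasses the page cache:
def copy_file(source, target, size, direct=None):
    block_size = Size(1, Unit.MebiByte)
    dd = Dd().input(source) \
        .output(target) \
        .block_size(block_size) \
        .count(int(size / block_size))
    if direct == "oflag":
        dd.oflag("direct")  # direct writes to the target
    elif direct == "iflag":
        dd.iflag("direct")  # direct reads from the source
    dd.run()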
Example #24
def test_concurrent_caches_flush(cache_mode):
    """
        title: Successfully flush two caches simultaneously.
        description: |
          CAS should successfully flush multiple caches if there is already another flush
          in progress.
        pass_criteria:
          - No system crash.
          - Flush for each cache should finish successfully.
    """
    with TestRun.step("Prepare caches and cores."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([cache_size] * caches_number)
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([cache_size * 2] * caches_number)

    with TestRun.step(f"Start {caches_number} caches."):
        caches = []
        for part in cache_dev.partitions:
            caches.append(casadm.start_cache(part, cache_mode, force=True))

    with TestRun.step("Disable cleaning and sequential cutoff."):
        for cache in caches:
            cache.set_cleaning_policy(CleaningPolicy.nop)
            cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step(f"Add core devices to caches."):
        cores = []
        for i, cache in enumerate(caches):
            cores.append(cache.add_core(core_dev.partitions[i]))

    with TestRun.step("Run workload on each OpenCAS device."):
        # Each cache has one core fully saturated with dirty blocks.
        block_size = Size(4, Unit.MebiByte)
        count = int(cache_size.value / block_size.value)
        total_saturation = block_size * count
        for core in cores:
            Dd().output(core.path) \
                .input("/dev/urandom") \
                .block_size(block_size) \
                .count(count) \
                .run()

    with TestRun.step("Check if each cache is full of dirty blocks."):
        for cache in caches:
            if not int(cache.get_dirty_blocks()) != total_saturation.get_value(Unit.Blocks4096):
                TestRun.fail(f"The cache {cache.cache_id} does not contain dirty blocks.")

    with TestRun.step("Start flushing all caches simultaneously."):
        flush_pids = []
        for cache in caches:
            flush_pids.append(
                TestRun.executor.run_in_background(cli.flush_cache_cmd(str(cache.cache_id)))
            )

    with TestRun.step("Wait for all caches to finish flushing."):
        is_flushing = [True] * len(flush_pids)
        while any(is_flushing):
            for i, pid in enumerate(flush_pids):
                is_flushing[i] = (TestRun.executor.run(f"ls /proc/{pid}").exit_code == 0)

    with TestRun.step("Check number of dirty data on each cache."):
        for cache in caches:
            if int(cache.get_dirty_blocks()) > 0:
                TestRun.LOGGER.error(f"The quantity of dirty cache lines on the cache "
                                     f"{str(cache.cache_id)} after complete flush should be zero.")

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
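# The saturation check above hinges on a unit conversion: dd submits 4 MiB
# blocks while casadm accounts dirty data in 4 KiB cache lines. A sketch of
# the equivalence; the function name is illustrative only:
def expected_dirty_lines(count):
    # Each 4 MiB dd block dirties 1024 cache lines of 4 KiB, so a fully
    # saturated cache reports count * 1024 lines -- total_saturation
    # expressed in Unit.Blocks4096.
    lines_per_block = int(Size(4, Unit.MebiByte) / Size(4, Unit.KibiByte))
    return count * lines_per_block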
Example #25
def test_clean_remove_core_with_fs(cache_mode, fs):
    """
        title: Test of the ability to remove core from cache in lazy-write modes with filesystem.
        description: |
          Test if OpenCAS removes core from cache in modes with lazy writes and with different
          filesystems without data loss.
        pass_criteria:
          - Core removing works properly.
          - Data on core device is correct after core is removed.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(256, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(512, Unit.MebiByte)])
        core_part = core_dev.partitions[0]
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(f"Add core with {fs.name} filesystem to cache and mount it."):
        core_part.create_filesystem(fs)
        core = cache.add_core(core_part)
        core.mount(mnt_point)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file and read its md5 sum."):
        test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
        test_file_md5sum_main = test_file_main.md5sum()

    with TestRun.step("Copy test file to the exported object."):
        test_file_1 = File.create_file(mnt_point + "test_file_1")
        dd = Dd().output(test_file_1.full_path) \
            .input(test_file_main.full_path) \
            .block_size(bs) \
            .count(int(test_file_main.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_1.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_1.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Unmount and remove core."):
        core.unmount()
        core.remove_core()

    with TestRun.step("Mount core device."):
        core_part.mount(mnt_point)

    with TestRun.step("Read data from the core device."):
        test_file_2 = File.create_file("/tmp/test_file_2")
        dd = Dd().output(test_file_2.full_path) \
            .input(test_file_1.full_path) \
            .block_size(bs) \
            .count(int(test_file_1.size / bs)) \
            .oflag("direct")
        dd.run()
        test_file_2.refresh_item()
        sync()

    with TestRun.step("Compare md5 sum of test files."):
        if test_file_md5sum_main != test_file_2.md5sum():
            TestRun.LOGGER.error("Md5 sums should be equal.")

    with TestRun.step("Delete test files."):
        test_file_main.remove(True)
        test_file_1.remove(True)
        test_file_2.remove(True)

    with TestRun.step("Unmount core device."):
        core_part.unmount()
        remove(mnt_point, True, True, True)
Example #26
def test_concurrent_cores_flush(cache_mode):
    """
        title: Fail to flush two cores simultaneously.
        description: |
          CAS should return an error on an attempt to flush the second core if there is
          already one flush in progress.
        pass_criteria:
          - No system crash.
          - First core flushing should finish successfully.
          - It should not be possible to run flushing command on cores within
            the same cache simultaneously.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([cache_size])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([cache_size * 2] * 2)
        core_part1 = core_dev.partitions[0]
        core_part2 = core_dev.partitions[1]

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step(f"Add both core devices to cache."):
        core1 = cache.add_core(core_part1)
        core2 = cache.add_core(core_part2)

    with TestRun.step("Run workload on concurrent cores."):
        block_size = Size(4, Unit.MebiByte)
        count = int(cache_size.value / 2 / block_size.value)

        dd_pid = Dd().output(core1.path) \
            .input("/dev/urandom") \
            .block_size(block_size) \
            .count(count) \
            .run_in_background()

        Dd().output(core2.path) \
            .input("/dev/urandom") \
            .block_size(block_size) \
            .count(count) \
            .run()

    with TestRun.step("Check if both DD operations finished."):
        while TestRun.executor.run(f"ls /proc/{dd_pid}").exit_code == 0:
            sleep(1)

    with TestRun.step("Check if both cores contain dirty blocks."):
        if int(core1.get_dirty_blocks()) == 0:
            TestRun.fail("The first core does not contain dirty blocks.")
        if int(core2.get_dirty_blocks()) == 0:
            TestRun.fail("The second core does not contain dirty blocks.")
        core2_dirty_blocks_before = int(core2.get_dirty_blocks())

    with TestRun.step("Start flushing the first core."):
        TestRun.executor.run_in_background(
            cli.flush_core_cmd(str(cache.cache_id), str(core1.core_id))
        )

    with TestRun.step("Wait some time and start flushing the second core."):
        sleep(2)
        percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
        while percentage < 40:
            percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)

        try:
            core2.flush_core()
            TestRun.fail("The first core is flushing right now so flush attempt of the second core "
                         "should fail.")
        except CmdException:
            TestRun.LOGGER.info("The first core is flushing right now so the second core's flush "
                                "fails as expected.")

    with TestRun.step("Wait for the first core to finish flushing."):
        try:
            percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
            while percentage < 100:
                percentage = casadm_parser.get_flushing_progress(cache.cache_id, core1.core_id)
        except CmdException:
            TestRun.LOGGER.info("The first core is not flushing dirty data anymore.")

    with TestRun.step("Check number of dirty data on both cores."):
        if int(core1.get_dirty_blocks()) > 0:
            TestRun.LOGGER.error("The quantity of dirty cache lines on the first core "
                                 "after completed flush should be zero.")

        core2_dirty_blocks_after = int(core2.get_dirty_blocks())
        if core2_dirty_blocks_before != core2_dirty_blocks_after:
            TestRun.LOGGER.error("The quantity of dirty cache lines on the second core "
                                 "after failed flush should not change.")

    with TestRun.step("Stop cache."):
        cache.stop()
def test_core_pool_exclusive_open():
    """
    title: Exclusive open of core pool.
    description: |
      Check that CAS exclusively opens core devices from core device pool so that the core device
      cannot be used in any other way.
    pass_criteria:
      - No system crash while reloading CAS modules.
      - Core device was added successfully to core pool.
      - Core device is exclusively open in the core pool and cannot be used otherwise.
    """
    with TestRun.step("Prepare core device and create filesystem on it."):
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]
        core_dev.create_filesystem(Filesystem.ext4)
    with TestRun.step(
            "Add core device to core device pool using --try-add flag."):
        core = casadm.try_add(core_dev, 1)
    with TestRun.step(
            "Check if core status of added core in core pool is detached."):
        status = core.get_status()
        if status is not CoreStatus.detached:
            TestRun.fail(f"Core status should be detached but is {status}.")
    with TestRun.step(
            "Check if it is impossible to add core device from core pool to "
            "running cache."):
        TestRun.disks["cache"].create_partitions([Size(2, Unit.GibiByte)])
        cache_dev = TestRun.disks["cache"].partitions[0]
        cache = casadm.start_cache(cache_dev, force=True)
        try:
            cache.add_core(core_dev)
            TestRun.fail(
                "Core from core pool added to cache, this is unexpected behaviour."
            )
        except CmdException:
            TestRun.LOGGER.info(
                "Adding core from core pool to cache is blocked as expected.")
        cache.stop()
    with TestRun.step(
            "Check if it is impossible to start cache with casadm start command on the "
            "core device from core pool."):
        try:
            cache = casadm.start_cache(core_dev)
            cache.stop()
            TestRun.fail(
                "Cache started successfully on core device from core pool, "
                "this is unexpected behaviour.")
        except CmdException:
            TestRun.LOGGER.info(
                "Using core device from core pool as cache is blocked as expected."
            )
    with TestRun.step(
            "Check if it is impossible to make filesystem on the core device "
            "from core pool."):
        try:
            core_dev.create_filesystem(Filesystem.ext4, force=False)
            TestRun.fail(
                "Successfully created filesystem on core from core pool, "
                "this is unexpected behaviour.")
        except Exception:
            TestRun.LOGGER.info(
                "Creating filesystem on core device from core pool is "
                "blocked as expected.")
    with TestRun.step(
            "Check if it is impossible to mount the core device from core pool."
    ):
        try:
            core_dev.mount("/mnt")
            TestRun.fail(
                "Successfully mounted core pool device, this is unexpected behaviour."
            )
        except Exception:
            TestRun.LOGGER.info(
                "Mounting core device form core pool is blocked as expected.")
    with TestRun.step("Remove core from core pool."):
        casadm.remove_all_detached_cores()
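# What "exclusively open" means at the block-device level: while the core sits
# in the core pool, CAS holds the device open exclusively, so any other
# exclusive opener observes EBUSY. A minimal probe of that from Python,
# assuming `device_path` names the block device node:
import errno
import os

def is_exclusively_held(device_path):
    try:
        fd = os.open(device_path, os.O_RDWR | os.O_EXCL)
        os.close(fd)
        return False
    except OSError as e:
        return e.errno == errno.EBUSY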
Example #28
def test_core_inactive_stats():
    """
        1. Start cache with 3 cores.
        2. Switch cache into WB mode.
        3. Issue IO to each core.
        4. Stop cache without flush.
        5. Remove two core devices.
        6. Load cache.
        7. Check if cache stats are equal to sum of valid and inactive cores stats.
        8. Check if percentage values are calculated properly.
    """
    cache, core_device = prepare()

    cache_device = cache.cache_device

    TestRun.LOGGER.info("Switching cache mode to WB")
    cache.set_cache_mode(cache_mode=CacheMode.WB)
    cores = cache.get_core_devices()
    TestRun.LOGGER.info("Issue IO to each core")
    for core in cores:
        (
            Dd()
            .input("/dev/zero")
            .output(core.system_path)
            .count(1000)
            .block_size(Size(4, Unit.KibiByte))
        ).run()

    TestRun.LOGGER.info("Stopping cache with dirty data")
    cores[2].flush_core()  # flush the third core so that not all cores hold dirty data
    cache.stop(no_data_flush=True)

    TestRun.LOGGER.info("Removing two of core devices")
    core_device.remove_partitions()
    core_device.create_partitions([Size(1, Unit.GibiByte)])

    TestRun.LOGGER.info("Loading cache with missing core device")
    cache = casadm.start_cache(cache_device, load=True)

    # Accumulate valid cores stats
    cores_occupancy = 0
    cores_clean = 0
    cores_dirty = 0
    cores = cache.get_core_devices()
    for core in cores:
        core_stats = core.get_core_statistics()
        cores_occupancy += core_stats["occupancy"].value
        cores_clean += core_stats["clean"].value
        cores_dirty += core_stats["dirty"].value

    cache_stats = cache.get_cache_statistics()
    # Add inactive core stats
    cores_occupancy += cache_stats["inactive occupancy"].value
    cores_clean += cache_stats["inactive clean"].value
    cores_dirty += cache_stats["inactive dirty"].value

    assert cache_stats["occupancy"].value == cores_occupancy
    assert cache_stats["dirty"].value == cores_dirty
    assert cache_stats["clean"].value == cores_clean

    cache_stats_percentage = cache.get_cache_statistics(percentage_val=True)
    # Calculate expected percentage value of inactive core stats
    inactive_occupancy_perc = (
        cache_stats["inactive occupancy"].value / cache_stats["cache size"].value
    )
    inactive_clean_perc = (
        cache_stats["inactive clean"].value / cache_stats["occupancy"].value
    )
    inactive_dirty_perc = (
        cache_stats["inactive dirty"].value / cache_stats["occupancy"].value
    )

    inactive_occupancy_perc = round(100 * inactive_occupancy_perc, 1)
    inactive_clean_perc = round(100 * inactive_clean_perc, 1)
    inactive_dirty_perc = round(100 * inactive_dirty_perc, 1)

    TestRun.LOGGER.info(str(cache_stats_percentage))
    assert inactive_occupancy_perc == cache_stats_percentage["inactive occupancy"]
    assert inactive_clean_perc == cache_stats_percentage["inactive clean"]
    assert inactive_dirty_perc == cache_stats_percentage["inactive dirty"]
def test_load_cache_with_mounted_core(cache_mode):
    """
        title: Fault injection test for adding mounted core on cache load.
        description: |
          Negative test of the ability of CAS to add a mounted core device
          to the cache during cache load.
        pass_criteria:
          - No system crash while loading cache.
          - Adding mounted core while loading cache fails.
    """
    with TestRun.step("Prepare cache and core devices. Start CAS."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step(
            "Add core device with xfs filesystem to cache and mount it."):
        core_part.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step(
            "Create test file in mount point of exported object and check its md5 sum."
    ):
        test_file = fs_utils.create_random_test_file(test_file_path)
        test_file_md5_before = test_file.md5sum()

    with TestRun.step("Unmount core device."):
        core.unmount()

    with TestRun.step("Stop cache."):
        cache.stop()
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(
                f"Expected caches count: 0; Actual caches count: {caches_count}."
            )

    with TestRun.step("Mount core device."):
        core_part.mount(mount_point)

    with TestRun.step("Try to load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(
                f"Expected caches count: 1 Actual caches count: {caches_count}."
            )
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 0:
            TestRun.fail(
                f"Expected cores count: 0; Actual cores count: {cores_count}.")

    with TestRun.step("Check md5 sum of test file again."):
        if test_file_md5_before != test_file.md5sum():
            TestRun.LOGGER.error("Md5 sum of test file is different.")
        core_part.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example #30
def test_acp_param_flush_max_buffers(cache_line_size, cache_mode):
    """
        title: Functional test for ACP flush-max-buffers parameter.
        description: |
          Verify that there is an appropriate number of I/O requests between wake-up time
          intervals, which depends on the flush-max-buffers parameter.
        pass_criteria:
          - ACP triggered dirty data flush
          - Number of writes to core is lower than or equal to flush_max_buffers
    """
    with TestRun.step("Test prepare."):
        buffer_values = get_random_list(
            min_val=FlushParametersAcp.acp_params_range().flush_max_buffers[0],
            max_val=FlushParametersAcp.acp_params_range().flush_max_buffers[1],
            n=10,
        )

        default_config = FlushParametersAcp.default_acp_params()
        acp_configs = [
            FlushParametersAcp(flush_max_buffers=buf,
                               wake_up_time=Time(seconds=1))
            for buf in buffer_values
        ]
        acp_configs.append(default_config)

    with TestRun.step("Prepare partitions."):
        core_size = Size(5, Unit.GibiByte)
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]
        cache_device.create_partitions([Size(10, Unit.GibiByte)])
        core_device.create_partitions([core_size])

    with TestRun.step(
            f"Start cache in {cache_mode} with {cache_line_size} and add core."
    ):
        cache = casadm.start_cache(cache_device.partitions[0], cache_mode,
                                   cache_line_size)
        core = cache.add_core(core_device.partitions[0])

    with TestRun.step("Set cleaning policy to NOP."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Start IO in background."):
        fio = get_fio_cmd(core, core_size)
        fio.run_in_background()
        time.sleep(10)

    with TestRun.step("Set cleaning policy to ACP."):
        cache.set_cleaning_policy(CleaningPolicy.acp)

    with TestRun.group(
            "Verify IO number for different flush_max_buffers values."):
        for acp_config in acp_configs:
            with TestRun.step(f"Setting {acp_config}"):
                cache.set_params_acp(acp_config)

            with TestRun.step(
                    "Using blktrace, verify if there is an appropriate number of I/O "
                    "requests, which depends on the flush-max-buffers parameter."):
                blktrace = BlkTrace(core.core_device, BlkTraceMask.write)
                blktrace.start_monitoring()
                time.sleep(20)
                blktrace_output = blktrace.stop_monitoring()

                cleaning_started = False
                flush_writes = 0
                for (prev, curr) in zip(blktrace_output, blktrace_output[1:]):
                    if cleaning_started and write_to_core(prev):
                        flush_writes += 1
                    if new_acp_iteration(prev, curr):
                        if cleaning_started:
                            if flush_writes <= acp_config.flush_max_buffers:
                                flush_writes = 0
                            else:
                                TestRun.LOGGER.error(
                                    f"Incorrect number of handled IO requests. "
                                    f"Expected at most {acp_config.flush_max_buffers}, "
                                    f"actual: {flush_writes}")
                                flush_writes = 0

                        cleaning_started = True

                if not cleaning_started:
                    TestRun.fail(f"ACP flush not triggered for {acp_config}")

    with TestRun.step("Stop all caches"):
        kill_all_io()
        casadm.stop_all_caches()
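# write_to_core() and new_acp_iteration() above come from the test module and
# are not shown. Plausible reconstructions, assuming each parsed blktrace
# record exposes a nanosecond `timestamp` and an `rwbs` flag string (both
# attribute names are assumptions):
def write_to_core(header):
    # A write request headed to the traced (core) device.
    return "W" in str(header.rwbs)

def new_acp_iteration(prev, curr):
    # ACP wakes up once per wake_up_time (1 s above), so a gap clearly longer
    # than the in-batch request spacing marks a new cleaning iteration.
    return (curr.timestamp - prev.timestamp) > 0.5e9  # half the wake-up interval, in ns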