def prepare():
    base_prepare()
    ioclass_config.remove_ioclass_config()
    cache_device = next(
        disk
        for disk in TestProperties.dut.disks
        if disk.disk_type in [DiskType.optane, DiskType.nand]
    )
    core_device = next(
        disk
        for disk in TestProperties.dut.disks
        if (disk.disk_type.value > cache_device.disk_type.value and disk != cache_device)
    )

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(2, Unit.GibiByte)])

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]

    Udev.disable()

    TestProperties.LOGGER.info(f"Staring cache")
    cache = casadm.start_cache(cache_device, force=True)
    TestProperties.LOGGER.info(f"Setting cleaning policy to NOP")
    cache.set_cleaning_policy(CleaningPolicy.nop)
    TestProperties.LOGGER.info(f"Adding core devices")
    core = cache.add_core(core_dev=core_device_1)

    output = TestProperties.executor.execute(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, core
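The prepare() helpers in these examples reference module-level names such as `mountpoint` and `ioclass_config_path` without defining them. A minimal sketch of plausible definitions; both paths are assumptions, not taken from the source:

# Assumed module-level constants used throughout these examples (paths are placeholders).
mountpoint = "/tmp/cas1-1"
ioclass_config_path = "/tmp/opencas_ioclass.conf"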
Example #2
def prepare(cache_mode: CacheMode):
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte),
        Size(1, Unit.GibiByte)
    ])

    cache_device = cache_device.partitions[0]
    core_device_1 = core_device.partitions[0]
    core_device_2 = core_device.partitions[1]
    core_device_3 = core_device.partitions[2]

    Udev.disable()

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_device, cache_mode=cache_mode, force=True)
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core devices")
    core_1 = cache.add_core(core_dev=core_device_1)
    core_2 = cache.add_core(core_dev=core_device_2)
    core_3 = cache.add_core(core_dev=core_device_3)

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, [core_1, core_2, core_3]
def test_ioclass_process_name(prepare_and_cleanup):
    """Check if data generated by process with particular name is cached"""
    cache, core = prepare()

    ioclass_id = 1
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    iterations = 100

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"process_name:dd&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    cache.flush_cache()

    Udev.disable()

    TestProperties.LOGGER.info(
        f"Check if all data generated by dd process is cached.")
    for i in range(iterations):
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(dd_count).block_size(dd_size).seek(i))
        dd.run()
        sync()
        time.sleep(0.1)
        stats = cache.get_cache_statistics(io_class_id=ioclass_id)
        assert stats["dirty"].get_value(Unit.Blocks4096) == (i + 1) * dd_count
Example #4
def test_cleaning_policies_in_write_back(cleaning_policy):
    """
        title: Test for cleaning policy operation in Write-Back cache mode.
        description: |
          Check if ALRU, NOP and ACP cleaning policies preserve their
          parameters when changed and if they flush dirty data properly
          in Write-Back cache mode.
        pass_criteria:
          - Flush parameters preserve their values when changed.
          - Dirty data is flushed or not according to the policy used.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()
        Udev.disable()

    with TestRun.step(
            f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"
    ):
        cache = casadm.start_cache(cache_dev.partitions[0],
                                   CacheMode.WB,
                                   force=True)
        set_cleaning_policy_and_params(cache, cleaning_policy)

    with TestRun.step("Check for running CAS cleaner"):
        if TestRun.executor.run(
                f"pgrep {cas_cleaner_process_name}").exit_code != 0:
            TestRun.fail("CAS cleaner process is not running!")

    with TestRun.step(f"Add {cores_count} cores to the cache"):
        core = []
        for i in range(cores_count):
            core.append(cache.add_core(core_dev.partitions[i]))

    with TestRun.step("Run 'fio'"):
        fio = fio_prepare()
        for i in range(cores_count):
            fio.add_job().target(core[i].path)
        fio.run()
        time.sleep(3)
        core_writes_before_wait_for_cleaning = (
            cache.get_statistics().block_stats.core.writes)

    with TestRun.step(f"Wait {time_to_wait} seconds"):
        time.sleep(time_to_wait)

    with TestRun.step("Check write statistics for core device"):
        core_writes_after_wait_for_cleaning = (
            cache.get_statistics().block_stats.core.writes)
        check_cleaning_policy_operation(
            cleaning_policy,
            core_writes_before_wait_for_cleaning,
            core_writes_after_wait_for_cleaning,
        )

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
        Udev.enable()
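The cleaning-policy test above relies on helpers and module constants that are not part of this excerpt (storage_prepare, fio_prepare, check_cleaning_policy_operation, cores_count, time_to_wait, cas_cleaner_process_name). A minimal sketch built only from calls already shown in these examples; every name, size and threshold below is an assumption:

cores_count = 4                          # assumption
time_to_wait = 60                        # seconds to wait for background cleaning; assumption
cas_cleaner_process_name = "cas_cl_1"    # assumption: cleaner thread name for cache 1

def storage_prepare():
    cache_dev = TestRun.disks['cache']
    cache_dev.create_partitions([Size(1, Unit.GibiByte)])
    core_dev = TestRun.disks['core']
    core_dev.create_partitions([Size(2, Unit.GibiByte)] * cores_count)
    return cache_dev, core_dev

def fio_prepare():
    # One direct random-write workload; jobs and targets are added by the caller.
    fio = (Fio().create_command()
           .io_engine(IoEngine.libaio)
           .block_size(Size(1, Unit.Blocks4096))
           .read_write(ReadWrite.randwrite)
           .size(Size(100, Unit.MebiByte))
           .direct())
    return fio

def check_cleaning_policy_operation(cleaning_policy,
                                    core_writes_before_wait_for_cleaning,
                                    core_writes_after_wait_for_cleaning):
    # NOP must not flush dirty data in the background; ALRU and ACP should have
    # written dirty blocks back to the core device while the test was waiting.
    if cleaning_policy == CleaningPolicy.nop:
        if core_writes_after_wait_for_cleaning != core_writes_before_wait_for_cleaning:
            TestRun.fail("NOP cleaning policy is not expected to flush dirty data.")
    else:
        if core_writes_after_wait_for_cleaning <= core_writes_before_wait_for_cleaning:
            TestRun.fail(f"{cleaning_policy} cleaning policy did not flush dirty data.")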
Example #5
def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
    """
        title: Test verifying if trim requests do not cause eviction on CAS device.
        description: |
          When trim requests are enabled and files are being added and removed from the CAS device,
          there is no eviction (no reads from cache).
        pass_criteria:
          - Reads from cache device are the same before and after removing test file.
    """
    mount_point = "/mnt"
    test_file_path = os.path.join(mount_point, "test_file")

    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

    with TestRun.step("Start cache on device supporting trim and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, cache_line_size)
        cache.set_cleaning_policy(cleaning)
        Udev.disable()
        core = cache.add_core(core_dev)

    with TestRun.step("Create filesystem on CAS device and mount it."):
        core.create_filesystem(filesystem)
        core.mount(mount_point, ["discard"])

    with TestRun.step("Create random file using ddrescue."):
        test_file = fs_utils.create_random_test_file(test_file_path,
                                                     core_dev.size * 0.9)
        create_file_with_ddrescue(core_dev, test_file)

    with TestRun.step("Remove file and create a new one."):
        cache_iostats_before = cache_dev.get_io_stats()
        test_file.remove()
        os_utils.sync()
        os_utils.drop_caches()
        create_file_with_ddrescue(core_dev, test_file)

    with TestRun.step(
            "Check using iostat that reads from cache did not occur."):
        cache_iostats_after = cache_dev.get_io_stats()
        reads_before = cache_iostats_before.sectors_read
        reads_after = cache_iostats_after.sectors_read

        if reads_after != reads_before:
            TestRun.fail(
                f"Number of reads from cache before and after removing test file "
                f"differs. Reads before: {reads_before}, reads after: {reads_after}."
            )
        else:
            TestRun.LOGGER.info(
                "Number of reads from cache before and after removing test file is the same."
            )
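create_file_with_ddrescue() used in the trim test above is not included in this excerpt. A stand-in sketch that refills the test file using the Dd wrapper already shown in these examples (the original helper is ddrescue-based); `test_file.full_path` is an assumed attribute:

def create_file_with_ddrescue(core_dev, test_file):
    # Stand-in: rewrite the file with fresh data so the cache gets repopulated.
    (Dd()
     .input("/dev/urandom")
     .output(test_file.full_path)          # full_path is an assumption
     .block_size(Size(1, Unit.MebiByte))
     .count(int((core_dev.size * 0.9).get_value(Unit.MebiByte)))
     .oflag("direct")
     .run())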
Example #6
def test_one_core_release(cache_mode):
    """
        title: Test if OpenCAS dynamically allocates space according to core devices needs.
        description: |
          When one or more core devices are unused in a single cache instance all blocks
          previously occupied should be available to other core devices.
          The test does not use pass-through mode.
        pass_criteria:
          - No system crash.
          - The remaining core is able to use cache.
          - OpenCAS frees blocks occupied by the unused core and allocates them to the remaining core.
    """
    with TestRun.step("Prepare two cache and one core devices."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(512, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
        core_part1 = core_dev.partitions[0]
        core_part2 = core_dev.partitions[1]
        Udev.disable()

    with TestRun.step("Start cache"):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")

    with TestRun.step("Add both core devices to cache."):
        core1 = cache.add_core(core_part1)
        core2 = cache.add_core(core_part2)
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 2:
            TestRun.fail(f"Expected cores count: 2; Actual cores count: {cores_count}.")

    with TestRun.step("Change sequential cutoff policy to 'never'."):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Fill cache with pages from the first core."):
        dd_builder(cache_mode, core1, cache.size).run()
        core1_occupied_blocks_before = core1.get_occupancy()

    with TestRun.step("Check if the remaining core is able to use cache."):
        dd_builder(cache_mode, core2, Size(100, Unit.MebiByte)).run()
        core1_occupied_blocks_after = core1.get_occupancy()

    with TestRun.step("Check if occupancy from the first core is removed from cache."):
        # The first core's occupancy should be lower than cache's occupancy
        # by the value of the remaining core's occupancy because cache
        # should reallocate blocks from unused core to used core.
        if core1_occupied_blocks_after >= core1_occupied_blocks_before \
                or cache.get_occupancy() <= core1_occupied_blocks_after \
                or not float(core2.get_occupancy().get_value()) > 0:
            TestRun.LOGGER.error("Blocks previously occupied by the first core aren't released.")

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
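dd_builder() used in the test above is not included here. A plausible sketch using the Dd wrapper from these examples; the real helper may adjust flags per cache mode, so treat this as an assumption:

def dd_builder(cache_mode, core, size):
    # Build a dd writing `size` worth of zeroes to the core exported object.
    # oflag("direct") bypasses the page cache so the IO reaches CAS in every cache mode.
    return (Dd()
            .input("/dev/zero")
            .output(core.path)
            .block_size(Size(1, Unit.Blocks4096))
            .count(int(size.get_value(Unit.Blocks4096)))
            .oflag("direct"))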
Example #7
def prepare():
    cache_dev = TestRun.disks['cache']
    cache_dev.create_partitions([cache_size])
    cache_part = cache_dev.partitions[0]
    core_dev = TestRun.disks['core']
    core_dev.create_partitions([cache_size * 2])
    core_part = core_dev.partitions[0]
    Udev.disable()
    return cache_part, core_part
def test_ioclass_pid():
    cache, core = prepare()

    ioclass_id = 1
    iterations = 20
    dd_count = 100
    dd_size = Size(4, Unit.KibiByte)

    Udev.disable()

    # Since 'dd' has to be executed right after the pid is written to 'ns_last_pid',
    # the 'dd' command is built here and appended to the 'echo' command instead of being run directly
    dd_command = str(
        Dd()
        .input("/dev/zero")
        .output(core.system_path)
        .count(dd_count)
        .block_size(dd_size)
    )

    for i in range(iterations):
        cache.flush_cache()

        output = TestRun.executor.run("cat /proc/sys/kernel/ns_last_pid")
        if output.exit_code != 0:
            raise Exception(
                f"Failed to retrieve pid. stdout: {output.stdout} \n stderr :{output.stderr}"
            )

        # A few pids might be used up by the system during test preparation
        pid = int(output.stdout) + 50

        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"pid:eq:{pid}&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

        TestRun.LOGGER.info(f"Running dd with pid {pid}")
        # The pid saved in 'ns_last_pid' has to be one less than the target dd pid
        dd_and_pid_command = (
            f"echo {pid-1} > /proc/sys/kernel/ns_last_pid && {dd_command}"
        )
        output = TestRun.executor.run(dd_and_pid_command)
        if output.exit_code != 0:
            raise Exception(
                f"Failed to run dd with target pid. "
                f"stdout: {output.stdout} \n stderr :{output.stderr}"
            )
        sync()
        dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
        if dirty.get_value(Unit.Blocks4096) != dd_count:
            TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
        ioclass_config.remove_ioclass(ioclass_id)
def test_ioclass_conditions_and(filesystem):
    """
        title: IO class condition 'and'.
        description: |
          Load config with IO class combining 5 conditions, each contradicting
          at least one other condition, connected by the AND operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one of the conditions is not classified.
    """

    file_size = Size(random.randint(25, 50), Unit.MebiByte)
    file_size_bytes = int(file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # file size conditions connected by AND, mutually contradicting
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
            f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
            f"file_size:eq:{file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_occupancy = cache.get_io_class_statistics(
        io_class_id=1).usage_stats.occupancy
    # Perform IO
    for size in [
            file_size, file_size + Size(1, Unit.MebiByte),
            file_size - Size(1, Unit.MebiByte)
    ]:
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(size).read_write(
                ReadWrite.write).target(f"{mountpoint}/test_file").run())
        sync()
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=1).usage_stats.occupancy

        if new_occupancy != base_occupancy:
            TestRun.fail(
                "Unexpected occupancy increase!\n"
                f"Expected: {base_occupancy}, actual: {new_occupancy}")
def test_ioclass_conditions_or(filesystem):
    """
        title: IO class condition 'or'.
        description: |
          Load config with IO class combining 5 contradicting conditions connected by OR operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one condition is classified properly.
    """

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directories OR condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
            f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        for i in range(1, 6):
            fs_utils.create_directory(f"{mountpoint}/dir{i}")
        sync()

    with TestRun.step(
            "Perform IO fulfilling each condition and check if occupancy raises."
    ):
        for i in range(1, 6):
            file_size = Size(random.randint(25, 50), Unit.MebiByte)
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy
            (Fio().create_command().io_engine(
                IoEngine.libaio).size(file_size).read_write(
                    ReadWrite.write).target(
                        f"{mountpoint}/dir{i}/test_file").run())
            sync()
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy

            if new_occupancy != base_occupancy + file_size:
                TestRun.fail(
                    "Occupancy has not increased correctly!\n"
                    f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
                )
Example #11
def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
    """
    title: Stress test for multistream sequential cutoff on the device with a filesystem
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the exported object with a filesystem when the sequential cutoff policy is
        set to always and the sequential cutoff threshold is set to a value which is able
        to be reached by sequential I/O streams.
    pass_criteria:
        - No system crash
    """
    mount_point = "/mnt"
    with TestRun.step("Prepare devices. Create filesystem on core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_filesystem(filesystem)

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step("Mount core."):
        core.mount(mount_point)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(20, Unit.MebiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        stream_size = core_disk.size / 256
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               .offset_increment(stream_size))

        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(os.path.join(mount_point, f"file_{i}"))
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
Example #12
def prepare(filesystem, cores_number):
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    cache_device.create_partitions([Size(10, Unit.GibiByte)])
    core_device.create_partitions([Size(5, Unit.GibiByte)] * cores_number)

    cache_device = cache_device.partitions[0]

    cache = casadm.start_cache(cache_device,
                               cache_mode=CacheMode.WT,
                               force=True)

    Udev.disable()
    casadm.set_param_cleaning(cache_id=cache.cache_id,
                              policy=CleaningPolicy.nop)

    cores = []
    for part in core_device.partitions:
        if filesystem:
            part.create_filesystem(filesystem)
        cores.append(casadm.add_core(cache, core_dev=part))

    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    ioclass_config.create_ioclass_config(
        add_default_rule=False,
        ioclass_config_path=ioclass_config.default_config_file_path)
    # To make the test more precise, all workloads except the tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation="1.00",
        rule="unclassified",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=22,
        allocation="0.00",
        rule="metadata",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )
    ioclass_config.add_ioclass(
        ioclass_id=2,
        eviction_priority=22,
        allocation="0.00",
        rule="direct",
        ioclass_config_path=ioclass_config.default_config_file_path,
    )

    return cache, cores
def test_ioclass_request_size():
    cache, core = prepare()

    ioclass_id = 1
    iterations = 100

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"request_size:ge:8192&request_size:le:16384&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    Udev.disable()

    # Check if requests with appropriate size are cached
    TestRun.LOGGER.info(
        f"Check if requests with size within defined range are cached")
    cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
    for i in range(iterations):
        cache.flush_cache()
        req_size = random.choice(cached_req_sizes)
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        dirty = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.dirty
        if dirty.get_value(
                Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
            TestRun.fail("Incorrect number of dirty blocks!")

    cache.flush_cache()

    # Check if requests with inappropriate size are not cached
    TestRun.LOGGER.info(
        f"Check if requests with size outside defined range are not cached")
    not_cached_req_sizes = [
        Size(1, Unit.Blocks4096),
        Size(8, Unit.Blocks4096),
        Size(16, Unit.Blocks4096),
    ]
    for i in range(iterations):
        req_size = random.choice(not_cached_req_sizes)
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        dirty = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.dirty
        if dirty.get_value(Unit.Blocks4096) != 0:
            TestRun.fail("Dirty data present!")
def test_ioclass_request_size(prepare_and_cleanup):
    cache, core = prepare()

    ioclass_id = 1
    iterations = 100

    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"request_size:ge:8192&request_size:le:16384&done",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    Udev.disable()

    # Check if requests with appropriate size are cached
    TestProperties.LOGGER.info(
        f"Check if requests with size within defined range are cached")
    cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
    for i in range(iterations):
        cache.flush_cache()
        req_size = random.choice(cached_req_sizes)
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        assert (stats["dirty"].get_value(Unit.Blocks4096) == req_size.value /
                Unit.Blocks4096.value)

    cache.flush_cache()

    # Check if requests with inappropriate size are not cached
    TestProperties.LOGGER.info(
        f"Check if requests with size outside defined range are not cached")
    not_cached_req_sizes = [
        Size(1, Unit.Blocks4096),
        Size(8, Unit.Blocks4096),
        Size(16, Unit.Blocks4096),
    ]
    for i in range(iterations):
        req_size = random.choice(not_cached_req_sizes)
        dd = (Dd().input("/dev/zero").output(
            core.system_path).count(1).block_size(req_size).oflag("direct"))
        dd.run()
        stats = cache.get_cache_statistics(per_io_class=True,
                                           io_class_id=ioclass_id)
        assert stats["dirty"].get_value(Unit.Blocks4096) == 0
Example #15
def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
    """
    title: Stress test for multistream sequential cutoff on raw device
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the raw exported object with the sequential cutoff policy set to always and
        the sequential cutoff threshold set to a value which is able to be reached by
        sequential I/O streams.
    pass_criteria:
        - No system crash
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(1.5, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(512, Unit.KibiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               .offset_increment(stream_size))

        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(core.path)
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
def prepare_configuration(cache_mode, cache_line_size):
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    with TestRun.step("Creating cache partition"):
        cache_device.create_partitions([Size(25, Unit.MebiByte)])

    with TestRun.step("Creating cache error device"):
        error_device = ErrorDevice("error", cache_device.partitions[0])

    with TestRun.step("Staring cache to check metadata offset"):
        cache = casadm.start_cache(error_device,
                                   cache_line_size=cache_line_size,
                                   force=True)
        cache_size = cache.size
        cache.stop()

    with TestRun.step("Setting errors on non-metadata area"):
        error_device.change_table(
            DmTable.error_table(
                offset=(cache_device.partitions[0].size -
                        cache_size).get_value(Unit.Blocks512),
                size=cache_size,
            ).fill_gaps(cache_device.partitions[0]))

    with TestRun.step("Create core partition with size of usable cache space"):
        core_device.create_partitions([cache_size])

    with TestRun.step("Starting and configuring cache"):
        cache = casadm.start_cache(error_device,
                                   cache_mode=cache_mode,
                                   cache_line_size=cache_line_size,
                                   force=True)
        result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
        if result.exit_code:
            TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
        result = cache.set_cleaning_policy(CleaningPolicy.nop)
        if result.exit_code:
            TestRun.LOGGER.exception("Couldn't set cleaning policy")

    with TestRun.step("Stopping udev"):
        Udev.disable()

    with TestRun.step("Adding core device"):
        core = cache.add_core(core_dev=core_device.partitions[0])

    return cache, core, core_device.partitions[0]
Example #17
def test_flush_over_640_gibibytes_raw_device(cache_mode):
    """
        title: Test of the ability to flush huge amount of dirty data on raw device.
        description: |
          Flush cache when amount of dirty data in cache exceeds 640 GiB.
        pass_criteria:
          - Flushing completes successfully without any errors.
    """
    with TestRun.step("Prepare devices for cache and core."):
        cache_dev = TestRun.disks['cache']
        check_disk_size(cache_dev)
        cache_dev.create_partitions([required_disk_size])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        check_disk_size(core_dev)
        Udev.disable()

    with TestRun.step(f"Start cache in {cache_mode} mode."):
        cache = casadm.start_cache(cache_part, cache_mode)

    with TestRun.step(f"Add core to cache."):
        core = cache.add_core(core_dev)

    with TestRun.step("Disable cleaning and sequential cutoff."):
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Create test file"):
        fio = (Fio().create_command().io_engine(IoEngine.libaio).read_write(
            ReadWrite.write).block_size(bs).direct().io_depth(256).target(
                core).size(file_size))
        fio.default_run_time = timedelta(
            hours=4)  # timeout for non-time-based fio
        fio.run()

    with TestRun.step(f"Check if dirty data exceeded {file_size * 0.98} GiB."):
        minimum_4KiB_blocks = int(
            (file_size * 0.98).get_value(Unit.Blocks4096))
        if int(cache.get_statistics().usage_stats.dirty) < minimum_4KiB_blocks:
            TestRun.fail("There is not enough dirty data in the cache!")

    with TestRun.step("Stop cache with flush."):
        # this operation could take a few hours, depending on the core disk
        output = TestRun.executor.run(stop_cmd(str(cache.cache_id)),
                                      timedelta(hours=12))
        if output.exit_code != 0:
            TestRun.fail(f"Stopping cache with flush failed!\n{output.stderr}")
Example #18
def prepare(
    cache_size=Size(500, Unit.MebiByte),
    core_size=Size(10, Unit.GibiByte),
    cache_mode=CacheMode.WB,
    cache_line_size=CacheLineSize.LINE_4KiB,
):
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    cache_device.create_partitions([cache_size])
    core_device.create_partitions([core_size])

    cache_device = cache_device.partitions[0]
    core_device = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_device,
                               cache_mode=cache_mode,
                               cache_line_size=cache_line_size,
                               force=True)

    Udev.disable()
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id,
                              policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_device)
    TestRun.LOGGER.info(f"Setting seq cutoff policy to never")
    core.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path)
    # To make the test more precise, all workloads except the tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID,
        eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY,
        allocation="0.00",
        rule=ioclass_config.DEFAULT_IO_CLASS_RULE,
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, core
Example #19
def test_ioclass_process_name():
    """
        title: Test IO classification by process name.
        description: Check if data generated by process with particular name is cached.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly based on process generating IO name.
    """
    ioclass_id = 1
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 1
    iterations = 100

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()

    with TestRun.step("Create and load IO class config file."):
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation="1.00",
            rule=f"process_name:dd&done",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Flush cache and disable udev."):
        cache.flush_cache()
        Udev.disable()

    with TestRun.step("Check if all data generated by dd process is cached."):
        for i in range(iterations):
            dd = (
                Dd()
                .input("/dev/zero")
                .output(core.path)
                .count(dd_count)
                .block_size(dd_size)
                .seek(i)
            )
            dd.run()
            sync()
            time.sleep(0.1)
            dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
            if dirty.get_value(Unit.Blocks4096) != (i + 1) * dd_count:
                TestRun.LOGGER.error(f"Wrong amount of dirty data ({dirty}).")
Example #20
def test_stat_max_core(cache_mode):
    """
        title: CAS statistics values for maximum core devices.
        description: |
          Check CAS ability to display correct values in statistics
          for 62 core devices.
        pass_criteria:
          - Core's statistics matches cache's statistics.
    """

    cores_per_cache = 62

    with TestRun.step(f"Create 1 cache and {cores_per_cache} core partitions"):
        cache_dev = TestRun.disks["cache"]
        cache_dev.create_partitions([cache_size])
        core_dev = TestRun.disks["core"]
        core_parts = [core_size] * cores_per_cache
        core_dev.create_partitions(core_parts)
        Udev.disable()

    with TestRun.step(
            f"Start cache in {cache_mode} cache mode and add {cores_per_cache} cores"
    ):
        cache = casadm.start_cache(cache_dev.partitions[0],
                                   cache_mode=cache_mode,
                                   force=True)
        cache.set_cleaning_policy(CleaningPolicy.nop)
        cores = []
        for j in range(cores_per_cache):
            cores.append(cache.add_core(core_dev.partitions[j]))

    with TestRun.step("Run 'fio'"):
        fio = fio_prepare()
        for j in range(cores_per_cache):
            fio.add_job().target(cores[j].system_path)
        fio.run()
        sleep(3)

    with TestRun.step("Check if cache's statistics matches core's statistics"):
        cache_stats = cache.get_statistics_flat(stat_filter=stat_filter)
        cores_stats = [
            cores[j].get_statistics_flat(stat_filter=stat_filter)
            for j in range(cores_per_cache)
        ]
        fail_message = f"In {cache_mode} cache mode "
        stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message)
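test_stat_max_core() also depends on names not shown here (cache_size, core_size, stat_filter, fio_prepare, stats_compare). A minimal sketch of stats_compare, assuming get_statistics_flat() returns a dict-like mapping of statistic name to value:

def stats_compare(cache_stats, cores_stats, cores_per_cache, fail_message):
    # For every statistic, the sum over all cores should equal the cache-level value.
    for stat_name in cache_stats:
        core_sum = sum(cores_stats[j][stat_name] for j in range(cores_per_cache))
        if core_sum != cache_stats[stat_name]:
            TestRun.LOGGER.error(
                fail_message +
                f"sum of cores' '{stat_name}' ({core_sum}) "
                f"does not equal the cache value ({cache_stats[stat_name]})."
            )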
def test_ioclass_conditions_and(filesystem):
    """
    Load config with IO class combining 5 conditions, each contradicting at least one other
    condition, connected by the AND operator.
    Check if every IO fulfilling one of the conditions is not classified.
    """
    cache, core = prepare()
    Udev.disable()
    file_size = Size(random.randint(25, 50), Unit.MebiByte)
    file_size_bytes = int(file_size.get_value(Unit.Byte))

    # file size conditions connected by AND, mutually contradicting
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
        f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
        f"file_size:eq:{file_size_bytes}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=1)["occupancy"]
    # Perform IO
    for size in [
            file_size, file_size + Size(1, Unit.MebiByte),
            file_size - Size(1, Unit.MebiByte)
    ]:
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(size).read_write(
                ReadWrite.write).target(f"{mountpoint}/test_file").run())
        sync()
        new_occupancy = cache.get_statistics_deprecated(
            io_class_id=1)["occupancy"]

        assert new_occupancy == base_occupancy, \
            "Unexpected occupancy increase!\n" \
            f"Expected: {base_occupancy}, actual: {new_occupancy}"
def test_stats_values():
    """
        title: Check for proper statistics values.
        description: |
          Check if CAS displays proper usage, request, block and error statistics values
          for core devices in every cache mode - at the start, after IO and after cache
          reload. Also check if core's statistics match cache's statistics.
        pass_criteria:
          - Usage, request, block and error statistics have proper values.
          - Core's statistics match cache's statistics.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()
        Udev.disable()

    with TestRun.step(
        f"Start {caches_count} caches (one for every cache mode) "
        f"and add {cores_per_cache} cores per cache"
    ):
        caches, cores = cache_prepare(cache_dev, core_dev)

    with TestRun.step("Check initial statistics values for each core"):
        check_stats_initial(caches, cores)

    with TestRun.step("Run 'fio'"):
        fio = fio_prepare()
        for i in range(caches_count):
            for j in range(cores_per_cache):
                fio.add_job().target(cores[i][j].path)
        fio.run()
        sleep(3)

    with TestRun.step("Check statistics values after IO"):
        check_stats_after_io(caches, cores)

    with TestRun.step("Check if cache's statistics match core's statistics"):
        check_stats_sum(caches, cores)

    with TestRun.step("Stop and load caches back"):
        casadm.stop_all_caches()
        caches = cache_load(cache_dev)

    with TestRun.step("Check statistics values after reload"):
        check_stats_after_io(caches, cores, after_reload=True)
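test_stats_values() uses several helpers that are not part of this excerpt (storage_prepare, cache_prepare, fio_prepare, cache_load, the check_stats_* functions) plus the caches_count and cores_per_cache constants. As one illustration, a sketch of cache_load, assuming the framework exposes casadm.load_cache (not shown in these examples):

def cache_load(cache_dev):
    # Reload one cache instance from the metadata left on each cache partition.
    caches = []
    for i in range(caches_count):
        caches.append(casadm.load_cache(cache_dev.partitions[i]))
    return caches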
def test_ioclass_conditions_or(filesystem):
    """
    Load config with IO class combining 5 contradicting conditions connected by OR operator.
    Check if every IO fulfilling one condition is classified properly.
    """
    cache, core = prepare()
    Udev.disable()

    # directories OR condition
    ioclass_config.add_ioclass(
        ioclass_id=1,
        eviction_priority=1,
        allocation=True,
        rule=
        f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
        f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    for i in range(1, 6):
        fs_utils.create_directory(f"{mountpoint}/dir{i}")
    sync()

    # Perform IO fulfilling each condition and check if occupancy rises
    for i in range(1, 6):
        file_size = Size(random.randint(25, 50), Unit.MebiByte)
        base_occupancy = cache.get_statistics_deprecated(
            io_class_id=1)["occupancy"]
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(file_size).read_write(
                ReadWrite.write).target(
                    f"{mountpoint}/dir{i}/test_file").run())
        sync()
        new_occupancy = cache.get_statistics_deprecated(
            io_class_id=1)["occupancy"]

        assert new_occupancy == base_occupancy + file_size, \
            "Occupancy has not increased correctly!\n" \
            f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
Example #24
def test_write_fetch_full_misses(cache_mode, cache_line_size):
    """
        title: No caching of full write miss operations with block size smaller than cache line size
        description: |
          Validate CAS ability to not cache entire cache line size for full write miss operations
          when block size is smaller than cache line size – no fetch for writes
        pass_criteria:
          - Appropriate number of write full misses and writes to cache in cache statistics
          - Appropriate number of writes to cache in iostat
    """
    io_size = Size(300, Unit.MebiByte)

    with TestRun.step("Start cache and add core."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache = casadm.start_cache(cache_disk, cache_mode, cache_line_size)
        Udev.disable()
        core = cache.add_core(core_disk)
    with TestRun.step("Run writes to CAS device using fio."):
        io_stats_before_io = cache_disk.get_io_stats()
        blocksize = cache_line_size.value / 2
        skip_size = cache_line_size.value / 2
        run_fio(target=core.path,
                operation_type=ReadWrite.write,
                skip=skip_size,
                blocksize=blocksize,
                io_size=io_size)
    with TestRun.step(
            "Verify CAS statistics for write full misses and writes to cache."
    ):
        check_statistics(cache=cache,
                         blocksize=blocksize,
                         skip_size=skip_size,
                         io_size=io_size)
    with TestRun.step(
            "Verify number of writes to cache device using iostat. Shall be half of "
            f"io size ({str(io_size / 2)}) + metadata for WB."):
        check_io_stats(cache_disk=cache_disk,
                       cache=cache,
                       io_stats_before=io_stats_before_io,
                       io_size=io_size,
                       blocksize=blocksize,
                       skip_size=skip_size)
Example #25
def prepare(cores_count=1, cache_line_size: CacheLineSize = None):
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    cache_device.create_partitions(
        [(SEQ_CUTOFF_THRESHOLD_MAX * cores_count + Size(5, Unit.GibiByte)).align_down(0x1000)])
    partitions = \
        [(SEQ_CUTOFF_THRESHOLD_MAX + Size(10, Unit.GibiByte)).align_down(0x1000)] * cores_count
    core_device.create_partitions(partitions)
    cache_part = cache_device.partitions[0]
    core_parts = core_device.partitions
    TestRun.LOGGER.info("Starting cache")

    cache = casadm.start_cache(cache_part, force=True, cache_line_size=cache_line_size)
    Udev.disable()
    TestRun.LOGGER.info("Adding core devices")
    core_list = []
    for core_part in core_parts:
        core_list.append(cache.add_core(core_dev=core_part))
    return cache, core_list
Example #26
def prepare():
    cache_dev = TestRun.disks["cache"]
    core_dev = TestRun.disks["core"]

    cache_dev.create_partitions([Size(100, Unit.MiB)])
    core_dev.create_partitions([Size(200, Unit.MiB)])

    Udev.disable()
    cache = casadm.start_cache(cache_dev.partitions[0], force=True, cache_mode=CacheMode.WB)
    core = cache.add_core(core_dev.partitions[0])
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    cache.set_cleaning_policy(CleaningPolicy.alru)
    cache.set_params_alru(
        FlushParametersAlru(
            activity_threshold=Time(seconds=100),
            staleness_time=Time(seconds=1),
        )
    )

    return cache, core
Example #27
def prepare_config(cache_line_size, cache_mode):
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    core_device.create_partitions([Size(3, Unit.GiB)])

    cache = casadm.start_cache(
        cache_device,
        cache_mode=cache_mode,
        cache_line_size=cache_line_size,
        force=True,
    )
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    cache.set_cleaning_policy(CleaningPolicy.nop)

    Udev.disable()

    core = cache.add_core(core_device.partitions[0])

    return cache, core
Example #28
def prepare(random_cls, cache_count=1, cores_per_cache=1):
    cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
    ioclass_config.remove_ioclass_config()

    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']

    cache_device.create_partitions([Size(500, Unit.MebiByte)] * cache_count)
    core_device.create_partitions([Size(2, Unit.GibiByte)] * cache_count *
                                  cores_per_cache)

    cache_devices = cache_device.partitions
    core_devices = core_device.partitions
    for core_device in core_devices:
        core_device.create_filesystem(Filesystem.ext4)

    Udev.disable()
    caches, cores = [], []
    for i, cache_device in enumerate(cache_devices):
        TestRun.LOGGER.info(f"Starting cache on {cache_device.path}")
        cache = casadm.start_cache(cache_device,
                                   force=True,
                                   cache_mode=cache_modes[i],
                                   cache_line_size=random_cls)
        caches.append(cache)
        TestRun.LOGGER.info("Setting cleaning policy to NOP")
        cache.set_cleaning_policy(CleaningPolicy.nop)
        for core_device in core_devices[i * cores_per_cache:(i + 1) *
                                        cores_per_cache]:
            TestRun.LOGGER.info(
                f"Adding core device {core_device.path} to cache {cache.cache_id}"
            )
            core = cache.add_core(core_dev=core_device)
            core.reset_counters()
            cores.append(core)

    TestRun.executor.run_expect_success(f"mkdir -p {mountpoint}")

    return caches, cores
Example #29
def prepare():
    ioclass_config.remove_ioclass_config()
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(1, Unit.GibiByte)])

    cache_device = cache_device.partitions[0]
    core_device = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_device, cache_mode=CacheMode.WB, force=True)

    Udev.disable()
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id, policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_device)
    core.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path
    )
    # To make the test more precise, all workloads except the tested ioclass should be
    # put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation=False,
        rule="unclassified",
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, core
def prepare_configuration(cache_mode, cache_line_size):
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]

    cache_device.create_partitions([Size(70, Unit.MebiByte)])
    core_device.create_partitions(
        [Size(70, Unit.MebiByte), Size(70, Unit.MebiByte)]
    )
    core1 = core_device.partitions[0]
    core2 = core_device.partitions[1]

    error_device = ErrorDevice(
        "error",
        core1,
        DmTable.uniform_error_table(
            start_lba=0,
            stop_lba=int(core1.size.get_value(Unit.Blocks512)),
            num_error_zones=100,
            error_zone_size=Size(5, Unit.Blocks512),
        ).fill_gaps(core1),
    )

    cache = casadm.start_cache(
        cache_device.partitions[0],
        cache_mode=cache_mode,
        cache_line_size=cache_line_size,
        force=True,
    )
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    cache.set_cleaning_policy(CleaningPolicy.nop)

    Udev.disable()
    error_core = cache.add_core(core_dev=error_device)
    good_core = cache.add_core(core_dev=core2)

    return cache, error_core, good_core