Example #1
def setup_workload(target: str,
                   runtime: timedelta,
                   io_depth=128,
                   verify=True,
                   block_size=int(Size(4, Unit.KibiByte)),
                   num_jobs=1,
                   method=ReadWrite.randrw,
                   io_engine=IoEngine.libaio):
    fio_run = Fio().create_command()
    fio_run.io_engine(io_engine)
    fio_run.direct()
    fio_run.time_based()

    fio_run.run_time(runtime)
    fio_run.io_depth(io_depth)
    if verify:
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.meta)
        fio_run.verify_dump()

    fio_run.read_write(method)
    fio_run.target(target)
    fio_run.block_size(block_size)

    # Append one fio job per requested worker; the job handles are not needed
    # here because all parameters are set globally on the command.
    for _ in range(num_jobs):
        fio_run.add_job()

    return fio_run
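
# A minimal usage sketch for setup_workload(), assuming a hypothetical CAS
# exported object path ("/dev/cas1-1"); the returned Fio command still has to
# be started explicitly with run():
def example_setup_workload_usage():
    workload = setup_workload(target="/dev/cas1-1",
                              runtime=timedelta(minutes=30),
                              num_jobs=4)
    workload.run()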
Example #2
def test_data_integrity_5d():
    """
        title: |
          Data integrity test on four CAS instances with different
          cache modes with duration time equal to 5 days
        description: |
          Create 4 cache instances with different cache modes on 50GB cache
          devices and 150GB core devices, and run a workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Run test workloads with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)  # module-level constant; 5 days per the test title
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])  # module-level constants (not shown)
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.target(core)
        fio_run.run()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode in zip(core_md5s, dev_md5s, cache_modes):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode}")
Example #3
def fill_cache(target):
    fio_run_fill = Fio().create_command()
    fio_run_fill.io_engine(IoEngine.libaio)
    fio_run_fill.direct()
    fio_run_fill.read_write(ReadWrite.write)
    fio_run_fill.io_depth(16)
    fio_run_fill.block_size(Size(1, Unit.MebiByte))
    fio_run_fill.target(target)
    fio_run_fill.run()
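
# fill_cache() sets neither a run time nor a size, so fio writes the target
# from start to end exactly once: sequential 1 MiB direct writes covering the
# whole device. A minimal usage sketch with a hypothetical exported object path:
def example_fill_cache_usage():
    fill_cache("/dev/cas1-1")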
Example #4
def fio_workload(target: str, runtime: timedelta):
    fio_run = Fio().create_command()
    fio_run.io_engine(IoEngine.libaio)
    fio_run.direct()
    fio_run.time_based()
    fio_run.run_time(runtime)
    fio_run.io_depth(64)
    fio_run.read_write(ReadWrite.readwrite)
    fio_run.target(target)
    fio_run.block_size(int(Size(4, Unit.KibiByte)))

    return fio_run
Example #5
def run_workload(target):
    fio_run = Fio().create_command()
    fio_run.io_engine(IoEngine.libaio)
    fio_run.direct()
    fio_run.time_based()
    fio_run.do_verify()
    fio_run.verify(VerifyMethod.meta)
    fio_run.verify_dump()
    fio_run.run_time(runtime)
    fio_run.read_write(ReadWrite.randrw)
    fio_run.io_depth(128)
    fio_run.target(target)

    for block_size in range(start_size, stop_size + 1, step):
        fio_job = fio_run.add_job()
        fio_job.stonewall()
        fio_job.block_size(block_size)

    fio_run.run()
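
# run_workload() references module-level constants that are not shown here,
# and each added job calls stonewall(), so the block sizes run one after
# another instead of in parallel. Hypothetical definitions for the constants,
# consistent with how Size and Unit are used in these examples (the real
# values may differ):
runtime = timedelta(hours=1)               # assumed run time per block size
start_size = int(Size(512, Unit.Byte))     # assumed smallest block size
stop_size = int(Size(128, Unit.KibiByte))  # assumed largest block size
step = int(Size(512, Unit.Byte))           # assumed block size increment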
Example #6
def test_ioclass_direct(filesystem):
    """
        title: Direct IO classification.
        description: Check if direct requests are properly cached.
        pass_criteria:
          - No kernel bug.
          - Data from direct IO should be cached.
          - Data from buffered IO should not be cached; buffered IO to/from
            already cached data should cause reclassification to the
            unclassified IO class.
    """

    ioclass_id = 1
    io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config."):
        # direct IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule="direct",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step("Prepare fio command."):
        fio = Fio().create_command() \
            .io_engine(IoEngine.libaio) \
            .size(io_size).offset(io_size) \
            .read_write(ReadWrite.write) \
            .target(f"{mountpoint}/tmp_file" if filesystem else core.path)

    with TestRun.step("Prepare filesystem."):
        if filesystem:
            TestRun.LOGGER.info(
                f"Preparing {filesystem.name} filesystem and mounting {core.path} at"
                f" {mountpoint}"
            )
            core.create_filesystem(filesystem)
            core.mount(mountpoint)
            sync()
        else:
            TestRun.LOGGER.info("Testing on raw exported object.")

    with TestRun.step(f"Run buffered writes to {'file' if filesystem else 'device'}"):
        base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        fio.run()
        sync()

    with TestRun.step("Check if buffered writes are not cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy:
            TestRun.fail("Buffered writes were cached!\n"
                         f"Expected: {base_occupancy}, actual: {new_occupancy}")

    with TestRun.step(f"Run direct writes to {'file' if filesystem else 'device'}"):
        fio.direct()
        fio.run()
        sync()

    with TestRun.step("Check if direct writes are cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + io_size:
            TestRun.fail("Wrong number of direct writes was cached!\n"
                         f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")

    with TestRun.step(f"Run buffered reads from {'file' if filesystem else 'device'}"):
        fio.remove_param("readwrite").remove_param("direct")
        fio.read_write(ReadWrite.read)
        fio.run()
        sync()

    with TestRun.step("Check if buffered reads caused reclassification."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy:
            TestRun.fail("Buffered reads did not cause reclassification!"
                         f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")

    with TestRun.step(f"Run direct reads from {'file' if filesystem else 'device'}"):
        fio.direct()
        fio.run()
        sync()

    with TestRun.step("Check if direct reads are cached."):
        new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + io_size:
            TestRun.fail("Wrong number of direct reads was cached!\n"
                         f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
Example #7
def test_data_integrity_5d_dss(filesystems):
    """
        title: |
          Data integrity test on four CAS instances with different
          file systems with duration time equal to 5 days
        description: |
          Create 4 cache instances on 50GB cache devices and 150GB core devices
          with different file systems, and run a workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])  # module-level constants (not shown)
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)  # module-level constant; 5 days per the test title
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])  # module-level constants (not shown)
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s,
                                               cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")
Example #8
def test_zero_metadata_dirty_data():
    """
        title: Test for '--zero-metadata' and dirty data scenario.
        description: |
          Test for '--zero-metadata' with and without the 'force' option when
          there is dirty data in the cache.
        pass_criteria:
          - Zeroing metadata without force failed on cache with dirty data.
          - Zeroing metadata with force ran successfully on cache with dirty data.
          - Cache started successfully after zeroing metadata on cache with dirty data.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_dev, core_disk, cache_disk = prepare_devices()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
        core = cache.add_core(core_disk)
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Run workload on CAS"):
        fio_run_fill = Fio().create_command()
        fio_run_fill.io_engine(IoEngine.libaio)
        fio_run_fill.direct()
        fio_run_fill.read_write(ReadWrite.randwrite)
        fio_run_fill.io_depth(16)
        fio_run_fill.block_size(Size(1, Unit.MebiByte))
        fio_run_fill.target(core.path)
        fio_run_fill.run_time(timedelta(seconds=5))
        fio_run_fill.time_based()
        fio_run_fill.run()

    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)

    with TestRun.step("Start cache (expect to fail)."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WB)
        except CmdException:
            TestRun.LOGGER.info("Start cache failed as expected.")

    with TestRun.step("Zeroing metadata on CAS device without force"):
        try:
            casadm.zero_metadata(cache_dev)
            TestRun.LOGGER.error("Zeroing metadata without force should fail!")
        except CmdException as e:
            cli_messages.check_stderr_msg(e.output,
                                          cli_messages.cache_dirty_data)

    with TestRun.step("Zeroing metadata on cache device with force"):
        try:
            casadm.zero_metadata(cache_dev, force=True)
            TestRun.LOGGER.info("Zeroing metadata with force successful!")
        except CmdException as e:
            TestRun.LOGGER.error(
                "Zeroing metadata with force should work for cache device!\n"
                f"Error message: {e.output}")

        with TestRun.step("Start cache without 'force' option."):
            try:
                cache = casadm.start_cache(cache_dev, CacheMode.WB)
                TestRun.LOGGER.info("Cache started successfully.")
            except CmdException:
                TestRun.LOGGER.error("Start cache failed.")