def setup_workload(target: str, runtime: timedelta, io_depth=128, verify=True,
                   block_size=int(Size(4, Unit.KibiByte)), num_jobs=1,
                   method=ReadWrite.randrw, io_engine=IoEngine.libaio):
    """
    Build (but do not run) a time-based fio command against the given target.

    :param target: device path (or fio target) the workload is issued to
    :param runtime: how long the time-based workload runs
    :param io_depth: fio iodepth per job
    :param verify: when True, enable data verification (meta method, dump on error)
    :param block_size: I/O block size in bytes (default 4 KiB)
    :param num_jobs: number of identical fio jobs to add
    :param method: fio read/write pattern
    :param io_engine: fio I/O engine
    :return: configured Fio command object; caller invokes .run()
    """
    fio_run = Fio().create_command()
    fio_run.io_engine(io_engine)
    fio_run.direct()
    fio_run.time_based()
    fio_run.run_time(runtime)
    fio_run.io_depth(io_depth)
    if verify:
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.meta)
        fio_run.verify_dump()
    fio_run.read_write(method)
    fio_run.target(target)
    fio_run.block_size(block_size)
    # Fix: loop index was an unused named variable; use the throwaway convention.
    for _ in range(num_jobs):
        fio_run.add_job()
    return fio_run
def test_data_integrity_5d():
    """
    title: |
        Data integrity test on four cas instances with different cache modes
        with duration time equal to 5 days
    description: |
        Create 4 cache instances with different cache modes on caches equal to
        50GB and cores equal to 150GB, and run workload with data verification.
    pass_criteria:
      - System does not crash.
      - All operations complete successfully.
      - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        # One cache/core pair per cache mode under test.
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i], cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Run test workloads with verification"):
        # NOTE(review): runtime, start_size and stop_size appear to be
        # module-level constants defined outside this view — confirm.
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        # One fio job per exported core device; all jobs run in one fio invocation.
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.target(core)
        fio_run.run()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        # After the caches are stopped, hash the raw backend (core) devices.
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        # Matching sums prove all dirty data reached the backend on stop.
        for core_md5, dev_md5, mode in zip(core_md5s, dev_md5s, cache_modes):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode}")
def fio_workload(target: str, runtime: timedelta):
    """
    Prepare a time-based sequential mixed read/write fio command.

    :param target: device path the workload is issued to
    :param runtime: duration of the time-based run
    :return: configured Fio command object; caller invokes .run()
    """
    workload = Fio().create_command()
    workload.io_engine(IoEngine.libaio)
    workload.direct()
    workload.time_based()
    workload.run_time(runtime)
    workload.io_depth(64)
    workload.read_write(ReadWrite.readwrite)
    workload.target(target)
    workload.block_size(int(Size(4, Unit.KibiByte)))
    return workload
def run_workload(target):
    """
    Run a verified random read/write fio workload on *target*,
    one stonewalled job per block size in the configured range.

    NOTE(review): runtime, start_size, stop_size and step appear to be
    module-level constants defined outside this view — confirm.
    """
    command = Fio().create_command()
    command.io_engine(IoEngine.libaio)
    command.direct()
    command.time_based()
    command.do_verify()
    command.verify(VerifyMethod.meta)
    command.verify_dump()
    command.run_time(runtime)
    command.read_write(ReadWrite.randrw)
    command.io_depth(128)
    command.target(target)
    # stonewall() serializes the jobs, so each block size runs on its own.
    for size in range(start_size, stop_size + 1, step):
        job = command.add_job()
        job.stonewall()
        job.block_size(size)
    command.run()
def test_data_integrity_5d_dss(filesystems):
    """
    title: |
        Data integrity test on four cas instances with different file systems
        with duration time equal to 5 days
    description: |
        Create 4 cache instances on caches equal to 50GB and cores equal to 150GB
        with different file systems, and run workload with data verification.
    pass_criteria:
      - System does not crash.
      - All operations complete successfully.
      - Data consistency is being preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        # One cache/core pair per cache mode under test.
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i], cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            # Derive the mount point from the exported device path,
            # e.g. /dev/<core> -> /mnt/<core>.
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        # NOTE(review): runtime, file_min_size, file_max_size, start_size and
        # stop_size appear to be module-level constants defined outside this
        # view — confirm.
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        # One fio job per mounted filesystem; all jobs run in one fio invocation.
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        # After the caches are stopped, hash the raw backend (core) devices.
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        # Matching sums prove all dirty data reached the backend on stop.
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s, cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")
def test_zero_metadata_dirty_data():
    """
    title: Test for '--zero-metadata' and dirty data scenario.
    description: |
        Test for '--zero-metadata' with and without 'force' option if there is
        dirty data on cache.
    pass_criteria:
      - Zeroing metadata without force failed on cache with dirty data.
      - Zeroing metadata with force ran successfully on cache with dirty data.
      - Cache started successfully after zeroing metadata on cache with dirty data.
    """
    with TestRun.step("Prepare cache and core devices."):
        cache_dev, core_disk, cache_disk = prepare_devices()

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
        core = cache.add_core(core_disk)
        # NOP cleaning keeps dirty lines in cache, so the no-flush stop
        # below leaves the metadata marked dirty.
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Run workload on CAS"):
        # Short write-only fill to produce dirty data in write-back mode.
        fio_run_fill = Fio().create_command()
        fio_run_fill.io_engine(IoEngine.libaio)
        fio_run_fill.direct()
        fio_run_fill.read_write(ReadWrite.randwrite)
        fio_run_fill.io_depth(16)
        fio_run_fill.block_size(Size(1, Unit.MebiByte))
        fio_run_fill.target(core.path)
        fio_run_fill.run_time(timedelta(seconds=5))
        fio_run_fill.time_based()
        fio_run_fill.run()

    with TestRun.step("Stop cache without flushing dirty data."):
        cache.stop(no_data_flush=True)

    with TestRun.step("Start cache (expect to fail)."):
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WB)
            # Fix: the original silently accepted an unexpected successful
            # start; report it so the expected failure is actually asserted.
            TestRun.LOGGER.error("Starting the dirty cache should have failed!")
        except CmdException:
            TestRun.LOGGER.info("Start cache failed as expected.")

    with TestRun.step("Zeroing metadata on CAS device without force"):
        try:
            casadm.zero_metadata(cache_dev)
            TestRun.LOGGER.error("Zeroing metadata without force should fail!")
        except CmdException as e:
            # Must refuse with the dedicated dirty-data message.
            cli_messages.check_stderr_msg(e.output, cli_messages.cache_dirty_data)

    with TestRun.step("Zeroing metadata on cache device with force"):
        try:
            casadm.zero_metadata(cache_dev, force=True)
            TestRun.LOGGER.info("Zeroing metadata with force successful!")
        except CmdException as e:
            TestRun.LOGGER.error(
                f"Zeroing metadata with force should work for cache device!"
                f"Error message: {e.output}")

    with TestRun.step("Start cache without 'force' option."):
        # Metadata is zeroed now, so a plain start must succeed.
        try:
            cache = casadm.start_cache(cache_dev, CacheMode.WB)
            TestRun.LOGGER.info("Cache started successfully.")
        except CmdException:
            TestRun.LOGGER.error("Start cache failed.")