Example #1
def setup_workload(target: str,
                   runtime: timedelta,
                   io_depth=128,
                   verify=True,
                   block_size=int(Size(4, Unit.KibiByte)),
                   num_jobs=1,
                   method=ReadWrite.randrw,
                   io_engine=IoEngine.libaio):
    # Build a time-based, direct-I/O fio command for the given target.
    fio_run = Fio().create_command()
    fio_run.io_engine(io_engine)
    fio_run.direct()
    fio_run.time_based()

    fio_run.run_time(runtime)
    fio_run.io_depth(io_depth)
    if verify:
        # Verify written data with fio's meta method and dump miscompared
        # blocks to files for inspection.
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.meta)
        fio_run.verify_dump()

    fio_run.read_write(method)
    fio_run.target(target)
    fio_run.block_size(block_size)

    # Attach the requested number of identical jobs to the command.
    for _ in range(num_jobs):
        fio_run.add_job()

    return fio_run
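
This helper only builds the command; the caller runs it. A minimal call-site sketch follows, assuming the Open CAS test framework layout (the import paths are an assumption based on the identifiers used); the target device, the 5-minute runtime, and the job count are hypothetical values chosen for illustration.

# Assumed imports (module paths follow open-cas-linux test framework naming;
# verify against the framework version in use):
from datetime import timedelta

from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import IoEngine, ReadWrite, VerifyMethod
from test_utils.size import Size, Unit

# Hypothetical usage: a 5-minute verified random read/write run with 4 jobs
# against an example block device.
fio_run = setup_workload(target="/dev/nvme0n1",
                         runtime=timedelta(minutes=5),
                         num_jobs=4)
fio_run.run()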
Example #2
def test_dirty_load():
    """
        title: Loading cache after dirty shutdown.
        description: Test for loading a cache containing dirty data after a DUT hard restart.
        pass_criteria:
          - DUT should reboot successfully.
          - Cache should load successfully.
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)] * 2)
        core_devices = core_disk.partitions

    with TestRun.step("Start cache in Write-Back mode and add cores."):
        cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
        cores = []
        for dev in core_devices:
            cores.append(cache.add_core(dev))

    with TestRun.step("Set cleaning policy to nop."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Populate cache with dirty data."):
        fio = Fio().create_command()\
            .size(Size(1, Unit.GibiByte))\
            .read_write(ReadWrite.randwrite)\
            .io_engine(IoEngine.libaio)\
            .block_size(Size(1, Unit.Blocks4096))
        for i, core in enumerate(cores):
            fio.add_job(f"core{i}").target(core.path)
        fio.run()

        if cache.get_dirty_blocks() <= Size.zero():
            TestRun.fail("Cache does not contain dirty data.")

    with TestRun.step("Remove one core without flushing dirty data."):
        casadm.remove_core_with_script_command(cache.cache_id, core.core_id,
                                               True)

    with TestRun.step("Reset platform."):
        power_control = TestRun.plugin_manager.get_plugin('power_control')
        power_control.power_cycle()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

        caches_num = len(casadm_parser.get_caches())
        if caches_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of caches. Expected 1, actual {caches_num}.")

        cores_num = len(casadm_parser.get_cores(cache.cache_id))
        if cores_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of cores. Expected 1, actual {cores_num}.")
Example #3
def test_data_integrity_5d():
    """
        title: |
          Data integrity test on four CAS instances with different
          cache modes, with a duration of 5 days
        description: |
          Create 4 cache instances in different cache modes on 50GB caches
          and 150GB cores, and run a workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Run test workloads with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.target(core)
        fio_run.run()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode in zip(core_md5s, dev_md5s, cache_modes):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode}")
Example #4
def run_workload(target):
    fio_run = Fio().create_command()
    fio_run.io_engine(IoEngine.libaio)
    fio_run.direct()
    fio_run.time_based()
    fio_run.do_verify()
    fio_run.verify(VerifyMethod.meta)
    fio_run.verify_dump()
    fio_run.run_time(runtime)
    fio_run.read_write(ReadWrite.randrw)
    fio_run.io_depth(128)
    fio_run.target(target)

    # One job per block size; stonewall() serializes the jobs so each block
    # size runs on its own rather than in parallel with the others.
    for block_size in range(start_size, stop_size + 1, step):
        fio_job = fio_run.add_job()
        fio_job.stonewall()
        fio_job.block_size(block_size)

    fio_run.run()
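
run_workload (Example #4) and the fio step in Example #3 reference module-level constants (runtime, start_size, stop_size, step) defined outside the snippets, as does Example #5 (file_min_size, file_max_size). A sketch of plausible definitions follows; the concrete values are illustrative assumptions, except the 5-day runtime, which is grounded in the test titles.

# Hypothetical module-level constants assumed by Examples #3-#5 (uses the
# imports sketched under Example #1); the sizes are illustrative guesses.
runtime = timedelta(days=5)                # soak duration, per the test titles
start_size = int(Size(512, Unit.Byte))     # smallest block size exercised
stop_size = int(Size(128, Unit.KibiByte))  # largest block size exercised
step = int(Size(512, Unit.Byte))           # block-size increment in run_workload
file_min_size = Size(4, Unit.KibiByte)     # per-file size range in Example #5
file_max_size = Size(1, Unit.GibiByte)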
Example #5
def test_data_integrity_5d_dss(filesystems):
    """
        title: |
          Data integrity test on four CAS instances with different
          file systems, with a duration of 5 days
        description: |
          Create 4 cache instances on 50GB caches and 150GB cores,
          each with a different file system, and run a workload with data verification.
        pass_criteria:
            - System does not crash.
            - All operations complete successfully.
            - Data consistency is preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
            "Run 4 cache instances in different cache modes, add single core to each"
    ):
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i],
                                         cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s,
                                               cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")