Example #1
def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
    """
    title: Sequential cut-off tests during writes and reads on full cache for 'full' policy
    description: |
        Test whether the amount of data written to the cache during sequential I/O against
        a fully occupied cache, with the 'full' sequential cut-off policy and various cache
        line sizes, respects the sequential cut-off threshold parameter.
    pass_criteria:
        - The number of blocks written to the cache is large enough to fill the cache when
          the 'never' sequential cut-off policy is set
        - The number of blocks written to the cache is less than or equal to the amount set
          with the sequential cut-off threshold parameter in case of the 'full' policy.
    """
    with TestRun.step(
            f"Test prepare (start cache (cache line size: {cls}) and add cores)"
    ):
        cache, cores = prepare(cores_count=1, cache_line_size=cls)
        fio_additional_size = Size(10, Unit.Blocks4096)
        threshold = Size(threshold_param, Unit.KibiByte)
        io_size = (threshold + fio_additional_size).align_down(0x1000)

    with TestRun.step(f"Setting cache sequential cut off policy mode to "
                      f"{SeqCutOffPolicy.never}"):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step(
            "Filling cache (sequential writes IO with size of cache device)"):
        sync()
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(cache.cache_device.size)
         .read_write(io_dir)
         .target(f"{cores[0].path}")
         .direct()
         ).run()

    with TestRun.step(
            "Check if cache is filled enough (expecting occupancy not less than "
            "95%)"):
        occupancy = cache.get_statistics(
            percentage_val=True).usage_stats.occupancy
        if occupancy < 95:
            TestRun.fail(
                f"Cache occupancy is too small: {occupancy}, expected at least 95%"
            )

    with TestRun.step(f"Setting cache sequential cut off policy mode to "
                      f"{SeqCutOffPolicy.full}"):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.full)
    with TestRun.step(f"Setting cache sequential cut off policy threshold to "
                      f"{threshold}"):
        cache.set_seq_cutoff_threshold(threshold)

    with TestRun.step(f"Running sequential IO ({io_dir})"):
        sync()
        writes_before = cores[0].get_statistics().block_stats.cache.writes
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(io_size)
         .read_write(io_dir)
         .target(f"{cores[0].path}")
         .direct()
         ).run()

    with TestRun.step("Verify writes to cache count"):
        verify_writes_count(cores[0], writes_before, threshold, io_size,
                            VerifyType.POSITIVE)
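For reference, verify_writes_count is a helper from the surrounding test suite whose body is not shown here. A minimal sketch of the check implied by the pass criteria, assuming VerifyType.POSITIVE means "the writes delta must not exceed the threshold" (the real helper may differ):

def verify_writes_count(core, writes_before, threshold, io_size, ver_type):
    # Hypothetical reconstruction of the helper used above.
    writes_to_cache = core.get_statistics().block_stats.cache.writes - writes_before
    if ver_type == VerifyType.POSITIVE and writes_to_cache > threshold:
        TestRun.fail(f"Too many writes to cache: {writes_to_cache}, "
                     f"expected at most {threshold}")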
Example #2
def run_io(exported_objects):
    for i in range(0, cores_number):
        fio = Fio() \
            .create_command() \
            .read_write(ReadWrite.randrw) \
            .io_engine(IoEngine.libaio) \
            .direct() \
            .sync() \
            .io_depth(32) \
            .run_time(timedelta(minutes=5)) \
            .num_jobs(5) \
            .target(exported_objects[i].path)
        fio.run_in_background()
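run_in_background() returns a PID (see Example #7), which run_io discards. A sketch of a variant that keeps the PIDs and waits for the jobs to finish, reusing the polling loop from Examples #15 and #28 (cores_number is assumed to be defined at module level, as above):

def run_io_and_wait(exported_objects):
    # Hypothetical variant: collect PIDs so the caller can wait for completion.
    pids = []
    for i in range(cores_number):
        fio = Fio() \
            .create_command() \
            .read_write(ReadWrite.randrw) \
            .io_engine(IoEngine.libaio) \
            .direct() \
            .run_time(timedelta(minutes=5)) \
            .target(exported_objects[i].path)
        pids.append(fio.run_in_background())
    for pid in pids:
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)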
Example #3
def get_fio_rw_cmd(core, write_percentage):
    fio = (Fio().create_command()
           .target(core)
           .read_write(ReadWrite.randrw)
           .write_percentage(write_percentage)
           .io_engine(IoEngine.libaio)
           .block_size(Size(16, Unit.Blocks4096))
           .run_time(runtime)
           .time_based(runtime)
           .io_depth(32)
           .num_jobs(72)
           .direct(1))
    return fio
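A hypothetical invocation (runtime is assumed to be defined at module level, as in Example #29; write_percentage sets the write share of the random read/write mix):

get_fio_rw_cmd(core, write_percentage=50).run()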
Example #4
def file_operation(target_path, data_pattern, io_pattern):
    fio = (Fio().create_command()
           .target(target_path)
           .io_engine(IoEngine.libaio)
           .size(test_file_size)
           .read_write(io_pattern)
           .block_size(Size(1, Unit.Blocks4096))
           .verification_with_pattern(data_pattern)
           .direct()
           .set_param("do_verify", 0))
    fio.run()
def run_fio(targets):
    for target in targets:
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .read_write(ReadWrite.randwrite)
               .direct(1)
               .size(Size(100, Unit.MebiByte))
               .sync()
               .io_depth(32)
               .target(f"{target}"))
        fio.run()
Example #6
def get_fio_cmd(core, core_size):
    fio = (Fio().create_command()
           .target(core)
           .read_write(ReadWrite.write)
           .io_engine(IoEngine.libaio)
           .io_size(Size(10, Unit.TebiByte))
           .size(core_size)
           .block_size(Size(1, Unit.Blocks4096))
           .run_time(timedelta(seconds=9999))
           .io_depth(32)
           .num_jobs(1)
           .direct(1))
    return fio
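Note that io_size (10 TiB) is far larger than size (core_size), so fio keeps re-writing the same core_size region until 10 TiB have been issued or the runtime expires. A hypothetical invocation, stopped early the way Example #7 stops its background job:

fio = get_fio_cmd(core, core.size)
pid = fio.run_in_background()
# ... observe statistics for a while, then stop the long-running job:
TestRun.executor.kill_process(pid)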
Example #7
def test_stop_cache_during_io():
    """
        title: Test for stopping cache during IO.
        description: |
          Create a CAS device, run fio on it and check
          whether the cache can be stopped during IO operations.
        pass_criteria:
          - Cache is not stopped.
    """
    with TestRun.step("Start cache and add core"):
        cache, core = prepare()

    with TestRun.step("Running 'fio'"):
        fio = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .block_size(Size(4, Unit.KibiByte))
            .read_write(ReadWrite.randrw)
            .target(f"{core.system_path}")
            .direct(1)
            .run_time(timedelta(minutes=4))
            .time_based()
        )
        fio_pid = fio.run_in_background()
        time.sleep(10)

    with TestRun.step("Try to stop cache during 'fio'"):
        TestRun.executor.run_expect_fail(cli.stop_cmd(f"{cache.cache_id}"))

    with TestRun.step("Stopping 'fio'"):
        TestRun.executor.kill_process(fio_pid)

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
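A possible follow-up check for the "Cache is not stopped" pass criterion (a sketch; CacheStatus.running is assumed by analogy with CacheStatus.not_running used in the cache insert error test below):

    with TestRun.step("Verify cache is still running"):
        state = cache.get_status()
        if state != CacheStatus.running:
            TestRun.fail(f"Cache should still be running, but its state is {state}")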
Example #8
def test_kedr_basic_io_raw(module, unload_modules, install_kedr):
    """
    title: Basic IO test with kedr started with memory leaks profile
    description: |
        Load CAS modules, start kedr against one of them, start cache and add core,
        run simple 4 minute random IO, stop cache and unload modules
    pass_criteria:
      - No memory leaks observed
    """
    with TestRun.step("Preparing cache device"):
        cache_device = TestRun.disks['cache']
        cache_device.create_partitions([Size(500, Unit.MebiByte)])
        cache_part = cache_device.partitions[0]

    with TestRun.step("Preparing core device"):
        core_device = TestRun.disks['core']
        core_device.create_partitions([Size(1, Unit.GibiByte)])
        core_part = core_device.partitions[0]

    with TestRun.step("Unload CAS modules if needed"):
        if os_utils.is_kernel_module_loaded(module.value):
            cas_module.unload_all_cas_modules()

    with TestRun.step(f"Starting kedr against {module.value}"):
        Kedr.start(module.value)

    with TestRun.step(f"Loading CAS modules"):
        os_utils.load_kernel_module(cas_module.CasModule.cache.value)

    with TestRun.step("Starting cache"):
        cache = casadm.start_cache(cache_part, force=True)

    with TestRun.step("Adding core"):
        core = cache.add_core(core_dev=core_part)

    with TestRun.step(f"Running IO"):
        (Fio().create_command()
              .io_engine(IoEngine.libaio)
              .run_time(timedelta(minutes=4))
              .time_based()
              .read_write(ReadWrite.randrw)
              .target(f"{core.path}")
              .direct()
         ).run()

    with TestRun.step("Stopping cache"):
        cache.stop()

    with TestRun.step(f"Unloading CAS modules"):
        cas_module.unload_all_cas_modules()

    with TestRun.step(f"Checking for memory leaks for {module.value}"):
        try:
            Kedr.check_for_mem_leaks(module.value)
        except Exception as e:
            TestRun.LOGGER.error(f"{e}")

    with TestRun.step(f"Stopping kedr"):
        Kedr.stop()
Example #9
def write_pattern(device):
    return (Fio().create_command()
            .io_engine(IoEngine.libaio)
            .read_write(ReadWrite.write)
            .target(device)
            .direct()
            .verification_with_pattern()
            )
Example #10
def get_fio_cmd(dev):
    fio = (Fio().create_command()
           .target(dev.system_path)
           .io_engine(IoEngine.sync)
           .size(Size(1, Unit.GibiByte))
           .block_size(Size(1, Unit.Blocks4096))
           .io_depth(32)
           .num_jobs(1)
           .direct(1)
           # report percentiles for total latency only, not for
           # submission/completion latencies separately
           .lat_percentiles(True)
           .slat_percentiles(False)
           .clat_percentiles(False)
           .percentile_list(["90.0", "99.0", "99.9", "99.99"]))
    return fio
Example #11
def run_fio_on_lvm(volumes: []):
    fio_run = (Fio().create_command()
               .read_write(ReadWrite.randrw)
               .io_engine(IoEngine.sync)
               .io_depth(1)
               .time_based()
               .run_time(datetime.timedelta(seconds=180))
               .do_verify()
               .verify(VerifyMethod.md5)
               .block_size(Size(1, Unit.Blocks4096)))
    for lvm in volumes:
        fio_run.add_job().target(lvm).size(lvm.size)
    fio_run.run()
def test_ioclass_conditions_and(filesystem):
    """
        title: IO class condition 'and'.
        description: |
          Load config with an IO class combining five file_size conditions joined by the
          AND operator, where each condition contradicts at least one of the others.
        pass_criteria:
          - No kernel bug.
          - IO fulfilling only some of the conditions is not classified.
    """

    file_size = Size(random.randint(25, 50), Unit.MebiByte)
    file_size_bytes = int(file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # contradictory file_size AND conditions
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
            f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
            f"file_size:eq:{file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_occupancy = cache.get_io_class_statistics(
        io_class_id=1).usage_stats.occupancy
    # Perform IO
    for size in [
            file_size, file_size + Size(1, Unit.MebiByte),
            file_size - Size(1, Unit.MebiByte)
    ]:
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(size)
         .read_write(ReadWrite.write)
         .target(f"{mountpoint}/test_file")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=1).usage_stats.occupancy

        if new_occupancy != base_occupancy:
            TestRun.fail(
                "Unexpected occupancy increase!\n"
                f"Expected: {base_occupancy}, actual: {new_occupancy}")
Example #13
def dut_prepare(reinstall: bool):
    if not check_if_installed() or reinstall:
        TestRun.LOGGER.info("Installing iotrace:")
        install_iotrace()
    else:
        TestRun.LOGGER.info("iotrace is already installed by previous test")

    # Call it after installing iotrace because we need iotrace
    # to get valid paths
    dut_cleanup()

    fio = Fio()
    if not fio.is_installed():
        TestRun.LOGGER.info("Installing fio")
        fio.install()

    TestRun.LOGGER.info("Killing all IO")
    kill_all_io()
def test_ioclass_conditions_or(filesystem):
    """
        title: IO class condition 'or'.
        description: |
          Load config with IO class combining 5 contradicting conditions connected by OR operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one condition is classified properly.
    """

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directories OR condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
            f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        for i in range(1, 6):
            fs_utils.create_directory(f"{mountpoint}/dir{i}")
        sync()

    with TestRun.step(
            "Perform IO fulfilling each condition and check if occupancy raises."
    ):
        for i in range(1, 6):
            file_size = Size(random.randint(25, 50), Unit.MebiByte)
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy
            (Fio().create_command()
             .io_engine(IoEngine.libaio)
             .size(file_size)
             .read_write(ReadWrite.write)
             .target(f"{mountpoint}/dir{i}/test_file")
             .run())
            sync()
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy

            if new_occupancy != base_occupancy + file_size:
                TestRun.fail(
                    "Occupancy has not increased correctly!\n"
                    f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
                )
Example #15
def test_multistream_seq_cutoff_stress_fs(streams_seq_rand, filesystem, cache_mode):
    """
    title: Stress test for multistream sequential cutoff on the device with a filesystem
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the exported object with a filesystem, with the sequential cutoff policy
        set to always and the sequential cutoff threshold set to a value that sequential I/O
        streams are able to reach.
    pass_criteria:
        - No system crash
    """
    mount_point = "/mnt"
    with TestRun.step("Prepare devices. Create filesystem on core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        core_disk.create_filesystem(filesystem)

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_disk, cache_mode, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step("Mount core."):
        core.mount(mount_point)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 20MiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(20, Unit.MebiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        stream_size = core_disk.size / 256
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               .offset_increment(stream_size))

        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(os.path.join(mount_point, f"file_{i}"))
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
Example #16
def fio_prepare(core, io_mode, io_size=io_size):
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .size(io_size)
        .read_write(io_mode)
        .target(core.path)
        .direct(1)
    )
    return fio
Example #17
def prepare_base_fio():
    return (Fio().create_command()
            .remove_flag('group_reporting')  # report each job separately
            .read_write(ReadWrite.randwrite)
            .no_random_map()                 # don't track already-visited blocks
            .direct()
            .block_size(block_size)
            .size(job_workset_size)
            .rand_seed(fio_seed)
            .set_param("allrandrepeat", 1)   # make all random sequences repeatable
            .set_flags("refill_buffers"))    # refill IO buffers on every submit
def fio_prepare():
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .block_size(Size(4, Unit.KibiByte))
        .size(io_size)
        .read_write(ReadWrite.randwrite)
        .direct(1)
    )
    return fio
def fio_prepare():
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .read_write(ReadWrite.randwrite)
        .size(io_size)
        .direct()
    )

    return fio
def test_latency_histogram_basic(io_dir):
    """
        title: Test for basic latency histogram properties
        description: |
            Test whether the sample count reported by fio equals the count from iotracer
            (taking dropped samples into account).
        pass_criteria:
            - Fio's sample count equals the number of iotracer samples plus the number
              of dropped ones
    """
    read = io_dir == ReadWrite.read
    iotrace = TestRun.plugins['iotrace']

    for disk in TestRun.dut.disks:
        with TestRun.step(f"Start tracing on {disk.system_path}"):
            tracer = iotrace.start_tracing([disk.system_path])
            time.sleep(3)

        with TestRun.step(f"Run {io_dir} IO"):
            fio = (Fio().create_command(output_type=FioOutput.jsonplus)
                   .io_engine(IoEngine.libaio)
                   .size(Size(300, Unit.MebiByte))
                   .block_size(Size(1, Unit.Blocks4096))
                   .read_write(io_dir)
                   .target(disk.system_path)
                   .direct())
            fio_out = fio.run()

        with TestRun.step("Stop tracing"):
            iotrace.stop_tracing()

        with TestRun.step("Get latency histogram from iotracer"):
            trace_path = iotrace.get_latest_trace_path()
            iot_histograms = iotrace.get_latency_histograms(trace_path)
            iot_histo = iot_histograms.read if read else iot_histograms.write

        with TestRun.step("Build histogram from fio bins"):
            fio_histograms = LatencyHistograms.build_histo_from_fio_job(
                iot_histograms, fio_out[0].job, io_dir)
            fio_histo = fio_histograms.read if read else fio_histograms.write

        with TestRun.step(
                "Check if the count of samples reported by iotracer "
                "(including dropped ones) equals the count of fio samples"):
            summary = iotrace.get_trace_summary(trace_path)
            dropped_events = int(summary['droppedEvents'])

            iot_samples_count = iot_histo.samples_count() + dropped_events
            fio_samples_count = fio_histo.samples_count()

            if iot_samples_count != fio_samples_count:
                TestRun.fail(
                    f"Wrong samples count reported by iotracer "
                    f"compared to fio's ({fio_samples_count} samples). "
                    f"Iotracer samples count: {iot_histo.samples_count()} "
                    f"Iotracer dropped samples count: {dropped_events}")
Example #21
def fio_prepare():
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .read_write(ReadWrite.randrw)
        .direct()
        .run_time(timedelta(minutes=4))
        .time_based()
    )
    return fio
def test_cache_insert_error(cache_mode, cache_line_size):
    """
        title: Cache insert test with error device
        description: |
          Validate CAS ability to handle write errors while it tries to insert
          cache lines. For lazy writes cache modes (WO, WB) issue only reads.
        pass_criteria:
          - No I/O errors returned to the user
          - Cache write error statistics are counted properly
          - No cache line gets inserted into cache
    """
    with TestRun.step("Prepare core and cache"):
        cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)

    fio_cmd = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .size(core.size)
        .block_size(cache_line_size)
        .target(core)
        .direct()
    )
    if cache_mode in [CacheMode.WB, CacheMode.WO]:
        fio_cmd = fio_cmd.read_write(ReadWrite.randread)
    else:
        fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)

    with TestRun.step("Run fio and verify no errors present"):
        fio_errors = fio_cmd.run()[0].total_errors()
        if fio_errors != 0:
            TestRun.fail(f"Some I/O ended with errors {fio_errors}")

    with TestRun.step("Check error statistics on cache"):
        stats = cache.get_statistics()

        occupancy = cache.get_occupancy().get_value()
        if occupancy != 0:
            TestRun.fail(f"Occupancy is not zero, but {occupancy}")

        cache_writes = (stats.block_stats.cache.writes / cache_line_size.value).get_value()
        cache_errors = stats.error_stats.cache.total
        if cache_writes != cache_errors:
            TestRun.fail(
                f"Cache errors ({cache_errors}) should equal the number of"
                f" requests to cache ({cache_writes})"
            )

    if cache_mode not in [CacheMode.WB, CacheMode.WO]:
        with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
            cache.stop()

            fio_cmd.target(core_device).verify_only().run()
def dut_prepare(item):
    if not TestRun.plugins['iotrace'].installed:
        TestRun.LOGGER.info("Installing iotrace")
        install_iotrace()
    else:
        TestRun.LOGGER.info("iotrace is already installed by previous test")

    # Call it after installing iotrace because we need iotrace
    # to get valid paths
    dut_cleanup()

    fio = Fio()
    if not fio.is_installed():
        TestRun.LOGGER.info("Installing fio")
        fio.install()

    TestRun.LOGGER.info("Killing all IO")
    kill_all_io()

    TestRun.LOGGER.info("Probing module")
    insert_module()
def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
    """
        title: Cache insert test with error device for writes on lazy writes cache mode
        description: |
          Validate CAS ability to handle write errors while it tries to insert
          cache lines. This test is exclusively for lazy writes cache modes.
        pass_criteria:
          - I/O errors returned to user
          - Cache automatically stops after encountering errors
          - No cache line gets inserted into cache
    """
    with TestRun.step("Prepare core and cache"):
        cache, core, _ = prepare_configuration(cache_mode, cache_line_size)

    with TestRun.step("Run fio and verify errors are present"):
        fio_errors = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .size(core.size)
            .block_size(cache_line_size)
            .read_write(ReadWrite.randwrite)
            .target(core)
            .continue_on_error(ErrorFilter.io)
            .direct()
            .run()[0]
            .total_errors()
        )
        if fio_errors == 0:
            TestRun.fail("No I/O ended with an error")

    with TestRun.step("Check error statistics and state on cache"):
        stats = cache.get_statistics()

        occupancy = cache.get_occupancy().get_value()
        if occupancy != 0:
            TestRun.fail(f"Occupancy is not zero, but {occupancy}")

        cache_writes = (stats.block_stats.cache.writes / cache_line_size.value).get_value()
        cache_errors = stats.error_stats.cache.total

        if cache_writes != 1:
            TestRun.fail("There should be only one cache write attempt before the cache stops")
        if cache_writes != cache_errors:
            TestRun.fail(
                f"Cache errors ({cache_errors}) should equal the number of requests to"
                f" cache ({cache_writes})"
            )

        state = cache.get_status()
        if state != CacheStatus.not_running:
            TestRun.fail(f"Cache should be in 'Not running' state, and it's {state}")
Example #25
def test_dirty_load():
    """
        title: Loading cache after dirty shutdown.
        description: Test for loading cache containing dirty data after DUT hard restart.
        pass_criteria:
          - DUT should reboot successfully.
          - Cache should load successfully.
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]

        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)] * 2)
        core_devices = core_disk.partitions

    with TestRun.step("Start cache in Write-Back mode and add cores."):
        cache = casadm.start_cache(cache_dev, cache_mode=CacheMode.WB)
        cores = []
        for dev in core_devices:
            cores.append(cache.add_core(dev))

    with TestRun.step("Set cleaning policy to nop."):
        cache.set_cleaning_policy(CleaningPolicy.nop)

    with TestRun.step("Populate cache with dirty data."):
        fio = Fio().create_command()\
            .size(Size(1, Unit.GibiByte))\
            .read_write(ReadWrite.randwrite)\
            .io_engine(IoEngine.libaio)\
            .block_size(Size(1, Unit.Blocks4096))
        for i, core in enumerate(cores):
            fio.add_job(f"core{i}").target(core.path)
        fio.run()

        if cache.get_dirty_blocks() <= Size.zero():
            TestRun.fail("Cache does not contain dirty data.")

    with TestRun.step("Remove one core without flushing dirty data."):
        casadm.remove_core_with_script_command(cache.cache_id, core.core_id,
                                               True)

    with TestRun.step("Reset platform."):
        power_control = TestRun.plugin_manager.get_plugin('power_control')
        power_control.power_cycle()

    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)

        caches_num = len(casadm_parser.get_caches())
        if caches_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of caches. Expected 1, actual {caches_num}.")

        cores_num = len(casadm_parser.get_cores(cache.cache_id))
        if cores_num != 1:
            TestRun.LOGGER.error(
                f"Wrong number of cores. Expected 1, actual {cores_num}.")
def fio_prepare(core):
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .read_write(ReadWrite.randrw)
        .target(core.system_path)
        .continue_on_error(ErrorFilter.io)
        .direct(1)
        .run_time(timedelta(seconds=30))
        .time_based()
    )
    return fio
Example #27
def run_fio_count(core, blocksize, num_ios):
    (
        Fio()
        .create_command()
        .target(core)
        .io_engine(IoEngine.libaio)
        .read_write(ReadWrite.randread)
        .block_size(blocksize)
        .direct()
        .file_size(Size(10, Unit.GibiByte))
        .num_ios(num_ios)
        .run()
    )
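num_ios presumably maps to fio's number_ios limit, so the command issues exactly num_ios random reads over the 10 GiB file. A hypothetical call:

run_fio_count(core, Size(4, Unit.KibiByte), 1000)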
Example #28
def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
    """
    title: Stress test for multistream sequential cutoff on raw device
    description: |
        Testing the stability of a system when there are multiple sequential and random I/O streams
        running against the raw exported object with the sequential cutoff policy set to always and
        the sequential cutoff threshold set to a value which is able to be reached by
        sequential I/O streams.
    pass_criteria:
        - No system crash
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']
        cache_disk.create_partitions([Size(1.5, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(f"Set seq-cutoff policy to always and threshold to 512KiB."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(Size(512, Unit.KibiByte))

    with TestRun.step("Reset core statistics counters."):
        core.reset_counters()

    with TestRun.step("Run I/O"):
        stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
        sequential_streams = streams_seq_rand[0]
        random_streams = streams_seq_rand[1]
        fio = (Fio().create_command()
               .io_engine(IoEngine.libaio)
               .block_size(Size(1, Unit.Blocks4096))
               .direct()
               .offset_increment(stream_size))

        for i in range(0, sequential_streams + random_streams):
            fio_job = fio.add_job(job_name=f"stream_{i}")
            fio_job.size(stream_size)
            fio_job.target(core.path)
            if i < sequential_streams:
                fio_job.read_write(ReadWrite.write)
            else:
                fio_job.read_write(ReadWrite.randwrite)

        pid = fio.run_in_background()
        while TestRun.executor.check_if_process_exists(pid):
            sleep(5)
            TestRun.LOGGER.info(f"{core.get_statistics()}")
Example #29
def get_fio_trim(core):
    fio = (
        Fio()
        .create_command()
        .target(core)
        .read_write(ReadWrite.trim)
        .io_engine(IoEngine.libaio)
        .block_size(Size(16, Unit.Blocks4096))
        .run_time(runtime)
        .time_based(runtime)
        .io_depth(1)
        .num_jobs(1)
        .direct(1)
    )
    return fio
Example #30
def run_fio(target,
            operation_type: ReadWrite,
            blocksize,
            io_size,
            verify=False,
            pattern=None,
            skip: Size = None):
    fio_operation_type = operation_type.name
    if skip:
        fio_operation_type += f":{int(skip.get_value(Unit.KibiByte))}k"
    fio = (Fio().create_command()
           .target(target)
           .io_engine(IoEngine.sync)
           .block_size(blocksize)
           .direct()
           .file_size(io_size)
           .set_param("readwrite", fio_operation_type))
    if verify:
        fio.verification_with_pattern(pattern)
    fio.run()
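A hypothetical call showing the skip syntax this helper builds (fio's "readwrite=write:64k" form, which skips the given distance after each block):

run_fio(target=core.path,
        operation_type=ReadWrite.write,
        blocksize=Size(4, Unit.KibiByte),
        io_size=Size(100, Unit.MebiByte),
        skip=Size(64, Unit.KibiByte))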