Example #1
def test_seq_cutoff_set_invalid_threshold(threshold):
    """
    title: Invalid sequential cut-off threshold test
    description: Check that CAS does not allow setting an invalid sequential cut-off threshold
    pass_criteria:
      - Setting invalid sequential cut-off threshold should be blocked
    """
    with TestRun.step("Test prepare (start cache and add core)"):
        cache, cores = prepare()
        _threshold = Size(threshold, Unit.KibiByte)

    with TestRun.step(
            f"Setting cache sequential cut off threshold to out of range value: "
            f"{_threshold}"):
        command = set_param_cutoff_cmd(cache_id=str(cache.cache_id),
                                       core_id=str(cores[0].core_id),
                                       threshold=str(
                                           int(_threshold.get_value())))
        output = TestRun.executor.run_expect_fail(command)
        if "Invalid sequential cutoff threshold, must be in the range 1-4194181"\
                not in output.stderr:
            TestRun.fail("Command succeeded (should fail)!")

    with TestRun.step(f"Setting cache sequential cut off threshold "
                      f"to value passed as a float"):
        command = set_param_cutoff_cmd(cache_id=str(cache.cache_id),
                                       core_id=str(cores[0].core_id),
                                       threshold=str(_threshold.get_value()))
        output = TestRun.executor.run_expect_fail(command)
        if "Invalid sequential cutoff threshold, must be a correct unsigned decimal integer"\
                not in output.stderr:
            TestRun.fail("Command succeeded (should fail)!")
Example #2
def check_io_stats(cache_disk, cache, io_stats_before, io_size, blocksize,
                   skip_size):
    io_stats_after = cache_disk.get_io_stats()
    logical_block_size = int(
        TestRun.executor.run(
            f"cat /sys/block/{cache_disk.device_name}/queue/logical_block_size"
        ).stdout)
    diff = io_stats_after.sectors_written - io_stats_before.sectors_written
    written_sector_size = Size(logical_block_size) * diff
    TestRun.LOGGER.info(
        f"Sectors written: "
        f"{io_stats_after.sectors_written - io_stats_before.sectors_written} "
        f"({written_sector_size.get_value(Unit.MebiByte)}MiB)")

    expected_writes = io_size * (blocksize / (blocksize + skip_size))

    cache_mode_traits = CacheMode.get_traits(cache.get_cache_mode())
    if (CacheModeTrait.InsertWrite | CacheModeTrait.LazyWrites) in cache_mode_traits:
        # Metadata size is 4KiB per each cache line
        metadata_size = (io_size / cache.get_cache_line_size().value) * Size(
            4, Unit.KibiByte)
        expected_writes += metadata_size

    if not validate_value(expected_writes.get_value(),
                          written_sector_size.get_value()):
        TestRun.LOGGER.error(
            f"IO stat writes to cache "
            f"({written_sector_size.get_value(Unit.MebiByte)}MiB) "
            f"inconsistent with expected value "
            f"({expected_writes.get_value(Unit.MebiByte)}MiB)")
Example #3
    def error_table(cls, offset: int, size: Size):
        table = cls()

        table.add_entry(
            DmTable.TableEntry(offset, size.get_value(Unit.Blocks512), DmTarget.ERROR)
        )

        return table
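A hedged usage sketch of this constructor, assuming it is defined as a classmethod on the DmTable class whose TableEntry it builds:

# Hypothetical usage: a device-mapper table that returns errors for the first 512 MiB.
error_map = DmTable.error_table(offset=0, size=Size(512, Unit.MebiByte))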
Example #4
    def get_lba_histogram(trace_path: str,
                          bucket_size: Size = Size(0, Unit.Byte),
                          subrange_start: int = 0,
                          subrange_end: int = 0,
                          shortcut: bool = False) -> list:
        """
        Get lba histogram of given trace path

        :param trace_path: trace path
        :param bucket_size: bucket size
        :param subrange_start: subrange start
        :param subrange_end: subrange end
        :param shortcut: Use shorter command
        :type trace_path: str
        :type bucket_size: Size
        :type subrange_start: int
        :type subrange_end: int
        :type shortcut: bool
        :return: LBA histogram
        :raises Exception: if iotrace command or histogram is invalid
        """
        bucket_size_range = range(1, 4294967296)
        subrange_range = range(1, 9223372036854775808)
        if subrange_start and subrange_end:
            if subrange_start > subrange_end:
                subrange_start, subrange_end = subrange_end, subrange_start

        command = 'iotrace' + (' -P' if shortcut else ' --trace-parser')
        command += ' -B' if shortcut else ' --lba-histogram'
        command += (' -p ' if shortcut else ' --path ') + f'{trace_path}'

        if bucket_size is not None:
            if int(bucket_size.get_value(Unit.Byte)) not in bucket_size_range:
                raise CmdException(
                    f"Given size is out of range {bucket_size_range}.")
            command += ' -b ' if shortcut else ' --bucket-size '
            command += f'{int(bucket_size.get_value(Unit.Byte))}'

        if subrange_start is not None:
            if subrange_start not in subrange_range:
                raise CmdException(
                    f"Given start position is out of range {subrange_range}.")
            command += ' -s ' if shortcut else ' --subrange-start '
            command += f'{subrange_start}'

        if subrange_end is not None:
            if subrange_end not in subrange_range:
                raise CmdException(
                    f"Given end position is out of range {subrange_range}.")
            command += ' -e ' if shortcut else ' --subrange-end '
            command += f'{subrange_end}'
        command += (' -f ' if shortcut else ' --format ') + 'json'

        output = TestRun.executor.run(command)
        if output.stdout == "":
            raise CmdException("Invalid histogram", output)

        return parse_json(output.stdout)
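For reference, a hedged example of calling get_lba_histogram (assuming it lives on the IotracePlugin class used in the later examples; the trace path and parameter values are illustrative) together with the command line it assembles:

# Hypothetical call; trace path and values are illustrative.
histogram = IotracePlugin.get_lba_histogram(
    trace_path="kernel/2021-01-01_00:00:00",
    bucket_size=Size(1, Unit.MebiByte),
    subrange_start=1,
    subrange_end=1000000,
)
# Roughly equivalent to:
#   iotrace --trace-parser --lba-histogram --path kernel/2021-01-01_00:00:00 \
#       --bucket-size 1048576 --subrange-start 1 --subrange-end 1000000 --format json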
def test_ioclass_conditions_and(filesystem):
    """
        title: IO class condition 'and'.
        description: |
          Load config with an IO class combining 5 file-size conditions, each of which
          contradicts at least one of the others.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one of the conditions is not classified.
    """

    file_size = Size(random.randint(25, 50), Unit.MebiByte)
    file_size_bytes = int(file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # contradicting file_size conditions combined with AND (&)
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=
            f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
            f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
            f"file_size:eq:{file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_occupancy = cache.get_io_class_statistics(
        io_class_id=1).usage_stats.occupancy
    # Perform IO
    for size in [
            file_size, file_size + Size(1, Unit.MebiByte),
            file_size - Size(1, Unit.MebiByte)
    ]:
        (Fio().create_command().io_engine(
            IoEngine.libaio).size(size).read_write(
                ReadWrite.write).target(f"{mountpoint}/test_file").run())
        sync()
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=1).usage_stats.occupancy

        if new_occupancy != base_occupancy:
            TestRun.fail(
                "Unexpected occupancy increase!\n"
                f"Expected: {base_occupancy}, actual: {new_occupancy}")
Example #6
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
                     policy: SeqCutOffPolicy = None):
    _threshold = None if threshold is None else int(threshold.get_value(Unit.KibiByte))
    command = set_param_cutoff_cmd(
        cache_id=cache_id, core_id=core_id,
        threshold=_threshold, policy=policy)
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise CmdException("Error while setting sequential cut-off params.", output)
    return output
    def uniform_error_table(cls, start_lba: int, stop_lba: int,
                            num_error_zones: int, error_zone_size: Size):
        table = cls()
        increment = (stop_lba - start_lba) // num_error_zones

        for zone_start in range(start_lba, stop_lba, increment):
            table.add_entry(
                DmTable.TableEntry(
                    zone_start,
                    error_zone_size.get_value(Unit.Blocks512),
                    DmTarget.ERROR,
                ))

        return table
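A quick illustration of how the zone start LBAs are spaced by the loop above (plain integers, values illustrative):

# With start_lba=0, stop_lba=1000 and num_error_zones=4 the increment is 250,
# so error zones begin at LBAs 0, 250, 500 and 750.
start_lba, stop_lba, num_error_zones = 0, 1000, 4
increment = (stop_lba - start_lba) // num_error_zones
print(list(range(start_lba, stop_lba, increment)))    # [0, 250, 500, 750]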
Example #8
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
                     policy: SeqCutOffPolicy = None, promotion_count: int = None):
    _core_id = None if core_id is None else str(core_id)
    _threshold = None if threshold is None else str(int(threshold.get_value(Unit.KibiByte)))
    _policy = None if policy is None else policy.name
    _promotion_count = None if promotion_count is None else str(promotion_count)
    command = set_param_cutoff_cmd(
        cache_id=str(cache_id),
        core_id=_core_id,
        threshold=_threshold,
        policy=_policy,
        promotion_count=_promotion_count
    )
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise CmdException("Error while setting sequential cut-off params.", output)
    return output
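A hedged usage sketch of this wrapper; the threshold is converted to whole KiB and every argument is passed to set_param_cutoff_cmd as a string (values below are illustrative):

# Hypothetical call: always cut off sequential streams on core 1 after 1 MiB.
set_param_cutoff(cache_id=1, core_id=1,
                 threshold=Size(1, Unit.MebiByte),   # sent as "1024" (KiB)
                 policy=SeqCutOffPolicy.always)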
Example #9
def set_param_cutoff(cache_id: int, core_id: int = None, threshold: Size = None,
                     policy: SeqCutOffPolicy = None):
    _threshold = None if threshold is None else threshold.get_value(Unit.KibiByte)
    _policy = None if policy is None else policy.name
    if core_id is None:
        command = set_param_cutoff_cmd(
            cache_id=str(cache_id), threshold=_threshold,
            policy=_policy)
    else:
        command = set_param_cutoff_cmd(
            cache_id=str(cache_id), core_id=str(core_id),
            threshold=_threshold, policy=_policy)
    output = TestRun.executor.run(command)
    if output.exit_code != 0:
        raise Exception(
            f"Error while setting sequential cut-off params."
            f" stdout: {output.stdout}\n stderr: {output.stderr}")
    return output
def test_ioclass_effective_ioclass(filesystem):
    """
        title: Effective IO class with multiple non-exclusive conditions
        description: |
            Test CAS ability to properly classify IO fulfilling multiple conditions based on
            IO class ids and presence of '&done' annotation in IO class rules
        pass_criteria:
         - In every iteration first IO is classified to the last in order IO class
         - In every iteration second IO is classified to the IO class with '&done' annotation
    """
    with TestRun.LOGGER.step(f"Test prepare"):
        cache, core = prepare()
        Udev.disable()
        file_size = Size(10, Unit.Blocks4096)
        file_size_bytes = int(file_size.get_value(Unit.Byte))
        test_dir = f"{mountpoint}/test"
        rules = ["direct",  # rule contradicting other rules
                 f"directory:{test_dir}",
                 f"file_size:le:{2 * file_size_bytes}",
                 f"file_size:ge:{file_size_bytes // 2}"]

    with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
                             f"and mounting {core.path} at {mountpoint}"):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(test_dir)
        sync()

    for i, permutation in TestRun.iteration(enumerate(permutations(range(1, 5)), start=1)):
        with TestRun.LOGGER.step("Load IO classes in order specified by permutation"):
            load_io_classes_in_permutation_order(rules, permutation, cache)
            io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4

        with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            fio = (Fio().create_command()
                   .io_engine(IoEngine.libaio)
                   .size(file_size)
                   .read_write(ReadWrite.write)
                   .target(f"{test_dir}/test_file{i}"))
            fio.run()
            sync()

        with TestRun.LOGGER.step("Check if IO was properly classified "
                                 "(to the last non-contradicting IO class)"):
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            if new_occupancy != base_occupancy + file_size:
                TestRun.LOGGER.error("Wrong IO classification!\n"
                                     f"Expected: {base_occupancy + file_size}, "
                                     f"actual: {new_occupancy}")

        with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
            io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)

        with TestRun.LOGGER.step("Repeat IO"):
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            fio.run()
            sync()

        with TestRun.LOGGER.step("Check if IO was properly classified "
                                 "(to the IO class with '&done' annotation)"):
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=io_class_id).usage_stats.occupancy
            if new_occupancy != base_occupancy + file_size:
                TestRun.LOGGER.error("Wrong IO classification!\n"
                                     f"Expected: {base_occupancy + file_size}, "
                                     f"actual: {new_occupancy}")
def test_ioclass_id_as_condition(filesystem):
    """
        title: IO class as a condition.
        description: |
          Load config in which IO class ids are used as conditions in other IO class definitions.
        pass_criteria:
          - No kernel bug.
          - IO is classified properly as described in IO class config.
    """

    base_dir_path = f"{mountpoint}/base_dir"
    ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
    ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directory condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        # file size condition
        ioclass_config.add_ioclass(
            ioclass_id=2,
            eviction_priority=1,
            allocation=True,
            rule=f"file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # direct condition
        ioclass_config.add_ioclass(
            ioclass_id=3,
            eviction_priority=1,
            allocation=True,
            rule="direct",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 1 OR 2 condition
        ioclass_config.add_ioclass(
            ioclass_id=4,
            eviction_priority=1,
            allocation=True,
            rule="io_class:1|io_class:2",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 4 AND file size condition (same as IO class 2)
        ioclass_config.add_ioclass(
            ioclass_id=5,
            eviction_priority=1,
            allocation=True,
            rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
            ioclass_config_path=ioclass_config_path,
        )
        # IO class 3 condition
        ioclass_config.add_ioclass(
            ioclass_id=6,
            eviction_priority=1,
            allocation=True,
            rule="io_class:3",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        fs_utils.create_directory(base_dir_path)
        sync()

    with TestRun.step("Run IO fulfilling IO class 1 condition (and not IO class 2) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 4
        base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
        non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(non_ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_1")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy

        if new_occupancy != base_occupancy + non_ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + non_ioclass_file_size}, "
                         f"actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 2 condition (and not IO class 1) and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{mountpoint}/test_file_2")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 5
        base_occupancy = new_occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")

    with TestRun.step("Run direct IO fulfilling IO class 1 and 2 conditions and check if "
                      "it is classified properly."):
        # Should be classified as IO class 6
        base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
        (Fio().create_command()
         .io_engine(IoEngine.libaio)
         .size(ioclass_file_size)
         .read_write(ReadWrite.write)
         .target(f"{base_dir_path}/test_file_3")
         .direct()
         .run())
        sync()
        new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy

        if new_occupancy != base_occupancy + ioclass_file_size:
            TestRun.fail("Writes were not properly cached!\n"
                         f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
Example #12
def test_multistream_seq_cutoff_functional(threshold, streams_number):
    """
    title: Functional test for multistream sequential cutoff
    description: |
        Testing if amount of data written to cache and core is correct after running sequential
        writes from multiple streams with different sequential cut-off thresholds.
    pass_criteria:
        - Amount of data written to cache is equal to the amount set with the sequential cutoff
          threshold
        - Amount of data written in pass-through is equal to the I/O size issued after reaching
          the sequential cutoff threshold
    """
    with TestRun.step("Start cache and add core device."):
        cache_disk = TestRun.disks['cache']
        core_disk = TestRun.disks['core']

        cache = casadm.start_cache(cache_disk, CacheMode.WB, force=True)
        Udev.disable()
        core = cache.add_core(core_disk)

    with TestRun.step(
            f"Set seq-cutoff policy to always, threshold to {threshold} "
            f"and reset statistics counters."):
        core.set_seq_cutoff_policy(SeqCutOffPolicy.always)
        core.set_seq_cutoff_threshold(threshold)
        core.reset_counters()

    with TestRun.step(
            f"Run {streams_number} I/O streams with amount of sequential writes equal to "
            f"seq-cutoff threshold value minus one 4k block."):
        kib_between_streams = 100
        range_step = int(threshold.get_value(
            Unit.KibiByte)) + kib_between_streams
        max_range_offset = streams_number * range_step

        offsets = [o for o in range(0, max_range_offset, range_step)]
        core_statistics_before = core.get_statistics()

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(
                f"Statistics before I/O:\n{core_statistics_before}")

            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(core.system_path,
                   count=int(threshold.get_value(Unit.Blocks4096) - 1),
                   seek=int(offset.get_value(Unit.Blocks4096)))

            core_statistics_after = core.get_statistics()
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=0,
                             expected_writes_to_cache=threshold -
                             Size(1, Unit.Blocks4096))
            core_statistics_before = core_statistics_after

    with TestRun.step(
            "Write random number of 4k block requests to each stream and check if all "
            "writes were sent in pass-through mode."):
        core_statistics_before = core.get_statistics()
        random.shuffle(offsets)

        for i in TestRun.iteration(range(0, len(offsets))):
            TestRun.LOGGER.info(
                f"Statistics before second I/O:\n{core_statistics_before}")
            additional_4k_blocks_writes = random.randint(
                1, kib_between_streams // 4)
            offset = Size(offsets[i], Unit.KibiByte)
            run_dd(core.system_path,
                   count=additional_4k_blocks_writes,
                   seek=int(
                       offset.get_value(Unit.Blocks4096) +
                       threshold.get_value(Unit.Blocks4096) - 1))

            core_statistics_after = core.get_statistics()
            check_statistics(core_statistics_before,
                             core_statistics_after,
                             expected_pt=additional_4k_blocks_writes,
                             expected_writes_to_cache=Size.zero())
            core_statistics_before = core_statistics_after
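To make the stream layout above concrete, a small worked example (threshold and stream count are illustrative):

# With threshold = 1 MiB (1024 KiB) and 3 streams, streams are spaced 100 KiB apart:
threshold_kib = 1024
kib_between_streams = 100
range_step = threshold_kib + kib_between_streams          # 1124 KiB
offsets = list(range(0, 3 * range_step, range_step))      # [0, 1124, 2248] (KiB)
# In the first phase each stream writes threshold/4KiB - 1 = 255 blocks of 4 KiB;
# in the second phase it writes 1..25 extra 4 KiB blocks that should go to pass-through.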
Example #13
def test_block_stats_write(cache_mode, zero_stats):
    """Perform read and write operations to cache instance in different cache modes
        and check if block stats values are correct"""
    cache, cores = prepare(cache_mode)
    iterations = 10
    dd_size = Size(4, Unit.KibiByte)
    dd_count = 10

    flush(cache)

    # Check stats for cache after performing write operation
    for core in cores:
        dd_seek = 0
        dd = (Dd().input("/dev/zero").output(f"{core.path}").count(
            dd_count).block_size(dd_size).oflag("direct"))
        # Since every IO has the same size, every stat should be increased with the same step.
        # So there is no need to keep value of every stat in separate variable
        cache_stat = ((dd_size.get_value(Unit.Blocks4096) * dd_count) *
                      (core.core_id - 1) * iterations)
        for i in range(iterations):
            dd.seek(dd_seek)
            dd.run()
            cache_stats = cache.get_statistics_flat(
                stat_filter=[StatsFilter.blk])
            core_stats = core.get_statistics_flat(
                stat_filter=[StatsFilter.blk])

            # Check cache stats
            assumed_value = (dd_size.get_value(Unit.Blocks4096) *
                             dd_count) * (i + 1)
            for key, value in cache_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}")
                else:
                    # For each next tested core, cache stats has to include
                    # sum of each previous core
                    assert cache_stat + assumed_value == value.get_value(
                        Unit.Blocks4096
                    ), (f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}")

            # Check single core stats
            for key, value in core_stats.items():
                if key in zero_stats:
                    assert value.get_value(Unit.Blocks4096) == 0, (
                        f"{key} has invalid value of \n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, cache_stat {cache_stat}")
                else:
                    assert assumed_value == value.get_value(Unit.Blocks4096), (
                        f"{key} has invalid value of {value.get_value(Unit.Blocks4096)}\n"
                        f"core id {core.core_id}, i: {i}, dd_size: "
                        f"{dd_size.get_value(Unit.Blocks4096)}\n"
                        f"dd count: {dd_count}, dd seek: {dd_seek}. Cache mode {cache_mode}"
                    )
        dd_seek += dd_count
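The expected counters above follow from simple block arithmetic; for example, with the 4 KiB I/O size and 10 blocks per dd run used in the test:

# dd_size = 4 KiB = 1 x 4096 B block, dd_count = 10, iterations = 10
blocks_per_run = 1 * 10                         # dd_size in 4 KiB blocks * dd_count
assumed_value = blocks_per_run * (3 + 1)        # 40 blocks after 4 runs (i == 3)
cache_stat = blocks_per_run * (2 - 1) * 10      # 100 blocks carried over from core 1
# For core 2 at iteration i == 3 the cache-wide write counters should read 100 + 40 = 140 blocks.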
Example #14
    def start_tracing(self,
                      bdevs: list = None,
                      buffer: Size = None,
                      trace_file_size: Size = None,
                      timeout: timedelta = None,
                      label: str = None,
                      shortcut: bool = False):
        """
        Start tracing given block devices. Trace all available if none given.

        :param bdevs: Block devices to trace, can be empty
        (for all available)
        :param buffer: Size of the internal trace buffer in MiB
        :param trace_file_size: Max size of trace file in MiB
        :param timeout: Max trace duration time in seconds
        :param label: User defined custom label
        :param shortcut: Use shorter command
        :type bdevs: list of strings
        :type buffer: Size
        :type trace_file_size: Size
        :type timeout: timedelta
        :type label: str
        :type shortcut: bool
        """

        if not bdevs:
            bdevs = [disk.system_path for disk in TestRun.dut.disks]

        buffer_range = range(1, 1025)
        trace_file_size_range = range(1, 100000001)
        timeout_range = range(1, 4294967296)

        command = 'iotrace' + (' -S' if shortcut else ' --start-tracing')
        command += (' -d ' if shortcut else ' --devices ') + ','.join(bdevs)

        if buffer is not None:
            if not int(buffer.get_value(Unit.MebiByte)) in buffer_range:
                raise CmdException(f"Given buffer is out of range {buffer_range}.")
            command += ' -b ' if shortcut else ' --buffer '
            command += f'{int(buffer.get_value(Unit.MebiByte))}'

        if trace_file_size is not None:
            if not int(trace_file_size.get_value(Unit.MebiByte)) in trace_file_size_range:
                raise CmdException(f"Given size is out of range {trace_file_size_range}.")
            command += ' -s ' if shortcut else ' --size '
            command += f'{int(trace_file_size.get_value(Unit.MebiByte))}'

        if timeout is not None:
            if not int(timeout.total_seconds()) in timeout_range:
                raise CmdException(f"Given time is out of range {timeout_range}.")
            command += ' -t ' if shortcut else ' --time '
            command += f'{int(timeout.total_seconds())}'

        if label is not None:
            command += (' -l ' if shortcut else ' --label ') + f'{label}'

        self.pid = str(TestRun.executor.run_in_background(command))
        TestRun.LOGGER.info("Started tracing of: " + ','.join(bdevs))
        # Make sure there's a >0 duration in all tests
        time.sleep(2)
def test_io_events():
    TestRun.LOGGER.info("Testing io events during tracing")
    iotrace = TestRun.plugins['iotrace']
    for disk in TestRun.dut.disks:
        with TestRun.step("Start tracing"):
            iotrace.start_tracing([disk.system_path])
            time.sleep(5)
        with TestRun.step("Send write command"):
            write_length = Size(17, disk.block_size)
            write_offset = 2 * write_length.get_value()
            dd = (Dd().input("/dev/urandom").output(disk.system_path).count(
                1).block_size(write_length).oflag('direct,sync').seek(
                    int(write_offset / write_length.get_value())))
            dd.run()
        with TestRun.step("Send read command"):
            read_length = Size(19, disk.block_size)
            read_offset = 2 * read_length.get_value()
            dd = (Dd().input(disk.system_path).output("/dev/null").count(
                1).block_size(read_length).iflag('direct,sync').skip(
                    int(read_offset / read_length.get_value())))
            dd.run()
        with TestRun.step("Send discard command"):
            discard_length = Size(21, disk.block_size).get_value()
            discard_offset = int(2 * discard_length)
            TestRun.executor.run_expect_success(
                f"blkdiscard -o {discard_offset}"
                f" -l {int(discard_length)} {disk.system_path}")
        with TestRun.step("Stop tracing"):
            iotrace.stop_tracing()
        with TestRun.step("Verify trace correctness"):
            trace_path = IotracePlugin.get_latest_trace_path()
            events_parsed = IotracePlugin.get_trace_events(trace_path)
            result = any(
                'io' in event and 'operation' in event['io']
                and event['io']['operation'] == 'Write'
                # LBA 0 events don't have a lba field, so skip them
                and 'lba' in event['io'] and int(event['io']['lba']) == int(
                    write_offset / iotrace_lba_len) and int(event['io']['len'])
                == int(write_length.get_value() / iotrace_lba_len) and
                f"/dev/{event['device']['name']}" == disk.system_path
                for event in events_parsed)
            if not result:
                TestRun.fail("Could not find write event")
            result = any(
                'io' in event and 'operation' in event['io']
                and event['io']['operation'] == 'Read'
                # LBA 0 events don't have a lba field, so skip them
                and 'lba' in event['io'] and int(event['io']['lba']) == int(
                    read_offset / iotrace_lba_len) and int(event['io']['len'])
                == int(read_length.get_value() / iotrace_lba_len) and
                f"/dev/{event['device']['name']}" == disk.system_path
                for event in events_parsed)
            if not result:
                TestRun.fail("Could not find read event")
            result = any('io' in event and 'operation' in event['io']
                         and event['io']['operation'] == 'Discard'
                         # LBA 0 events don't have a lba field, so skip them
                         and 'lba' in event['io'] and int(event['io']['lba'])
                         == int(discard_offset / iotrace_lba_len) and
                         int(event['io']['len']) == int(discard_length /
                                                        iotrace_lba_len) and
                         f"/dev/{event['device']['name']}" == disk.system_path
                         for event in events_parsed)
            if not result:
                TestRun.fail("Could not find discard event")
Example #16
def is_size_lower_or_equal(size_a: str, size_b: Size):
    """Returns true if size_a is lower or equal than size_b"""
    return int(size_a) <= int(size_b.get_value(Unit.MebiByte))
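A minimal usage sketch; the string argument is a number of MiB, matching the MiB comparison inside the helper (values illustrative):

# "100" MiB is lower than a 200 MiB Size object.
assert is_size_lower_or_equal("100", Size(200, Unit.MebiByte))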
Example #17
    def offset(self, value: Size):
        return self.set_param('offset', int(value.get_value()))
Example #18
    def block_size(self, value: size.Size):
        return self.set_param('bs', int(value.get_value()))
Example #19
    def set_max_io_size(self, new_max_io_size: Size):
        self.set_sysfs_property("max_sectors_kb",
                                int(new_max_io_size.get_value(Unit.KibiByte)))
Example #20
    def size(self, value: Size):
        return self.set_param('size', int(value.get_value()))
Example #21
def is_size_almost_equal(size_a: Size, size_b: str):
    """Returns true if both sizes are equal +/- 10%"""
    return isclose(int(size_a.get_value(Unit.MebiByte)), int(size_b), rel_tol=0.1)
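And a matching sketch for the tolerance helper, which takes a Size object and a MiB count as a string (values illustrative):

# 95 MiB vs "100" MiB differ by 5%, within the 10% relative tolerance.
assert is_size_almost_equal(Size(95, Unit.MebiByte), "100")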