Example #1
def fill_cache(target):
    fio_run_fill = Fio().create_command()
    fio_run_fill.io_engine(IoEngine.libaio)
    fio_run_fill.direct()
    fio_run_fill.read_write(ReadWrite.write)
    fio_run_fill.io_depth(16)
    fio_run_fill.block_size(Size(1, Unit.MebiByte))
    fio_run_fill.target(target)
    fio_run_fill.run()
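A minimal usage sketch (hypothetical; prepare() stands in for any of the helpers shown in the later examples that return a started cache and its cores):

# Hypothetical usage - fill the first core's exported object through the cache.
cache, cores = prepare()
fill_cache(cores[0].path)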
Example #2
def prepare_core_device(settings):
    if settings["device"] == "SCSI-debug module":
        core_device = create_scsi_debug_device(settings["logical_block_size"],
                                               4, settings["dev_size_mb"])
    else:
        core_device = TestRun.disks['core']
    core_device.set_max_io_size(Size(settings["max_sectors_kb"],
                                     Unit.KibiByte))
    return core_device
Example #3
def prepare(cores_count=1):
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    partitions = []
    for x in range(cores_count):
        partitions.append(Size(1, Unit.GibiByte))

    core_device.create_partitions(partitions)
    cache_part = cache_device.partitions[0]
    core_parts = core_device.partitions
    TestRun.LOGGER.info("Staring cache")
    cache = casadm.start_cache(cache_part, force=True)
    TestRun.LOGGER.info("Adding core devices")
    core_list = []
    for core_part in core_parts:
        core_list.append(cache.add_core(core_dev=core_part))
    return cache, core_list
Example #4
def test_cli_add_remove_custom_id(shortcut):
    """
        title: Test for adding and removing a core with a custom ID - short and long command
        description: |
          Start a new cache, add a core to it passing a random core ID
          (from the allowed range) as an argument, and then remove this core from the cache.
        pass_criteria:
          - The core is added to the cache with the given custom ID
          - The core is successfully removed from the cache
    """
    with TestRun.step("Prepare the devices."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(50, Unit.MebiByte)])
        cache_device = cache_disk.partitions[0]
        core_device = TestRun.disks['core']

    with TestRun.step("Start the cache and add the core with a random ID."):
        core_id = randint(*CORE_ID_RANGE)
        cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True)
        core = casadm.add_core(cache,
                               core_device,
                               core_id=core_id,
                               shortcut=shortcut)
        TestRun.LOGGER.info(f"Core ID: {core_id}")

    with TestRun.step("Check if the core is added to the cache."):
        caches = casadm_parser.get_caches()
        if len(caches[0].get_core_devices()) != 1:
            TestRun.fail("One core should be present in the cache.")
        if caches[0].get_core_devices()[0].path != core.path:
            TestRun.fail(
                "The core path should be equal to the path of the core added.")

    with TestRun.step("Remove the core from the cache."):
        casadm.remove_core(cache.cache_id, core.core_id, shortcut=shortcut)

    with TestRun.step(
            "Check if the core is successfully removed from still running cache."
    ):
        caches = casadm_parser.get_caches()
        if len(caches) != 1:
            TestRun.fail(
                "One cache should be still present after removing the core.")
        if len(caches[0].get_core_devices()) != 0:
            TestRun.fail(
                "No core device should be present after removing the core.")

    with TestRun.step("Stop the cache."):
        casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut)

    with TestRun.step("Check if the cache has successfully stopped."):
        caches = casadm_parser.get_caches()
        if len(caches) != 0:
            TestRun.fail(
                "No cache should be present after stopping the cache.")
        output = casadm.list_caches(shortcut=shortcut)
        cli_messages.check_stdout_msg(output, cli_messages.no_caches_running)
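The test above depends on a module-level CORE_ID_RANGE and receives shortcut from pytest parametrization; a plausible sketch of those surroundings (values assumed, not from the original):

from random import randint

import pytest

# Assumed range of valid custom core IDs (illustrative value).
CORE_ID_RANGE = (1, 4095)

# Assumed parametrization supplying the `shortcut` argument:
# @pytest.mark.parametrize("shortcut", [True, False])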
Example #5
def test_stress_dirty_shutdown(cache_line_size, cache_mode, cleaning_policy):
    """
        title: Stress test for dirty shutdowns during IO workload.
        description: |
          Validate the ability of CAS to start cache instances upon system boot after
          dirty shutdown during IO workloads.
        pass_criteria:
          - No system crash.
          - CAS loads correctly after DUT hard reset.
    """
    with TestRun.step("Prepare devices."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(5, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_sizes = [Size(5, Unit.GibiByte)] * cores_number
        core_dev.create_partitions(core_sizes)

    with TestRun.step("Start cache according to configuration and add core devices."):
        cache = casadm.start_cache(cache_part, cache_mode, cache_line_size, force=True)
        if cleaning_policy is not None:
            cache.set_cleaning_policy(cleaning_policy)
        exported_objects = []
        for i in range(0, cores_number):
            exported_objects.append(cache.add_core(core_dev.partitions[i]))

    with TestRun.step("Create CAS init configuration file based on running configuration."):
        InitConfig.create_init_config_from_running_configuration()

    for _ in TestRun.iteration(range(0, iterations_per_config),
                               "Load cache after reboot during heavy IO."):
        with TestRun.step("Start heavy IO workload on both CAS devices."):
            run_io(exported_objects)
            time.sleep(120)

        with TestRun.step("Reset platform."):
            power_control = TestRun.plugin_manager.get_plugin('power_control')
            power_control.power_cycle()

        with TestRun.step("Check configuration after load."):
            check_configuration(cleaning_policy, cache_mode, cache_line_size)

    with TestRun.step("Stop cache."):
        cache.stop()
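test_stress_dirty_shutdown references module-level constants and the time module; a minimal sketch of the assumed surroundings (values illustrative):

import time

# Illustrative values - the originals are defined elsewhere in the test module.
cores_number = 4
iterations_per_config = 10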
Example #6
def prepare():
    base_prepare()
    ioclass_config.remove_ioclass_config()
    cache_device = next(
        filter(lambda disk: disk.disk_type in [DiskType.optane, DiskType.nand],
               TestRun.dut.disks))
    core_device = next(
        filter(
            lambda disk: disk.disk_type.value > cache_device.disk_type.value,
            TestRun.dut.disks))

    cache_device.create_partitions([Size(500, Unit.MebiByte)])
    core_device.create_partitions([Size(1, Unit.GibiByte)])

    cache_device = cache_device.partitions[0]
    core_device = core_device.partitions[0]

    TestRun.LOGGER.info(f"Starting cache")
    cache = casadm.start_cache(cache_device,
                               cache_mode=CacheMode.WB,
                               force=True)
    TestRun.LOGGER.info(f"Setting cleaning policy to NOP")
    casadm.set_param_cleaning(cache_id=cache.cache_id,
                              policy=CleaningPolicy.nop)
    TestRun.LOGGER.info(f"Adding core device")
    core = casadm.add_core(cache, core_dev=core_device)

    ioclass_config.create_ioclass_config(
        add_default_rule=False, ioclass_config_path=ioclass_config_path)
    # To make the test more precise, all workloads except the tested IO class
    # should be put in pass-through mode
    ioclass_config.add_ioclass(
        ioclass_id=0,
        eviction_priority=22,
        allocation=False,
        rule="unclassified",
        ioclass_config_path=ioclass_config_path,
    )

    output = TestRun.executor.run(f"mkdir -p {mountpoint}")
    if output.exit_code != 0:
        raise Exception(f"Failed to create mountpoint")

    return cache, core
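This prepare() helper assumes module-level paths; plausible definitions (values assumed):

# Assumed module-level paths used by prepare() above (illustrative values).
ioclass_config_path = "/tmp/opencas_ioclass.conf"
mountpoint = "/tmp/cas1-1"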
Example #7
def test_ioclass_conditions_or(filesystem):
    """
        title: IO class condition 'or'.
        description: |
          Load config with an IO class combining 5 mutually exclusive directory conditions
          connected by the OR operator.
        pass_criteria:
          - No kernel bug.
          - Every IO fulfilling one condition is classified properly.
    """

    with TestRun.step("Prepare cache and core. Disable udev."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step("Create and load IO class config file."):
        # directories OR condition
        ioclass_config.add_ioclass(
            ioclass_id=1,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|"
                 f"directory:{mountpoint}/dir3|directory:{mountpoint}/dir4|"
                 f"directory:{mountpoint}/dir5",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step(f"Prepare {filesystem.name} filesystem "
                      f"and mount {core.system_path} at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        for i in range(1, 6):
            fs_utils.create_directory(f"{mountpoint}/dir{i}")
        sync()

    with TestRun.step(
            "Perform IO fulfilling each condition and check if occupancy rises."
    ):
        for i in range(1, 6):
            file_size = Size(random.randint(25, 50), Unit.MebiByte)
            base_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy
            (Fio().create_command()
             .io_engine(IoEngine.libaio)
             .size(file_size)
             .read_write(ReadWrite.write)
             .target(f"{mountpoint}/dir{i}/test_file")
             .run())
            sync()
            new_occupancy = cache.get_io_class_statistics(
                io_class_id=1).usage_stats.occupancy

            if new_occupancy != base_occupancy + file_size:
                TestRun.fail(
                    "Occupancy has not increased correctly!\n"
                    f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}"
                )
Example #8
def test_load_corrupted():
    """
    title: Standby-load corrupted metadata
    description: |
      Try to load standby instance from corrupted metadata
    pass_criteria:
      - Kernel panic doesn't occur
    """
    with TestRun.step("Prepare devices for the cache and core."):
        cache_device = TestRun.disks["cache"]
        cache_device.create_partitions([Size(200, Unit.MebiByte)])
        cache_device = cache_device.partitions[0]
        core_device = TestRun.disks["core"]
        core_device.create_partitions([Size(500, Unit.MebiByte)])
        core_device = core_device.partitions[0]

    with TestRun.step("Prepare metadata dump"):
        cache_id = 1
        cls = CacheLineSize.LINE_32KiB
        md_dump = prepare_md_dump(cache_device, core_device, cls, cache_id)

    for offset in get_offsets_to_corrupt(md_dump.size, block_size):

        with TestRun.step(
                f"Corrupt {block_size} at offset {offset * block_size}"):
            corrupted_md = prepare_corrupted_md(md_dump, offset, block_size)

        with TestRun.step(
                "Copy the corrupted metadata to the cache-to-be device"):
            Dd().input(corrupted_md.full_path).output(cache_device.path).run()
            sync()

        with TestRun.step("Try to load cache instance"):
            output = TestRun.executor.run(
                standby_load_cmd(cache_dev=cache_device.path))

        with TestRun.step("Per iteration cleanup"):
            if output.exit_code == 0:
                # The standby instance loaded despite the corruption - stop it
                # before the next iteration reuses the device.
                casadm.stop_all_caches()
            corrupted_md.remove(force=True, ignore_errors=True)

    with TestRun.step("Test cleanup"):
        md_dump.remove()
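The loop above measures corruption offsets in units of a module-level block_size; a plausible definition (assumed):

# Assumed corruption granularity: one 512 B block per iteration.
block_size = Size(1, Unit.Blocks512)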
Example #9
def test_udev_raid_core():
    """
        title: |
          CAS udev rule execution for core after recreating a RAID device listed
          in the configuration file as a core.
        description: |
          Verify that the CAS udev rule is executed for a RAID volume recreated after a soft reboot.
        pass_criteria:
          - No kernel error
          - After reboot, the RAID volume is added to the cache instance and is in 'active' state
    """
    with TestRun.step("Test prepare."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_disk = core_disk.partitions[0]
        core_disk2 = TestRun.disks["core2"]
        core_disk2.create_partitions([Size(2, Unit.GibiByte)])
        core_disk2 = core_disk2.partitions[0]

    with TestRun.step("Create RAID0 volume."):
        config = RaidConfiguration(level=Level.Raid0,
                                   metadata=MetadataVariant.Legacy,
                                   number_of_devices=2)
        core_dev = Raid.create(config, [core_disk, core_disk2])

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, force=True)
        core = cache.add_core(core_dev)

    with TestRun.step("Create init config from running CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Reboot system."):
        TestRun.executor.reboot()

    with TestRun.step(
            "Check if core device is active and not in the core pool."):
        check_if_dev_in_core_pool(core_dev, False)
        if core.get_status() != CoreStatus.active:
            TestRun.fail(
                f"Core status is {core.get_status()} instead of active.")
Example #10
    def __init__(self, trace):
        try:
            self.iops = float(trace["throughput"]["value"])
            self.workset = Size(float(trace["workset"]["value"]),
                                Unit.Blocks512)
            bandwidth_unit = parse_unit(
                trace["bandwidth"]["unit"].split("/")[0])
            bandwidth_value = float(trace["bandwidth"]["value"])
            self.bandwidth = Size(bandwidth_value,
                                  UnitPerSecond(bandwidth_unit))
        except KeyError:
            self.iops = 0
            self.workset = Size(0)
            self.bandwidth = Size(0)

        if "write invalidation factor" in trace:
            self.wif = float(trace["write invalidation factor"]["value"])
        else:
            self.wif = None
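A hedged construction example showing the trace-statistics shape this __init__ expects (class name and field values invented):

# Hypothetical iotrace statistics entry; keys mirror those read in __init__.
trace = {
    "throughput": {"value": "1500", "unit": "IOPS"},
    "workset": {"value": "2048", "unit": "sectors"},
    "bandwidth": {"value": "6", "unit": "MiB/s"},
    "write invalidation factor": {"value": "1.2"},
}
stats = TraceStatistics(trace)  # enclosing class name assumed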
Example #11
def create_file_with_ddrescue(core_dev, test_file_path):
    ddrescue = Ddrescue() \
        .block_size(Size(1, Unit.Blocks4096)) \
        .size(core_dev.size * 0.9) \
        .synchronous() \
        .source("/dev/urandom") \
        .destination(test_file_path)
    ddrescue.run()

    return File(test_file_path)
Example #12
def run_dd(target_path, count, seek):
    dd = Dd() \
        .input("/dev/zero") \
        .output(target_path) \
        .block_size(Size(1, Unit.Blocks4096)) \
        .count(count) \
        .oflag("direct") \
        .seek(seek)
    dd.run()
    TestRun.LOGGER.info(f"dd command:\n{dd}")
Example #13
def run_io_dir(path, size_4k):
    dd = (Dd().input("/dev/zero").output(f"{path}").count(size_4k).block_size(
        Size(1, Unit.Blocks4096)))
    TestRun.LOGGER.info(f"{dd}")
    output = dd.run()
    if output.exit_code != 0:
        TestRun.fail(
            f"Failed to execute dd.\n {output.stdout}\n{output.stderr}")
    sync()
    drop_caches(DropCachesMode.ALL)
Example #14
def prepare(cores_count=1, cache_line_size: CacheLineSize = None):
    cache_device = TestRun.disks['cache']
    core_device = TestRun.disks['core']
    cache_device.create_partitions(
        [(SEQ_CUTOFF_THRESHOLD_MAX * cores_count + Size(5, Unit.GibiByte)).align_down(0x1000)])
    partitions = \
        [(SEQ_CUTOFF_THRESHOLD_MAX + Size(10, Unit.GibiByte)).align_down(0x1000)] * cores_count
    core_device.create_partitions(partitions)
    cache_part = cache_device.partitions[0]
    core_parts = core_device.partitions
    TestRun.LOGGER.info("Starting cache")

    cache = casadm.start_cache(cache_part, force=True, cache_line_size=cache_line_size)
    Udev.disable()
    TestRun.LOGGER.info("Adding core devices")
    core_list = []
    for core_part in core_parts:
        core_list.append(cache.add_core(core_dev=core_part))
    return cache, core_list
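SEQ_CUTOFF_THRESHOLD_MAX is defined elsewhere in the module; any value accepted by casadm works for the partition sizing above, e.g. (assumed):

# Assumed module-level constant (illustrative value).
SEQ_CUTOFF_THRESHOLD_MAX = Size(4, Unit.GibiByte)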
Example #15
    def get_lba_histogram(trace_path: str,
                          bucket_size: Size = Size(0, Unit.Byte),
                          subrange_start: int = 0,
                          subrange_end: int = 0,
                          shortcut: bool = False) -> list:
        """
        Get the LBA histogram for the given trace path

        :param trace_path: trace path
        :param bucket_size: bucket size
        :param subrange_start: subrange start
        :param subrange_end: subrange end
        :param shortcut: Use shorter command
        :type trace_path: str
        :type bucket_size: Size
        :type subrange_start: int
        :type subrange_end: int
        :type shortcut: bool
        :return: LBA histogram
        :raises Exception: if iotrace command or histogram is invalid
        """
        bucket_size_range = range(1, 4294967296)
        subrange_range = range(1, 9223372036854775808)
        if subrange_start and subrange_end:
            if subrange_start > subrange_end:
                subrange_start, subrange_end = subrange_end, subrange_start

        command = 'iotrace' + (' -P' if shortcut else ' --trace-parser')
        command += ' -B' if shortcut else ' --lba-histogram'
        command += (' -p ' if shortcut else ' --path ') + f'{trace_path}'

        # A bucket size of zero (the default) means "not set".
        if bucket_size is not None and int(bucket_size.get_value(Unit.Byte)) != 0:
            if int(bucket_size.get_value(Unit.Byte)) not in bucket_size_range:
                raise CmdException(f"Given size is out of range {bucket_size_range}.")
            command += ' -b ' if shortcut else ' --bucket-size '
            command += f'{int(bucket_size.get_value(Unit.Byte))}'

        # A start position of zero (the default) means "not set".
        if subrange_start:
            if subrange_start not in subrange_range:
                raise CmdException(f"Given start position is out of range {subrange_range}.")
            command += ' -s ' if shortcut else ' --subrange-start '
            command += f'{subrange_start}'

        # An end position of zero (the default) means "not set".
        if subrange_end:
            if subrange_end not in subrange_range:
                raise CmdException(f"Given end position is out of range {subrange_range}.")
            command += ' -e ' if shortcut else ' --subrange-end '
            command += f'{subrange_end}'
        command += (' -f ' if shortcut else ' --format ') + 'json'

        output = TestRun.executor.run(command)
        if output.stdout == "":
            raise CmdException("Invalid histogram", output)

        return parse_json(output.stdout)
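An example invocation (enclosing class name, trace path, and values assumed):

histogram = IotracePlugin.get_lba_histogram(  # class name assumed
    trace_path="kernel/2020-01-01_12-00-00",
    bucket_size=Size(1, Unit.MebiByte),
    shortcut=False)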
Example #16
def test_remove_core_when_other_mounted_custom_numeration():
    """
        title: |
          Test for removing one core from the cache when the other core is mounted.
          Cores have custom numbering, starting with the same digit.
        description: |
          Test of the ability to remove the unmounted core from the cache when the other core
          is mounted and its ID starts with the same digit.
        pass_criteria:
          - No system crash.
          - Removing unmounted core finished with success.
    """
    with TestRun.step("Prepare devices."):
        cache_device = TestRun.disks['cache']
        cache_device.create_partitions([Size(50, Unit.MebiByte)])
        cache_part = cache_device.partitions[0]
        core_device = TestRun.disks['core']
        core_device.create_partitions([Size(200, Unit.MebiByte)] *
                                      cores_amount)

    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_part, force=True)

    with TestRun.step(
            "Add cores to the cache and mount all but the first one."):
        random_prefix = random.randint(1, 9)
        random_interfix = random.randint(1, 9)

        free_core = cache.add_core(core_device.partitions[0], random_prefix)
        mounted_cores = []
        for i, part in enumerate(core_device.partitions[1:]):
            part.create_filesystem(Filesystem.xfs)
            mounted_cores.append(
                cache.add_core(part, f"{random_prefix}{random_interfix}{i}"))
            mounted_cores[i].mount(
                f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}")

    with TestRun.step("Remove the unmounted core."):
        try:
            cache.remove_core(free_core.core_id)
        except CmdException as exc:
            TestRun.fail(f"Cannot remove the unmounted core.\n{exc}")
Example #17
def test_add_cached_core(cache_mode):
    """
        title: Fault injection test for adding already used core to a cache.
        description: |
          Negative test of the ability to add the same core to a cache twice
          and to add a core that is already used by another cache instance.
        pass_criteria:
          - No system crash.
          - Adding already used core to another cache instance fails.
          - The same core device cannot be used twice in one cache instance.
    """
    with TestRun.step("Prepare two caches and one core device."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)])
        cache_part1 = cache_dev.partitions[0]
        cache_part2 = cache_dev.partitions[1]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]

    with TestRun.step("Start the first cache instance"):
        cache1 = casadm.start_cache(cache_part1, cache_mode, force=True)

    with TestRun.step("Add core device to first cache instance."):
        core = cache1.add_core(core_part)

    with TestRun.step("Start the second cache instance"):
        cache2 = casadm.start_cache(cache_part2, cache_mode, force=True)

    with TestRun.step("Try adding the same core device to the second cache instance."):
        output = TestRun.executor.run_expect_fail(
            cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.system_path),
                             core_id=str(core.core_id)))
        cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)

    with TestRun.step("Try adding the same core device to the same cache for the second time."):
        output = TestRun.executor.run_expect_fail(
            cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.system_path)))
        cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)

    with TestRun.step("Stop caches."):
        casadm.stop_all_caches()
Example #18
def test_remove_multilevel_core():
    """
        title: Test of the ability to remove a core used in a multilevel cache.
        description: |
          Negative test checking that OpenCAS does not allow removing a core when the related
          exported object is used as a core device by another cache instance.
        pass_criteria:
          - No system crash.
          - OpenCAS does not allow removing a core used in a multilevel cache instance.
    """
    with TestRun.step("Prepare two devices for cache and one for core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(512, Unit.MebiByte)] * 2)
        cache_part1 = cache_dev.partitions[0]
        cache_part2 = cache_dev.partitions[1]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_dev.partitions[0]

    with TestRun.step("Start the first cache instance"):
        cache1 = casadm.start_cache(cache_part1, force=True)

    with TestRun.step("Add a core device to the first cache instance."):
        core1 = cache1.add_core(core_dev)

    with TestRun.step("Start the second cache instance"):
        cache2 = casadm.start_cache(cache_part2, force=True)

    with TestRun.step("Add the first cache's exported object as a core "
                      "to the second cache instance."):
        cache2.add_core(core1)

    with TestRun.step("Try to remove core from the first level cache."):
        output = TestRun.executor.run_expect_fail(
            cli.remove_core_cmd(cache_id=str(cache1.cache_id),
                                core_id=str(core1.core_id),
                                force=True))
        cli_messages.check_stderr_msg(output,
                                      cli_messages.remove_multilevel_core)

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example #19
def read_files_with_reclassification_check(cache, target_ioclass_id: int,
                                           source_ioclass_id: int,
                                           directory: Directory,
                                           with_delay: bool):
    start_time = datetime.now()
    target_occupancy_after = cache.get_io_class_statistics(
        io_class_id=target_ioclass_id).usage_stats.occupancy
    source_occupancy_after = cache.get_io_class_statistics(
        io_class_id=source_ioclass_id).usage_stats.occupancy
    unclassified_files = []

    for file in [item for item in directory.ls() if isinstance(item, File)]:
        target_occupancy_before = target_occupancy_after
        source_occupancy_before = source_occupancy_after
        time_from_start = datetime.now() - start_time
        (Dd().input(file.full_path).output("/dev/null").block_size(
            Size(1, Unit.Blocks4096)).run())
        target_occupancy_after = cache.get_io_class_statistics(
            io_class_id=target_ioclass_id).usage_stats.occupancy
        source_occupancy_after = cache.get_io_class_statistics(
            io_class_id=source_ioclass_id).usage_stats.occupancy
        if target_occupancy_after < target_occupancy_before:
            pytest.xfail("Target IO class occupancy lowered!")
        elif target_occupancy_after - target_occupancy_before < file.size:
            unclassified_files.append(file)
            if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                continue
            pytest.xfail("Target IO class occupancy not changed properly!")
        if source_occupancy_after >= source_occupancy_before:
            if file not in unclassified_files:
                unclassified_files.append(file)
            if with_delay and time_from_start <= ioclass_config.MAX_CLASSIFICATION_DELAY:
                continue
            pytest.xfail("Source IO class occupancy not changed properly!")

    if len(unclassified_files):
        TestRun.LOGGER.info("Rereading unclassified test files...")
        sync()
        drop_caches(DropCachesMode.ALL)
        for file in unclassified_files:
            (Dd().input(file.full_path).output("/dev/null").block_size(
                Size(1, Unit.Blocks4096)).run())
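The reclassification check above bounds waiting by ioclass_config.MAX_CLASSIFICATION_DELAY; a sketch of the assumed constant:

from datetime import timedelta

# Assumed (in ioclass_config): classification must settle within this window.
MAX_CLASSIFICATION_DELAY = timedelta(seconds=6)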
Example #20
def test_stop_cache_with_mounted_partition(cache_mode):
    """
        title: Fault injection test for removing core and stopping cache with mounted core.
        description: |
          Negative test of the ability of CAS to remove core and stop cache while core
          is still mounted.
        pass_criteria:
          - No system crash.
          - Unable to stop cache when partition is mounted.
          - Unable to remove core when partition is mounted.
    """
    with TestRun.step("Prepare cache and core devices. Start CAS."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Add core device with xfs filesystem and mount it."):
        core_part.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step("Try to remove core from cache."):
        output = TestRun.executor.run_expect_fail(
            cli.remove_core_cmd(cache_id=str(cache.cache_id),
                                core_id=str(core.core_id)))
        cli_messages.check_stderr_msg(output, cli_messages.remove_mounted_core)

    with TestRun.step("Try to stop CAS."):
        output = TestRun.executor.run_expect_fail(
            cli.stop_cmd(cache_id=str(cache.cache_id)))
        cli_messages.check_stderr_msg(output,
                                      cli_messages.stop_cache_mounted_core)

    with TestRun.step("Unmount core device."):
        core.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example #21
def test_attach_core_pool():
    """
    title: Attaching from core pool on cache load.
    description: |
      Check that, on cache load, CAS attaches core devices from the core pool if those
      devices were previously used by the cache instance being loaded, and that it does
      not attach core devices that were not previously used.
    pass_criteria:
      - No system crash while reloading CAS modules.
      - Core device was added successfully to core pool.
      - Core device has been successfully attached to cache on cache load.
      - Second core device was not attached to the cache instance.
    """
    with TestRun.step("Prepare devices."):
        cache_disk = TestRun.disks["cache"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_disk = TestRun.disks["core"]
        core_disk.create_partitions([Size(2, Unit.GibiByte), Size(2, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]
        second_core_dev = core_disk.partitions[1]
    with TestRun.step("Start cache."):
        cache = casadm.start_cache(cache_dev, force=True)
    with TestRun.step("Add core device."):
        cache.add_core(core_dev)
    with TestRun.step("Stop cache."):
        cache.stop()
    with TestRun.step("Add previously used core device to core pool using --try-add flag."):
        first_core = casadm.try_add(core_dev, cache.cache_id)
    with TestRun.step("Add different core device to core pool using --try-add flag."):
        second_core = casadm.try_add(second_core_dev, cache.cache_id)
    with TestRun.step("Load cache."):
        cache = casadm.load_cache(cache_dev)
    with TestRun.step("Check each core status."):
        if first_core.get_status() is not CoreStatus.active:
            TestRun.fail(f"First core status should be active but is {first_core.get_status()}.")
        if second_core.get_status() is not CoreStatus.detached:
            TestRun.fail(
                f"Second core status should be detached but is {second_core.get_status()}.")
    with TestRun.step("Stop cache and remove core from core pool."):
        casadm.remove_all_detached_cores()
        cache.stop()
Example #22
def prepare():
    cache_dev = TestRun.disks["cache"]
    core_dev = TestRun.disks["core"]

    cache_dev.create_partitions([Size(100, Unit.MiB)])
    core_dev.create_partitions([Size(200, Unit.MiB)])

    Udev.disable()
    cache = casadm.start_cache(cache_dev.partitions[0], force=True, cache_mode=CacheMode.WB)
    core = cache.add_core(core_dev.partitions[0])
    cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
    cache.set_cleaning_policy(CleaningPolicy.alru)
    cache.set_params_alru(
        FlushParametersAlru(
            activity_threshold=Time(seconds=100),
            staleness_time=Time(seconds=1),
        )
    )

    return cache, core
Example #23
def run_workload(target: str,
                 runtime: timedelta,
                 io_depth=128,
                 verify=True,
                 block_size=int(Size(4, Unit.KibiByte)),
                 num_jobs=1,
                 method=ReadWrite.randrw,
                 io_engine=IoEngine.libaio):
    fio_run = setup_workload(target, runtime, io_depth, verify, block_size,
                             num_jobs, method, io_engine)
    return fio_run.run()
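A hedged call site for run_workload (device path assumed; setup_workload is expected to build the Fio command from the same parameters):

from datetime import timedelta

# Hypothetical: run a 5-minute mixed random workload on an exported object.
fio_output = run_workload("/dev/cas1-1", timedelta(minutes=5))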
Example #24
    def __init__(self, device_name: str, raw_stats: list = None):
        metrics_number = 5
        if raw_stats is None:
            raw_stats = [0] * metrics_number
        if len(raw_stats) < metrics_number:
            # Pad missing trailing fields with zeros.
            raw_stats.extend([0] * (metrics_number - len(raw_stats)))

        self.device_name = device_name
        # tps
        self.transfers_per_second = float(raw_stats[0])
        # kB_read/s
        self.reads_per_second = Size(float(raw_stats[1]),
                                     UnitPerSecond(Unit.KiloByte))
        # kB_wrtn/s
        self.writes_per_second = Size(float(raw_stats[2]),
                                      UnitPerSecond(Unit.KiloByte))
        # kB_read
        self.total_reads = Size(float(raw_stats[3]), Unit.KibiByte)
        # kB_wrtn
        self.total_writes = Size(float(raw_stats[4]), Unit.KibiByte)
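Illustrative construction from a parsed iostat row (class name and numbers invented):

# Fields follow iostat's column order: tps, kB_read/s, kB_wrtn/s, kB_read, kB_wrtn.
row = "sda 12.5 256.0 128.0 102400 51200".split()
stats = IOstatBasic(device_name=row[0], raw_stats=row[1:])  # class name assumed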
Example #25
def parse_ls_output(ls_output, dir_path=''):
    split_output = ls_output.split('\n')
    fs_items = []
    for line in split_output:
        if not line.strip():
            continue
        line_fields = line.split()
        if len(line_fields) < 8:
            continue
        file_type = line[0]
        if file_type not in ['-', 'd', 'l', 'b', 'c', 'p', 's']:
            continue
        permissions = line_fields[0][1:].replace('.', '')
        owner = line_fields[2]
        group = line_fields[3]
        size = Size(float(line_fields[4]), Unit.Byte)
        split_date = line_fields[5].split('-')
        split_time = line_fields[6].split(':')
        modification_time = datetime(int(split_date[0]), int(split_date[1]),
                                     int(split_date[2]), int(split_time[0]),
                                     int(split_time[1]), int(split_time[2]))
        if dir_path and file_type != 'l':
            full_path = '/'.join([dir_path, line_fields[7]])
        else:
            full_path = line_fields[7]

        from test_utils.filesystem.file import File, FsItem
        from test_utils.filesystem.directory import Directory
        from test_utils.filesystem.symlink import Symlink

        if file_type == '-':
            fs_item = File(full_path)
        elif file_type == 'd':
            fs_item = Directory(full_path)
        elif file_type == 'l':
            target_path = TestProperties.executor.execute(
                f"readlink -f {full_path}").stdout
            fs_item = Symlink(full_path, target_path)
        else:
            fs_item = FsItem(full_path)

        fs_item.permissions.user = Permissions['|'.join(list(permissions[:3].replace('-', '')))]\
            if permissions[:3] != '---' else Permissions(0)
        fs_item.permissions.group = Permissions['|'.join(list(permissions[3:6].replace('-', '')))]\
            if permissions[3:6] != '---' else Permissions(0)
        fs_item.permissions.other = Permissions['|'.join(list(permissions[6:].replace('-', '')))]\
            if permissions[6:] != '---' else Permissions(0)

        fs_item.owner = owner
        fs_item.group = group
        fs_item.size = size
        fs_item.modification_time = modification_time
        fs_items.append(fs_item)
    return fs_items
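parse_ls_output expects 'ls -l'-style lines with separate date and time fields; a hedged example (listing invented):

# Hypothetical single-line listing in the expected format.
ls_line = "-rwxr-xr-x 1 root root 4096 2020-01-01 12:00:00 /tmp/example_file"
items = parse_ls_output(ls_line)  # -> [File("/tmp/example_file")]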
Example #26
def allocate_memory(size: Size):
    """Allocates given amount of memory"""
    mount_ramfs()
    TestRun.LOGGER.info(
        f"Allocating {size.get_value(Unit.MiB):0.2f} MiB of memory.")
    bs = Size(1, Unit.Blocks512)
    dd = (Dd().block_size(bs).count(math.ceil(
        size / bs)).input("/dev/zero").output(f"{MEMORY_MOUNT_POINT}/data"))
    output = dd.run()
    if output.exit_code != 0:
        raise CmdException("Allocating memory failed.", output)
Example #27
def run_io_dir(path, num_ios):
    dd = (
        Dd()
        .input("/dev/zero")
        .output(f"{path}/tmp_file")
        .count(num_ios)
        .block_size(Size(1, Unit.Blocks4096))
    )
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
Example #28
def test_create_example_partitions(prepare_and_cleanup):
    prepare()
    TestRun.LOGGER.info("Test run")
    TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
    test_disk = TestRun.dut.disks[0]
    part_sizes = []
    for i in range(1, 6):
        part_sizes.append(Size(10 * i + 100, Unit.MebiByte))
    test_disk.create_partitions(part_sizes)
    TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")
    test_disk.partitions[0].create_filesystem(Filesystem.ext3)
Example #29
def file_operation(target_path, data_pattern, io_pattern):
    fio = (Fio().create_command()
           .target(target_path)
           .io_engine(IoEngine.libaio)
           .size(test_file_size)
           .read_write(io_pattern)
           .block_size(Size(1, Unit.Blocks4096))
           .verification_with_pattern(data_pattern)
           .direct()
           .set_param("do_verify", 0))
    fio.run()
Example #30
def fio_prepare():
    fio = (
        Fio()
        .create_command()
        .io_engine(IoEngine.libaio)
        .block_size(Size(4, Unit.KibiByte))
        .size(io_size)
        .read_write(ReadWrite.randwrite)
        .direct(1)
    )
    return fio
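Both fio helpers above reference module-level sizes; plausible definitions (values illustrative):

# Assumed module-level constants (illustrative values).
test_file_size = Size(500, Unit.MebiByte)
io_size = Size(100, Unit.MebiByte)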