def prepare_md_dump(cache_device, core_device, cls, cache_id):
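    """Start a WB cache with a single core, dump the on-disk cache metadata
    to a file, stop the cache and return the dump file."""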
    with TestRun.step("Setup WB cache instance with one core"):
        cache = casadm.start_cache(
            cache_dev=cache_device,
            cache_line_size=cls,
            cache_mode=CacheMode.WB,
            cache_id=cache_id,
            force=True,
        )
        cache.add_core(core_device)

    with TestRun.step("Get metadata size"):
        dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
        md_size = dmesg.get_metadata_size(dmesg_out)

    with TestRun.step("Dump the metadata of the cache"):
        dump_file_path = "/tmp/test_activate_corrupted.dump"
        md_dump = File(dump_file_path)
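        # Remove any stale dump left over from a previous run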
        md_dump.remove(force=True, ignore_errors=True)

        # Copy whole mebibytes, rounding up, so the entire metadata area is captured
        dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
        (
            Dd()
            .input(cache_device.path)
            .output(md_dump.full_path)
            .block_size(Size(1, Unit.MebiByte))
            .count(dd_count)
            .run()
        )
        md_dump.refresh_item()

    with TestRun.step("Stop cache device"):
        cache.stop()

        return md_dump


def prepare_corrupted_md(md_dump, offset_to_corrupt, bs):
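    """Copy the metadata dump and overwrite it with random data starting at
    the given block offset, returning the corrupted copy."""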
    invalid_dump_path = "/tmp/test_activate_corrupted.invalid_dump"
    dd_count = offset_to_corrupt + 1

    md_dump.copy(destination=invalid_dump_path, force=True)
    corrupted_md = File(invalid_dump_path)
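    # Overwrite blocks starting at `offset_to_corrupt` with random data;
    # conv=notrunc preserves the file size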
    (Dd().input("/dev/urandom").output(corrupted_md.full_path).block_size(
        bs).count(dd_count).seek(offset_to_corrupt).conv("notrunc").run())
    corrupted_md.refresh_item()

    return corrupted_md
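
# A minimal usage sketch for the two helpers above, assuming `cache_dev` and
# `core_dev` are already-prepared block devices and that `CacheLineSize` is
# available from the test framework (hypothetical wiring, not part of the
# original snippet):
#
#     md_dump = prepare_md_dump(cache_dev, core_dev, CacheLineSize.LINE_4KiB, cache_id=1)
#     corrupted_md = prepare_corrupted_md(md_dump, offset_to_corrupt=1, bs=Size(4, Unit.KibiByte))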
def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
    """
        title: Planned system shutdown test.
        description: Test for data consistency after clean system shutdown.
        pass_criteria:
          - DUT should reboot successfully.
          - Checksum of file on core device should be the same before and after reboot.
    """
    with TestRun.step("Prepare CAS device."):
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)
        core.create_filesystem(filesystem,
                               blocksize=int(Size(1, Unit.Blocks4096)))
        core.mount(mount_point)

    with TestRun.step("Create file on cache and count its checksum."):
        test_file = File(os.path.join(mount_point, "test_file"))
        Dd()\
            .input("/dev/zero")\
            .output(test_file.full_path)\
            .block_size(Size(1, Unit.KibiByte))\
            .count(1024)\
            .run()
        test_file.refresh_item()
        test_file_md5 = test_file.md5sum()
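        # Persist the file to the CAS device and drop the page cache before
        # the reboot, so the later checksum is read back from the device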
        sync()
        drop_caches(DropCachesMode.ALL)

    with TestRun.step("Reset platform."):
        if reboot_type == "soft":
            TestRun.executor.reboot()
        else:
            power_control = TestRun.plugin_manager.get_plugin('power_control')
            power_control.power_cycle()

    with TestRun.step("Load cache."):
        casadm.load_cache(cache_dev)
        core.mount(mount_point)

    with TestRun.step("Check file md5sum."):
        test_file.refresh_item()
        if test_file_md5 != test_file.md5sum():
            TestRun.LOGGER.error(
                "Checksums do not match - file is corrupted.")
        else:
            TestRun.LOGGER.info("File checksum is correct.")

    with TestRun.step("Remove test file."):
        test_file.remove()
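
# In the full test module these arguments arrive via pytest parametrization;
# a sketch of the decorators (values illustrative, not from this snippet):
#
#     @pytest.mark.parametrize("reboot_type", ["soft", "hard"])
#     @pytest.mark.parametrize("cache_mode", CacheMode)
#     @pytest.mark.parametrize("filesystem", Filesystem)
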
def test_ioclass_directory_depth(filesystem):
    """
    Test if directory classification works properly for deeply nested directories for read and
    write operations.
    """
    cache, core = prepare()
    Udev.disable()
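    # Disable udev so automatic device handling does not generate IO that
    # would skew the classification statistics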

    TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
                        f"and mounting {core.system_path} at {mountpoint}")
    core.create_filesystem(filesystem)
    core.mount(mountpoint)
    sync()

    base_dir_path = f"{mountpoint}/base_dir"
    TestRun.LOGGER.info(f"Creating the base directory: {base_dir_path}")
    fs_utils.create_directory(base_dir_path)

    nested_dir_path = base_dir_path
    random_depth = random.randint(40, 80)
    for i in range(random_depth):
        nested_dir_path += f"/dir_{i}"
    TestRun.LOGGER.info(f"Creating a nested directory: {nested_dir_path}")
    fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    TestRun.LOGGER.info("Creating the first file in the nested directory")
    test_file_1 = File(f"{nested_dir_path}/test_file_1")
    dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
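    # Persist the file and drop the page cache so the read below reaches the
    # CAS device and can be classified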
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_1.refresh_item()

    ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
    # directory IO class
    ioclass_config.add_ioclass(
        ioclass_id=ioclass_id,
        eviction_priority=1,
        allocation=True,
        rule=f"directory:{base_dir_path}",
        ioclass_config_path=ioclass_config_path,
    )
    casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)

    base_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    TestRun.LOGGER.info("Reading the file in the nested directory")
    dd = (
        Dd()
        .input(test_file_1.full_path)
        .output("/dev/null")
        .block_size(Size(1, Unit.MebiByte))
    )
    dd.run()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_1.size, \
        "Wrong occupancy after reading file!\n" \
        f"Expected: {base_occupancy + test_file_1.size}, actual: {new_occupancy}"

    # Test classification in nested dir by creating a file
    base_occupancy = new_occupancy
    TestRun.LOGGER.info("Creating the second file in the nested directory")
    test_file_2 = File(f"{nested_dir_path}/test_file_2")
    dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
        random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
    dd.run()
    sync()
    drop_caches(DropCachesMode.ALL)
    test_file_2.refresh_item()

    new_occupancy = cache.get_statistics_deprecated(
        io_class_id=ioclass_id)["occupancy"]
    assert new_occupancy == base_occupancy + test_file_2.size, \
        "Wrong occupancy after creating file!\n" \
        f"Expected: {base_occupancy + test_file_2.size}, actual: {new_occupancy}"
def test_ioclass_directory_depth(filesystem):
    """
        title: Test IO classification by directory.
        description: |
          Test if directory classification works properly for deeply nested directories for read and
          write operations.
        pass_criteria:
          - No kernel bug.
          - Read and write operations to directories are classified properly.
    """
    base_dir_path = f"{mountpoint}/base_dir"

    with TestRun.step("Prepare cache and core."):
        cache, core = prepare()
        Udev.disable()

    with TestRun.step(
            f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
            f"at {mountpoint}."):
        core.create_filesystem(filesystem)
        core.mount(mountpoint)
        sync()

    with TestRun.step(f"Create the base directory: {base_dir_path}."):
        fs_utils.create_directory(base_dir_path)

    with TestRun.step(f"Create a nested directory."):
        nested_dir_path = base_dir_path
        random_depth = random.randint(40, 80)
        for i in range(random_depth):
            nested_dir_path += f"/dir_{i}"
        fs_utils.create_directory(path=nested_dir_path, parents=True)

    # Test classification in nested dir by reading a previously unclassified file
    with TestRun.step("Create the first file in the nested directory."):
        test_file_1 = File(f"{nested_dir_path}/test_file_1")
        dd = (Dd().input("/dev/urandom").output(test_file_1.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_1.refresh_item()

    with TestRun.step("Load IO class config."):
        ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
        # directory IO class
        ioclass_config.add_ioclass(
            ioclass_id=ioclass_id,
            eviction_priority=1,
            allocation=True,
            rule=f"directory:{base_dir_path}",
            ioclass_config_path=ioclass_config_path,
        )
        casadm.load_io_classes(cache_id=cache.cache_id,
                               file=ioclass_config_path)

    with TestRun.step("Read the file in the nested directory"):
        base_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        dd = (
            Dd()
            .input(test_file_1.full_path)
            .output("/dev/null")
            .block_size(Size(1, Unit.MebiByte))
        )
        dd.run()

    with TestRun.step("Check occupancy after creating the file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_1.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after reading file!\n"
                "Expected: {base_occupancy + test_file_1.size}, "
                f"actual: {new_occupancy}")

    # Test classification in nested dir by creating a file
    with TestRun.step("Create the second file in the nested directory"):
        base_occupancy = new_occupancy
        test_file_2 = File(f"{nested_dir_path}/test_file_2")
        dd = (Dd().input("/dev/urandom").output(test_file_2.full_path).count(
            random.randint(1, 200)).block_size(Size(1, Unit.MebiByte)))
        dd.run()
        sync()
        drop_caches(DropCachesMode.ALL)
        test_file_2.refresh_item()

    with TestRun.step("Check occupancy after creating the second file."):
        new_occupancy = cache.get_io_class_statistics(
            io_class_id=ioclass_id).usage_stats.occupancy
        if new_occupancy != base_occupancy + test_file_2.size:
            TestRun.LOGGER.error(
                "Wrong occupancy after creating file!\n"
                f"Expected: {base_occupancy + test_file_2.size}, "
                f"actual: {new_occupancy}")