Example no. 1
def test_stress_start(cache_mode):
    """
        title: Stress test for starting and stopping cache.
        description: Validate the ability of CAS to start and stop cache in the loop.
        pass_criteria:
          - No system crash while starting and stopping cache in the loop.
          - Cache starts and stops successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()

    for _ in TestRun.iteration(range(0, iterations_per_config),
                               f"Start and stop CAS {iterations_per_config} times."):
        with TestRun.step("Start cache."):
            cache = casadm.start_cache(cache_dev, cache_mode, force=True)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
        with TestRun.step("Add core."):
            cache.add_core(core_dev)
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(f"Expected cores count: 1; Actual cores count: {cores_count}.")
        with TestRun.step("Stop cache."):
            cache.stop()
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 0:
                TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.")

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
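
# The cache/core count checks repeated throughout these examples could be factored
# into a small helper. A minimal sketch, assuming only the casadm_parser and TestRun
# APIs already used above; this helper is hypothetical and not part of the framework.
def expect_counts(expected_caches, expected_cores=None, cache_id=None):
    caches_count = len(casadm_parser.get_caches())
    if caches_count != expected_caches:
        TestRun.fail(f"Expected caches count: {expected_caches}; "
                     f"Actual caches count: {caches_count}.")
    if expected_cores is not None:
        cores_count = len(casadm_parser.get_cores(cache_id))
        if cores_count != expected_cores:
            TestRun.fail(f"Expected cores count: {expected_cores}; "
                         f"Actual cores count: {cores_count}.")

# Example usage (hypothetical): expect_counts(1, expected_cores=1, cache_id=cache.cache_id)
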
Example no. 2
def pytest_runtest_teardown():
    """
    This method is always executed at the end of each test, even if the test fails or
    raises an exception in the prepare stage.
    """
    TestRun.LOGGER.end_all_groups()

    with TestRun.LOGGER.step("Cleanup after test"):
        try:
            if TestRun.executor:
                if not TestRun.executor.is_active():
                    TestRun.executor.wait_for_connection()
                Udev.enable()
                kill_all_io()
                unmount_cas_devices()
                if installer.check_if_installed():
                    casadm.remove_all_detached_cores()
                    casadm.stop_all_caches()
                    from api.cas.init_config import InitConfig
                    InitConfig.create_default_init_config()
                DeviceMapper.remove_all()
        except Exception as ex:
            TestRun.LOGGER.warning(f"Exception occurred during platform cleanup.\n"
                                   f"{str(ex)}\n{traceback.format_exc()}")

    TestRun.LOGGER.end()
    for dut in TestRun.duts:
        with TestRun.use_dut(dut):
            if TestRun.executor:
                os.makedirs(os.path.join(TestRun.LOGGER.base_dir, "dut_info", dut.ip),
                            exist_ok=True)
                TestRun.LOGGER.get_additional_logs()
    Log.destroy()
    TestRun.teardown()
Example no. 3
def test_stop_cache_during_io():
    """
        title: Test for stopping cache during IO.
        description: |
          Creating CAS device, running fio on it and checking
          if cache can be stopped during IO operations.
        pass_criteria:
          - Cache is not stopped.
    """
    with TestRun.step("Start cache and add core"):
        cache, core = prepare()

    with TestRun.step("Running 'fio'"):
        fio = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .block_size(Size(4, Unit.KibiByte))
            .read_write(ReadWrite.randrw)
            .target(f"{core.system_path}")
            .direct(1)
            .run_time(timedelta(minutes=4))
            .time_based()
        )
        fio_pid = fio.run_in_background()
        time.sleep(10)

    with TestRun.step("Try to stop cache during 'fio'"):
        TestRun.executor.run_expect_fail(cli.stop_cmd(f"{cache.cache_id}"))

    with TestRun.step("Stopping 'fio'"):
        TestRun.executor.kill_process(fio_pid)

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
Example no. 4
def test_stress_load(cache_mode):
    """
        title: Stress test for stopping and loading CAS device.
        description: |
          Validate the ability of the CAS to load and stop cache in the loop
          using different cache modes.
        pass_criteria:
          - No system crash while stopping and loading cache in the loop.
          - CAS device loads successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        casadm.add_core(cache, core_dev)

    for _ in TestRun.iteration(range(0, iterations_per_config),
                               f"Stop cache and load it {iterations_per_config} times."):
        with TestRun.step("Stop cache."):
            casadm.stop_cache(cache.cache_id)
            if len(casadm_parser.get_caches()) != 0:
                TestRun.fail("Cache did not stop successfully.")
        with TestRun.step("Load cache."):
            casadm.load_cache(cache_dev)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Cache did not load successfully - wrong number of caches: {caches_count}.")
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.LOGGER.error(f"Cache loaded with wrong cores count: {cores_count}.")

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
Example no. 5
def pytest_runtest_teardown():
    """
    This method is always executed at the end of each test, even if the test fails or
    raises an exception in the prepare stage.
    """
    if TestRun.outcome == "skipped":
        return

    TestRun.LOGGER.end_all_groups()

    with TestRun.LOGGER.step("Cleanup after test"):
        try:
            if TestRun.executor:
                if not TestRun.executor.is_active():
                    TestRun.executor.wait_for_connection()
                Udev.enable()
                unmount_cas_devices()
                casadm.stop_all_caches()
        except Exception as ex:
            TestRun.LOGGER.warning(
                f"Exception occured during platform cleanup.\n"
                f"{str(ex)}\n{traceback.format_exc()}")

        if 'test_wrapper' in sys.modules:
            try:
                test_wrapper.cleanup()
            except Exception as ex:
                TestRun.LOGGER.warning(
                    f"Exception occured during test wrapper cleanup.\n{str(ex)}"
                    f"\n{traceback.format_exc()}")

    TestRun.LOGGER.end()
    if TestRun.executor:
        TestRun.LOGGER.get_additional_logs()
    Log.destroy()
Example no. 6
def test_another_cache_with_same_id():
    """
        title: Test for creating another cache device with the same ID.
        description: |
          Checking if adding another cache device and setting
          the same cache ID as the previous one fails.
        pass_criteria:
          - No additional cache device added.
    """
    with TestRun.step("Start cache with ID = 1"):
        cache_dev_1 = TestRun.disks["cache_1"]
        cache_dev_1.create_partitions([Size(2, Unit.GibiByte)])
        TestRun.executor.run_expect_success(
            cli.start_cmd(cache_dev_1.partitions[0].system_path,
                          cache_id="1",
                          force=True))

    with TestRun.step("Try to start another cache with the same ID = 1"):
        cache_dev_2 = TestRun.disks["cache_2"]
        cache_dev_2.create_partitions([Size(2, Unit.GibiByte)])
        TestRun.executor.run_expect_fail(
            cli.start_cmd(cache_dev_2.partitions[0].system_path,
                          cache_id="1",
                          force=True))

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
Example no. 7
def test_stress_reload_cache(cache_mode):
    """
        title: Stress test for reloading cache with simple data integrity check.
        description: |
          Validate the ability of CAS to reload cache in the loop
          with no data corruption.
        pass_criteria:
          - No system crash while reloading cache.
          - CAS device loads successfully.
          - No data corruption.
    """
    with TestRun.step("Prepare cache and core. Create test file and count it's checksum."):
        cache, core, md5_before_load, size_before_load, permissions_before_load = \
            prepare_with_file_creation(cache_mode)

    for _ in TestRun.iteration(range(0, iterations_per_config),
                               f"Stop and load cache {iterations_per_config} times."):
        with TestRun.step("Stop cache."):
            cache.stop()
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 0:
                TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.")
        with TestRun.step("Load cache."):
            cache = casadm.load_cache(cache.cache_device)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(f"Expected cores count: 1; Actual cores count: {cores_count}.")

    with TestRun.step("Check md5 of test file."):
        check_files(core, size_before_load, permissions_before_load, md5_before_load)
    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
Example no. 8
def base_prepare(item):
    with TestRun.LOGGER.step("Cleanup before test"):
        Udev.enable()
        kill_all_io()

        if installer.check_if_installed():
            try:
                unmount_cas_devices()
                casadm.stop_all_caches()
            except Exception:
                pass  # TODO: Reboot DUT if test is executed remotely

        for disk in TestRun.dut.disks:
            disk.umount_all_partitions()
            if not create_partition_table(disk, PartitionTable.gpt):
                TestRun.exception(f"Failed to remove partitions from {disk}")

        if (get_force_param(item)
                and not TestRun.plugins['opencas'].already_updated):
            installer.reinstall_opencas()
        elif not installer.check_if_installed():
            installer.install_opencas()
        TestRun.plugins['opencas'].already_updated = True
        from api.cas import init_config
        init_config.create_default_init_config()
        TestRun.LOGGER.add_build_info('Commit hash:')
        TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
        TestRun.LOGGER.add_build_info('Commit message:')
        TestRun.LOGGER.add_build_info(f'{git.get_current_commit_message()}')
Example no. 9
def base_prepare(item):
    with TestRun.LOGGER.step("Cleanup before test"):
        TestRun.executor.run("pkill --signal=SIGKILL fsck")
        Udev.enable()
        kill_all_io()
        DeviceMapper.remove_all()

        if installer.check_if_installed():
            try:
                from api.cas.init_config import InitConfig
                InitConfig.create_default_init_config()
                unmount_cas_devices()
                casadm.stop_all_caches()
                casadm.remove_all_detached_cores()
            except Exception:
                pass  # TODO: Reboot DUT if test is executed remotely

        for disk in TestRun.dut.disks:
            disk.umount_all_partitions()
            create_partition_table(disk, PartitionTable.gpt)

        if get_force_param(item) and not TestRun.usr.already_updated:
            installer.rsync_opencas_sources()
            installer.reinstall_opencas()
        elif not installer.check_if_installed():
            installer.rsync_opencas_sources()
            installer.set_up_opencas()
        TestRun.usr.already_updated = True
        TestRun.LOGGER.add_build_info('Commit hash:')
        TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
        TestRun.LOGGER.add_build_info('Commit message:')
        TestRun.LOGGER.add_build_info(f'{git.get_current_commit_message()}')
Example no. 10
def test_cleaning_policies_in_write_back(cleaning_policy):
    """
        title: Test for cleaning policy operation in Write-Back cache mode.
        description: |
          Check if ALRU, NOP and ACP cleaning policies preserve their
          parameters when changed and if they flush dirty data properly
          in Write-Back cache mode.
        pass_criteria:
          - Flush parameters preserve their values when changed.
          - Dirty data is flushed or not according to the policy used.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()
        Udev.disable()

    with TestRun.step(
            f"Start cache in Write-Back mode with {cleaning_policy} cleaning policy"
    ):
        cache = casadm.start_cache(cache_dev.partitions[0],
                                   CacheMode.WB,
                                   force=True)
        set_cleaning_policy_and_params(cache, cleaning_policy)

    with TestRun.step("Check for running CAS cleaner"):
        if TestRun.executor.run(
                f"pgrep {cas_cleaner_process_name}").exit_code != 0:
            TestRun.fail("CAS cleaner process is not running!")

    with TestRun.step(f"Add {cores_count} cores to the cache"):
        core = []
        for i in range(cores_count):
            core.append(cache.add_core(core_dev.partitions[i]))

    with TestRun.step("Run 'fio'"):
        fio = fio_prepare()
        for i in range(cores_count):
            fio.add_job().target(core[i].path)
        fio.run()
        time.sleep(3)
        core_writes_before_wait_for_cleaning = (
            cache.get_statistics().block_stats.core.writes)

    with TestRun.step(f"Wait {time_to_wait} seconds"):
        time.sleep(time_to_wait)

    with TestRun.step("Check write statistics for core device"):
        core_writes_after_wait_for_cleaning = (
            cache.get_statistics().block_stats.core.writes)
        check_cleaning_policy_operation(
            cleaning_policy,
            core_writes_before_wait_for_cleaning,
            core_writes_after_wait_for_cleaning,
        )

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
        Udev.enable()
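
# A minimal sketch of the kind of comparison check_cleaning_policy_operation() above
# might perform, assuming a CleaningPolicy enum with alru/nop/acp members and that only
# NOP is expected to leave dirty data unflushed. This is a hypothetical reimplementation;
# the real helper's thresholds and policy handling may differ.
def check_cleaning_policy_operation(cleaning_policy, writes_before, writes_after):
    flushed = writes_after > writes_before
    if cleaning_policy == CleaningPolicy.nop and flushed:
        TestRun.fail("NOP cleaning policy should not flush dirty data on its own.")
    if cleaning_policy in (CleaningPolicy.alru, CleaningPolicy.acp) and not flushed:
        TestRun.fail(f"{cleaning_policy} cleaning policy did not flush dirty data "
                     f"within {time_to_wait} seconds.")
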
Example no. 11
def prepare_and_cleanup(request):
    """
    This fixture returns a dictionary which contains the DUT IP, IPMI, spider and list of disks.
    It also returns the command executor.
    """

    # A DUT config file should be added to the config package and
    # pytest should be executed with the option --dut-config=<conf_name>.
    #
    # The 'ip' field should be filled with a valid IP string to use the remote SSH
    # executor, or commented out to execute the tests on the local machine.
    #
    # The user can also have their own test wrapper which runs test prepare, cleanup, etc.
    # In that case the path to it should be added in the config/configuration.py file:
    # test_wrapper_dir = 'wrapper_path'
    LOGGER.info(f"**********Test {request.node.name} started!**********")
    try:
        dut_config = importlib.import_module(f"config.{request.config.getoption('--dut-config')}")
    except Exception:
        dut_config = None

    if os.path.exists(c.test_wrapper_dir):
        if hasattr(dut_config, 'ip'):
            try:
                IP(dut_config.ip)
            except ValueError:
                raise Exception("IP address from configuration file is in invalid format.")
        TestProperties.dut = Dut(test_wrapper.prepare(request, dut_config))
    elif dut_config is not None:
        if hasattr(dut_config, 'ip'):
            try:
                IP(dut_config.ip)
                if hasattr(dut_config, 'user') and hasattr(dut_config, 'password'):
                    executor = SshExecutor(dut_config.ip, dut_config.user, dut_config.password)
                    TestProperties.executor = executor
                else:
                    raise Exception("There is no credentials in config file.")
                if hasattr(dut_config, 'disks'):
                    TestProperties.dut = Dut({'ip': dut_config.ip, 'disks': dut_config.disks})
                else:
                    TestProperties.dut = Dut(
                        {'ip': dut_config.ip, 'disks': disk_finder.find_disks()})
            except ValueError:
                raise Exception("IP address from configuration file is in invalid format.")
        elif hasattr(dut_config, 'disks'):
            TestProperties.executor = LocalExecutor()
            TestProperties.dut = Dut({'disks': dut_config.disks})
        else:
            TestProperties.executor = LocalExecutor()
            TestProperties.dut = Dut({'disks': disk_finder.find_disks()})
    else:
        raise Exception(
            "There is neither configuration file nor test wrapper attached to tests execution.")
    yield
    TestProperties.LOGGER.info("Test cleanup")
    casadm.stop_all_caches()
    if os.path.exists(c.test_wrapper_dir):
        test_wrapper.cleanup(TestProperties.dut)
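
# A minimal sketch of a DUT config module of the kind the fixture above imports via
# importlib as "config.<name>" (selected with --dut-config). The attribute names
# ('ip', 'user', 'password', 'disks') come from the hasattr() checks in
# prepare_and_cleanup; all values and the structure of the disk entries are
# hypothetical placeholders.
ip = "192.168.0.10"                # comment out to fall back to the local executor
user = "root"
password = "example-password"
disks = ["/dev/sdb", "/dev/sdc"]   # omit to let disk_finder.find_disks() discover disks
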
Example no. 12
def base_prepare(item):
    with TestRun.LOGGER.step("Cleanup before test"):
        TestRun.executor.run("pkill --signal=SIGKILL fsck")
        Udev.enable()
        kill_all_io()
        DeviceMapper.remove_all()

        if installer.check_if_installed():
            try:
                from api.cas.init_config import InitConfig
                InitConfig.create_default_init_config()
                unmount_cas_devices()
                casadm.stop_all_caches()
                casadm.remove_all_detached_cores()
            except Exception:
                pass  # TODO: Reboot DUT if test is executed remotely

        raids = Raid.discover()
        test_disk_ids = [bd.get_device_id() for bd in TestRun.dut.disks]
        for raid in raids:
            # stop only those RAIDs which are comprised of test disks
            if all(any(disk_id in device.get_device_id()
                       for disk_id in test_disk_ids)
                   for device in raid.array_devices):
                raid.umount_all_partitions()
                raid.remove_partitions()
                raid.stop()
                for device in raid.array_devices:
                    Mdadm.zero_superblock(os.path.join('/dev', device.get_device_id()))
                    Udev.settle()

        for disk in TestRun.dut.disks:
            disk_serial = get_disk_serial_number(disk.path)
            if disk.serial_number != disk_serial:
                raise Exception(
                    f"Serial for {disk.path} doesn't match the one from the config."
                    f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
                )

            disk.umount_all_partitions()
            Mdadm.zero_superblock(os.path.join('/dev', disk.get_device_id()))
            TestRun.executor.run_expect_success("udevadm settle")
            disk.remove_partitions()
            create_partition_table(disk, PartitionTable.gpt)

        if get_force_param(item) and not TestRun.usr.already_updated:
            installer.rsync_opencas_sources()
            installer.reinstall_opencas()
        elif not installer.check_if_installed():
            installer.rsync_opencas_sources()
            installer.set_up_opencas()
        TestRun.usr.already_updated = True
        TestRun.LOGGER.add_build_info('Commit hash:')
        TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
        TestRun.LOGGER.add_build_info('Commit message:')
        TestRun.LOGGER.add_build_info(f'{git.get_current_commit_message()}')
Example no. 13
def test_one_core_release(cache_mode):
    """
        title: Test if OpenCAS dynamically allocates space according to core devices' needs.
        description: |
          When one or more core devices are unused in a single cache instance, all blocks
          previously occupied by them should be available to the other core devices.
          The test does not use pass-through mode.
        pass_criteria:
          - No system crash.
          - The remaining core is able to use cache.
          - OpenCAS frees blocks occupied by the unused core and allocates them to the remaining core.
    """
    with TestRun.step("Prepare two cache and one core devices."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(512, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
        core_part1 = core_dev.partitions[0]
        core_part2 = core_dev.partitions[1]
        Udev.disable()

    with TestRun.step("Start cache"):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")

    with TestRun.step("Add both core devices to cache."):
        core1 = cache.add_core(core_part1)
        core2 = cache.add_core(core_part2)
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 2:
            TestRun.fail(f"Expected cores count: 2; Actual cores count: {cores_count}.")

    with TestRun.step("Change sequential cutoff policy to 'never'."):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Fill cache with pages from the first core."):
        dd_builder(cache_mode, core1, cache.size).run()
        core1_occupied_blocks_before = core1.get_occupancy()

    with TestRun.step("Check if the remaining core is able to use cache."):
        dd_builder(cache_mode, core2, Size(100, Unit.MebiByte)).run()
        core1_occupied_blocks_after = core1.get_occupancy()

    with TestRun.step("Check if occupancy from the first core is removed from cache."):
        # The first core's occupancy should be lower than cache's occupancy
        # by the value of the remaining core's occupancy because cache
        # should reallocate blocks from unused core to used core.
        if core1_occupied_blocks_after >= core1_occupied_blocks_before \
                or cache.get_occupancy() <= core1_occupied_blocks_after \
                or not float(core2.get_occupancy().get_value()) > 0:
            TestRun.LOGGER.error("Blocks previously occupied by the first core aren't released.")

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
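
# A worked illustration of the occupancy check above, with hypothetical numbers
# (Size/Unit as used in the surrounding examples). If core1 initially filled the whole
# 512 MiB cache and core2 then wrote 100 MiB, blocks must have been reclaimed from core1
# for core2's data to fit.
example_core1_before = Size(512, Unit.MebiByte)   # core1 occupancy after filling the cache
example_core1_after = Size(412, Unit.MebiByte)    # core1 occupancy after core2's IO
example_core2 = Size(100, Unit.MebiByte)          # core2 occupancy after its IO
example_cache = Size(512, Unit.MebiByte)          # total cache occupancy stays at capacity

assert example_core1_after < example_core1_before      # blocks were reclaimed from core1
assert example_cache > example_core1_after             # the cache also holds core2's data
assert float(example_core2.get_value()) > 0            # core2 actually used the cache
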
Example no. 14
def test_load_cache_with_mounted_core(cache_mode):
    """
        title: Fault injection test for adding mounted core on cache load.
        description: |
          Negative test of the ability of CAS to add a mounted core device
          to the cache while the cache is being loaded.
        pass_criteria:
          - No system crash while loading cache.
          - Adding mounted core while loading cache fails.
    """
    with TestRun.step("Prepare cache and core devices. Start CAS."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Add core device with xfs filesystem to cache and mount it."):
        core_part.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step(f"Create test file in mount point of exported object and check its md5 sum."):
        test_file = fs_utils.create_random_test_file(test_file_path)
        test_file_md5_before = test_file.md5sum()

    with TestRun.step("Unmount core device."):
        core.unmount()

    with TestRun.step("Stop cache."):
        cache.stop()
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 0:
            TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.")

    with TestRun.step("Mount core device."):
        core_part.mount(mount_point)

    with TestRun.step("Try to load cache."):
        cache = casadm.load_cache(cache.cache_device)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(f"Expected caches count: 1 Actual caches count: {caches_count}.")
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 0:
            TestRun.fail(f"Expected cores count: 0; Actual cores count: {cores_count}.")

    with TestRun.step("Check md5 sum of test file again."):
        if test_file_md5_before != test_file.md5sum():
            TestRun.LOGGER.error("Md5 sum of test file is different.")
        core_part.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example no. 15
def test_purge(purge_target):
    """
        title: Call purge without and with `--script` switch
        description: |
          Check if purge is called only when `--script` switch is used.
        pass_criteria:
          - casadm returns an error when `--script` is missing
          - cache is wiped when purge command is used properly
    """
    with TestRun.step("Prepare devices"):
        cache_device = TestRun.disks["cache"]
        core_device = TestRun.disks["core"]

        cache_device.create_partitions([Size(500, Unit.MebiByte)])
        core_device.create_partitions([Size(500, Unit.MebiByte)])

        cache_device = cache_device.partitions[0]
        core_device = core_device.partitions[0]

    with TestRun.step("Prepare cache instance"):
        cache = casadm.start_cache(cache_device, force=True)
        core = casadm.add_core(cache, core_device)

    with TestRun.step("Trigger IO to prepared cache instance"):
        dd = (Dd().input("/dev/zero").output(core.path).count(100).block_size(
            Size(1, Unit.Blocks512)).oflag("direct"))
        dd.run()
        sync()

    with TestRun.step(
            f"Try to call purge-{purge_target} without `--script` switch"):
        original_occupancy = cache.get_statistics().usage_stats.occupancy
        purge_params = f"--cache-id {cache.cache_id} "
        if purge_target == "core":
            purge_params += f"--core-id {core.core_id}"
        TestRun.executor.run_expect_fail(
            f"casadm --purge-{purge_target} {purge_params}")

        if cache.get_statistics().usage_stats.occupancy != original_occupancy:
            TestRun.fail(
                f"Purge {purge_target} should not be possible to use without `--script` switch!"
            )

    with TestRun.step(
            f"Try to call purge-{purge_target} with `--script` switch"):
        TestRun.executor.run_expect_success(
            f"casadm --script --purge-{purge_target} {purge_params}")

        if cache.get_statistics().usage_stats.occupancy.get_value() != 0:
            TestRun.fail(
                f"Purge {purge_target} should invalidate all cache lines! Cache occupancy: "
                f"{cache.get_statistics().usage_stats.occupancy.get_value()}")

    with TestRun.step(f"Stop cache"):
        casadm.stop_all_caches()
Example no. 16
def base_prepare():
    LOGGER.info("Base test prepare")
    LOGGER.info(f"DUT info: {TestProperties.dut}")
    LOGGER.info("Removing partitions")
    for disk in TestProperties.dut.disks:
        disk_utils.remove_partitions(disk)
    if get_force_param() != "False" and not hasattr(c, "already_updated"):
        installer.reinstall_opencas()
    elif not installer.check_if_installed():
        installer.install_opencas()
    c.already_updated = True  # to skip reinstall every test
    casadm.stop_all_caches()
Example no. 17
def test_stress_add_remove_core(cache_mode):
    """
        title: Stress test for adding and removing core.
        description: Validate the ability of CAS to add and remove core in the loop.
        pass_criteria:
          - No system crash while adding and removing core.
          - Core is added and removed successfully.
          - No data corruption.
    """
    with TestRun.step(
            "Prepare cache and core. Create test file and count its checksum."
    ):
        cache, core, file, file_md5sum_before = prepare_with_file_creation(
            cache_mode)

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Add and remove core {iterations_per_config} times."):
        with TestRun.step("Remove core."):
            core.remove_core()
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Expected caches count: 1; Actual caches count: {caches_count}."
                )
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 0:
                TestRun.fail(
                    f"Expected cores count: 0; Actual cores count: {cores_count}."
                )
        with TestRun.step("Add core."):
            core = cache.add_core(core.core_device)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Expected caches count: 1; Actual caches count: {caches_count}."
                )
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(
                    f"Expected cores count: 1; Actual cores count: {cores_count}."
                )

    with TestRun.step("Check md5 sum of test file."):
        core.mount(mount_point)
        file_md5sum_after = file.md5sum()
        if file_md5sum_after != file_md5sum_before:
            TestRun.LOGGER.error("Md5 sum of test file is different.")
        core.unmount()

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
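
# A minimal sketch of what a prepare_with_file_creation() helper used above could do,
# composed only of calls shown elsewhere in these examples (partitioning, start_cache,
# add_core, filesystem creation, random test file and md5sum). Partition sizes,
# mount_point and test_file_path are assumptions; the real helper may differ.
def prepare_with_file_creation(cache_mode):
    cache_dev = TestRun.disks['cache']
    cache_dev.create_partitions([Size(1, Unit.GibiByte)])
    core_dev = TestRun.disks['core']
    core_dev.create_partitions([Size(2, Unit.GibiByte)])
    cache = casadm.start_cache(cache_dev.partitions[0], cache_mode, force=True)
    core_part = core_dev.partitions[0]
    core_part.create_filesystem(Filesystem.xfs)
    core = cache.add_core(core_part)
    core.mount(mount_point)
    test_file = fs_utils.create_random_test_file(test_file_path)
    test_file_md5sum_before = test_file.md5sum()
    core.unmount()
    return cache, core, test_file, test_file_md5sum_before
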
Example no. 18
def test_stress_service(cache_mode):
    """
        title: Stress test for starting and stopping CAS service.
        description: |
          Validate the ability of CAS to restart CAS service
          and load CAS device in the loop.
        pass_criteria:
          - No system crash while restarting CAS service or loading cache.
          - CAS service restarts with no errors.
          - CAS device loads successfully.
    """
    with TestRun.step("Prepare cache and core."):
        cache_dev, core_dev = prepare()
    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        casadm.add_core(cache, core_dev)

    for _ in TestRun.iteration(
            range(0, iterations_per_config),
            f"Stop and start CAS service {iterations_per_config} times."):
        with TestRun.step(
                "Create CAS init config based on current running CAS configuration."
        ):
            InitConfig.create_init_config_from_running_configuration()
        with TestRun.step("Stop CAS service."):
            casctl.stop()
        with TestRun.step("Check if service stopped successfully."):
            if len(casadm_parser.get_caches()) != 0:
                TestRun.fail(
                    "There are still running caches after stopping service.")
            if len(casadm_parser.get_cores(cache.cache_id)) != 0:
                TestRun.fail(
                    "There are still running cores after stopping service.")
        with TestRun.step("Start CAS service."):
            casctl.start()
            time.sleep(1)  # Time for CAS devices to start
        with TestRun.step("Check if CAS configuration loaded successfully."):
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(
                    f"Expected caches count: 1; Actual caches count: {caches_count}."
                )
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(
                    f"Expected cores count: 1; Actual cores count: {cores_count}."
                )

    with TestRun.step("Stop caches and create default init config file."):
        casadm.stop_all_caches()
        InitConfig.create_default_init_config()
Example no. 19
def test_cache_mode_switching_during_io(cache_mode_1, cache_mode_2, flush, io_mode):
    """
        title: Test for dynamic cache mode switching during IO.
        description: |
          Validate the ability of CAS to switch cache modes
          during working IO on CAS device.
        pass_criteria:
          - Cache mode is switched without errors.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()

    with TestRun.step(f"Start cache in {cache_mode_1} mode"):
        cache = casadm.start_cache(cache_dev, cache_mode_1, force=True)

    with TestRun.step("Add core to the cache"):
        core = cache.add_core(core_dev)

    with TestRun.step("Run 'fio'"):
        fio = (
            fio_prepare(core, io_mode)
            .verify(VerifyMethod.sha1)
            .run_time(timedelta(minutes=4))
            .time_based()
        )
        fio_pid = fio.run_in_background()
        time.sleep(5)

    with TestRun.step(
        f"Change cache mode to {cache_mode_2} with flush cache option set to: {flush}"
    ):
        cache_mode_switch_output = cache.set_cache_mode(cache_mode_2, flush)
        if cache_mode_switch_output.exit_code != 0:
            TestRun.fail("Cache mode switch failed!")

    with TestRun.step(f"Check if cache mode has switched properly during IO"):
        cache_mode_after_switch = cache.get_cache_mode()
        if cache_mode_after_switch != cache_mode_2:
            TestRun.fail(
                f"Cache mode did not switch properly! "
                f"Cache mode after switch is: {cache_mode_after_switch}, "
                f"should be: {cache_mode_2}"
            )

    with TestRun.step("Stop 'fio'"):
        TestRun.executor.kill_process(fio_pid)

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()
Example no. 20
def test_add_cached_core(cache_mode):
    """
        title: Fault injection test for adding already used core to a cache.
        description: |
          Negative test of the ability to add the same core device twice to one cache instance
          and to add a core device which is already used by another cache instance.
        pass_criteria:
          - No system crash.
          - Adding already used core to another cache instance fails.
          - The same core device cannot be used twice in one cache instance.
    """
    with TestRun.step("Prepare two caches and one core device."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions(
            [Size(2, Unit.GibiByte),
             Size(2, Unit.GibiByte)])
        cache_part1 = cache_dev.partitions[0]
        cache_part2 = cache_dev.partitions[1]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]

    with TestRun.step("Start the first cache instance"):
        cache1 = casadm.start_cache(cache_part1, cache_mode, force=True)

    with TestRun.step("Add core device to first cache instance."):
        core = cache1.add_core(core_part)

    with TestRun.step("Start the second cache instance"):
        cache2 = casadm.start_cache(cache_part2, cache_mode, force=True)

    with TestRun.step(
            "Try adding the same core device to the second cache instance."):
        output = TestRun.executor.run_expect_fail(
            cli.add_core_cmd(cache_id=str(cache2.cache_id),
                             core_dev=str(core_part.system_path),
                             core_id=str(core.core_id)))
        cli_messages.check_msg(output, cli_messages.add_cached_core)

    with TestRun.step(
            "Try adding the same core device to the same cache for the second time."
    ):
        output = TestRun.executor.run_expect_fail(
            cli.add_core_cmd(cache_id=str(cache1.cache_id),
                             core_dev=str(core_part.system_path)))
        cli_messages.check_msg(output, cli_messages.add_cached_core)

    with TestRun.step("Stop caches."):
        casadm.stop_all_caches()
Example no. 21
def test_stress_reload_module(cache_mode):
    """
        title: Stress test for reloading CAS modules.
        description: Validate the ability of CAS to reload modules in the loop.
        pass_criteria:
          - No system crash while reloading CAS modules.
          - CAS modules reload with no errors.
          - No data corruption.
    """
    with TestRun.step("Prepare cache and core. Create test file and count its checksum."):
        cache, core, file, file_md5sum_before = prepare_with_file_creation(cache_mode)

    with TestRun.step("Save current cache configuration."):
        cache_config = cache.get_cache_config()

    for _ in TestRun.iteration(range(0, iterations_per_config),
                               f"Reload CAS modules and check loaded "
                               f"cache configuration {iterations_per_config} times."):
        with TestRun.step("Stop cache."):
            cache.stop()
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 0:
                TestRun.fail(f"Expected caches count: 0; Actual caches count: {caches_count}.")
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 0:
                TestRun.fail(f"Expected cores count: 0; Actual cores count: {cores_count}.")
        with TestRun.step("Reload CAS modules."):
            cas_module.reload_all_cas_modules()
        with TestRun.step("Load cache."):
            cache = casadm.load_cache(cache.cache_device)
            caches_count = len(casadm_parser.get_caches())
            if caches_count != 1:
                TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")
            cores_count = len(casadm_parser.get_cores(cache.cache_id))
            if cores_count != 1:
                TestRun.fail(f"Expected cores count: 1; Actual cores count: {cores_count}.")
        with TestRun.step("Validate cache configuration."):
            if cache.get_cache_config() != cache_config:
                TestRun.fail("Cache configuration is different than before reloading modules.")

    with TestRun.step("Check md5 sum of test file."):
        core.mount(mount_point)
        file_md5sum_after = file.md5sum()
        if file_md5sum_after != file_md5sum_before:
            TestRun.LOGGER.error("Md5 sum of test file is different.")
        core.unmount()

    with TestRun.step("Stop all caches."):
        casadm.stop_all_caches()
Example no. 22
def prepare_and_cleanup(request):
    """
    This fixture returns a dictionary which contains the DUT IP, IPMI, spider and list of disks.
    It also returns the command executor.
    """

    # A DUT config file should be added to the config package and
    # pytest should be executed with the option --dut-config=<conf_name>.
    #
    # The 'ip' field should be filled with a valid IP string to use the remote SSH
    # executor, or commented out to execute the tests on the local machine.
    #
    # The user can also have their own test wrapper which runs test prepare, cleanup, etc.
    # In that case the path to it should be added in the config/configuration.py file:
    # test_wrapper_dir = 'wrapper_path'

    try:
        with open(request.config.getoption('--dut-config')) as cfg:
            dut_config = yaml.safe_load(cfg)
    except Exception:
        dut_config = {}

    if 'test_wrapper' in sys.modules:
        if 'ip' in dut_config:
            try:
                IP(dut_config['ip'])
            except ValueError:
                raise Exception(
                    "IP address from configuration file is in invalid format.")
        dut_config = test_wrapper.prepare(request.param, dut_config)

    TestRun.prepare(dut_config)

    TestRun.plugins['opencas'] = {'already_updated': False}

    TestRun.LOGGER.info(
        f"**********Test {request.node.name} started!**********")
    yield

    TestRun.LOGGER.info("Test cleanup")
    Udev.enable()
    unmount_cas_devices()
    casadm.stop_all_caches()
    if 'test_wrapper' in sys.modules:
        test_wrapper.cleanup()
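
# A minimal sketch of the YAML DUT config that the fixture above loads with
# yaml.safe_load. Only the 'ip' key is referenced in the code above; 'user',
# 'password' and 'disks' are assumptions about what TestRun.prepare() may expect
# and can differ between framework versions.
import yaml

example_dut_yaml = """
ip: 192.168.0.10
user: root
password: example-password
disks:
  - path: /dev/sdb
    type: hdd
"""
dut_config = yaml.safe_load(example_dut_yaml)
assert "ip" in dut_config and "disks" in dut_config
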
Example no. 23
def test_stats_values():
    """
        title: Check for proper statistics values.
        description: |
          Check if CAS displays proper usage, request, block and error statistics values
          for core devices in every cache mode - at the start, after IO and after cache
          reload. Also check if core's statistics match cache's statistics.
        pass_criteria:
          - Usage, request, block and error statistics have proper values.
          - Core's statistics match cache's statistics.
    """

    with TestRun.step("Partition cache and core devices"):
        cache_dev, core_dev = storage_prepare()
        Udev.disable()

    with TestRun.step(
        f"Start {caches_count} caches (one for every cache mode) "
        f"and add {cores_per_cache} cores per cache"
    ):
        caches, cores = cache_prepare(cache_dev, core_dev)

    with TestRun.step("Check initial statistics values for each core"):
        check_stats_initial(caches, cores)

    with TestRun.step("Run 'fio'"):
        fio = fio_prepare()
        for i in range(caches_count):
            for j in range(cores_per_cache):
                fio.add_job().target(cores[i][j].path)
        fio.run()
        sleep(3)

    with TestRun.step("Check statistics values after IO"):
        check_stats_after_io(caches, cores)

    with TestRun.step("Check if cache's statistics match core's statistics"):
        check_stats_sum(caches, cores)

    with TestRun.step("Stop and load caches back"):
        casadm.stop_all_caches()
        caches = cache_load(cache_dev)

    with TestRun.step("Check statistics values after reload"):
        check_stats_after_io(caches, cores, after_reload=True)
Example no. 24
def test_load_corrupted():
    """
    title: Standby-load corrupted metadata
    description: |
      Try to load standby instance from corrupted metadata
    pass_criteria:
      - Kernel panic doesn't occur
    """
    with TestRun.step("Prepare devices for the cache and core."):
        cache_device = TestRun.disks["cache"]
        cache_device.create_partitions([Size(200, Unit.MebiByte)])
        cache_device = cache_device.partitions[0]
        core_device = TestRun.disks["core"]
        core_device.create_partitions([Size(500, Unit.MebiByte)])
        core_device = core_device.partitions[0]

    with TestRun.step("Prepare metadata dump"):
        cache_id = 1
        cls = CacheLineSize.LINE_32KiB
        md_dump = prepare_md_dump(cache_device, core_device, cls, cache_id)

    for offset in get_offsets_to_corrupt(md_dump.size, block_size):

        with TestRun.step(
                f"Corrupt {block_size} on the offset {offset*block_size}"):
            corrupted_md = prepare_corrupted_md(md_dump, offset, block_size)

        with TestRun.step(
                f"Copy corrupted metadata to the cache-to-be device"):
            Dd().input(corrupted_md.full_path).output(cache_device.path).run()
            sync()

        with TestRun.step("Try to load cache instance"):
            output = TestRun.executor.run(
                standby_load_cmd(cache_dev=cache_device.path))

        with TestRun.step("Per iteration cleanup"):
            if output.exit_code:
                casadm.stop_all_caches()
            corrupted_md.remove(force=True, ignore_errors=True)

    with TestRun.step("Test cleanup"):
        md_dump.remove()
Example no. 25
def test_stop_cache_with_mounted_partition(cache_mode):
    """
        title: Fault injection test for removing core and stopping cache with mounted core.
        description: |
          Negative test of the ability of CAS to remove core and stop cache while core
          is still mounted.
        pass_criteria:
          - No system crash.
          - Unable to stop cache when partition is mounted.
          - Unable to remove core when partition is mounted.
    """
    with TestRun.step("Prepare cache and core devices. Start CAS."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(4, Unit.GibiByte)])
        core_part = core_dev.partitions[0]
        cache = casadm.start_cache(cache_part, cache_mode, force=True)

    with TestRun.step("Add core device with xfs filesystem and mount it."):
        core_part.create_filesystem(Filesystem.xfs)
        core = cache.add_core(core_part)
        core.mount(mount_point)

    with TestRun.step("Try to remove core from cache."):
        output = TestRun.executor.run_expect_fail(
            cli.remove_core_cmd(cache_id=str(cache.cache_id),
                                core_id=str(core.core_id)))
        cli_messages.check_stderr_msg(output, cli_messages.remove_mounted_core)

    with TestRun.step("Try to stop CAS."):
        output = TestRun.executor.run_expect_fail(
            cli.stop_cmd(cache_id=str(cache.cache_id)))
        cli_messages.check_stderr_msg(output,
                                      cli_messages.stop_cache_mounted_core)

    with TestRun.step("Unmount core device."):
        core.unmount()

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example no. 26
def test_remove_multilevel_core():
    """
        title: Test of the ability to remove a core used in a multilevel cache.
        description: |
          Negative test checking that OpenCAS does not allow removing a core when the related
          exported object is used as a core device by another cache instance.
        pass_criteria:
          - No system crash.
          - OpenCAS does not allow removing a core used in a multilevel cache instance.
    """
    with TestRun.step("Prepare two devices for cache and one for core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(512, Unit.MebiByte)] * 2)
        cache_part1 = cache_dev.partitions[0]
        cache_part2 = cache_dev.partitions[1]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(1, Unit.GibiByte)])
        core_dev = core_dev.partitions[0]

    with TestRun.step("Start the first cache instance"):
        cache1 = casadm.start_cache(cache_part1, force=True)

    with TestRun.step("Add a core device to the first cache instance."):
        core1 = cache1.add_core(core_dev)

    with TestRun.step("Start the second cache instance"):
        cache2 = casadm.start_cache(cache_part2, force=True)

    with TestRun.step("Add the first cache's exported object as a core "
                      "to the second cache instance."):
        cache2.add_core(core1)

    with TestRun.step("Try to remove core from the first level cache."):
        output = TestRun.executor.run_expect_fail(
            cli.remove_core_cmd(cache_id=str(cache1.cache_id),
                                core_id=str(core1.core_id),
                                force=True))
        cli_messages.check_stderr_msg(output,
                                      cli_messages.remove_multilevel_core)

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()
Example no. 27
def base_prepare():
    LOGGER.info("Base test prepare")
    LOGGER.info(f"DUT info: {TestProperties.dut}")

    if installer.check_if_installed():
        try:
            unmount_cas_devices()
            casadm.stop_all_caches()
        except Exception:
            pass  # TODO: Reboot DUT if test is executed remotely
    for disk in TestProperties.dut.disks:
        if disk.is_mounted():
            disk.unmount()
        disk.remove_partitions()

    if get_force_param() != "False" and not hasattr(c, "already_updated"):
        installer.reinstall_opencas()
    elif not installer.check_if_installed():
        installer.install_opencas()
    c.already_updated = True  # to skip reinstall every test
Example no. 28
def test_cas_init_with_changed_mode(cache_mode_pair):
    """
    title: Check starting cache in another cache mode by initializing OpenCAS service from config.
    description: |
      Start cache, create config based on running configuration but with another cache mode,
      reinitialize OpenCAS service with '--force' option and check if cache defined
      in config file starts properly.
      Check all cache modes.
    pass_criteria:
      - Cache starts with attached core
      - Cache starts in mode saved in configuration file.
    """
    with TestRun.step("Prepare partitions for cache and core."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(200, Unit.MebiByte)])
        cache_part = cache_dev.partitions[0]
        core_dev = TestRun.disks['core']
        core_dev.create_partitions([Size(400, Unit.MebiByte)])
        core_part = core_dev.partitions[0]

    with TestRun.step(
            f"Start cache in the {cache_mode_pair[0]} mode and add core."):
        cache = casadm.start_cache(cache_part, cache_mode_pair[0], force=True)
        core = cache.add_core(core_part)

    with TestRun.step(
            f"Create the configuration file with a different cache mode ({cache_mode_pair[1]})"
    ):
        init_conf = InitConfig()
        init_conf.add_cache(cache.cache_id, cache.cache_device,
                            cache_mode_pair[1])
        init_conf.add_core(cache.cache_id, core.core_id, core.core_device)
        init_conf.save_config_file()

    with TestRun.step("Reinitialize OpenCAS service with '--force' option."):
        casadm.stop_all_caches()
        casctl.init(True)

    with TestRun.step(
            "Check if cache started in correct mode with core attached."):
        validate_cache(cache_mode_pair[1])
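
# A minimal sketch of what the (not shown) validate_cache() helper could verify,
# derived from the pass criteria above and the casadm_parser/Cache calls used in the
# other examples; the actual helper may check more or differ in detail.
def validate_cache(cache_mode):
    caches = casadm_parser.get_caches()
    if len(caches) != 1:
        TestRun.fail(f"Expected caches count: 1; Actual caches count: {len(caches)}.")
    cache = caches[0]
    if cache.get_cache_mode() != cache_mode:
        TestRun.fail(f"Cache started in {cache.get_cache_mode()} mode "
                     f"instead of {cache_mode}.")
    cores = casadm_parser.get_cores(cache.cache_id)
    if len(cores) != 1:
        TestRun.fail(f"Expected cores count: 1; Actual cores count: {len(cores)}.")
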
Example no. 29
def base_prepare():
    TestRun.LOGGER.info("Base test prepare")
    TestRun.LOGGER.info(f"DUT info: {TestRun.dut}")

    Udev.enable()

    kill_all_io()

    if installer.check_if_installed():
        try:
            unmount_cas_devices()
            casadm.stop_all_caches()
        except Exception:
            pass  # TODO: Reboot DUT if test is executed remotely

    if (get_force_param() != "False"
            and not TestRun.plugins['opencas']['already_updated']):
        installer.reinstall_opencas()
    elif not installer.check_if_installed():
        installer.install_opencas()
    TestRun.plugins['opencas']['already_updated'] = True
Example no. 30
def test_another_core_with_same_id():
    """
        title: Test for creating another core device with the same ID.
        description: |
          Checking if adding another core device and setting
          the same core ID as the previous one fails.
        pass_criteria:
          - No additional core device added.
    """
    with TestRun.step("Start cache device"):
        cache_dev = TestRun.disks["cache"]
        cache_dev.create_partitions([Size(2, Unit.GibiByte)])
        cache = casadm.start_cache(cache_dev.partitions[0], force=True)

    with TestRun.step("Add core with ID = 1"):
        core_dev_1 = TestRun.disks["core_1"]
        core_dev_1.create_partitions([Size(1, Unit.GibiByte)])
        TestRun.executor.run_expect_success(
            cli.add_core_cmd(
                cache_id=f"{cache.cache_id}",
                core_dev=f"{core_dev_1.partitions[0].path}",
                core_id="1",
            )
        )

    with TestRun.step("Try to add another core with the same ID = 1"):
        core_dev_2 = TestRun.disks["core_2"]
        core_dev_2.create_partitions([Size(1, Unit.GibiByte)])
        TestRun.executor.run_expect_fail(
            cli.add_core_cmd(
                cache_id=f"{cache.cache_id}",
                core_dev=f"{core_dev_2.partitions[0].path}",
                core_id="1",
            )
        )

    with TestRun.step("Stop all caches"):
        casadm.stop_all_caches()