def test_raid_example():
    """
    title: Example test using RAID API.
    description: Create and discover RAID volumes.
    pass_criteria:
      - RAID created.
      - RAID discovered.
    """
    with TestRun.step("Prepare"):
        # The two backing disks for the mirror come from the test config.
        disk_a = TestRun.disks['cache1']
        disk_b = TestRun.disks['cache2']

    with TestRun.step("Create RAID"):
        raid_config = RaidConfiguration(
            level=Level.Raid1,
            metadata=MetadataVariant.Imsm,
            number_of_devices=2,
            size=Size(20, Unit.GiB),
        )
        created_raid = Raid.create(raid_config, [disk_a, disk_b])

    with TestRun.group("Discover RAIDs"):
        discovered_raids = Raid.discover()

    with TestRun.group("Check if created RAID was discovered"):
        if created_raid not in discovered_raids:
            TestRun.LOGGER.error("Created RAID not discovered in system!")
def test_many_cores_raid_as_cache(cache_mode):
    """
    title: Test if CAS is working with many core devices using RAID0 as cache device.
    description: |
      Test if CAS is working properly with many core devices using RAID0 as cache device
      and verification of data integrity of files copied to cores.
    pass_criteria:
      - No system crash.
      - Successful creation of RAID0 and using it as cache for CAS device
      - Successful addition of first and second core to CAS device
      - Successful creation and copy files to each core and verification of theirs md5sum.
    """
    with TestRun.step("Create cache with RAID0 as caching device."):
        # One 2 GiB partition on each of the two cache disks backs the RAID0;
        # the RAID volume itself is capped at 1 GiB by the configuration.
        cache_disk_1 = TestRun.disks['cache1']
        cache_disk_1.create_partitions([Size(2, Unit.GibiByte)])
        raid_part_1 = cache_disk_1.partitions[0]
        cache_disk_2 = TestRun.disks['cache2']
        cache_disk_2.create_partitions([Size(2, Unit.GibiByte)])
        raid_part_2 = cache_disk_2.partitions[0]
        config = RaidConfiguration(
            level=Level.Raid0,
            metadata=MetadataVariant.Legacy,
            number_of_devices=2,
            size=Size(1, Unit.GiB))
        raid_volume = Raid.create(config, [raid_part_1, raid_part_2])
        # Fixed: dropped extraneous f-string prefix (message has no placeholders).
        TestRun.LOGGER.info("RAID created successfully.")
        cache = casadm.start_cache(raid_volume, cache_mode, force=True)

    with TestRun.step("Add core device to cache, create filesystem and mount it."):
        core_disk1 = TestRun.disks['core1']
        core_disk1.create_partitions([Size(2, Unit.GibiByte)])
        core_dev1 = core_disk1.partitions[0]
        core1 = cache.add_core(core_dev1)
        core1.create_filesystem(Filesystem.ext3)
        core1.mount(mount_point)

    with TestRun.step("Add second core device to cache, create filesystem and mount it."):
        core_disk2 = TestRun.disks['core2']
        core_disk2.create_partitions([Size(2, Unit.GibiByte)])
        core_dev2 = core_disk2.partitions[0]
        core2 = cache.add_core(core_dev2)
        core2.create_filesystem(Filesystem.ext3)
        core2.mount(mount_point2)

    with TestRun.step("Create files with checksum on first core."):
        core1_md5sums = create_files_with_md5sums(mount_point, number_of_files)

    with TestRun.step("Create files with checksum on second core."):
        core2_md5sums = create_files_with_md5sums(mount_point2, number_of_files)

    with TestRun.step("Compare checksum on first core."):
        compare_md5sums(core1_md5sums, mount_point)

    with TestRun.step("Compare checksum on second core."):
        compare_md5sums(core2_md5sums, mount_point2)
def base_prepare(item):
    """Clean up the DUT before a test and (re)install Open CAS if required.

    Stops caches, tears down test-disk RAIDs, wipes partitions/superblocks on
    all configured disks, then installs or updates Open CAS depending on the
    force parameter and current install state.

    :param item: test item, forwarded to get_force_param() to decide reinstall.
    :raises Exception: if a disk's actual serial number does not match the
        serial recorded in the test configuration.
    """
    with TestRun.LOGGER.step("Cleanup before test"):
        TestRun.executor.run("pkill --signal=SIGKILL fsck")
        Udev.enable()
        kill_all_io()
        DeviceMapper.remove_all()

        if installer.check_if_installed():
            try:
                from api.cas.init_config import InitConfig
                InitConfig.create_default_init_config()
                unmount_cas_devices()
                casadm.stop_all_caches()
                casadm.remove_all_detached_cores()
            except Exception:
                # Deliberate best-effort cleanup: a failure here (e.g. nothing
                # to stop) must not abort test preparation.
                pass
        # TODO: Reboot DUT if test is executed remotely

        # Hoisted out of the loops: the set of test-disk ids is invariant,
        # the original rebuilt it for every array device of every RAID.
        test_disk_ids = [bd.get_device_id() for bd in TestRun.dut.disks]
        for raid in Raid.discover():
            # stop only those RAIDs, which are comprised of test disks
            if all(any(disk_id in device.get_device_id() for disk_id in test_disk_ids)
                   for device in raid.array_devices):
                raid.umount_all_partitions()
                raid.remove_partitions()
                raid.stop()
                for device in raid.array_devices:
                    Mdadm.zero_superblock(os.path.join('/dev', device.get_device_id()))
                Udev.settle()

        for disk in TestRun.dut.disks:
            disk_serial = get_disk_serial_number(disk.path)
            if disk.serial_number != disk_serial:
                raise Exception(
                    f"Serial for {disk.path} doesn't match the one from the config."
                    f"Serial from config {disk.serial_number}, actual serial {disk_serial}"
                )
            disk.umount_all_partitions()
            Mdadm.zero_superblock(os.path.join('/dev', disk.get_device_id()))
            TestRun.executor.run_expect_success("udevadm settle")
            disk.remove_partitions()
            create_partition_table(disk, PartitionTable.gpt)

        if get_force_param(item) and not TestRun.usr.already_updated:
            installer.rsync_opencas_sources()
            installer.reinstall_opencas()
        elif not installer.check_if_installed():
            installer.rsync_opencas_sources()
            installer.set_up_opencas()
        TestRun.usr.already_updated = True
        # Fixed: dropped extraneous f-string prefixes on the constant labels.
        TestRun.LOGGER.add_build_info('Commit hash:')
        TestRun.LOGGER.add_build_info(f"{git.get_current_commit_hash()}")
        TestRun.LOGGER.add_build_info('Commit message:')
        TestRun.LOGGER.add_build_info(f'{git.get_current_commit_message()}')
def test_raid_as_cache(cache_mode):
    """
    title: Test if SW RAID1 can be a cache device.
    description: |
      Test if SW RAID1 can be a cache for CAS device.
    pass_criteria:
      - Successful creation of RAID and building CAS device with it.
      - Files copied successfully, the md5sum match the origin one.
    """
    with TestRun.step("Create RAID1."):
        raid_disk = TestRun.disks['cache1']
        raid_disk.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_1 = raid_disk.partitions[0]
        raid_disk2 = TestRun.disks['cache2']
        raid_disk2.create_partitions([Size(2, Unit.GibiByte)])
        raid_disk_2 = raid_disk2.partitions[0]

        config = RaidConfiguration(
            level=Level.Raid1,
            metadata=MetadataVariant.Legacy,
            number_of_devices=2)

        raid_volume = Raid.create(config, [raid_disk_1, raid_disk_2])
        # Fixed: dropped extraneous f-string prefix (no placeholders).
        TestRun.LOGGER.info("RAID created successfully.")

    with TestRun.step("Prepare core device."):
        core_disk = TestRun.disks['core']
        core_disk.create_partitions([Size(2, Unit.GibiByte)])
        core_dev = core_disk.partitions[0]

    with TestRun.step("Create CAS device with RAID1 as cache."):
        cache = casadm.start_cache(raid_volume, cache_mode, force=True)
        core = cache.add_core(core_dev)
        core.create_filesystem(Filesystem.ext3)
        core.mount(mount_point)

    with TestRun.step("Copy files to cache and check md5sum."):
        # Fixed: loop index was unused and range had a redundant 0 start.
        for _ in range(files_number):
            test_file = fs_utils.create_random_test_file(test_file_tmp_path, test_file_size)
            test_file_copied = test_file.copy(test_file_path, force=True)

            if test_file.md5sum() != test_file_copied.md5sum():
                TestRun.LOGGER.error("Checksums are different.")

            fs_utils.remove(test_file.full_path, True)
            fs_utils.remove(test_file_copied.full_path, True)
        TestRun.LOGGER.info("Successful verification.")
def test_udev_raid_core():
    """
    title: CAS udev rule execution for core after recreating RAID device existing in
    configuration file as core.
    description: |
      Verify if CAS udev rule is executed for RAID volume recreated after soft reboot.
    pass_criteria:
      - No kernel error
      - After reboot, the RAID volume is added to the cache instance and is in 'active' state
    """
    with TestRun.step("Test prepare."):
        # Carve one partition per disk; the two core partitions will be
        # assembled into a RAID0 volume below.
        cache_backing_disk = TestRun.disks["cache"]
        cache_backing_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_backing_disk.partitions[0]

        raid_member_disk_1 = TestRun.disks["core"]
        raid_member_disk_1.create_partitions([Size(2, Unit.GibiByte)])
        raid_member_1 = raid_member_disk_1.partitions[0]

        raid_member_disk_2 = TestRun.disks["core2"]
        raid_member_disk_2.create_partitions([Size(2, Unit.GibiByte)])
        raid_member_2 = raid_member_disk_2.partitions[0]

    with TestRun.step("Create RAID0 volume."):
        raid_config = RaidConfiguration(
            level=Level.Raid0,
            metadata=MetadataVariant.Legacy,
            number_of_devices=2,
        )
        core_dev = Raid.create(raid_config, [raid_member_1, raid_member_2])

    with TestRun.step("Start cache and add core."):
        cache = casadm.start_cache(cache_dev, force=True)
        core = cache.add_core(core_dev)

    with TestRun.step("Create init config from running CAS configuration."):
        InitConfig.create_init_config_from_running_configuration()

    with TestRun.step("Reboot system."):
        TestRun.executor.reboot()

    with TestRun.step(
            "Check if core device is active and not in the core pool."):
        check_if_dev_in_core_pool(core_dev, False)
        core_status = core.get_status()
        if core_status != CoreStatus.active:
            TestRun.fail(
                f"Core status is {core_status} instead of active.")