def post_setup(self):
    """Prepare the vdbench working directory after plugin setup.

    Reuses an existing installation unless a reinstall was requested;
    otherwise wipes any stale working directory and copies the vdbench
    sources into a fresh one.
    """
    print("VDBench plugin post setup")
    # Existing installation is good enough unless a reinstall was requested.
    if not self.reinstall and fs_utils.check_if_directory_exists(self.working_dir):
        return
    # Remove leftovers from a previous (possibly partial) installation.
    if fs_utils.check_if_directory_exists(self.working_dir):
        fs_utils.remove(self.working_dir, True, True)
    fs_utils.create_directory(self.working_dir)
    TestRun.LOGGER.info("Copying vdbench to working dir.")
    fs_utils.copy(os.path.join(self.source_dir, "*"), self.working_dir, True, True)
    # Removed dead trailing `pass` — the method body is never empty here.
def mount_ramfs():
    """Mounts ramfs to enable allocating memory space"""
    if not check_if_directory_exists(MEMORY_MOUNT_POINT):
        create_directory(MEMORY_MOUNT_POINT)
    # Nothing to do when something is already mounted at the target.
    if is_mounted(MEMORY_MOUNT_POINT):
        return
    TestRun.executor.run_expect_success(
        f"mount -t ramfs ramfs {MEMORY_MOUNT_POINT}")
def create_temp_directory(parent_dir_path: str = "/tmp"):
    """Create a temporary directory under parent_dir_path via mktemp.

    Returns the created directory wrapped in a Directory object.
    """
    cmd = f"mktemp --directory --tmpdir={parent_dir_path}"
    result = TestRun.executor.run_expect_success(cmd)
    created_path = result.stdout
    # Sanity check: mktemp reported success, so the path should exist.
    if not check_if_directory_exists(created_path):
        TestRun.LOGGER.exception(
            "'mktemp' succeeded, but created directory does not exist")
    return Directory(created_path)
def move(self, destination, force: bool = False):
    """Move this item to destination, update its path and return self."""
    # Capture whether the target is a directory BEFORE the move happens.
    destination_is_dir = fs_utils.check_if_directory_exists(destination)
    fs_utils.move(str(self), destination, force)
    if destination_is_dir:
        # Moved into a directory: the item keeps its own name underneath it.
        self.full_path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
    else:
        # Moved/renamed to an explicit target path.
        self.full_path = destination
    self.refresh_item()
    return self
def mount(device, mount_point):
    """Mount device at mount_point, creating the mount point when missing.

    Raises Exception when the mount command fails; records the mount point
    on the device object on success.
    """
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(
        f"Mounting device {device.system_path} to {mount_point}.")
    mount_result = TestRun.executor.run(f"mount {device.system_path} {mount_point}")
    if mount_result.exit_code != 0:
        raise Exception(
            f"Failed to mount {device.system_path} to {mount_point}")
    device.mount_point = mount_point
def copy(self, destination, force: bool = False, recursive: bool = False,
         dereference: bool = False):
    """Copy this item to destination and return the newly created item."""
    fs_utils.copy(str(self), destination, force, recursive, dereference)
    if fs_utils.check_if_directory_exists(destination):
        # Copied into a directory — the copy keeps this item's name there.
        path = f"{destination}{'/' if destination[-1] != '/' else ''}{self.name}"
    else:
        path = destination
    listing = fs_utils.ls_item(f"{path}")
    return fs_utils.parse_ls_output(listing)[0]
def mount(device, mount_point, options: list = None):
    """Mount device at mount_point, creating the mount point when missing.

    :param device: device to mount; its path and id are used in the command/log
    :param mount_point: directory to mount at (created when missing)
    :param options: optional list of mount option strings, joined with ','
        and passed via `-o`
    :raises Exception: when the mount command fails

    NOTE: the annotation was `options: [str]` — a list literal, not a type;
    replaced with a real annotation (default/behavior unchanged).
    """
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestRun.LOGGER.info(f"Mounting device {device.path} ({device.get_device_id()}) "
                        f"to {mount_point}.")
    cmd = f"mount {device.path} {mount_point}"
    if options:
        cmd = f"{cmd} -o {','.join(options)}"
    output = TestRun.executor.run(cmd)
    if output.exit_code != 0:
        raise Exception(f"Failed to mount {device.path} to {mount_point}")
    device.mount_point = mount_point
def check_for_mem_leaks(cls, module):
    """Inspect kedr leak-checker logs for module and raise when leaks are found.

    Raises Exception when kedr is not installed/loaded, when no log
    directory can be located, or when possible leaks / unallocated frees
    are reported.
    """
    if not cls.is_installed():
        raise Exception("Kedr is not installed!")
    if not cls.is_loaded():
        raise Exception("Kedr is not loaded!")
    module_logs_dir = f"{LEAKS_LOGS_PATH}/{module}"
    if fs_utils.check_if_directory_exists(module_logs_dir):
        logs_path = module_logs_dir
    elif fs_utils.check_if_directory_exists(f"{DEBUGFS_MOUNT_POINT}"):
        # NOTE(review): falls back to the base logs dir when only the debugfs
        # mount point exists — confirm this fallback is intentional.
        logs_path = f"{LEAKS_LOGS_PATH}"
    else:
        raise Exception("Couldn't find kedr logs dir!")
    leaks = fs_utils.read_file(f"{logs_path}/possible_leaks")
    frees = fs_utils.read_file(f"{logs_path}/unallocated_frees")
    summary = fs_utils.read_file(f"{logs_path}/info")
    # Any non-empty report in either file is treated as a failure.
    if leaks or frees:
        raise Exception("Memory leaks found!\n"
                        f"Kedr summary: {summary}\n"
                        f"Possible memory leaks: {leaks}\n"
                        f"Unallocated frees: {frees}\n")
def mount(device, mount_point):
    """Mount device at mount_point; return True on success, False on failure.

    The mount point directory is created when missing. On success the mount
    point is recorded on the device object.
    """
    if not fs_utils.check_if_directory_exists(mount_point):
        fs_utils.create_directory(mount_point, True)
    TestProperties.LOGGER.info(
        f"Mounting device {device.system_path} to {mount_point}.")
    result = TestProperties.executor.execute(f"mount {device.system_path} {mount_point}")
    if result.exit_code != 0:
        TestProperties.LOGGER.error(
            f"Failed to mount {device.system_path} to {mount_point}")
        return False
    device.mount_point = mount_point
    return True
def set_trace_repository_path(trace_path: str, shortcut: bool = False):
    """
    Set the iotrace trace repository path, creating the directory when missing.

    :param trace_path: trace path
    :param shortcut: Use shorter command
    :type trace_path: str
    :type shortcut: bool
    :raises Exception: if setting path fails
    """
    if not check_if_directory_exists(trace_path):
        create_directory(trace_path)
    command = 'iotrace' + (' -C' if shortcut else ' --trace-config')
    command += ' -S ' if shortcut else ' --set-trace-repository-path '
    command += (' -p ' if shortcut else ' --path ') + f'{trace_path}'
    output = TestRun.executor.run(command)
    if output.exit_code == 0:
        return
    error_output = parse_json(output.stderr)[0]["trace"]
    if error_output == "No access to trace directory":
        raise CmdException("Invalid setting of the trace repository path", output)
    # Previously, unrecognized errors fell through and returned silently,
    # contradicting the documented contract — raise so no failure is swallowed.
    raise CmdException("Failed to set trace repository path", output)
def get_sys_block_path():
    """Return the sysfs block directory, preferring /sys/class/block."""
    preferred = "/sys/class/block"
    # Fall back to the legacy location when the preferred one is absent.
    return preferred if check_if_directory_exists(preferred) else "/sys/block"
def test_data_integrity_5d_dss(filesystems):
    """
    title: |
        Data integrity test on four cas instances with different file systems
        with duration time equal to 5 days
    description: |
        Create 4 cache instances (the code below starts one per cache mode:
        WT, WB, WA, WO) with different file systems, and run workload with
        data verification. Cache/core sizing comes from prepare_devices()
        (defined elsewhere — presumably caches ~50GB, cores ~150GB; confirm
        there).
    pass_criteria:
        - System does not crash.
        - All operations complete successfully.
        - Data consistency is being preserved.
    """
    with TestRun.step("Prepare cache and core devices"):
        cache_devices, core_devices = prepare_devices()

    with TestRun.step(
        "Run 4 cache instances in different cache modes, add single core to each"
    ):
        # One cache/core pair per cache mode.
        cache_modes = [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WO]
        caches = []
        cores = []
        for i in range(4):
            cache, core = start_instance(cache_devices[i], core_devices[i], cache_modes[i])
            caches.append(cache)
            cores.append(core)

    with TestRun.step("Load default io class config for each cache"):
        for cache in caches:
            cache.load_io_class("/etc/opencas/ioclass-config.csv")

    with TestRun.step("Create filesystems and mount cores"):
        for i, core in enumerate(cores):
            # Derive a unique mount point from the core's device path.
            mount_point = core.path.replace('/dev/', '/mnt/')
            if not fs_utils.check_if_directory_exists(mount_point):
                fs_utils.create_directory(mount_point)
            TestRun.LOGGER.info(
                f"Create filesystem {filesystems[i].name} on {core.path}")
            core.create_filesystem(filesystems[i])
            TestRun.LOGGER.info(
                f"Mount filesystem {filesystems[i].name} on {core.path} to "
                f"{mount_point}")
            core.mount(mount_point)
            sync()

    with TestRun.step("Run test workloads on filesystems with verification"):
        # file_min_size/file_max_size, runtime, start_size and stop_size are
        # module-level values defined elsewhere in this file.
        fio_run = Fio().create_command()
        fio_run.io_engine(IoEngine.libaio)
        fio_run.direct()
        fio_run.time_based()
        fio_run.nr_files(4096)
        fio_run.file_size_range([(file_min_size, file_max_size)])
        fio_run.do_verify()
        fio_run.verify(VerifyMethod.md5)
        fio_run.verify_dump()
        fio_run.run_time(runtime)
        fio_run.read_write(ReadWrite.randrw)
        fio_run.io_depth(128)
        fio_run.blocksize_range([(start_size, stop_size)])
        # One fio job per mounted core, all run in a single fio invocation.
        for core in cores:
            fio_job = fio_run.add_job()
            fio_job.directory(core.mount_point)
            fio_job.size(core.size)
        fio_run.run()

    with TestRun.step("Unmount cores"):
        for core in cores:
            core.unmount()

    with TestRun.step("Calculate md5 for each core"):
        core_md5s = [File(core.full_path).md5sum() for core in cores]

    with TestRun.step("Stop caches"):
        for cache in caches:
            cache.stop()

    with TestRun.step("Calculate md5 for each core"):
        dev_md5s = [File(dev.full_path).md5sum() for dev in core_devices]

    with TestRun.step("Compare md5 sums for cores and core devices"):
        # Matching checksums mean stopping the cache flushed all data back
        # to the backing core devices.
        for core_md5, dev_md5, mode, fs in zip(core_md5s, dev_md5s, cache_modes, filesystems):
            if core_md5 != dev_md5:
                TestRun.fail(f"MD5 sums of core and core device do not match! "
                             f"Cache mode: {mode} Filesystem: {fs}")