def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Verify eviction works when cachelines are remapped between two distinct cores."""
    cache_device = Volume(Size.from_MiB(20))
    backend1 = Volume(Size.from_MiB(40))
    backend2 = Volume(Size.from_MiB(40))

    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
    cache_size = cache.get_stats()["conf"]["size"]

    front1 = Core.using_device(backend1, name="core1")
    front2 = Core.using_device(backend2, name="core2")
    cache.add_core(front1)
    cache.add_core(front2)

    # Fill the entire cache through the first core, then repeat via the second
    io_size = Size.from_B(cache_size.B)
    payload = Data(io_size)
    send_io(front1, payload)
    send_io(front2, payload)

    stats1 = front1.get_stats()
    stats2 = front2.get_stats()

    # IO to the second core should evict all the data from the first core
    assert stats1["usage"]["occupancy"]["value"] == 0
    assert stats2["usage"]["occupancy"]["value"] == io_size.blocks_4k
def test_neg_cache_set_seq_cut_off_threshold(pyocf_ctx, cm, cls):
    """
    Try to set the cache-wide seq cut-off threshold to invalid values; expect failure.

    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Bring up a cache with two cores attached
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    core1 = Core.using_device(Volume(S.from_MiB(10)), name="core1")
    core2 = Core.using_device(Volume(S.from_MiB(10)), name="core2")
    cache.add_core(core1)
    cache.add_core(core2)

    # Any value outside the valid threshold range must be rejected
    for candidate in RandomGenerator(DefaultRanges.UINT32):
        if candidate in ConfValidValues.seq_cutoff_threshold_rage:
            continue
        with pytest.raises(
                OcfError, match="Error setting cache seq cut off policy threshold"):
            cache.set_seq_cut_off_threshold(candidate)
            print(f"\n{candidate}")
def test_try_add_core_with_changed_size(pyocf_ctx, cache_mode, cls):
    """
    Verify that loading a cache refuses a core whose volume size changed.

    :param pyocf_ctx: basic pyocf context fixture
    :param cache_mode: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)

    # Attach a single core, then shut the cache down cleanly
    core_device = RamVolume(S.from_MiB(10))
    core = Core.using_device(core_device)
    cache.add_core(core)
    cache.stop()

    # Grow the core volume behind OCF's back
    core_device.resize(S.from_MiB(12))

    # Re-load metadata and try to re-attach the now-mismatched core
    cache = Cache.load_from_device(cache_device, open_cores=False)
    core = Core(device=core_device)
    with pytest.raises(OcfError, match="OCF_ERR_CORE_SIZE_MISMATCH"):
        cache.add_core(core, try_add=True)
def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
    """Exercise every possible seq cut-off policy transition on a live cache."""
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Create and add two cores
    core1 = Core.using_device(Volume(S.from_MiB(10)), name="core1")
    core2 = Core.using_device(Volume(S.from_MiB(10)), name="core2")
    cache.add_core(core1)
    cache.add_core(core2)

    def assert_policy(expected):
        # Cache-wide policy must be reflected by both cores
        stats = core1.get_stats()
        assert stats["seq_cutoff_policy"] == expected
        stats = core2.get_stats()
        assert stats["seq_cutoff_policy"] == expected

    # Check all possible seq cut off policy switches
    for seq_from in SeqCutOffPolicy:
        for seq_to in SeqCutOffPolicy:
            cache.set_seq_cut_off_policy(seq_from.value)
            assert_policy(seq_from.value)
            cache.set_seq_cut_off_policy(seq_to.value)
            assert_policy(seq_to.value)
def test_surprise_shutdown_swap_core(pyocf_ctx):
    """Surprise shutdown while adding a core into a slot freed by a removed core."""
    core_device_1 = RamVolume(S.from_MiB(10), uuid="dev1")
    core_device_2 = RamVolume(S.from_MiB(10), uuid="dev2")
    core1 = Core.using_device(core_device_1, name="core1")
    core2 = Core.using_device(core_device_2, name="core2")

    def prepare(cache):
        # Add and then remove the first core so its slot is free again
        cache.add_core(core1)
        cache.save()
        cache.remove_core(core1)
        cache.save()

    def tested_func(cache):
        cache.add_core(core2)

    def check_func(cache, error_triggered):
        stats = cache.get_stats()
        expected = 0 if error_triggered else 1
        assert stats["conf"]["core_count"] == expected

        # core1 must be gone no matter where the interruption hit
        with pytest.raises(OcfError):
            cache.get_core_by_name("core1")

        if error_triggered:
            # The interrupted add must not have persisted core2
            with pytest.raises(OcfError):
                cache.get_core_by_name("core2")
        else:
            found = cache.get_core_by_name("core2")
            assert found.device.uuid == "dev2"

    mngmt_op_surprise_shutdown_test(pyocf_ctx, tested_func, prepare, check_func)
def test_neg_cache_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
    """
    Try to set the cache-wide seq cut-off policy to invalid values; expect failure.

    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Create and attach two cores
    core1 = Core.using_device(Volume(S.from_MiB(10)))
    core2 = Core.using_device(Volume(S.from_MiB(10)))
    cache.add_core(core1)
    cache.add_core(core2)

    valid_policies = [item.value for item in SeqCutOffPolicy]
    # Every value that is not a defined policy must be rejected
    for candidate in generate_random_numbers(c_uint32):
        if candidate in valid_policies:
            continue
        with pytest.raises(OcfError, match="Error setting cache seq cut off policy"):
            cache.set_seq_cut_off_policy(candidate)
def test_surprise_shutdown_stop_cache(pyocf_ctx):
    """Inject a write error at every point of cache stop and verify recovery.

    Iterates the error-injection sequence number until a stop completes with no
    error triggered; after each interrupted stop, reloads the cache and checks
    that the previously written dirty data is still readable from the cache.
    """
    core_device = RamVolume(S.from_MiB(10))
    error_triggered = True
    error_io_seq_no = 0
    io_offset = mngmt_op_surprise_shutdown_test_io_offset

    while error_triggered:
        # Start cache device without error injection
        error_io = {IoDir.WRITE: error_io_seq_no}
        device = ErrorDevice(
            mngmt_op_surprise_shutdown_test_cache_size, error_seq_no=error_io, armed=False
        )

        # setup cache and insert some data
        cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
        core = Core(device=core_device)
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)

        # start error injection
        device.arm()

        try:
            cache.stop()
            status = OcfErrorCode.OCF_OK
        except OcfError as ex:
            status = ex.error_code

        # if error was injected we expect mngmt op error
        error_triggered = device.error_triggered()
        if error_triggered:
            assert status == OcfErrorCode.OCF_ERR_WRITE_CACHE
        else:
            assert status == 0

        if not error_triggered:
            break

        # disable error injection and load the cache
        device.disarm()
        cache = None

        # WB mode: the dirty byte must not have reached the core device
        assert core_device.get_bytes()[io_offset] == VOLUME_POISON

        cache = Cache.load_from_device(device, open_cores=False)
        stats = cache.get_stats()
        # If the core made it into metadata, its dirty data must be intact
        if stats["conf"]["core_count"] == 1:
            assert stats["usage"]["occupancy"]["value"] == 1
            core = Core(device=core_device)
            cache.add_core(core, try_add=True)
            vol = CoreVolume(core, open=True)
            assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA

        cache.stop()

        # advance error injection point
        error_io_seq_no += 1
def check_md5_sums(core: Core, mode: CacheMode):
    """Compare core-device and exported-object MD5 sums, honoring lazy-write modes."""
    if not mode.lazy_write():
        assert core.device.md5() == core.get_front_volume().md5(), \
            "MD5 check: core device vs exported object"
        return

    # Lazy-write modes hold dirty data on the cache, so the sums differ...
    assert core.device.md5() != core.get_front_volume().md5(), \
        "MD5 check: core device vs exported object without flush"
    # ...until a flush syncs the core device with the exported object
    core.cache.flush()
    assert core.device.md5() == core.get_front_volume().md5(), \
        "MD5 check: core device vs exported object after flush"
def check_md5_sums(exported_obj: Core, mode: CacheMode):
    """Compare core-device and exported-object MD5 sums, honoring lazy-write modes."""
    if not mode.lazy_write():
        assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
            "MD5 check: core device vs exported object"
        return

    # Dirty data is still only on the cache, so the sums must diverge...
    assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
        "MD5 check: core device vs exported object without flush"
    # ...until a flush writes it back to the core device
    exported_obj.cache.flush()
    assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
        "MD5 check: core device vs exported object after flush"
def test_adding_cores(pyocf_ctx):
    """Attach two cores to a single cache back-to-back."""
    cache_device = Volume(S.from_MiB(200))
    cache = Cache.start_on_device(cache_device)

    # Two independent core volumes
    core1 = Core.using_device(Volume(S.from_MiB(400)))
    core2 = Core.using_device(Volume(S.from_MiB(400)))

    cache.add_core(core1)
    cache.add_core(core2)
def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
    """Write *data* at *offset*, either through the exported object or straight to core."""
    make_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
    io = make_io()
    io.set_data(data)
    io.configure(offset, data.size, IoDir.WRITE, 0, 0)
    io.set_queue(exported_obj.cache.get_default_queue())

    done = OcfCompletion([("err", c_int)])
    io.callback = done.callback
    io.submit()
    done.wait()

    assert done.results["err"] == 0, "IO to exported object completion"
def test_simple_wt_write(pyocf_ctx):
    """Single small write in default (WT) mode: verify stats and data integrity."""
    cache_device = Volume(S.from_MiB(30))
    core_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    cache.add_core(core)
    cache_device.reset_stats()
    core_device.reset_stats()

    payload = Data.from_string("This is test data")
    io = core.new_io(cache.get_default_queue(), S.from_sector(1).B,
                     payload.size, IoDir.WRITE, 0, 0)
    io.set_data(payload)

    done = OcfCompletion([("err", c_int)])
    io.callback = done.callback
    io.submit()
    done.wait()
    assert done.results["err"] == 0

    # Write-through: the write lands on the cache device immediately
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    # Exported object and core device must agree on contents
    assert core.exp_obj_md5() == core_device.md5()
    cache.stop()
def test_adding_to_random_cache(pyocf_ctx):
    """Distribute cores over several caches at random and verify per-cache counts."""
    cache_devices = []
    core_devices = {}
    cache_amount = 5
    core_amount = 30

    # Create 5 cache devices
    for i in range(0, cache_amount):
        cache_device = Volume(S.from_MiB(30))
        cache = Cache.start_on_device(cache_device)
        cache_devices.append(cache)

    # Create 30 core devices and add each to a randomly chosen cache
    for i in range(0, core_amount):
        core_device = Volume(S.from_MiB(10))
        core = Core.using_device(core_device)
        # Remember which cache index this core was assigned to
        core_devices[core] = randint(0, cache_amount - 1)
        cache_devices[core_devices[core]].add_core(core)

    # Count expected number of cores per cache
    count_dict = {}
    for i in range(0, cache_amount):
        count_dict[i] = sum(k == i for k in core_devices.values())

    # Check if cache statistics are as expected
    for i in range(0, cache_amount):
        stats = cache_devices[i].get_stats()
        assert stats["conf"]["core_count"] == count_dict[i]
def test_neg_core_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
    """
    Try to set the per-core seq cut-off policy to invalid values; expect failure.

    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Create and attach a single core
    core = Core.using_device(RamVolume(S.from_MiB(10)))
    cache.add_core(core)

    valid_policies = [item.value for item in SeqCutOffPolicy]
    # Every value outside the defined policy set must be rejected
    for candidate in RandomGenerator(DefaultRanges.UINT32):
        if candidate in valid_policies:
            continue
        with pytest.raises(OcfError, match="Error setting core seq cut off policy"):
            core.set_seq_cut_off_policy(candidate)
            print(f"\n{candidate}")
def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
    """A core attached to one cache must not be attachable to another."""
    # Start two independent caches
    cache1 = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=cache_mode, cache_line_size=cls)
    cache2 = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=cache_mode, cache_line_size=cls)

    # Attach the core to the first cache
    core = Core.using_device(Volume(S.from_MiB(10)))
    cache1.add_core(core)

    # The second cache must refuse the already-owned core
    with pytest.raises(OcfError):
        cache2.add_core(core)

    # Only the first cache should report a core
    assert cache1.get_stats()["conf"]["core_count"] == 1
    assert cache2.get_stats()["conf"]["core_count"] == 0
def test_10add_remove_with_io(pyocf_ctx):
    """Add and remove the same core ten times, issuing a write while attached."""
    # Start cache device
    cache = Cache.start_on_device(Volume(S.from_MiB(30)))

    # Create core device
    core = Core.using_device(Volume(S.from_MiB(10)))

    for _ in range(10):
        cache.add_core(core)
        assert cache.get_stats()["conf"]["core_count"] == 1

        # One small write while the core is attached
        payload = Data.from_string("Test data")
        io = core.new_io()
        io.set_data(payload)
        io.configure(20, payload.size, IoDir.WRITE, 0, 0)
        io.set_queue(cache.get_default_queue())

        done = OcfCompletion([("err", c_int)])
        io.callback = done.callback
        io.submit()
        done.wait()

        cache.remove_core(core)
        assert cache.get_stats()["conf"]["core_count"] == 0
def test_surprise_shutdown_set_promotion_policy_param(pyocf_ctx):
    """Interrupt setting each promotion-policy parameter with a surprise shutdown.

    NOTE: the nested `prepare`/`test` closures capture the loop variables `pp`
    and `p` late-bound; this is safe only because the shutdown test is invoked
    inside the same loop iteration.
    """
    core_device = RamVolume(S.from_MiB(10))
    core = Core(device=core_device)

    for pp in PromotionPolicy:
        if pp == PromotionPolicy.ALWAYS:
            # ALWAYS has no tunable parameters to exercise
            continue
        if pp == PromotionPolicy.NHIT:
            params = NhitParams
        else:
            # add handler for new policy here
            assert False

        for p in params:

            def prepare(cache):
                cache.add_core(core)
                cache.set_promotion_policy(pp)
                cache.save()

            def test(cache):
                val = None
                if pp == PromotionPolicy.NHIT:
                    if p == NhitParams.INSERTION_THRESHOLD:
                        val = 500
                    elif p == NhitParams.TRIGGER_THRESHOLD:
                        val = 50
                    else:
                        # add handler for new param here
                        assert False
                cache.set_promotion_policy_param(pp, p, val)
                cache.save()

            mngmt_op_surprise_shutdown_test(pyocf_ctx, test, prepare, None)
def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
    """Stopping cache.
    Check if cache is stopped properly in different modes with or without
    preceding flush operation.
    """
    cache_device = Volume(Size.from_MiB(20))
    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    front = Core.using_device(core_device)
    cache.add_core(front)

    cls_no = 10
    run_io_and_cache_data_if_possible(front, mode, cls, cls_no)

    # Lazy-write modes keep the written cachelines dirty; others write through
    expected_dirty = cls_no if mode.lazy_write() else 0
    assert int(cache.get_stats()["conf"]["dirty"]) == expected_dirty, \
        "Dirty data before MD5"

    md5_exported_core = front.exp_obj_md5()

    if with_flush:
        cache.flush()
    cache.stop()

    if mode.lazy_write() and not with_flush:
        # Dirty data never reached the core device
        assert core_device.md5() != md5_exported_core, \
            "MD5 check: core device vs exported object with dirty data"
    else:
        assert core_device.md5() == md5_exported_core, \
            "MD5 check: core device vs exported object with clean data"
def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test starting cache in different modes with different cache line sizes.
    After start check proper cache mode behaviour, starting with write operation.
    """
    cache_device = Volume(Size.from_MiB(40))
    core_device = Volume(Size.from_MiB(10))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    front = Core.using_device(core_device)
    cache.add_core(front)

    offset = Size.from_sector(1).B

    logger.info("[STAGE] Initial write to exported object")
    cache_device.reset_stats()
    core_device.reset_stats()
    payload = Data.from_string("This is test data")
    io_to_core(front, payload, offset)
    check_stats_write_empty(front, mode, cls)

    logger.info("[STAGE] Read from exported object after initial write")
    io_from_exported_object(front, payload.size, offset)
    check_stats_read_after_write(front, mode, cls, True)

    logger.info("[STAGE] Write to exported object after read")
    cache_device.reset_stats()
    core_device.reset_stats()
    payload = Data.from_string("Changed test data")
    io_to_core(front, payload, offset)
    check_stats_write_after_read(front, mode, cls)

    check_md5_sums(front, mode)
def test_add_remove_30core(pyocf_ctx):
    """Attach 30 cores one by one, then detach them, checking core_count each step."""
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device)
    core_devices = []
    core_amount = 30

    # Add 30 cores and check stats after each addition
    for i in range(0, core_amount):
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == i
        core_device = Volume(S.from_MiB(10))
        core = Core.using_device(core_device)
        core_devices.append(core)
        cache.add_core(core)

    # Remove 30 cores and check stats before each removal
    for i in range(0, core_amount):
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == core_amount - i
        cache.remove_core(core_devices[i])

    # Check statistics
    stats = cache.get_stats()
    assert stats["conf"]["core_count"] == 0
def test_simple_wt_write(pyocf_ctx):
    """Single small write in default (WT) mode: verify stats and data integrity.

    Fix: the original submitted the IO and asserted statistics immediately,
    without waiting for completion — a race against the asynchronous submit.
    Wait on an OcfCompletion (as the sibling test_simple_wt_write variant does)
    and assert the IO error code before inspecting statistics.
    """
    cache_device = Volume(S.from_MiB(100))
    core_device = Volume(S.from_MiB(200))
    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    queue = Queue(cache)
    cache.add_core(core)
    cache_device.reset_stats()
    core_device.reset_stats()

    write_data = Data.from_string("This is test data")
    io = core.new_io()
    io.set_data(write_data)
    io.configure(20, write_data.size, IoDir.WRITE, 0, 0)
    io.set_queue(queue)

    # Wait for the IO to finish before inspecting statistics (avoids a race)
    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()
    assert cmpl.results["err"] == 0

    # Write-through: the write must land on the cache device immediately
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    # Exported object and core device must agree on contents
    assert core.exp_obj_md5() == core_device.md5()
def test_secure_erase_simple_io_cleaning():
    """
        Perform simple IO which will trigger WB cleaning. Track all the data from
        cleaner (locked) and make sure they are erased and unlocked after use.

        1. Start cache in WB mode
        2. Write single sector at LBA 0
        3. Read whole cache line at LBA 0
        4. Assert that 3. triggered cleaning
        5. Check if all locked Data copies were erased and unlocked
    """
    # Build a dedicated context with a Data-copy tracer so cleaner buffers
    # can be audited after the test
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        Cleaner,
    )

    ctx.register_volume_type(RamVolume)

    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = RamVolume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)
    queue = cache.get_default_queue()

    # Step 2: write a single sector at LBA 0 (makes the line dirty)
    read_data = Data(S.from_sector(1).B)
    io = vol.new_io(queue, S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Step 3: read a larger span at the same LBA, forcing cleaning of the
    # dirty sector before the read can be served
    read_data = Data(S.from_sector(8).B)
    io = vol.new_io(queue, S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0)
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Snapshot stats before tearing the context down
    stats = cache.get_stats()

    ctx.exit()

    # Steps 4-5: all cleaner-locked buffers must be erased and unlocked
    assert (len(DataCopyTracer.needs_erase) == 0
            ), "Not all locked Data instances were secure erased!"
    assert (len(DataCopyTracer.locked_instances) == 0
            ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
def test_partial_hit_promotion(pyocf_ctx): """ Check if NHIT promotion policy doesn't prevent partial hits from getting promoted to cache 1. Create core/cache pair with promotion policy ALWAYS 2. Issue one-sector IO to cache to insert partially valid cache line 3. Set NHIT promotion policy with trigger=0 (always triggered) and high insertion threshold 4. Issue a request containing partially valid cache line and next cache line * occupancy should rise - partially hit request should bypass nhit criteria """ # Step 1 cache_device = Volume(Size.from_MiB(30)) core_device = Volume(Size.from_MiB(30)) cache = Cache.start_on_device(cache_device) core = Core.using_device(core_device) cache.add_core(core) # Step 2 comp = OcfCompletion([("error", c_int)]) write_data = Data(Size.from_sector(1)) io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.READ, 0, 0) io.set_data(write_data) io.callback = comp.callback io.submit() comp.wait() stats = cache.get_stats() cache_lines = stats["conf"]["size"] assert stats["usage"]["occupancy"]["value"] == 1 # Step 3 cache.set_promotion_policy(PromotionPolicy.NHIT) cache.set_promotion_policy_param( PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0 ) cache.set_promotion_policy_param( PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100 ) # Step 4 comp = OcfCompletion([("error", c_int)]) write_data = Data(2 * cache_lines.line_size) io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.WRITE, 0, 0) io.set_data(write_data) io.callback = comp.callback io.submit() comp.wait() stats = cache.get_stats() assert ( stats["usage"]["occupancy"]["value"] == 2 ), "Second cache line should be mapped"
def test_io_flags(pyocf_ctx, cache_mode):
    """
    Verify that I/O flags provided at the top volume interface
    are propagated down to bottom volumes for all associated
    I/Os (including metadata writes to cache volume).
    """
    flags = 0x239482
    block_size = 4096
    data = bytes(block_size)

    cache_device = FlagsValVolume(Size.from_MiB(50), flags)
    core_device = FlagsValVolume(Size.from_MiB(50), flags)

    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
    core = Core.using_device(core_device)
    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    # From here on, both bottom volumes verify the flags of every I/O
    cache_device.set_check(True)
    core_device.set_check(True)

    def issue_and_check(block_no, direction):
        # Issue one block-sized I/O and assert neither bottom volume saw
        # an I/O with unexpected flags
        io_to_exp_obj(core, block_size * block_no, block_size, data, 0, direction, flags)
        assert not cache_device.fail
        assert not core_device.fail

    # write miss
    issue_and_check(0, IoDir.WRITE)
    # read miss
    issue_and_check(1, IoDir.READ)
    # "dirty" read hit
    issue_and_check(0, IoDir.READ)
    # "clean" read hit
    issue_and_check(1, IoDir.READ)
    # "dirty" write hit
    issue_and_check(0, IoDir.WRITE)
    # "clean" write hit
    issue_and_check(1, IoDir.WRITE)
def test_surprise_shutdown_set_io_class_config(pyocf_ctx):
    """Interrupt an IO-class reconfiguration and verify the config is all-old or all-new."""
    core_device = RamVolume(S.from_MiB(10))
    core = Core(device=core_device)

    class_range = range(0, IoClassesInfo.MAX_IO_CLASSES)
    # Baseline config written before the tested operation
    old_ioclass = [
        {
            "_class_id": i,
            "_name": f"old_{i}" if i > 0 else "unclassified",
            "_max_size": i,
            "_priority": i,
            "_cache_mode": int(CacheMode.WB),
        }
        for i in range(IoClassesInfo.MAX_IO_CLASSES)
    ]
    # Replacement config applied by the interrupted operation
    new_ioclass = [
        {
            "_class_id": i,
            "_name": f"new_{i}" if i > 0 else "unclassified",
            "_max_size": 2 * i,
            "_priority": 2 * i,
            "_cache_mode": int(CacheMode.WT),
        }
        for i in range(IoClassesInfo.MAX_IO_CLASSES)
    ]
    keys = old_ioclass[0].keys()

    def set_io_class_info(cache, desc):
        # Fill the raw ctypes struct and push it through the management API
        ioclasses_info = IoClassesInfo()
        for i in range(IoClassesInfo.MAX_IO_CLASSES):
            ioclasses_info._config[i]._class_id = i
            ioclasses_info._config[i]._name = desc[i]["_name"].encode("utf-8")
            ioclasses_info._config[i]._priority = desc[i]["_priority"]
            ioclasses_info._config[i]._cache_mode = desc[i]["_cache_mode"]
            ioclasses_info._config[i]._max_size = desc[i]["_max_size"]
        OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(
            cache, byref(ioclasses_info)
        )

    def prepare(cache):
        cache.add_core(core)
        set_io_class_info(cache, old_ioclass)
        cache.save()

    def test(cache):
        set_io_class_info(cache, new_ioclass)
        cache.save()

    def check(cache, error_triggered):
        # The persisted config must be atomically either the old or the new set
        curr_ioclass = [
            {k: info[k] for k in keys}
            for info in [cache.get_partition_info(i) for i in class_range]
        ]
        assert curr_ioclass == old_ioclass or curr_ioclass == new_ioclass

    mngmt_op_surprise_shutdown_test(pyocf_ctx, test, prepare, check)
def test_adding_core_twice(pyocf_ctx):
    """Re-adding an already attached core must raise."""
    cache = Cache.start_on_device(Volume(S.from_MiB(200)))
    core = Core.using_device(Volume(S.from_MiB(400)))

    cache.add_core(core)

    # The same core object cannot be attached a second time
    with pytest.raises(OcfError):
        cache.add_core(core)
def _io_to_core(exported_obj: Core, data: Data):
    """Write *data* at offset 0 through the exported object and assert success."""
    queue = exported_obj.cache.get_default_queue()
    io = exported_obj.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
    io.set_data(data)

    done = OcfCompletion([("err", c_int)])
    io.callback = done.callback
    io.submit()
    done.wait()

    assert done.results["err"] == 0, "IO to exported object completion"
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted.
    """
    cache_device = Volume(Size.from_MiB(20))

    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    cache_size = cache.get_stats()['conf']['size']
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    # First IO fills half the cache
    valid_io_size = Size.from_B(cache_size.B // 2)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    first_block_sts = stats['block']  # NOTE(review): captured but unused below
    first_usage_sts = stats['usage']
    pt_writes_first = stats['req']['wr_pt']
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]  # NOTE(review): unused

    # Anything below 5 MiB is a valid size (less than core device size)
    # Writing over cache size (to the offset above first io) in this case should go
    # directly to core and shouldn't trigger eviction
    io_size_bigger_than_cache = Size.from_MiB(2)
    io_offset = valid_io_size
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data, io_offset)

    if mode is not CacheMode.WT:
        # Flush first write
        cache.flush()
    stats = core_exported.cache.get_stats()
    second_block_sts = stats['block']
    second_usage_sts = stats['usage']
    pt_writes_second = stats['req']['wr_pt']

    # Second write shouldn't affect cache and should go directly to core.
    # Cache occupancy shouldn't change
    # Second IO should go in PT
    assert first_usage_sts['occupancy'] == \
        second_usage_sts['occupancy']
    assert pt_writes_first['value'] == 0
    assert pt_writes_second['value'] == 1
    assert second_block_sts['cache_volume_wr'][
        'value'] == valid_io_size.blocks_4k
    assert second_block_sts['core_volume_wr']['value'] == valid_io_size.blocks_4k + \
        io_size_bigger_than_cache.blocks_4k
def _test_surprise_shutdown_mngmt_generic(pyocf_ctx, func):
    """Run *func* as the interrupted management operation with one core attached."""
    core = Core(device=RamVolume(S.from_MiB(10)))

    def prepare(cache):
        # Attach the core before the tested operation runs
        cache.add_core(core)

    def test(cache):
        func(cache, core)
        cache.save()

    mngmt_op_surprise_shutdown_test(pyocf_ctx, test, prepare, None)
def test_surprise_shutdown_swap_core_with_data(pyocf_ctx):
    """Surprise shutdown while swapping in a new core after a removed core held data.

    Verifies that the replacement core never exposes the removed core's data:
    reads from the new core must return the poison pattern.
    """
    core_device_1 = RamVolume(S.from_MiB(10), uuid="dev1")
    core_device_2 = RamVolume(S.from_MiB(10), uuid="dev2")
    core1 = Core.using_device(core_device_1, name="core1")
    core2 = Core.using_device(core_device_2, name="core2")

    def prepare(cache):
        # Attach core1, write data through it, then remove it so its slot is free
        cache.add_core(core1)
        vol = CoreVolume(core1, open=True)
        cache.save()
        ocf_write(vol, cache.get_default_queue(), 0xAA,
                  mngmt_op_surprise_shutdown_test_io_offset)
        cache.remove_core(core1)
        cache.save()

    def tested_func(cache):
        cache.add_core(core2)

    def check_func(cache, error_triggered):
        stats = cache.get_stats()
        assert stats["conf"]["core_count"] == (0 if error_triggered else 1)

        # core1 must be gone regardless of where the interruption hit
        with pytest.raises(OcfError):
            core1 = cache.get_core_by_name("core1")

        core2 = None
        if error_triggered:
            with pytest.raises(OcfError):
                core2 = cache.get_core_by_name("core2")
        else:
            core2 = cache.get_core_by_name("core2")

        # If core2 made it in, it must not expose core1's old data
        if core2 is not None:
            vol2 = CoreVolume(core2, open=True)
            assert core2.device.uuid == "dev2"
            assert (
                ocf_read(vol2, cache.get_default_queue(),
                         mngmt_op_surprise_shutdown_test_io_offset)
                == VOLUME_POISON
            )

    mngmt_op_surprise_shutdown_test(pyocf_ctx, tested_func, prepare, check_func)