def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    cleaner (locked) and make sure they are erased and unlocked after use.
    """
    # Custom context wires in DataCopyTracer so every Data buffer handed to
    # the cleaner is tracked for secure-erase / unlock verification.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )
    ctx.register_volume_type(Volume)
    # Cache (30 MiB) is much smaller than the core (100 MiB), so sustained WB
    # writes force eviction and therefore cleaning.
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)
    cmpls = []
    for i in range(10000):
        read_data = Data(S.from_KiB(120))
        # (i * 1259) % size spreads the writes pseudo-randomly over the whole
        # core device instead of writing sequentially.
        io = core.new_io(
            cache.get_default_queue(),
            (i * 1259) % int(core_device.size),
            read_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        io.set_data(read_data)
        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()
    # Wait for every submitted IO to complete before sampling stats.
    for c in cmpls:
        c.wait()
    stats = cache.get_stats()
    ctx.exit()
    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    # A non-zero clean count proves the WB cleaner actually ran during the test.
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
def test_neg_set_acp_param_value(pyocf_ctx, cm, cls, param):
    """
    Verify that setting an out-of-range value for any ACP cleaning policy
    parameter is rejected with an OcfError.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :param param: acp parameter to fuzz
    """
    # Bring up a cache with the requested mode/line size and switch to ACP
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=cm, cache_line_size=cls
    )
    cache.set_cleaning_policy(CleaningPolicy.ACP)

    # Fuzz the parameter with 32-bit values outside its valid range
    valid_range = get_acp_param_valid_rage(param)
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in valid_range:
            continue
        with pytest.raises(OcfError, match="Error setting cleaning policy param"):
            cache.set_cleaning_policy_param(CleaningPolicy.ACP, param, i)
            print(f"\n{i}")
def test_neg_write_too_far(pyocf_ctx, c_uint16_randomize):
    """
    Check if writing data which would normally fit on exported object is
    blocked when offset is set so that data goes over exported device end
    """
    # Clamp the fuzzed size to the range 0..4096 (interpreted as KiB below)
    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    vol, queue = prepare_cache_and_core(Size.from_MiB(4))

    buf = Data(int(Size.from_KiB(limited_size)))
    completion = io_operation(vol, queue, buf, IoDir.WRITE, int(Size.from_MiB(3)))

    # At a 3 MiB offset, anything over 1 MiB of data runs past the 4 MiB end
    if limited_size <= 1024:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
def test_neg_set_ioclass_name(pyocf_ctx):
    """
    Verify that adding an ioclass whose name contains invalid characters fails.
    :param pyocf_ctx: basic pyocf context fixture
    """
    # Everything outside string.printable is invalid, plus ',' and '"'
    invalid_chars = [chr(code) for code in range(256) if chr(code) not in string.printable]
    invalid_chars.extend([",", '"'])

    # Start cache device
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)),
        cache_mode=CacheMode.WT,
        cache_line_size=CacheLineSize.LINE_4KiB,
    )

    # Fuzz partition names; skip names that happen to contain no bad character
    for name in RandomStringGenerator(
        len_range=Range(0, 1024), count=10000, extra_chars=invalid_chars
    ):
        if not any(c in name for c in invalid_chars):
            continue
        with pytest.raises(OcfError, match="Error adding partition to cache"):
            cache.configure_partition(part_id=1, name=name, max_size=100, priority=1)
            print(f"\n{name}")
def test_neg_set_ioclass_cache_mode(pyocf_ctx, cm, cls):
    """
    Verify that adding an ioclass with an invalid cache mode fails.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=cm, cache_line_size=cls
    )

    # Fuzz the ioclass cache mode with ints that name no real mode
    valid_modes = list(CacheMode) + [CACHE_MODE_NONE]
    for i in RandomGenerator(DefaultRanges.INT):
        if i in valid_modes:
            continue
        with pytest.raises(OcfError, match="Error adding partition to cache"):
            cache.configure_partition(
                part_id=1, name="unclassified", max_size=100, priority=1, cache_mode=i
            )
            print(f"\n{i}")
def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
    """
    Test whether it is possible to start cache with various cache name value.
    :param pyocf_ctx: basic pyocf context fixture
    :param string_randomize: fuzzed cache name value to start cache with
    :param cm: cache mode value to start cache with
    :param cls: cache line size value to start cache with
    """
    cache_device = Volume(Size.from_MiB(30))
    # Names that OCF is expected to reject; every other name should work
    incorrect_values = ['']

    try:
        cache = Cache.start_on_device(
            cache_device, name=string_randomize, cache_mode=cm, cache_line_size=cls
        )
    except OcfError:
        # Start failed — only acceptable for the known-bad names
        if string_randomize not in incorrect_values:
            logger.error(
                f"Cache did not start properly with correct name value: '{string_randomize}'"
            )
        return

    # Start succeeded — flag it if the name should have been rejected
    if string_randomize in incorrect_values:
        logger.error(
            f"Cache started with incorrect name value: '{string_randomize}'")
    cache.stop()
def test_load_cache(pyocf_ctx):
    """Stop a freshly started cache and load it back from the same device."""
    device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(device)
    cache.stop()

    # Reload from the very same volume the metadata was written to
    cache = Cache.load_from_device(device)
def test_start_stop_multiple(pyocf_ctx):
    """Starting/stopping multiple caches.
    Check whether OCF allows for starting multiple caches and stopping them in
    random order
    """
    caches = []
    caches_no = randrange(6, 11)
    for i in range(1, caches_no):
        cache_name = f"cache{i}"
        cache_mode = CacheMode(randrange(0, len(CacheMode)))
        cache_line_size = CacheLineSize(4096 * 2 ** randrange(0, len(CacheLineSize)))
        cache = Cache.start_on_device(
            Volume(Size.from_MiB(20)),
            name=cache_name,
            cache_mode=cache_mode,
            cache_line_size=cache_line_size,
        )
        caches.append(cache)

        # Each cache must report exactly the parameters it was started with
        stats = cache.get_stats()
        assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
        assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
        assert stats["conf"]["cache_name"] == cache_name, "Cache name"

    # Shuffle via random sort key, then stop the caches in that order
    caches.sort(key=lambda e: randrange(1000))
    for cache in caches:
        logger.info("Getting stats before stopping cache")
        stats = cache.get_stats()
        cache_name = stats["conf"]["cache_name"]
        cache.stop()
        assert get_cache_by_name(pyocf_ctx, cache_name) != 0, "Try getting cache after stopping it"
def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: MetadataLayout):
    """Starting cache with different parameters.
    Check if cache starts without errors.
    If possible check whether cache reports properly set parameters.

    :param pyocf_ctx: basic pyocf context fixture
    :param mode: cache mode to start with
    :param cls: cache line size to start with
    :param layout: metadata layout to start with
    """
    cache_device = Volume(Size.from_MiB(20))
    queue_size = randrange(60000, 2**32)
    unblock_size = randrange(1, queue_size)
    volatile_metadata = randrange(2) == 1
    unaligned_io = randrange(2) == 1
    submit_fast = randrange(2) == 1
    name = "test"

    logger.info("[STAGE] Start cache")
    cache = Cache.start_on_device(
        cache_device,
        cache_mode=mode,
        cache_line_size=cls,
        name=name,
        # BUG FIX: previously hardcoded to MetadataLayout.SEQUENTIAL, which
        # silently ignored the parametrized `layout` fixture — every
        # parametrization ran with the same layout.
        metadata_layout=layout,
        metadata_volatile=volatile_metadata,
        max_queue_size=queue_size,
        queue_unblock_size=unblock_size,
        pt_unaligned_io=unaligned_io,
        use_submit_fast=submit_fast,
    )

    stats = cache.get_stats()
    assert stats["conf"]["cache_mode"] == mode, "Cache mode"
    assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
    assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
    assert cache.get_name() == name, "Cache name"
def test_surprise_shutdown_set_promotion_policy_param(pyocf_ctx):
    """
    Run the surprise-shutdown scenario around setting each promotion policy
    parameter, for every promotion policy other than ALWAYS.
    """
    core_device = RamVolume(S.from_MiB(10))
    core = Core(device=core_device)
    for pp in PromotionPolicy:
        if pp == PromotionPolicy.ALWAYS:
            # ALWAYS has no parameters to set
            continue
        if pp == PromotionPolicy.NHIT:
            params = NhitParams
        else:
            # add handler for new policy here
            assert False
        for p in params:
            # NOTE: prepare/test close over the current `pp` and `p`; this is
            # safe because mngmt_op_surprise_shutdown_test consumes them
            # within the same loop iteration.

            def prepare(cache):
                # runs before the injected error: attach core, select policy
                cache.add_core(core)
                cache.set_promotion_policy(pp)
                cache.save()

            def test(cache):
                # the management operation under test: set one param value
                val = None
                if pp == PromotionPolicy.NHIT:
                    if p == NhitParams.INSERTION_THRESHOLD:
                        val = 500
                    elif p == NhitParams.TRIGGER_THRESHOLD:
                        val = 50
                    else:
                        # add handler for new param here
                        assert False
                cache.set_promotion_policy_param(pp, p, val)
                cache.save()

            mngmt_op_surprise_shutdown_test(pyocf_ctx, test, prepare, None)
def test_neg_set_nhit_promotion_policy_param_threshold(pyocf_ctx, cm, cls):
    """
    Verify that an invalid INSERTION_THRESHOLD value for the nhit promotion
    policy is rejected.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device with the nhit promotion policy enabled
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)),
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Fuzz the threshold with 32-bit values outside the valid range
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in ConfValidValues.promotion_nhit_insertion_threshold_range:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(
                PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, i
            )
            print(f"\n{i}")
def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
    """
    Verify that an invalid parameter id for the nhit promotion policy is
    rejected.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device with the nhit promotion policy enabled
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)),
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Fuzz the param id with 8-bit values that match no NhitParams member
    valid_ids = [item.value for item in NhitParams]
    for i in RandomGenerator(DefaultRanges.UINT8):
        if i in valid_ids:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(PromotionPolicy.NHIT, i, 1)
            print(f"\n{i}")
def test_attach_different_size(
    pyocf_ctx, new_cache_size, mode: CacheMode, cls: CacheLineSize
):
    """Start cache and add partition with limited occupancy. Fill partition
    with data, attach cache with different size and trigger IO. Verify if
    occupancy thresold is respected with both original and new cache device.
    """
    cache = Cache.start_on_device(
        Volume(Size.from_MiB(35)), cache_mode=mode, cache_line_size=cls
    )
    core = Core.using_device(Volume(Size.from_MiB(100)))
    cache.add_core(core)

    # Partition capped at 50% occupancy; disable seq cutoff so every write
    # is inserted into the cache
    cache.configure_partition(
        part_id=1, name="test_part", min_size=0, max_size=50, priority=1
    )
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    block_size = 4096
    data = bytes(block_size)

    def fill_cache_and_verify_occupancy():
        # Write one 4k block per cache line, then confirm the partition was
        # held to exactly half of the current cache size
        cache_size = cache.get_stats()["conf"]["size"]
        for i in range(cache_size.blocks_4k):
            io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
        part_current_size = CacheLines(
            cache.get_partition_info(part_id=1)["_curr_size"], cls
        )
        assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5

    # Check with the original cache device
    fill_cache_and_verify_occupancy()

    # Swap in a cache device of a different size and repeat the check
    cache.detach_device()
    cache.attach_device(Volume(Size.from_MiB(new_cache_size)), force=True)
    fill_cache_and_verify_occupancy()
def test_neg_io_class(pyocf_ctx, c_int_randomize):
    """
    Check that IO operations are blocked when IO class number is not in
    allowed values {0, ..., 32}
    """
    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_MiB(1)))

    # Random direction (read or write) with the fuzzed io_class
    completion = io_operation(core, data, randrange(0, 2), io_class=c_int_randomize)

    expect_success = 0 <= c_int_randomize <= 32
    if expect_success:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Starting cache in different modes with different cache line sizes.
    After start check proper cache mode behaviour, starting with read operation.
    """
    cache_device = RamVolume(Size.from_MiB(50))
    core_device = RamVolume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    core = Core.using_device(core_device)
    cache.add_core(core)
    # front_vol is the exported object (goes through the cache),
    # bottom_vol is the raw core volume (bypasses the cache)
    front_vol = CoreVolume(core, open=True)
    bottom_vol = core.get_volume()
    queue = cache.get_default_queue()

    logger.info("[STAGE] Initial write to core device")
    test_data = Data.from_string("This is test data")
    io_to_core(bottom_vol, queue, test_data, Size.from_sector(1).B)

    # Reset volume counters so each stage's stats checks see only that
    # stage's traffic
    cache_device.reset_stats()
    core_device.reset_stats()

    logger.info("[STAGE] Initial read from exported object")
    io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
    check_stats_read_empty(core, mode, cls)

    logger.info("[STAGE] Write to exported object after initial read")
    cache_device.reset_stats()
    core_device.reset_stats()
    test_data = Data.from_string("Changed test data")
    io_to_core(front_vol, queue, test_data, Size.from_sector(1).B)
    check_stats_write_after_read(core, mode, cls, True)

    logger.info("[STAGE] Read from exported object after write")
    io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
    check_stats_read_after_write(core, mode, cls)

    # Finally verify data integrity between cache and core
    check_md5_sums(core, mode)
def test_surprise_shutdown_stop_cache(pyocf_ctx):
    """
    Inject a write error at successive IO sequence numbers during cache stop
    and verify that a dirty WB cache interrupted mid-stop can be loaded back
    with its data intact. The loop advances the injection point until a stop
    completes with no error injected.
    """
    core_device = RamVolume(S.from_MiB(10))
    error_triggered = True
    error_io_seq_no = 0
    io_offset = mngmt_op_surprise_shutdown_test_io_offset

    while error_triggered:
        # Start cache device without error injection
        error_io = {IoDir.WRITE: error_io_seq_no}
        device = ErrorDevice(
            mngmt_op_surprise_shutdown_test_cache_size, error_seq_no=error_io, armed=False
        )

        # setup cache and insert some data
        cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
        core = Core(device=core_device)
        cache.add_core(core)
        vol = CoreVolume(core, open=True)
        ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)

        # start error injection
        device.arm()

        try:
            cache.stop()
            status = OcfErrorCode.OCF_OK
        except OcfError as ex:
            status = ex.error_code

        # if error was injected we expect mngmt op error
        error_triggered = device.error_triggered()
        if error_triggered:
            assert status == OcfErrorCode.OCF_ERR_WRITE_CACHE
        else:
            assert status == 0

        if not error_triggered:
            # stop completed past all injection points — scenario exhausted
            break

        # disable error injection and load the cache
        device.disarm()
        cache = None

        # the interrupted stop must not have flushed dirty data to the core
        assert core_device.get_bytes()[io_offset] == VOLUME_POISON

        cache = Cache.load_from_device(device, open_cores=False)
        stats = cache.get_stats()
        if stats["conf"]["core_count"] == 1:
            # core metadata survived — the dirty cacheline must be recoverable
            assert stats["usage"]["occupancy"]["value"] == 1
            core = Core(device=core_device)
            cache.add_core(core, try_add=True)
            vol = CoreVolume(core, open=True)
            assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA

        cache.stop()

        # advance error injection point
        error_io_seq_no += 1
def test_flush_after_mngmt(pyocf_ctx):
    """
    Check whether underlying volumes volatile caches (VC) are flushed after
    management operation
    """
    block_size = 4096
    payload = bytes(block_size)

    cache_device = FlushValVolume(Size.from_MiB(30))
    core_device = FlushValVolume(Size.from_MiB(30))

    # after start cache VC must be cleared
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT)
    assert cache_device.flush_last

    # adding core must flush VC
    core = Core.using_device(core_device)
    cache.add_core(core)
    assert cache_device.flush_last

    def write_block(block_no):
        # one-block write through the exported object
        io_to_exp_obj(
            core, block_size * block_no, block_size, payload, 0, IoDir.WRITE, 0
        )

    # WT I/O to write data to core and cache VC
    write_block(0)

    # WB I/O to produce dirty cachelines in CAS
    cache.change_cache_mode(CacheMode.WB)
    write_block(1)

    # after cache flush VCs are expected to be cleared
    cache.flush()
    assert cache_device.flush_last
    assert core_device.flush_last

    # I/O to write data to cache device VC
    write_block(0)

    # cache save must flush VC
    cache.save()
    assert cache_device.flush_last

    # I/O to write data to cache device VC
    write_block(0)

    # cache stop must flush VC
    cache.stop()
    assert cache_device.flush_last
def test_neg_write_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
    """
    Check that write operations are blocked when IO offset is located outside
    of device range
    """
    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    completion = io_operation(core, data, IoDir.WRITE, offset=c_int_sector_randomize)

    # A valid offset keeps the entire 1 KiB write inside the 2 MiB device
    max_valid_offset = int(Size.from_MiB(2)) - int(Size.from_KiB(1))
    if 0 <= c_int_sector_randomize <= max_valid_offset:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
def test_neg_read_too_far(pyocf_ctx, c_uint16_randomize):
    """
    Check if reading data which would normally fit on exported object is
    blocked when offset is set so that data is read beyond exported device end
    """
    # Clamp the fuzzed size to the range 0..4096 (interpreted as KiB below)
    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    core = prepare_cache_and_core(Size.from_MiB(4))

    buf = Data(int(Size.from_KiB(limited_size)))
    completion = io_operation(core, buf, IoDir.READ, offset=(Size.from_MiB(3)))

    # At a 3 MiB offset, anything over 1 MiB of data reads past the 4 MiB end
    if limited_size <= 1024:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(20)):
    """Start a cache of cache_size, attach a core of core_size, return the core."""
    cache = Cache.start_on_device(Volume(cache_size))
    core = Core.using_device(Volume(core_size))
    cache.add_core(core)
    return core
def test_init_nhit(pyocf_ctx, promotion_policy):
    """
    Check if starting cache with promotion policy is reflected in stats
    1. Create core/cache pair with parametrized promotion policy
    2. Get cache statistics
        * verify that promotion policy type is properly reflected in stats
    """
    cache = Cache.start_on_device(
        Volume(Size.from_MiB(30)), promotion_policy=promotion_policy
    )
    core = Core.using_device(Volume(Size.from_MiB(30)))
    cache.add_core(core)

    assert cache.get_stats()["conf"]["promotion_policy"] == promotion_policy
def test_surprise_shutdown_set_io_class_config(pyocf_ctx):
    """
    Run the surprise-shutdown scenario around an io-class reconfiguration:
    after an interrupted configure, the cache must hold either the complete
    old config or the complete new one — never a mix.
    """
    core_device = RamVolume(S.from_MiB(10))
    core = Core(device=core_device)

    class_range = range(0, IoClassesInfo.MAX_IO_CLASSES)
    # Baseline io-class table (class 0 must keep the "unclassified" name)
    old_ioclass = [
        {
            "_class_id": i,
            "_name": f"old_{i}" if i > 0 else "unclassified",
            "_max_size": i,
            "_priority": i,
            "_cache_mode": int(CacheMode.WB),
        }
        for i in range(IoClassesInfo.MAX_IO_CLASSES)
    ]
    # Replacement table, distinguishable from the old one in every field
    new_ioclass = [
        {
            "_class_id": i,
            "_name": f"new_{i}" if i > 0 else "unclassified",
            "_max_size": 2 * i,
            "_priority": 2 * i,
            "_cache_mode": int(CacheMode.WT),
        }
        for i in range(IoClassesInfo.MAX_IO_CLASSES)
    ]
    keys = old_ioclass[0].keys()

    def set_io_class_info(cache, desc):
        # Fill the ctypes IoClassesInfo struct from the dict description and
        # push it to OCF via the raw management call
        ioclasses_info = IoClassesInfo()
        for i in range(IoClassesInfo.MAX_IO_CLASSES):
            ioclasses_info._config[i]._class_id = i
            ioclasses_info._config[i]._name = desc[i]["_name"].encode("utf-8")
            ioclasses_info._config[i]._priority = desc[i]["_priority"]
            ioclasses_info._config[i]._cache_mode = desc[i]["_cache_mode"]
            ioclasses_info._config[i]._max_size = desc[i]["_max_size"]
        OcfLib.getInstance().ocf_mngt_cache_io_classes_configure(
            cache, byref(ioclasses_info)
        )

    def prepare(cache):
        # runs before error injection: install the baseline config
        cache.add_core(core)
        set_io_class_info(cache, old_ioclass)
        cache.save()

    def test(cache):
        # the management operation under test: swap in the new config
        set_io_class_info(cache, new_ioclass)
        cache.save()

    def check(cache, error_triggered):
        # after reload the config must be atomically old or new
        curr_ioclass = [
            {k: info[k] for k in keys}
            for info in [cache.get_partition_info(i) for i in class_range]
        ]
        assert curr_ioclass == old_ioclass or curr_ioclass == new_ioclass

    mngmt_op_surprise_shutdown_test(pyocf_ctx, test, prepare, check)
def test_30add_remove(pyocf_ctx):
    """Add and remove the same core 30 times, checking core_count each time."""
    # Start cache device
    cache = Cache.start_on_device(Volume(S.from_MiB(30)))

    # Create core device
    core = Core.using_device(Volume(S.from_MiB(10)))

    # Add and remove core device in a loop 30 times
    # Check statistics after every operation
    for _ in range(30):
        cache.add_core(core)
        assert cache.get_stats()["conf"]["core_count"] == 1

        cache.remove_core(core)
        assert cache.get_stats()["conf"]["core_count"] == 0
def test_load_cache_recovery(pyocf_ctx):
    """Load cache metadata from a copy taken before the clean shutdown."""
    cache_device = RamVolume(S.from_MiB(50))
    cache = Cache.start_on_device(cache_device)

    # Snapshot the device while the cache is still running (dirty shutdown state)
    device_copy = cache_device.get_copy()

    cache.stop()

    # Loading from the pre-stop copy exercises the recovery path
    cache = Cache.load_from_device(device_copy)
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted.
    """
    cache_device = Volume(
        Size.from_MiB(20))  # this gives about 1.375 MiB actual caching space
    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    # Disable seq cutoff so the first (cache-sized) IO is fully inserted
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    # First IO fits in the cache — establishes a known occupancy baseline
    valid_io_size = Size.from_KiB(512)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    # Occupancy is counted in 4 KiB blocks
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]

    # Anything below 5 MiB is a valid size (less than core device size)
    # Writing over 1.375 MiB in this case should go directly to core and shouldn't trigger eviction
    io_size_bigger_than_cache = Size.from_MiB(2)
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()

    # Writes from IO greater than cache size should go directly to core
    # Writes to core should equal the following:
    # Previous writes to core + size written + size cleaned (reads from cache)
    assert stats["block"]["core_volume_wr"]["value"] == \
        stats["block"]["cache_volume_rd"]["value"] + \
        prev_writes_to_core + io_size_bigger_than_cache.B / Size.from_KiB(4).B, \
        "Writes to core after second IO"

    # Occupancy shouldn't change (no eviction)
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after second IO"
def test_start_check_default(pyocf_ctx):
    """Test if default values are correct after start.
    """
    cache = Cache.start_on_device(RamVolume(Size.from_MiB(50)))
    core = Core.using_device(RamVolume(Size.from_MiB(10)))
    cache.add_core(core)

    # Cache-level defaults
    stats = cache.get_stats()
    assert stats["conf"]["cleaning_policy"] == CleaningPolicy.DEFAULT
    assert stats["conf"]["cache_mode"] == CacheMode.DEFAULT
    assert stats["conf"]["cache_line_size"] == CacheLineSize.DEFAULT

    # Core-level defaults
    core_stats = core.get_stats()
    assert core_stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
def test_load_cache_with_cores(pyocf_ctx, open_cores):
    """Write data, reload the cache (optionally opening cores), verify data."""
    cache_device = RamVolume(S.from_MiB(40))
    core_device = RamVolume(S.from_MiB(40))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device, name="test_core")
    cache.add_core(core)
    vol = CoreVolume(core, open=True)

    def submit_io(volume, data, direction):
        # synchronous single IO at sector 3 through the given volume
        io = volume.new_io(
            cache.get_default_queue(), S.from_sector(3).B, data.size, direction, 0, 0
        )
        io.set_data(data)
        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        io.submit()
        cmpl.wait()

    write_data = Data.from_string("This is test data")
    submit_io(vol, write_data, IoDir.WRITE)

    cache.stop()

    cache = Cache.load_from_device(cache_device, open_cores=open_cores)
    if not open_cores:
        cache.add_core(core, try_add=True)
    else:
        core = cache.get_core_by_name("test_core")

    vol = CoreVolume(core, open=True)

    read_data = Data(write_data.size)
    submit_io(vol, read_data, IoDir.READ)

    assert read_data.md5() == write_data.md5()
    assert vol.md5() == core_device.md5()
def test_start_cache_same_id(pyocf_ctx, mode, cls):
    """Adding two caches with the same name
    Check that OCF does not allow for 2 caches to be started with the same
    cache_name
    """
    cache_device1 = Volume(Size.from_MiB(20))
    cache_device2 = Volume(Size.from_MiB(20))
    cache_name = "cache"

    # First cache with the shared name starts fine
    cache = Cache.start_on_device(
        cache_device1, cache_mode=mode, cache_line_size=cls, name=cache_name
    )
    cache.get_stats()

    # Second start with the same name must be refused
    with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
        cache = Cache.start_on_device(
            cache_device2, cache_mode=mode, cache_line_size=cls, name=cache_name
        )
        cache.get_stats()
def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
    """Switch cache mode after start and verify stats reflect the new mode."""
    # Start cache device
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=from_cm, cache_line_size=cls
    )

    # Change cache mode and check if stats are as expected
    cache.change_cache_mode(to_cm)
    stats_after = cache.get_stats()
    assert stats_after["conf"]["cache_mode"] == to_cm
def test_removing_core(pyocf_ctx, cache_mode, cls):
    """Add a core to the cache, remove it, and verify core_count drops to 0."""
    # Start cache device
    cache = Cache.start_on_device(
        Volume(S.from_MiB(30)), cache_mode=cache_mode, cache_line_size=cls
    )

    # Create core device and attach it to the cache
    core = Core.using_device(Volume(S.from_MiB(10)))
    cache.add_core(core)

    # Remove core from cache and check statistics
    cache.remove_core(core)
    assert cache.get_stats()["conf"]["core_count"] == 0