def _configure_lazyflow_settings():
    import lazyflow
    import lazyflow.request
    from lazyflow.utility import Memory
    from lazyflow.operators import cacheMemoryManager

    if status_interval_secs:
        memory_logger = logging.getLogger("lazyflow.operators.cacheMemoryManager")
        memory_logger.setLevel(logging.DEBUG)
        cacheMemoryManager.setRefreshInterval(status_interval_secs)

    if n_threads is not None:
        logger.info(f"Resetting lazyflow thread pool with {n_threads} threads.")
        lazyflow.request.Request.reset_thread_pool(n_threads)

    if total_ram_mb > 0:
        if total_ram_mb < 500:
            raise Exception("In your current configuration, RAM is "
                            f"limited to {total_ram_mb} MB. Remember "
                            "to specify RAM in MB, not GB.")
        ram = total_ram_mb * 1024**2
        fmt = Memory.format(ram)
        logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
        Memory.setAvailableRam(ram)
def _configure_lazyflow_settings():
    import lazyflow
    import lazyflow.request
    from lazyflow.utility import Memory
    from lazyflow.operators.cacheMemoryManager import CacheMemoryManager

    if status_interval_secs:
        memory_logger = logging.getLogger('lazyflow.operators.cacheMemoryManager')
        memory_logger.setLevel(logging.DEBUG)
        CacheMemoryManager().setRefreshInterval(status_interval_secs)

    if n_threads is not None:
        logger.info(f'Resetting lazyflow thread pool with {n_threads} threads.')
        lazyflow.request.Request.reset_thread_pool(n_threads)

    if total_ram_mb > 0:
        if total_ram_mb < 500:
            raise Exception('In your current configuration, RAM is '
                            f'limited to {total_ram_mb} MB. Remember '
                            'to specify RAM in MB, not GB.')
        ram = total_ram_mb * 1024**2
        fmt = Memory.format(ram)
        logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
        Memory.setAvailableRam(ram)
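# A minimal usage sketch for the two variants above. The driver function
# `headless_main` and its defaults are hypothetical (not from the source);
# the sketch only illustrates that `n_threads`, `total_ram_mb`,
# `status_interval_secs`, and `logger` must be bound in the enclosing
# scope, since `_configure_lazyflow_settings` reads them as closure
# variables.
import logging

logger = logging.getLogger(__name__)

def headless_main(n_threads=None, total_ram_mb=0, status_interval_secs=0):
    def _configure_lazyflow_settings():
        ...  # one of the bodies shown above

    _configure_lazyflow_settings()

headless_main(n_threads=4, total_ram_mb=4096, status_interval_secs=10)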
def testSettings(self):
    assert Memory.getAvailableRam() > 0
    assert Memory.getAvailableRamCaches() > 0

    ram = 47 * 1111
    Memory.setAvailableRam(ram)
    assert Memory.getAvailableRam() == ram

    cache_ram = ram // 3
    Memory.setAvailableRamCaches(cache_ram)
    assert Memory.getAvailableRamCaches() == cache_ram
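# A small usage sketch of the Memory API exercised by testSettings.
# Passing a negative value mirrors the setUp/tearDown hooks below;
# reading -1 as "restore the auto-detected default" is an inference
# from those hooks, not documented behaviour.
from lazyflow.utility import Memory

Memory.setAvailableRam(2 * 1024**3)      # pin the total budget to 2 GiB
assert Memory.getAvailableRam() == 2 * 1024**3
Memory.setAvailableRamCaches(1024**3)    # let caches use half of it
Memory.setAvailableRam(-1)               # back to the detected default
Memory.setAvailableRamCaches(-1)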
def _configure_lazyflow_settings():
    import lazyflow
    import lazyflow.request
    from lazyflow.utility import Memory

    if n_threads is not None:
        logger.info("Resetting lazyflow thread pool with {} threads.".format(n_threads))
        lazyflow.request.Request.reset_thread_pool(n_threads)

    if total_ram_mb > 0:
        if total_ram_mb < 500:
            raise Exception("In your current configuration, RAM is limited to {} MB."
                            " Remember to specify RAM in MB, not GB.".format(total_ram_mb))
        ram = total_ram_mb * 1024**2
        fmt = Memory.format(ram)
        logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
        Memory.setAvailableRam(ram)
def tearDown(self):
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)
def setUp(self):
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)
def testBadMemoryConditions(self):
    """
    TestCacheMemoryManager.testBadMemoryConditions

    This test is a proof of the proposition in
        https://github.com/ilastik/lazyflow/issue/185
    which states that, given certain memory constraints, the cache
    cleanup strategy in use is inefficient. An advanced strategy
    should pass the test.
    """
    mgr = _CacheMemoryManager()
    mgr.setRefreshInterval(0.01)
    mgr.enable()

    d = 2
    tags = "xy"

    shape = (999,) * d
    blockshape = (333,) * d

    # restrict memory for computation to one block (including fudge
    # factor 2 of bigRequestStreamer)
    cacheMem = np.prod(shape)
    Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

    # restrict cache memory to the whole volume
    Memory.setAvailableRamCaches(cacheMem)

    # to ease observation, do everything single threaded
    Request.reset_thread_pool(num_workers=1)

    x = np.zeros(shape, dtype=np.uint8)
    x = vigra.taggedView(x, axistags=tags)

    g = Graph()
    pipe = OpArrayPiperWithAccessCount(graph=g)
    pipe.Input.setValue(x)
    pipe.Output.meta.ideal_blockshape = blockshape

    # simulate BlockedArrayCache behaviour without caching
    # cache = OpSplitRequestsBlockwise(True, graph=g)
    # cache.BlockShape.setValue(blockshape)
    # cache.Input.connect(pipe.Output)

    cache = OpBlockedArrayCache(graph=g)
    cache.Input.connect(pipe.Output)
    cache.BlockShape.setValue(blockshape)

    op = OpEnlarge(graph=g)
    op.Input.connect(cache.Output)

    split = OpSplitRequestsBlockwise(True, graph=g)
    split.BlockShape.setValue(blockshape)
    split.Input.connect(op.Output)

    streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
    streamer.execute()

    # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
    # in the best case, we have 9
    np.testing.assert_equal(pipe.accessCount, 9)
def teardown_method(self, method):
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)
def setup_method(self, method):
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)
def testBadMemoryConditions(self):
    """
    TestCacheMemoryManager.testBadMemoryConditions

    This test is a proof of the proposition in
        https://github.com/ilastik/lazyflow/issue/185
    which states that, given certain memory constraints, the cache
    cleanup strategy in use is inefficient. An advanced strategy
    should pass the test.
    """
    mgr = CacheMemoryManager()
    mgr.setRefreshInterval(0.01)
    mgr.enable()

    d = 2
    tags = "xy"

    shape = (999,) * d
    blockshape = (333,) * d

    # restrict memory for computation to one block (including fudge
    # factor 2 of bigRequestStreamer)
    cacheMem = np.prod(shape)
    Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

    # restrict cache memory to the whole volume
    Memory.setAvailableRamCaches(cacheMem)

    # to ease observation, do everything single threaded
    Request.reset_thread_pool(num_workers=1)

    x = np.zeros(shape, dtype=np.uint8)
    x = vigra.taggedView(x, axistags=tags)

    g = Graph()
    pipe = OpArrayPiperWithAccessCount(graph=g)
    pipe.Input.setValue(x)
    pipe.Output.meta.ideal_blockshape = blockshape

    # simulate BlockedArrayCache behaviour without caching
    # cache = OpSplitRequestsBlockwise(True, graph=g)
    # cache.BlockShape.setValue(blockshape)
    # cache.Input.connect(pipe.Output)

    cache = OpBlockedArrayCache(graph=g)
    cache.Input.connect(pipe.Output)
    cache.BlockShape.setValue(blockshape)

    op = OpEnlarge(graph=g)
    op.Input.connect(cache.Output)

    split = OpSplitRequestsBlockwise(True, graph=g)
    split.BlockShape.setValue(blockshape)
    split.Input.connect(op.Output)

    streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
    streamer.execute()

    # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
    # in the best case, we have 9
    np.testing.assert_equal(pipe.accessCount, 9)
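# Module-level imports the two tests above appear to rely on. The exact
# module paths are assumptions based on the lazyflow package layout and
# may differ between versions; OpEnlarge and OpArrayPiperWithAccessCount
# are helpers defined elsewhere in the test suite, not library classes.
import numpy as np
import vigra

from lazyflow.graph import Graph
from lazyflow.request import Request
from lazyflow.utility import Memory, BigRequestStreamer
from lazyflow.operators import OpBlockedArrayCache
from lazyflow.operators.opSplitRequestsBlockwise import OpSplitRequestsBlockwise
from lazyflow.operators.cacheMemoryManager import CacheMemoryManager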
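# The same reset appears above in both unittest-style (setUp/tearDown)
# and pytest-style (setup_method/teardown_method) hooks. A sketch of an
# equivalent autouse pytest fixture (hypothetical, not from the source):
import pytest
from lazyflow.utility import Memory

@pytest.fixture(autouse=True)
def _reset_memory_settings():
    # restore the auto-detected defaults around each test
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)
    yield
    Memory.setAvailableRam(-1)
    Memory.setAvailableRamCaches(-1)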