Example #1
    def testBlockedCacheHandling(self, cacheMemoryManager):
        n, k = 10, 5
        vol = np.zeros((n,) * 5, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags="txyzc")

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        cache = OpBlockedArrayCache(graph=g)

        # restrict cache memory to 0 bytes
        Memory.setAvailableRamCaches(0)

        # set to frequent cleanup
        cacheMemoryManager.setRefreshInterval(0.01)
        cacheMemoryManager.enable()

        cache.BlockShape.setValue((k,) * 5)
        cache.Input.connect(pipe.Output)
        pipe.Input.setValue(vol)

        a = pipe.accessCount
        cache.Output[...].wait()
        b = pipe.accessCount
        assert b > a, "did not cache"

        # let the manager clean up
        cacheMemoryManager.enable()
        time.sleep(0.5)
        gc.collect()

        cache.Output[...].wait()
        c = pipe.accessCount
        assert c > b, "did not clean up"
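Example #1 receives the manager as a pytest fixture argument, while Example #2 below constructs it directly. A minimal sketch of such a fixture, assuming CacheMemoryManager behaves as a singleton so both spellings act on the same manager (the fixture itself is illustrative, not part of lazyflow):

import pytest
from lazyflow.operators.cacheMemoryManager import CacheMemoryManager

@pytest.fixture
def cacheMemoryManager():
    # assumed: CacheMemoryManager is a singleton, so the test and the
    # cache operators see the same manager instance
    yield CacheMemoryManager()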
Example #2
    def testBlockedCacheHandling(self):
        n, k = 10, 5
        vol = np.zeros((n,) * 5, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags="txyzc")

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        cache = OpBlockedArrayCache(graph=g)

        mgr = CacheMemoryManager()

        # restrict cache memory to 0 bytes
        Memory.setAvailableRamCaches(0)

        # set to frequent cleanup
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        cache.BlockShape.setValue((k,) * 5)
        cache.Input.connect(pipe.Output)
        pipe.Input.setValue(vol)

        a = pipe.accessCount
        cache.Output[...].wait()
        b = pipe.accessCount
        assert b > a, "did not cache"

        # let the manager clean up
        mgr.enable()
        time.sleep(0.5)
        gc.collect()

        cache.Output[...].wait()
        c = pipe.accessCount
        assert c > b, "did not clean up"
Example #3
        def _configure_lazyflow_settings():
            import lazyflow
            import lazyflow.request
            from lazyflow.utility import Memory
            from lazyflow.operators.cacheMemoryManager import \
                CacheMemoryManager

            if status_interval_secs:
                memory_logger = logging.getLogger(
                    'lazyflow.operators.cacheMemoryManager')
                memory_logger.setLevel(logging.DEBUG)
                CacheMemoryManager().setRefreshInterval(status_interval_secs)

            if n_threads is not None:
                logger.info(f'Resetting lazyflow thread pool with {n_threads} '
                            'threads.')
                lazyflow.request.Request.reset_thread_pool(n_threads)
            if total_ram_mb > 0:
                if total_ram_mb < 500:
                    raise Exception('In your current configuration, RAM is '
                                    f'limited to {total_ram_mb} MB. Remember '
                                    'to specify RAM in MB, not GB.')
                ram = total_ram_mb * 1024**2
                fmt = Memory.format(ram)
                logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
                Memory.setAvailableRam(ram)
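_configure_lazyflow_settings is a nested helper that closes over status_interval_secs, n_threads and total_ram_mb from its enclosing function. A self-contained sketch with explicit parameters (the standalone signature is hypothetical; the lazyflow calls are the same ones used above):

import logging

import lazyflow.request
from lazyflow.utility import Memory

logger = logging.getLogger(__name__)

def configure_lazyflow(n_threads=None, total_ram_mb=0):
    # hypothetical standalone variant of the nested helper above
    if n_threads is not None:
        logger.info("Resetting lazyflow thread pool with %d threads.", n_threads)
        lazyflow.request.Request.reset_thread_pool(n_threads)
    if total_ram_mb > 0:
        if total_ram_mb < 500:
            raise Exception(
                "In your current configuration, RAM is limited to {} MB. "
                "Remember to specify RAM in MB, not GB.".format(total_ram_mb))
        ram = total_ram_mb * 1024 ** 2  # MB -> bytes
        logger.info("Configuring lazyflow RAM limit to {}".format(Memory.format(ram)))
        Memory.setAvailableRam(ram)

# e.g. 4 request threads and a 4 GB RAM budget
configure_lazyflow(n_threads=4, total_ram_mb=4096)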
Example #4
    def testCacheHandling(self):
        n, k = 10, 5
        vol = np.zeros((n, ) * 5, dtype=np.uint8)
        vol = vigra.taggedView(vol, axistags='txyzc')

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        cache = OpArrayCache(graph=g)

        mgr = CacheMemoryManager()

        # disallow cache memory
        Memory.setAvailableRamCaches(0)

        # set to frequent cleanup
        mgr.setRefreshInterval(.01)
        mgr.enable()

        cache.blockShape.setValue((k, ) * 5)
        cache.Input.connect(pipe.Output)
        pipe.Input.setValue(vol)

        a = pipe.accessCount
        cache.Output[...].wait()
        b = pipe.accessCount
        assert b > a, "did not cache"

        # let the manager clean up
        mgr.enable()
        time.sleep(.5)
        gc.collect()

        cache.Output[...].wait()
        c = pipe.accessCount
        assert c > b, "did not clean up"
Example #5
        def _configure_lazyflow_settings():
            import lazyflow
            import lazyflow.request
            from lazyflow.utility import Memory
            from lazyflow.operators import cacheMemoryManager

            if status_interval_secs:
                memory_logger = logging.getLogger(
                    "lazyflow.operators.cacheMemoryManager")
                memory_logger.setLevel(logging.DEBUG)
                cacheMemoryManager.setRefreshInterval(status_interval_secs)

            if n_threads is not None:
                logger.info(f"Resetting lazyflow thread pool with {n_threads} "
                            "threads.")
                lazyflow.request.Request.reset_thread_pool(n_threads)
            if total_ram_mb > 0:
                if total_ram_mb < 500:
                    raise Exception("In your current configuration, RAM is "
                                    f"limited to {total_ram_mb} MB. Remember "
                                    "to specify RAM in MB, not GB.")
                ram = total_ram_mb * 1024**2
                fmt = Memory.format(ram)
                logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
                Memory.setAvailableRam(ram)
Example #6
 def teardown_method(self, method):
     # reset cleanup frequency to sane value
     # reset max memory
     Memory.setAvailableRamCaches(-1)
     mgr = CacheMemoryManager()
     mgr.setRefreshInterval(default_refresh_interval)
     mgr.enable()
     Request.reset_thread_pool()
Example #7
 def teardown_method(self, method):
     # reset cleanup frequency to sane value
     # reset max memory
     Memory.setAvailableRamCaches(-1)
     mgr = _CacheMemoryManager()
     mgr.setRefreshInterval(default_refresh_interval)
     mgr.enable()
     Request.reset_thread_pool()
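Both teardown variants restore default_refresh_interval, which is defined outside the snippets shown here. A stand-in for running them in isolation (the value is an assumption, not necessarily lazyflow's actual default):

# assumed value; the real constant lives in the tests' module scope
default_refresh_interval = 5.0  # seconds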
Example #8
 def _configure_lazyflow_settings():
     import lazyflow
     import lazyflow.request
     from lazyflow.utility import Memory
     if n_threads is not None:
         logger.info("Resetting lazyflow thread pool with {} threads.".format( n_threads ))
         lazyflow.request.Request.reset_thread_pool(n_threads)
     if total_ram_mb > 0:
         if total_ram_mb < 500:
             raise Exception("In your current configuration, RAM is limited to {} MB."
                             "  Remember to specify RAM in MB, not GB."
                             .format( total_ram_mb ))
         ram = total_ram_mb * 1024**2
         fmt = Memory.format(ram)
         logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
         Memory.setAvailableRam(ram)
Example #9
 def testSettings(self):
     assert Memory.getAvailableRam() > 0
     assert Memory.getAvailableRamCaches() > 0
     ram = 47 * 1111
     Memory.setAvailableRam(ram)
     assert Memory.getAvailableRam() == ram
     cache_ram = ram // 3
     Memory.setAvailableRamCaches(cache_ram)
     assert Memory.getAvailableRamCaches() == cache_ram
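testSettings leaves both limits modified; the setup/teardown examples further down reset them by passing -1, which restores the auto-detected defaults. The same pattern as a hypothetical context manager (ram_limits is not a lazyflow API):

from contextlib import contextmanager

from lazyflow.utility import Memory

@contextmanager
def ram_limits(total_bytes, cache_bytes):
    # pin both limits for the duration of the with-block...
    Memory.setAvailableRam(total_bytes)
    Memory.setAvailableRamCaches(cache_bytes)
    try:
        yield
    finally:
        # ...then hand both back to auto-detection
        Memory.setAvailableRam(-1)
        Memory.setAvailableRamCaches(-1)

with ram_limits(47 * 1111, (47 * 1111) // 3):
    assert Memory.getAvailableRam() == 47 * 1111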
Example #11
 def _configure_lazyflow_settings():
     import lazyflow
     import lazyflow.request
     from lazyflow.utility import Memory
     if n_threads is not None:
         logger.info(
             "Resetting lazyflow thread pool with {} threads.".format(
                 n_threads))
         lazyflow.request.Request.reset_thread_pool(n_threads)
     if total_ram_mb > 0:
         if total_ram_mb < 500:
             raise Exception(
                 "In your current configuration, RAM is limited to {} MB."
                 "  Remember to specify RAM in MB, not GB.".format(
                     total_ram_mb))
         ram = total_ram_mb * 1024**2
         fmt = Memory.format(ram)
         logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
         Memory.setAvailableRam(ram)
Example #12
    def _cleanup(self):
        """
        clean up once
        """
        from lazyflow.operators.opCache import ObservableCache

        try:
            # notify subscribed functions about current cache memory
            total = 0

            # Avoid "RuntimeError: Set changed size during iteration"
            with self._first_class_caches_lock:
                first_class_caches = self._first_class_caches.copy()

            for cache in first_class_caches:
                if isinstance(cache, ObservableCache):
                    total += cache.usedMemory()
            self.totalCacheMemory(total)
            cache = None

            # check current memory state
            cache_memory = Memory.getAvailableRamCaches()
            cache_pct = 0.0
            if cache_memory:
                cache_pct = total * 100.0 / cache_memory

            logger.debug(
                "Process memory usage is {:0.2f} GB out of {:0.2f} (caches are {}, {:.1f}% of allowed)".format(
                    Memory.getMemoryUsage() / 2.0 ** 30,
                    Memory.getAvailableRam() / 2.0 ** 30,
                    Memory.format(total),
                    cache_pct,
                )
            )

            if total <= self._max_usage * cache_memory:
                return

            cache_entries = []
            cache_entries += [
                (cache.lastAccessTime(), cache.name, cache.freeMemory) for cache in list(self._managed_caches)
            ]
            cache_entries += [
                (lastAccessTime, f"{cache.name}: {blockKey}", functools.partial(cache.freeBlock, blockKey))
                for cache in list(self._managed_blocked_caches)
                for blockKey, lastAccessTime in cache.getBlockAccessTimes()
            ]
            cache_entries.sort(key=lambda entry: entry[0])

            for lastAccessTime, info, cleanupFun in cache_entries:
                if total <= self._target_usage * cache_memory:
                    break
                mem = cleanupFun()
                logger.debug(f"Cleaned up {info} ({Memory.format(mem)})")
                total -= mem

            # Remove references to cache entries before triggering garbage collection.
            cleanupFun = None
            cache_entries = None
            gc.collect()

            msg = "Done cleaning up, cache memory usage is now at {}".format(Memory.format(total))
            if cache_memory > 0:
                msg += " ({:.1f}% of allowed)".format(total * 100.0 / cache_memory)
            logger.debug(msg)
        except:
            log_exception(logger)
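The eviction policy in Example #12 is plain least-recently-used: collect (lastAccessTime, name, cleanup) entries for whole caches and for individual blocks, sort them oldest-first, and free until usage drops below target_usage * cache_memory. The core loop reduced to a self-contained toy (not lazyflow code):

def evict_lru(entries, total, target):
    # entries: (last_access, name, free_fn); each free_fn returns bytes freed
    entries.sort(key=lambda e: e[0])  # oldest access time first
    for _, _, free_fn in entries:
        if total <= target:
            break
        total -= free_fn()
    return total

# toy run: three 100-byte "caches", evict until usage is at most 150
entries = [(t, "cache%d" % t, lambda: 100) for t in (3, 1, 2)]
assert evict_lru(entries, total=300, target=150) == 100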
Example #13
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)

        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
        np.testing.assert_equal(pipe.accessCount, 9)
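The request-count comment is worth unpacking: 999/333 yields a 3x3 grid of blocks, and OpEnlarge (defined elsewhere in this test module) presumably requests a one-block halo around each output block. Clipped at the volume border, a corner block then touches a 2x2 neighbourhood of input blocks, an edge block 2x3, and the centre block 3x3; without caching every touch is a fresh request, while a working cache computes each of the 9 blocks exactly once:

# worst case: every output block recomputes each input block it touches
corners = 4 * (2 * 2)  # 4 corner blocks, 4 input blocks each
edges = 4 * (2 * 3)    # 4 edge blocks, 6 input blocks each
center = 1 * (3 * 3)   # 1 centre block, 9 input blocks
assert corners + edges + center == 49
# best case: each of the 3x3 input blocks is requested exactly once
assert 3 * 3 == 9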
Example #14
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)
        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
        np.testing.assert_equal(pipe.accessCount, 9)
Example #15
 def setup_method(self, method):
     Memory.setAvailableRam(-1)
     Memory.setAvailableRamCaches(-1)
Example #16
 def teardown_method(self, method):
     Memory.setAvailableRam(-1)
     Memory.setAvailableRamCaches(-1)
Example #18
 def tearDown(self):
     Memory.setAvailableRam(-1)
     Memory.setAvailableRamCaches(-1)
Example #19
 def setUp(self):
     Memory.setAvailableRam(-1)
     Memory.setAvailableRamCaches(-1)
Example #22
    def _cleanup(self):
        """
        clean up once
        """
        from lazyflow.operators.opCache import ObservableCache
        try:
            # notify subscribed functions about current cache memory
            total = 0
            
            # Avoid "RuntimeError: Set changed size during iteration"
            with self._first_class_caches_lock:
                first_class_caches = self._first_class_caches.copy()
            
            for cache in first_class_caches:
                if isinstance(cache, ObservableCache):
                    total += cache.usedMemory()
            self.totalCacheMemory(total)
            cache = None

            # check current memory state
            cache_memory = Memory.getAvailableRamCaches()

            if total <= self._max_usage * cache_memory:
                return

            # === we need a cache cleanup ===

            # queue holds time stamps and cleanup functions
            q = PriorityQueue()
            caches = list(self._managed_caches)
            for c in caches:
                q.push((c.lastAccessTime(), c.name, c.freeMemory))
            caches = list(self._managed_blocked_caches)
            for c in caches:
                for k, t in c.getBlockAccessTimes():
                    cleanupFun = functools.partial(c.freeBlock, k)
                    info = "{}: {}".format(c.name, k)
                    q.push((t, info, cleanupFun))
            c = None
            caches = None

            msg = "Caches are using {} memory".format(
                Memory.format(total))
            if cache_memory > 0:
                 msg += " ({:.1f}% of allowed)".format(
                    total*100.0/cache_memory)
            logger.debug(msg)

            while (total > self._target_usage * cache_memory
                   and len(q) > 0):
                t, info, cleanupFun = q.pop()
                mem = cleanupFun()
                logger.debug("Cleaned up {} ({})".format(
                    info, Memory.format(mem)))
                total -= mem
            gc.collect()
            # don't keep a reference until next loop iteration
            cleanupFun = None
            q = None

            msg = ("Done cleaning up, cache memory usage is now at "
                   "{}".format(Memory.format(total)))
            if cache_memory > 0:
                 msg += " ({:.1f}% of allowed)".format(
                    total*100.0/cache_memory)
            logger.debug(msg)
        except:
            log_exception(logger)
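The PriorityQueue used here and in Example #23 is not the standard library's queue.PriorityQueue (whose interface is put/get/qsize); it is lazyflow's own helper. A minimal heapq-based equivalent with the push/pop/len interface the code above relies on:

import heapq

class PriorityQueue:
    """Smallest item first; matches the push/pop/len usage in _cleanup."""

    def __init__(self):
        self._heap = []

    def push(self, item):
        heapq.heappush(self._heap, item)

    def pop(self):
        return heapq.heappop(self._heap)

    def __len__(self):
        return len(self._heap)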
Example #23
    def _cleanup(self):
        """
        clean up once
        """
        from lazyflow.operators.opCache import ObservableCache

        try:
            # notify subscribed functions about current cache memory
            total = 0

            # Avoid "RuntimeError: Set changed size during iteration"
            with self._first_class_caches_lock:
                first_class_caches = self._first_class_caches.copy()

            for cache in first_class_caches:
                if isinstance(cache, ObservableCache):
                    total += cache.usedMemory()
            self.totalCacheMemory(total)
            cache = None

            # check current memory state
            cache_memory = Memory.getAvailableRamCaches()
            cache_pct = 0.0
            if cache_memory:
                cache_pct = total * 100.0 / cache_memory

            logger.debug(
                "Process memory usage is {:0.2f} GB out of {:0.2f} (caches are {}, {:.1f}% of allowed)".format(
                    Memory.getMemoryUsage() / 2.0 ** 30,
                    Memory.getAvailableRam() / 2.0 ** 30,
                    Memory.format(total),
                    cache_pct,
                )
            )

            if total <= self._max_usage * cache_memory:
                return

            # === we need a cache cleanup ===

            # queue holds time stamps and cleanup functions
            q = PriorityQueue()
            caches = list(self._managed_caches)
            for c in caches:
                q.push((c.lastAccessTime(), c.name, c.freeMemory))
            caches = list(self._managed_blocked_caches)
            for c in caches:
                for k, t in c.getBlockAccessTimes():
                    cleanupFun = functools.partial(c.freeBlock, k)
                    info = "{}: {}".format(c.name, k)
                    q.push((t, info, cleanupFun))
            c = None
            caches = None

            while total > self._target_usage * cache_memory and len(q) > 0:
                t, info, cleanupFun = q.pop()
                mem = cleanupFun()
                logger.debug("Cleaned up {} ({})".format(info, Memory.format(mem)))
                total -= mem
            gc.collect()
            # don't keep a reference until next loop iteration
            cleanupFun = None
            q = None

            msg = "Done cleaning up, cache memory usage is now at {}".format(Memory.format(total))
            if cache_memory > 0:
                msg += " ({:.1f}% of allowed)".format(total * 100.0 / cache_memory)
            logger.debug(msg)
        except:
            log_exception(logger)