예제 #1
0
파일: app.py 프로젝트: aiporre/ilastik
        def _configure_lazyflow_settings():
            """Apply the requested lazyflow runtime settings.

            Uses ``status_interval_secs``, ``n_threads`` and ``total_ram_mb``
            from the enclosing scope to configure cache-status logging, the
            request thread pool, and the global RAM limit.
            """
            import lazyflow
            import lazyflow.request
            from lazyflow.utility import Memory
            from lazyflow.operators import cacheMemoryManager

            if status_interval_secs:
                # Make the periodic cache-status reports visible and set
                # how often they are refreshed.
                logging.getLogger(
                    "lazyflow.operators.cacheMemoryManager").setLevel(logging.DEBUG)
                cacheMemoryManager.setRefreshInterval(status_interval_secs)

            if n_threads is not None:
                logger.info(f"Resetting lazyflow thread pool with {n_threads} threads.")
                lazyflow.request.Request.reset_thread_pool(n_threads)

            if total_ram_mb > 0:
                # A limit below 500 MB almost certainly means the caller
                # passed GB where MB were expected.
                if total_ram_mb < 500:
                    raise Exception(
                        f"In your current configuration, RAM is limited to {total_ram_mb} MB. "
                        "Remember to specify RAM in MB, not GB.")
                ram_bytes = total_ram_mb * 1024 ** 2
                logger.info("Configuring lazyflow RAM limit to {}".format(
                    Memory.format(ram_bytes)))
                Memory.setAvailableRam(ram_bytes)
예제 #2
0
        def _configure_lazyflow_settings():
            """Configure lazyflow logging, threading and memory limits.

            Pulls ``status_interval_secs``, ``n_threads`` and
            ``total_ram_mb`` from the enclosing scope.
            """
            import lazyflow
            import lazyflow.request
            from lazyflow.utility import Memory
            from lazyflow.operators.cacheMemoryManager import \
                CacheMemoryManager

            if status_interval_secs:
                # Cache reports are emitted at DEBUG level; enable them.
                mem_log = logging.getLogger('lazyflow.operators.cacheMemoryManager')
                mem_log.setLevel(logging.DEBUG)
                CacheMemoryManager().setRefreshInterval(status_interval_secs)

            if n_threads is not None:
                logger.info(f'Resetting lazyflow thread pool with {n_threads} threads.')
                lazyflow.request.Request.reset_thread_pool(n_threads)

            if total_ram_mb > 0:
                # Guard against a GB value passed where MB were expected.
                if total_ram_mb < 500:
                    raise Exception(
                        f'In your current configuration, RAM is limited to {total_ram_mb} MB. '
                        'Remember to specify RAM in MB, not GB.')
                limit = total_ram_mb * 1024 ** 2
                logger.info("Configuring lazyflow RAM limit to {}".format(Memory.format(limit)))
                Memory.setAvailableRam(limit)
예제 #3
0
 def testSettings(self):
     """Round-trip the global RAM and cache-RAM settings through Memory."""
     # Before overriding, the auto-detected defaults must be positive.
     assert Memory.getAvailableRam() > 0
     assert Memory.getAvailableRamCaches() > 0
     total = 47 * 1111
     Memory.setAvailableRam(total)
     assert Memory.getAvailableRam() == total
     caches = total // 3
     Memory.setAvailableRamCaches(caches)
     assert Memory.getAvailableRamCaches() == caches
예제 #4
0
 def testSettings(self):
     """Setting RAM / cache-RAM limits should be reflected by the getters."""
     assert Memory.getAvailableRam() > 0
     assert Memory.getAvailableRamCaches() > 0
     ram_limit = 47 * 1111
     cache_limit = ram_limit // 3
     Memory.setAvailableRam(ram_limit)
     assert Memory.getAvailableRam() == ram_limit
     Memory.setAvailableRamCaches(cache_limit)
     assert Memory.getAvailableRamCaches() == cache_limit
예제 #5
0
 def _configure_lazyflow_settings():
     """Apply thread-count and RAM-limit settings to lazyflow.

     ``n_threads`` and ``total_ram_mb`` come from the enclosing scope.
     """
     import lazyflow
     import lazyflow.request
     if n_threads is not None:
         logger.info("Resetting lazyflow thread pool with {} threads.".format(n_threads))
         lazyflow.request.Request.reset_thread_pool(n_threads)
     if total_ram_mb > 0:
         # Values below 500 almost certainly mean GB were given instead of MB.
         if total_ram_mb < 500:
             msg = ("In your current configuration, RAM is limited to {} MB."
                    "  Remember to specify RAM in MB, not GB.").format(total_ram_mb)
             raise Exception(msg)
         ram_bytes = total_ram_mb * 1024 ** 2
         logger.info("Configuring lazyflow RAM limit to {}".format(Memory.format(ram_bytes)))
         Memory.setAvailableRam(ram_bytes)
예제 #6
0
 def _configure_lazyflow_settings():
     """Push the requested thread count and RAM cap into lazyflow.

     Reads ``n_threads`` and ``total_ram_mb`` from the enclosing scope.
     """
     import lazyflow
     import lazyflow.request
     if n_threads is not None:
         logger.info(
             "Resetting lazyflow thread pool with {} threads.".format(n_threads))
         lazyflow.request.Request.reset_thread_pool(n_threads)
     if total_ram_mb > 0:
         if total_ram_mb < 500:
             # Such a small number strongly suggests a GB figure was
             # supplied where MB were expected.
             raise Exception(
                 "In your current configuration, RAM is limited to {} MB."
                 "  Remember to specify RAM in MB, not GB.".format(total_ram_mb))
         ram = total_ram_mb * 1024 ** 2
         fmt = Memory.format(ram)
         logger.info("Configuring lazyflow RAM limit to {}".format(fmt))
         Memory.setAvailableRam(ram)
예제 #7
0
 def tearDown(self):
     """Restore both memory limits after each test.

     -1 presumably tells Memory to fall back to its auto-detected
     defaults — confirm against the Memory implementation.
     """
     for restore in (Memory.setAvailableRam, Memory.setAvailableRamCaches):
         restore(-1)
예제 #8
0
 def setUp(self):
     """Start each test from the default memory configuration."""
     sentinel = -1  # presumably "use the auto-detected default" — confirm
     Memory.setAvailableRam(sentinel)
     Memory.setAvailableRamCaches(sentinel)
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        # Poll memory usage very frequently so the manager's cleanup
        # logic actually runs during this short test.
        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2  # number of spatial dimensions
        tags = "xy"

        # 999 = 3 * 333, so the volume tiles exactly into a 3x3 grid of blocks.
        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        # Feed the input through an access-counting pass-through operator so
        # we can measure how often the source data is (re)requested.
        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        # Stream the whole volume block by block through the pipeline.
        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)
        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
        np.testing.assert_equal(pipe.accessCount, 9)
예제 #10
0
 def teardown_method(self, method):
     """After each test, put both memory limits back to their defaults."""
     # -1 appears to restore the auto-detected defaults — confirm in Memory.
     for reset in (Memory.setAvailableRam, Memory.setAvailableRamCaches):
         reset(-1)
예제 #11
0
 def setup_method(self, method):
     """Make sure each test starts with default memory limits."""
     default = -1  # presumably the "use auto-detected default" sentinel
     Memory.setAvailableRam(default)
     Memory.setAvailableRamCaches(default)
예제 #12
0
 def teardown_method(self, method):
     """Reset the global and cache memory limits once the test is done."""
     reset_value = -1  # NOTE(review): looks like "restore default" — confirm
     Memory.setAvailableRam(reset_value)
     Memory.setAvailableRamCaches(reset_value)
예제 #13
0
 def setup_method(self, method):
     """Begin every test with the Memory limits at their defaults."""
     # -1 presumably means "auto-detect" — verify against Memory's docs.
     for setter in (Memory.setAvailableRam, Memory.setAvailableRamCaches):
         setter(-1)
예제 #14
0
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issue/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        # Very short refresh interval so cache cleanup runs during the test.
        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2  # spatial dimensionality of the test volume
        tags = "xy"

        # 999 = 3 * 333: the volume divides into exactly 3x3 = 9 blocks.
        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        # Source operator counts accesses, letting us assert below how
        # often the underlying data had to be re-read.
        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        # Process the full volume block-wise via the request streamer.
        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)

        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # in the worst case, we have 4*4 + 4*6 + 9 = 49 requests to pipe
        # in the best case, we have 9
        np.testing.assert_equal(pipe.accessCount, 9)
예제 #15
0
파일: testMemory.py 프로젝트: CVML/lazyflow
 def tearDown(self):
     """Undo any memory-limit changes a test made."""
     # -1 appears to be the "back to auto-detected default" sentinel.
     Memory.setAvailableRam(-1)
     Memory.setAvailableRamCaches(-1)
예제 #16
0
파일: testMemory.py 프로젝트: CVML/lazyflow
 def setUp(self):
     """Ensure tests never inherit limits from a previous test."""
     for configure in (Memory.setAvailableRam, Memory.setAvailableRamCaches):
         configure(-1)  # presumably restores the default — confirm