Example 1
import argparse
import logging
import sys

from ilastik.utility.simple_predict import load_and_predict
from lazyflow.request import Request

logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('grayscale', help='example: my-grayscale.h5/volume')
    parser.add_argument('classifier', help='example: my-file.h5/forest')
    parser.add_argument('filter_specs',
                        help='json file containing filter list')
    parser.add_argument('output_path',
                        help='example: my-predictions.h5/volume')
    parser.add_argument('--compute-blockwise',
                        help='Compute blockwise instead of as a whole',
                        action='store_true')
    parser.add_argument('--thread-count',
                        help='The threadpool size',
                        default=0,
                        type=int)
    args = parser.parse_args()

    # Show log messages on the console.
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    Request.reset_thread_pool(args.thread_count)

    load_and_predict(args.grayscale, args.classifier, args.filter_specs,
                     args.output_path, args.compute_blockwise)
    logger.info("DONE.")
Example 2
    def teardown_method(self, method):
        # reset max cache memory and the cleanup frequency to sane values
        Memory.setAvailableRamCaches(-1)
        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(default_refresh_interval)
        mgr.enable()
        Request.reset_thread_pool()
Example 3
    def teardown_method(self, method):
        # reset max cache memory and the cleanup frequency to sane values
        Memory.setAvailableRamCaches(-1)
        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(default_refresh_interval)
        mgr.enable()
        Request.reset_thread_pool()
Example 4
def test_RequestLock():
    assert Request.global_thread_pool.num_workers > 0, \
        "This test must be used with the real threadpool."

    lockA = RequestLock()
    lockB = RequestLock()

    def log_request_system_status():
        status = (
            "*************************\n" +
            'lockA.pending: {}\n'.format(len(lockA._pendingRequests)) +
            'lockB.pending: {}\n'.format(len(lockB._pendingRequests))
            #+ "suspended Requests: {}\n".format( len(Request.global_suspend_set) )
            + "global job queue: {}\n".format(
                len(Request.global_thread_pool.unassigned_tasks)))
        for worker in Request.global_thread_pool.workers:
            status += "{} queued tasks: {}\n".format(worker.name,
                                                     len(worker.job_queue))
        status += "*****************************************************"
        logger.debug(status)

    running = [True]

    def periodic_status():
        while running[0]:
            time.sleep(0.5)
            log_request_system_status()

    # Print periodic status in a background thread while the test runs.
    status_thread = threading.Thread(target=periodic_status)
    status_thread.daemon = True
    status_thread.start()

    try:
        _impl_test_lock(lockA, lockB, Request, 1000)
    except:
        log_request_system_status()
        running[0] = False
        status_thread.join()

        global paused
        paused = False

        Request.reset_thread_pool(Request.global_thread_pool.num_workers)

        if lockA.locked():
            lockA.release()
        if lockB.locked():
            lockB.release()

        raise

    log_request_system_status()
    running[0] = False
    status_thread.join()
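
For orientation, here is a minimal sketch of RequestLock in ordinary use (added commentary, assuming the usual lazyflow Request(fn).submit()/wait() API rather than anything shown on this page): the lock mirrors threading.Lock's acquire()/release() but cooperates with the request thread pool, so blocked requests do not stall pool workers.

from lazyflow.request import Request, RequestLock

lock = RequestLock()
total = [0]

def increment():
    # acquire()/release(), as exercised by the test above
    lock.acquire()
    try:
        total[0] += 1
    finally:
        lock.release()

requests = [Request(increment) for _ in range(100)]
for req in requests:
    req.submit()
for req in requests:
    req.wait()
assert total[0] == 100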
Example 5
    def setup(self):
        super(SkeletonAssociationProcess, self).setup()
        self.inner_logger.debug(
            'Setting up opPixelClassification and multicut_shell...')
        # todo: replace opPixelClassification with catpy tile-getter
        self.opPixelClassification, self.multicut_shell = setup_classifier_and_multicut(
            *self.setup_args)
        self.inner_logger.debug(
            'opPixelClassification and multicut_shell set up')

        Request.reset_thread_pool(1)
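
A note on the argument (inferred from the snippets on this page, e.g. the "do everything single threaded" comment in the testBadMemoryConditions examples below): reset_thread_pool(1) funnels all requests through one worker, which serializes execution for easier debugging, while calling it with no argument, as the teardown_method examples do, restores the default pool. A minimal sketch:

from lazyflow.request import Request

Request.reset_thread_pool(1)  # one worker: requests execute one at a time
# ... run the request-based code under test ...
Request.reset_thread_pool()   # restore the default pool size afterwards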
Example 6
def test_RequestLock():
    assert Request.global_thread_pool.num_workers > 0, "This test must be used with the real threadpool."

    lockA = RequestLock()
    lockB = RequestLock()

    def log_request_system_status():
        status = (
            "*************************\n"
            + "lockA.pending: {}\n".format(len(lockA._pendingRequests))
            + "lockB.pending: {}\n".format(len(lockB._pendingRequests))
            # + "suspended Requests: {}\n".format( len(Request.global_suspend_set) )
            + "global job queue: {}\n".format(len(Request.global_thread_pool.unassigned_tasks))
        )
        for worker in Request.global_thread_pool.workers:
            status += "{} queued tasks: {}\n".format(worker.name, len(worker.job_queue))
        status += "*****************************************************"
        logger.debug(status)

    running = [True]

    def periodic_status():
        while running[0]:
            time.sleep(0.5)
            log_request_system_status()

    # Print periodic status in a background thread while the test runs.
    status_thread = threading.Thread(target=periodic_status)
    status_thread.daemon = True
    status_thread.start()

    try:
        _impl_test_lock(lockA, lockB, Request, 1000)
    except:
        log_request_system_status()
        running[0] = False
        status_thread.join()

        global paused
        paused = False

        Request.reset_thread_pool(Request.global_thread_pool.num_workers)

        if lockA.locked():
            lockA.release()
        if lockB.locked():
            lockB.release()

        raise

    log_request_system_status()
    running[0] = False
    status_thread.join()
Example 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('grayscale', help='example: my-grayscale.h5/volume')
    parser.add_argument('classifier', help='example: my-file.h5/forest')
    parser.add_argument('filter_specs', help='json file containing filter list')
    parser.add_argument('output_path', help='example: my-predictions.h5/volume')
    parser.add_argument('--compute-blockwise', help='Compute blockwise instead of as a whole', action='store_true')
    parser.add_argument('--thread-count', help='The threadpool size', default=0, type=int)
    args = parser.parse_args()

    # Show log messages on the console.
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler(sys.stdout))

    Request.reset_thread_pool(args.thread_count)
    
    load_and_predict(args.grayscale, args.classifier, args.filter_specs, args.output_path, args.compute_blockwise)
    logger.info("DONE.")
Example 8
def ilastik_simple_predict(gray_vol, mask, classifier_path, filter_specs_path, selected_channels=None, normalize=True,
                           LAZYFLOW_THREADS=0, LAZYFLOW_TOTAL_RAM_MB=None, logfile="/dev/null"):
    """
    gray_vol: A 3D numpy array with axes zyx

    mask: A binary image where 0 means "no prediction necessary".
         'None' can be given, which means "predict everything".

    classifier_path: Path to a vigra RandomForest classifier, in HDF5.
                     Example: /path/to/myclassifier.h5/classifiers/my_rf

    filter_specs_path: Path to "filter specs" json file.  The json structure is like this:
                       [ ['GaussianSmoothing', 0.3],
                         ['GaussianSmoothing', 0.7],
                         ['LaplacianOfGaussian', 1.6] ]
                       (See ilastik's simple_predict.py for valid filter names.)
     
    selected_channels: A list of channel indexes to select and return from the prediction results.
                       'None' can also be given, which means "return all prediction channels".
                       You may also pass a *nested* list, in which case groups of channels can be
                       combined (summed) into their respective output channels.
                       For example: selected_channels=[0,3,[2,4],7] means the output will have 4 channels:
                                    0,3,2+4,7 (channels 5 and 6 are simply dropped).
    
    normalize: Renormalize all outputs so the channels sum to 1 everywhere.
               That is, (predictions.sum(axis=-1) == 1.0).all()
               Note: Pixels with 0.0 in all channels will be simply given a value of 1/N in all channels.
    
    LAZYFLOW_THREADS, LAZYFLOW_TOTAL_RAM_MB: Same meanings as in ilastik_predict_with_array().
                      (although we have to configure them in a different way)
    """
    print("ilastik_simple_predict(): Starting with raw data: dtype={}, shape={}".format(str(gray_vol.dtype), gray_vol.shape))

    import os
    import uuid
    import platform

    import vigra

    from ilastik.utility.simple_predict import load_and_predict
    from lazyflow.request import Request
    
    print("ilastik_simple_predict(): Done with imports")

    _prepare_lazyflow_config(LAZYFLOW_THREADS, LAZYFLOW_TOTAL_RAM_MB, 10)

    Request.reset_thread_pool(LAZYFLOW_THREADS)

    # The process_name argument is prefixed to all log messages.
    # For now, just use the machine name and a uuid
    # FIXME: It would be nice to provide something more descriptive, like the ROI of the current spark job...
    process_name = platform.node() + "-" + str(uuid.uuid1())

    # To avoid conflicts between processes, give each process its own logfile to write to.
    if logfile != "/dev/null":
        base, ext = os.path.splitext(logfile)
        logfile = base + '.' + process_name + ext

    _init_logging(logfile, process_name)
    
    # Tag the raw data with zyx axes so downstream operators know the axis order.
    raw_data_array = vigra.taggedView(gray_vol, 'zyx')
    print("ilastik_simple_predict(): Starting export...")

    predictions = load_and_predict(raw_data_array, classifier_path, filter_specs_path, compute_blockwise=True)
    selected_predictions = select_channels(predictions, selected_channels)

    if normalize:
        normalize_channels_in_place(selected_predictions)
    
    return selected_predictions
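
The nested-list semantics of selected_channels described in the docstring can be illustrated with a small self-contained sketch (an illustration of the documented behavior, not ilastik's actual select_channels implementation):

import numpy as np

def select_channels_sketch(predictions, selected_channels):
    # Channels live on the last axis. A nested entry such as [2, 4] sums
    # those channels into a single output channel; plain ints pass through.
    if selected_channels is None:
        return predictions
    outputs = []
    for sel in selected_channels:
        if isinstance(sel, list):
            outputs.append(predictions[..., sel].sum(axis=-1))
        else:
            outputs.append(predictions[..., sel])
    return np.stack(outputs, axis=-1)

preds = np.random.rand(4, 4, 8)                 # 8 prediction channels
out = select_channels_sketch(preds, [0, 3, [2, 4], 7])
assert out.shape == (4, 4, 4)                   # channels 0, 3, 2+4, 7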
Example 9
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issues/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = _CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)
        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # In the worst case, every enlarged request recomputes its whole block
        # neighborhood: 4 corners * 4 blocks + 4 edges * 6 blocks + 1 center *
        # 9 blocks = 49 requests to pipe. In the best case, each of the 9
        # blocks is computed exactly once.
        np.testing.assert_equal(pipe.accessCount, 9)
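
To make the memory budget in this test concrete, here are the numbers recomputed from its constants (added commentary, derived directly from the code above):

import numpy as np

shape = (999, 999)
blockshape = (333, 333)
cacheMem = np.prod(shape)             # 998001 uint8 bytes: the whole volume
computeMem = np.prod(blockshape) * 2  # 221778: one block times fudge factor 2
total = int(computeMem + cacheMem)    # 1219779 bytes passed to setAvailableRam
blocks = (shape[0] // blockshape[0]) ** 2  # 9 blocks in the 3x3 grid
print(total, blocks)                  # 1219779 9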
Example 10
    def setup(self):
        super(SynapseDetectionProcess, self).setup()
        self.opPixelClassification = setup_classifier(*self.setup_args)
        Request.reset_thread_pool(0)  # todo: set to 0?
Example 11
    def testBadMemoryConditions(self):
        """
        TestCacheMemoryManager.testBadMemoryConditions

        This test is a proof of the proposition in
            https://github.com/ilastik/lazyflow/issues/185
        which states that, given certain memory constraints, the cache
        cleanup strategy in use is inefficient. An advanced strategy
        should pass the test.
        """

        mgr = CacheMemoryManager()
        mgr.setRefreshInterval(0.01)
        mgr.enable()

        d = 2
        tags = "xy"

        shape = (999,) * d
        blockshape = (333,) * d

        # restrict memory for computation to one block (including fudge
        # factor 2 of bigRequestStreamer)
        cacheMem = np.prod(shape)
        Memory.setAvailableRam(np.prod(blockshape) * 2 + cacheMem)

        # restrict cache memory to the whole volume
        Memory.setAvailableRamCaches(cacheMem)

        # to ease observation, do everything single threaded
        Request.reset_thread_pool(num_workers=1)

        x = np.zeros(shape, dtype=np.uint8)
        x = vigra.taggedView(x, axistags=tags)

        g = Graph()
        pipe = OpArrayPiperWithAccessCount(graph=g)
        pipe.Input.setValue(x)
        pipe.Output.meta.ideal_blockshape = blockshape

        # simulate BlockedArrayCache behaviour without caching
        # cache = OpSplitRequestsBlockwise(True, graph=g)
        # cache.BlockShape.setValue(blockshape)
        # cache.Input.connect(pipe.Output)

        cache = OpBlockedArrayCache(graph=g)
        cache.Input.connect(pipe.Output)
        cache.BlockShape.setValue(blockshape)

        op = OpEnlarge(graph=g)
        op.Input.connect(cache.Output)

        split = OpSplitRequestsBlockwise(True, graph=g)
        split.BlockShape.setValue(blockshape)
        split.Input.connect(op.Output)

        streamer = BigRequestStreamer(split.Output, [(0,) * len(shape), shape])
        streamer.execute()

        # In the worst case, every enlarged request recomputes its whole block
        # neighborhood: 4 corners * 4 blocks + 4 edges * 6 blocks + 1 center *
        # 9 blocks = 49 requests to pipe. In the best case, each of the 9
        # blocks is computed exactly once.
        np.testing.assert_equal(pipe.accessCount, 9)