Code Example #1
File: testRequest.py  Project: thatcher/lazyflow
    def test_basic(self):
        def someWork():
            time.sleep(0.001)
            #print "producer finished"

        def callback(s):
            pass

        def test(s):
            req = Request(someWork)
            req.notify(callback)
            req.wait()
            time.sleep(0.001)
            print s
            return s

        req = Request(test, s="hallo !")
        req.notify(callback)
        assert req.wait() == "hallo !"

        requests = []
        for i in range(10):
            req = Request(test, s="hallo %d" % i)
            requests.append(req)

        for r in requests:
            r.wait()
Code Example #2
    def test_pause_unpause(self):
        handlerCounter = [0]
        handlerLock = threading.Lock()
        
        def completionHandler( result, req ):
            handlerLock.acquire()
            handlerCounter[0] += 1
            handlerLock.release()

        requestCounter = [0]
        requestLock = threading.Lock()            
        allRequests = []
        # This closure randomly chooses to either (a) return immediately or (b) fire off more work
        def someWork(depth, force=False, i=-1):
            #print 'depth=', depth, 'i=', i
            if depth > 0 and (force or random.random() > 0.8):
                requests = []
                for i in range(10):
                    req = Request(someWork, depth=depth-1, i=i)
                    req.notify(completionHandler, req=req)
                    requests.append(req)
                    allRequests.append(req)
                    
                    requestLock.acquire()
                    requestCounter[0] += 1
                    requestLock.release()

                for r in requests:
                    r.wait()

        req = Request(someWork, depth=6, force=True)

        def blubb(req):
            pass

        req.notify(blubb)
        global_thread_pool.pause()
        req2 = Request(someWork, depth=6, force=True)
        req2.notify(blubb)
        global_thread_pool.unpause()
        assert req2.finished == False
        assert req.finished
        req.wait()

        
        # Handler should have been called once for each request we fired
        assert handlerCounter[0] == requestCounter[0]

        print "finished pause_unpause"
        
        for r in allRequests:
            assert r.finished

        print "waited for all subrequests"
Code Example #3
    def _triggerTableUpdate(self):
        # Check that object area is included in selected features
        featureNames = self.topLevelOperatorView.SelectedFeatures.value

        if 'Standard Object Features' not in featureNames or 'Count' not in featureNames[
                'Standard Object Features']:
            box = QMessageBox(
                QMessageBox.Warning, 'Warning',
                'Object area is not a selected feature. Please select this feature on: \"Standard Object Features > Shape > Size in pixels\"',
                QMessageBox.NoButton, self)
            box.show()
            return

        # Clear table
        self.table.clearContents()
        self.table.setRowCount(0)
        self.table.setSortingEnabled(False)
        self.progressBar.show()
        self.computeButton.setEnabled(False)

        # Compute object features and number of labels per frame
        def compute_features():
            features = self.topLevelOperatorView.ObjectFeatures([]).wait()
            labels = self.topLevelOperatorView.LabelInputs([]).wait()
            return features, labels

        req = Request(compute_features)
        req.notify_finished(self._populateTable)
        req.submit()
Code Example #4
    def execute(self, slot, subindex, roi, result):
        assert len(roi.start) == len(roi.stop) == len(self.Output.meta.shape)
        assert slot == self.Output

        t_ind = self.RawVolume.meta.axistags.index('t')
        assert t_ind < len(self.RawVolume.meta.shape)

        def compute_features_for_time_slice(res_t_ind, t):
            axes4d = [
                k for k in self.RawVolume.meta.getTaggedShape().keys()
                if k in 'xyzc'
            ]

            # Process entire spatial volume
            s = [slice(None)] * len(self.RawVolume.meta.shape)
            s[t_ind] = slice(t, t + 1)
            s = tuple(s)

            # Request in parallel
            raw_req = self.RawVolume[s]
            raw_req.submit()

            label_req = self.LabelVolume[s]
            label_req.submit()

            if self.Atlas.ready():
                atlasVolume = self.Atlas[s].wait()
                atlasVolume = vigra.taggedView(
                    atlasVolume, axistags=self.Atlas.meta.axistags)
                atlasVolume = atlasVolume.withAxes(*axes4d)
            else:
                atlasVolume = None

            # Get results
            rawVolume = raw_req.wait()
            labelVolume = label_req.wait()

            rawVolume = vigra.taggedView(rawVolume,
                                         axistags=self.RawVolume.meta.axistags)
            labelVolume = vigra.taggedView(
                labelVolume, axistags=self.LabelVolume.meta.axistags)

            # Convert to 4D (preserve axis order)
            rawVolume = rawVolume.withAxes(*axes4d)
            labelVolume = labelVolume.withAxes(*axes4d)
            acc = self._extract(rawVolume, labelVolume, atlasVolume)

            # Copy into the result
            result[res_t_ind] = acc

        # loop over requested time slices
        pool = RequestPool()
        for res_t_ind, t in enumerate(range(roi.start[t_ind],
                                            roi.stop[t_ind])):
            pool.add(
                Request(partial(compute_features_for_time_slice, res_t_ind,
                                t)))

        pool.wait()
        return result
Code Example #5
    def execute(self, slot, subindex, roi, result):
        def compute_for_channel(output_channel, input_channel):
            input_roi = numpy.array((roi.start, roi.stop))
            input_roi[:, -1] = (input_channel, input_channel + 1)
            input_req = self.Input(*input_roi)

            # If possible, use the result array itself as a scratch area
            if self.Input.meta.dtype == result.dtype:
                input_req.writeInto(result[...,
                                           output_channel:output_channel + 1])

            input_data = input_req.wait()
            input_data = input_data.astype(numpy.float32,
                                           order='C',
                                           copy=False)
            input_data = input_data[..., 0]  # drop channel axis
            result[..., output_channel] = computeIntegralImage(input_data)

        pool = RequestPool()
        for output_channel, input_channel in enumerate(
                range(roi.start[-1], roi.stop[-1])):
            pool.add(
                Request(
                    partial(compute_for_channel, output_channel,
                            input_channel)))
        pool.wait()
Code Example #6
    def _triggerTableUpdate(self):
        # Check that object area is included in selected features
        featureNames = self.topLevelOperatorView.SelectedFeatures.value

        if 'Standard Object Features' not in featureNames or 'Count' not in featureNames[
                'Standard Object Features']:
            box = QMessageBox(
                QMessageBox.Warning, 'Warning',
                'Object area is not a selected feature. Please select this feature on: \"Standard Object Features > Shape > Size in pixels\"',
                QMessageBox.NoButton, self)
            box.show()
            return

        # Clear table
        self.table.clearContents()
        self.table.setRowCount(0)
        self.table.setSortingEnabled(False)
        self.progressBar.show()
        self.computeButton.setEnabled(False)

        def compute_features_for_frame(tIndex, t, features):
            # Compute features and labels (called in parallel from request pool)
            roi = [
                slice(None) for i in range(
                    len(self.topLevelOperatorView.LabelImages.meta.shape))
            ]
            roi[tIndex] = slice(t, t + 1)
            roi = tuple(roi)

            frame = self.topLevelOperatorView.SegmentationImages(roi).wait()
            frame = frame.squeeze().astype(numpy.uint32, copy=False)

            # Dirty trick: We don't care what we're passing here for the 'image' parameter,
            # but vigra insists that we pass *something*, so we'll cast the label image as float32.
            features[t] = vigra.analysis.extractRegionFeatures(
                frame.view(numpy.float32), frame, ['Count'], ignoreLabel=0)

        tIndex = self.topLevelOperatorView.SegmentationImages.meta.axistags.index(
            't')
        tMax = self.topLevelOperatorView.SegmentationImages.meta.shape[tIndex]

        features = {}
        labels = {}

        def compute_all_features():
            # Compute features in parallel
            pool = RequestPool()
            for t in range(tMax):
                pool.add(
                    Request(
                        partial(compute_features_for_frame, tIndex, t,
                                features)))
            pool.wait()

        # Compute labels
        labels = self.topLevelOperatorView.LabelInputs([]).wait()

        req = Request(compute_all_features)
        req.notify_finished(partial(self._populateTable, features, labels))
        req.submit()
Code Example #7
    def test_withH5Py(self):
        """
        We have suspicions that greenlet and h5py don't interact well with each other.
        This tests basic functionality.
        TODO: Expand it for better coverage.
        """
        maxDepth = 5
        maxBreadth = 10

        filename = 'requestTest.h5'
        h5File = h5py.File( filename, 'w' )
        dataset = h5File.create_dataset( 'test/data', data=numpy.zeros( (maxDepth, maxBreadth), dtype=int ))

        def writeToH5Py(result, index, req):
            dataset[index] += 1

        # This closure randomly chooses to either (a) return immediately or (b) fire off more work
        def someWork(depth, force=False, i=0):
            #print 'depth=', depth, 'i=', i
            if depth > 0 and (force or random.random() > 0.5):
                requests = []
                for i in range(maxBreadth):
                    req = Request(someWork, depth=depth-1, i=i)
                    req.notify(writeToH5Py, index=(depth-1, i), req=req)
                    requests.append(req)

                for r in requests:
                    r.wait()

        req = Request(someWork, depth=maxDepth, force=True)
        req.wait()
        h5File.close()

        print "finished testWithH5Py"
        os.remove(filename)
Code Example #8
    def run_export(self):
        role_names = self.parentApplet.dataSelectionApplet.role_names

        # Prepare file lists, keyed by role name
        role_inputs = {
            role_name: self._data_role_widgets[role_name].filepaths
            for role_name in role_names
        }
        if all(len(role_inp) == 0 for role_inp in role_inputs.values()):
            return

        # Run the export in a separate thread
        lane_configs = self.parentApplet.dataSelectionApplet.create_lane_configs(
            role_inputs=role_inputs)

        export_req = Request(
            partial(self.parentApplet.run_export, lane_configs=lane_configs))
        export_req.notify_failed(self.handle_batch_processing_failure)
        export_req.notify_finished(self.handle_batch_processing_finished)
        export_req.notify_cancelled(self.handle_batch_processing_cancelled)
        self.export_req = export_req

        self.parentApplet.busy = True
        self.parentApplet.appletStateUpdateRequested()
        self.cancel_button.setVisible(True)
        self.run_button.setEnabled(False)

        # Start the export
        export_req.submit()
Code Example #9
    def create_and_train(self, X, y):
        logger.debug("Training parallel vigra RF")
        # Save for future reference
        known_labels = numpy.unique(y)

        X = numpy.asarray(X, numpy.float32)
        y = numpy.asarray(y, numpy.uint32)
        if y.ndim == 1:
            y = y[:, numpy.newaxis]

        assert X.ndim == 2
        assert len(X) == len(y)

        # Create N forests
        forests = []
        for _ in range(self._num_forests):
            forest = vigra.learning.RandomForest(self._trees_per_forest,
                                                 **self._kwargs)
            forests.append(forest)

        # Train them all in parallel
        pool = RequestPool()
        for forest in forests:
            pool.add(Request(partial(forest.learnRF, X, y)))
        pool.wait()

        return ParallelVigraRfLazyflowClassifier(forests, known_labels)
Code Example #10
    def predict_probabilities(self, X):
        logger.debug("Predicting with parallel vigra RF")
        X = numpy.asarray(X, dtype=numpy.float32)
        assert X.ndim == 2

        if self._feature_names is not None:
            # For some reason, vigra doesn't seem to check this for us...
            assert X.shape[1] == len(self._feature_names), \
                "Feature count doesn't match the training data."

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
        prediction_lock = RequestLock()

        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request(partial(forest.predictProbabilities, X))
            req.notify_finished(partial(update_predictions, forest))
            pool.add(req)
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Code Example #11
    def _label(self, roi, result):
        result = vigra.taggedView(result, axistags=self.Output.meta.axistags)
        # get the background values
        bg = self.Background[...].wait()
        bg = vigra.taggedView(bg, axistags=self.Background.meta.axistags)
        bg = bg.withAxes(*"ct")
        assert np.all(
            self.Background.meta.shape[0] == self.Input.meta.shape[0]
        ), "Shape of background values incompatible with shape of Input"
        assert np.all(
            self.Background.meta.shape[4] == self.Input.meta.shape[4]
        ), "Shape of background values incompatible with shape of Input"

        # do labeling in parallel over channels and time slices
        pool = RequestPool()

        start = np.asarray(roi.start, dtype=np.int)
        stop = np.asarray(roi.stop, dtype=np.int)
        for ti, t in enumerate(range(roi.start[0], roi.stop[0])):
            start[0], stop[0] = t, t + 1
            for ci, c in enumerate(range(roi.start[4], roi.stop[4])):
                start[4], stop[4] = c, c + 1
                newRoi = SubRegion(self.Output, start=tuple(start), stop=tuple(stop))
                resView = result[ti, ..., ci].withAxes(*"xyz")
                req = Request(partial(self._label3d, newRoi, bg[c, t], resView))
                pool.add(req)

        logger.debug("{}: Computing connected components for ROI {} ...".format(self.name, roi))
        pool.wait()
        pool.clean()
        logger.debug("{}: Connected components computed.".format(self.name))
Code Example #12
File: testRequest.py  Project: thatcher/lazyflow
    def test(s):
        req = Request(someWork)
        req.notify(callback)
        req.wait()
        time.sleep(0.001)
        print s
        return s
Code Example #13
    def predict_probabilities(self, X):
        logger.debug( "Predicting with parallel vigra RF" )
        X = numpy.asarray(X, dtype=numpy.float32)

        # As each forest completes, aggregate results in a shared array.
        # (Must put in a list so we can update it in this closure.)
        total_predictions = [None]
        prediction_lock = RequestLock()
        def update_predictions(forest, forest_predictions):
            forest_predictions *= forest.treeCount()
            with prediction_lock:
                if total_predictions[0] is None:
                    total_predictions[0] = forest_predictions
                else:
                    total_predictions[0] += forest_predictions

        # Create a request for each forest
        pool = RequestPool()
        for forest in self._forests:
            req = Request( partial( forest.predictProbabilities, X ) )
            req.notify_finished( partial(update_predictions, forest) )
            pool.add( req )
        del req
        pool.wait()

        total_predictions[0] /= self._num_trees
        return total_predictions[0]
Code Example #14
        def export(self, filename, hypothesesGraph, objectFeaturesSlot,
                   labelImageSlot, rawImageSlot):
            """Export the tracking solution stored in the hypotheses graph as a sequence of H5 files,
            one per frame, containing the label image of that frame and which objects were part
            of a move or a division.
    
            :param filename: string of the FOLDER where to save the result
            :param hypothesesGraph: hytra.core.hypothesesgraph.HypothesesGraph filled with a solution
            :param objectFeaturesSlot: lazyflow.graph.InputSlot, connected to the RegionFeaturesAll output 
                   of ilastik.applets.trackingFeatureExtraction.opTrackingFeatureExtraction.OpTrackingFeatureExtraction
            
            :returns: True on success, False otherwise
            """
            traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hypothesesGraph.getMappingsBetweenUUIDsAndTraxels(
            )
            timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]

            result = hypothesesGraph.getSolutionDictionary()
            mergers, detections, links, divisions = getMergersDetectionsLinksDivisions(
                result, uuidToTraxelMap)

            # group by timestep for event creation
            mergersPerTimestep = getMergersPerTimestep(mergers, timesteps)
            linksPerTimestep = getLinksPerTimestep(links, timesteps)
            detectionsPerTimestep = getDetectionsPerTimestep(
                detections, timesteps)
            divisionsPerTimestep = getDivisionsPerTimestep(
                divisions, linksPerTimestep, timesteps)

            # save to disk in parallel
            pool = RequestPool()

            timeIndex = labelImageSlot.meta.axistags.index('t')

            for timestep in traxelIdPerTimestepToUniqueIdMap.keys():
                # extract current frame label image
                roi = [
                    slice(None) for i in range(len(labelImageSlot.meta.shape))
                ]
                roi[timeIndex] = slice(int(timestep), int(timestep) + 1)
                roi = tuple(roi)
                labelImage = labelImageSlot[roi].wait()

                if not os.path.exists(filename + '/H5-Event-Sequence'):
                    os.makedirs(filename + '/H5-Event-Sequence')
                fn = os.path.join(
                    filename,
                    "H5-Event-Sequence/{0:05d}.h5".format(int(timestep)))
                pool.add(
                    Request(
                        partial(writeEvents, int(timestep),
                                linksPerTimestep[timestep],
                                divisionsPerTimestep[timestep],
                                mergersPerTimestep[timestep],
                                detectionsPerTimestep[timestep], fn,
                                labelImage)))
            pool.wait()

            return True
Code Example #15
File: simple_predict.py  Project: yutiansut/ilastik
def execute_tasks(tasks):
    """
    Executes the given list of tasks (functions) in the lazyflow threadpool.
    """
    pool = RequestPool()
    for task in tasks:
        pool.add(Request(task))
    pool.wait()
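
A minimal usage sketch for execute_tasks above, assuming a hypothetical process_slice helper (not part of the original file); each task must be a zero-argument callable, so functools.partial is used to bind arguments before handing the tasks to the pool:

from functools import partial

def process_slice(i):
    # Placeholder for real per-slice work.
    print("processing slice", i)

# Build ten zero-argument tasks and run them in the lazyflow threadpool.
execute_tasks([partial(process_slice, i) for i in range(10)])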
Code Example #16
def lots_of_work():
    requests = []
    for i in range(mcount):
        req = Request(functools.partial(empty_func, b=11))
        req.submit()
        # Keep a handle to each request so we can wait for all of them below.
        requests.append(req)

    for r in requests:
        r.wait()
Code Example #17
File: tiling.py  Project: experimentAccount2/volumina
def submit_to_threadpool(fn, priority):
    if USE_LAZYFLOW_THREADPOOL:
        # Tiling requests are less prioritized than most requests.
        root_priority = [1] + list(priority)
        req = Request(fn, root_priority)
        req.submit()
    else:
        get_render_pool().submit(fn, priority)
Code Example #18
    def _resolveMergers(self, hypothesesGraph, model):
        '''
        run merger resolution on the hypotheses graph which contains the current solution
        '''
        logger.info("Resolving mergers.")
                
        parameters = self.Parameters.value
        withTracklets = parameters['withTracklets']
        originalGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
        resolvedMergersDict = {}
        
        # Enable full graph computation for animal tracking workflow
        withFullGraph = False
        if 'withAnimalTracking' in parameters and parameters['withAnimalTracking']: # TODO: Setting this parameter outside of the track() function (on AnimalConservationTrackingWorkflow) is not desirable 
            withFullGraph = True
            logger.info("Computing full graph on merger resolver (Only enabled on animal tracking workflow)")
        
        mergerResolver = IlastikMergerResolver(originalGraph, pluginPaths=self.pluginPaths, withFullGraph=withFullGraph)
        
        # Check if graph contains mergers, otherwise skip merger resolving
        if not mergerResolver.mergerNum:
            logger.info("Graph contains no mergers. Skipping merger resolving.")
        else:        
            # Fit and refine merger nodes using a GMM.
            # This has to be done per time-step in order to avoid loading the whole video into RAM.
            traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = getMappingsBetweenUUIDsAndTraxels(model)
            timesteps = [int(t) for t in traxelIdPerTimestepToUniqueIdMap.keys()]
            timesteps.sort()
            
            timeIndex = self.LabelImage.meta.axistags.index('t')
            
            for timestep in timesteps:
                roi = [slice(None) for i in range(len(self.LabelImage.meta.shape))]
                roi[timeIndex] = slice(timestep, timestep+1)
                roi = tuple(roi)
                
                labelImage = self.LabelImage[roi].wait()
                
                # Get coordinates for object IDs in label image. Used by GMM merger fit.
                objectIds = vigra.analysis.unique(labelImage[0,...,0])
                maxObjectId = max(objectIds)
                
                coordinatesForIds = {}
                
                pool = RequestPool()
                for objectId in objectIds:
                    pool.add(Request(partial(mergerResolver.getCoordinatesForObjectId, coordinatesForIds, labelImage[0, ..., 0], timestep, objectId)))                 

                # Run requests to get object ID coordinates
                pool.wait()              
                
                # Fit mergers and store fit info in nodes  
                if coordinatesForIds:
                    mergerResolver.fitAndRefineNodesForTimestep(coordinatesForIds, maxObjectId, timestep)   
                
            # Compute object features, re-run flow solver, update model and result, and get merger dictionary
            resolvedMergersDict = mergerResolver.run()
        return resolvedMergersDict
Code Example #19
    def replaceWithStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted:
            return
        files = stackDlg.selectedFiles
        if len(files) == 0:
            return

        info = DatasetInfo()
        info.filePath = "//".join(files)
        prefix = os.path.commonprefix(files)
        info.nickname = PathComponents(prefix).filenameBase
        # Add an underscore for each wildcard digit
        num_wildcards = len(files[-1]) - len(prefix) - len(
            os.path.splitext(files[-1])[1])
        info.nickname += "_" * num_wildcards

        # Allow labels by default if this gui isn't being used for batch data.
        info.allowLabels = (self.guiMode == GuiMode.Normal)
        info.fromstack = True

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None:
            laneIndex = self._findFirstEmptyLane(roleIndex)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex + 1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex + 1)

        def importStack():
            self.guiControlSignal.emit(ControlCommand.DisableAll)
            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset(info)
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][
                        roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.handleDatasetConstraintError(info, filename, ex,
                                                      roleIndex, laneIndex,
                                                      return_val)
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(
                            originalNumLanes)
            finally:
                self.guiControlSignal.emit(ControlCommand.Pop)

        req = Request(importStack)
        req.notify_failed(
            partial(self.handleFailedStackLoad, files, originalNumLanes))
        req.submit()
Code Example #20
    def read(self, view_roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
             roi should be relative to the view
        """
        output_axes = self.description.output_axes
        roi_transposed = list(zip(*view_roi))
        roi_dict = dict(list(zip(output_axes, roi_transposed)))
        view_roi = list(zip(*(roi_dict["z"], roi_dict["y"], roi_dict["x"])))

        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*"zyx")

        assert numpy.array(view_roi).shape == (2, 3), "Invalid roi for 3D volume: {}".format(view_roi)
        view_roi = numpy.array(view_roi)
        assert (result_out.shape == (view_roi[1] - view_roi[0])).all()

        # User gave roi according to the view output.
        # Now offset it to find the global roi.
        roi = view_roi + self.description.view_origin_zyx

        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks(tile_blockshape, roi)

        # Debug toggle: set to False to fetch tiles serially instead of via the request pool.
        PARALLEL_REQ = True

        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds(self.description.bounds_zyx, tile_blockshape, tile_start)
            tile_roi_in = numpy.array(tile_roi_in)

            # This tile's portion of the roi
            intersecting_roi = getIntersection(roi, tile_roi_in)
            intersecting_roi = numpy.array(intersecting_roi)

            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]

            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]

            rest_args = self._get_rest_args(tile_blockshape, tile_roi_in)
            if self.description.tile_url_format.startswith("http"):
                retrieval_fn = partial(self._retrieve_remote_tile, rest_args, tile_relative_intersection, result_region)
            else:
                retrieval_fn = partial(self._retrieve_local_tile, rest_args, tile_relative_intersection, result_region)

            if PARALLEL_REQ:
                pool.add(Request(retrieval_fn))
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()

        if PARALLEL_REQ:
            with Timer() as timer:
                pool.wait()
            logger.info("Loading {} tiles took a total of {}".format(len(tile_starts), timer.seconds()))
Code Example #21
    def compute_all_features():
        # Compute features in parallel
        pool = RequestPool()
        for t in range(tMax):
            pool.add(
                Request(
                    partial(compute_features_for_frame, tIndex, t,
                            features)))
        pool.wait()
Code Example #22
File: carvingGui.py  Project: wolny/ilastik
    def _exportMeshes(self, object_names: List[str], obj_filepaths: List[str]) -> Request:
        """Save objects in the mst to .obj files
        
        Args:
            object_names: Names of the objects in the mst
            obj_filepaths: One path for each object in object_names
        
        Returns:
            Returns the request object, used in testing
        """

        def get_label_volume_from_mst(mst, object_name):
            object_supervoxels = mst.object_lut[object_name]
            object_lut = numpy.zeros(mst.nodeNum+1, dtype=numpy.int32)
            object_lut[object_supervoxels] = 1
            supervoxel_volume = mst.supervoxelUint32
            object_volume = object_lut[supervoxel_volume]
            return object_volume

        mst = self.topLevelOperatorView.MST.value

        def exportMeshes(object_names, obj_filepaths):
            n_objects = len(object_names)
            progress_update = 100 / n_objects
            try:
                for obj, obj_path, obj_n in zip(object_names, obj_filepaths, range(n_objects)):
                    object_volume = get_label_volume_from_mst(mst, obj)
                    unique_ids = len(numpy.unique(object_volume))

                    if unique_ids <= 1:
                        logger.info(f"No voxels found for {obj}, skipping")
                        continue
                    elif unique_ids > 2:
                        logger.info(f"Supervoxel segmentation not unique for {obj}, skipping, got {unique_ids}")
                        continue

                    logger.info(f"Generating mesh for {obj}")
                    _, mesh_data = list(labeling_to_mesh(object_volume, [1]))[0]
                    self.parentApplet.progressSignal((obj_n + .5) * progress_update)
                    logger.info(f"Mesh generation for {obj} complete.")

                    logger.info(f"Saving mesh for {obj} to {obj_path}")
                    mesh_to_obj(mesh_data, obj_path, obj)
                    self.parentApplet.progressSignal((obj_n + 1) * progress_update)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.progressSignal(100)
                self.parentApplet.appletStateUpdateRequested()

        self.parentApplet.busy = True
        self.parentApplet.progressSignal(-1)
        self.parentApplet.appletStateUpdateRequested()

        req = Request(partial(exportMeshes, object_names, obj_filepaths))
        req.submit()
        return req
Code Example #23
    def addStack(self, roleIndex, laneIndex):
        """
        The user clicked the "Import Stack Files" button.
        """
        stackDlg = StackFileSelectionWidget(self)
        stackDlg.exec_()
        if stackDlg.result() != QDialog.Accepted:
            return
        files = stackDlg.selectedFiles
        sequence_axis = stackDlg.sequence_axis
        if len(files) == 0:
            return

        cwd = self.topLevelOperator.WorkingDirectory.value
        info = DatasetInfo(os.path.pathsep.join(files), cwd=cwd)

        originalNumLanes = len(self.topLevelOperator.DatasetGroup)

        if laneIndex is None or laneIndex == -1:
            laneIndex = len(self.topLevelOperator.DatasetGroup)
        if len(self.topLevelOperator.DatasetGroup) < laneIndex + 1:
            self.topLevelOperator.DatasetGroup.resize(laneIndex + 1)

        def importStack():
            self.parentApplet.busy = True
            self.parentApplet.appletStateUpdateRequested()

            # Serializer will update the operator for us, which will propagate to the GUI.
            try:
                self.serializer.importStackAsLocalDataset(info, sequence_axis)
                try:
                    self.topLevelOperator.DatasetGroup[laneIndex][
                        roleIndex].setValue(info)
                except DatasetConstraintError as ex:
                    # Give the user a chance to repair the problem.
                    filename = files[0] + "\n...\n" + files[-1]
                    return_val = [False]
                    self.parentApplet.busy = False  # required for possible fixing dialogs from DatasetConstraintError
                    self.parentApplet.appletStateUpdateRequested()
                    self.handleDatasetConstraintError(info, filename, ex,
                                                      roleIndex, laneIndex,
                                                      return_val)
                    if not return_val[0]:
                        # Not successfully repaired.  Roll back the changes and give up.
                        self.topLevelOperator.DatasetGroup.resize(
                            originalNumLanes)
            finally:
                self.parentApplet.busy = False
                self.parentApplet.appletStateUpdateRequested()

        req = Request(importStack)
        req.notify_finished(
            lambda result: self.showDataset(laneIndex, roleIndex))
        req.notify_failed(
            partial(self.handleFailedStackLoad, files, originalNumLanes))
        req.submit()
Code Example #24
    def testBasic(self):
        """
        Test the SimpleRequestCondition, which is like threading.Condition, but with a subset of the functionality.
        (See the docs for details.)
        """
        # num_workers = Request.global_thread_pool.num_workers
        # Request.reset_thread_pool(num_workers=1)
        N_ELEMENTS = 100

        # It's tempting to simply use threading.Condition here,
        #  but that doesn't quite work if the thread calling wait() is also a worker thread.
        # (threading.Condition uses threading.Lock() as its 'waiter' lock, which blocks the entire worker.)
        # cond = threading.Condition( RequestLock() )
        cond = SimpleRequestCondition()

        produced = []
        consumed = []

        def wait_for_all():
            def f(i):
                time.sleep(0.2 * random.random())
                with cond:
                    produced.append(i)
                    cond.notify()

            reqs = []
            for i in range(N_ELEMENTS):
                req = Request(partial(f, i))
                reqs.append(req)

            for req in reqs:
                req.submit()

            _consumed = consumed
            with cond:
                while len(_consumed) < N_ELEMENTS:
                    while len(_consumed) == len(produced):
                        cond.wait()
                    logger.debug("copying {} elements".format(
                        len(produced) - len(consumed)))
                    _consumed += produced[len(_consumed):]

        # Force the request to run in a worker thread.
        # This should catch failures that can occur if the Condition's "waiter" lock isn't a request lock.
        req = Request(wait_for_all)
        req.submit()

        # Now block for completion
        req.wait()

        logger.debug("produced: {}".format(produced))
        logger.debug("consumed: {}".format(consumed))
        assert set(consumed) == set(
            range(N_ELEMENTS)
        ), "Expected set(range(N_ELEMENTS)), got {}".format(consumed)
Code Example #25
    def exportFinalSegmentation(self,
                                outputPath,
                                axisorder,
                                progressCallback=None):
        assert self.FinalSegmentation.ready(
        ), "Can't export yet: The final segmentation isn't ready!"

        logger.info("Starting Final Segmentation Export...")

        opTranspose = OpReorderAxes(parent=self)
        opTranspose.AxisOrder.setValue(axisorder)
        opTranspose.Input.connect(self.FinalSegmentation)

        f = h5py.File(outputPath, 'w')
        opExporter = OpH5WriterBigDataset(parent=self)
        opExporter.hdf5File.setValue(f)
        opExporter.hdf5Path.setValue('split_result')
        opExporter.Image.connect(opTranspose.Output)
        if progressCallback is not None:
            opExporter.progressSignal.subscribe(progressCallback)

        req = Request(partial(self._runExporter, opExporter))

        def cleanOps():
            opExporter.cleanUp()
            opTranspose.cleanUp()

        def handleFailed(exc, exc_info):
            cleanOps()
            f.close()
            import traceback
            traceback.print_tb(exc_info[2])
            msg = "Final Segmentation export FAILED due to the following error:\n{}".format(
                exc)
            logger.error(msg)

        def handleFinished(result):
            try:
                cleanOps()
                logger.info("FINISHED Final Segmentation Export")
            finally:
                f.close()

        def handleCancelled():
            cleanOps()
            f.close()
            logger.info("Final Segmentation export was cancelled!")

        req.notify_failed(handleFailed)
        req.notify_finished(handleFinished)
        req.notify_cancelled(handleCancelled)

        req.submit()
        return req  # Returned in case the user wants to cancel it.
Code Example #26
File: testRequest.py  Project: thatcher/lazyflow
        def someWork(depth, force=False, i=0):
            #print 'depth=', depth, 'i=', i
            if depth > 0 and (force or random.random() > 0.5):
                requests = []
                for i in range(maxBreadth):
                    req = Request(someWork, depth=depth - 1, i=i)
                    req.notify(writeToH5Py, index=(depth - 1, i), req=req)
                    requests.append(req)

                for r in requests:
                    r.wait()
Code Example #27
    def _update_rendering(self):
        """
        Override from the base class.
        """
        # This update has to be performed in a different thread to avoid a deadlock
        # (because this function is running in the context of a dirty notification!)
        req = Request(self.__update_rendering)

        def handle_rendering_failure(exc, exc_info):
            msg = "Exception raised during volume rendering update.  See traceback above.\n"
            log_exception(logger, msg, exc_info)

        req.notify_failed(handle_rendering_failure)
        req.submit()
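
Most of the examples above follow the same two patterns: a single Request that is submitted with completion callbacks, and a RequestPool that waits for many requests at once. The sketch below is a minimal illustration of both, assuming the usual lazyflow import path and a toy work function (both are assumptions, not taken from the examples themselves):

from functools import partial
from lazyflow.request import Request, RequestPool  # assumed import path

def work(x):
    # Toy stand-in for real computation.
    return x * x

# Single request: register a callback, submit, then block for the result.
req = Request(partial(work, 3))
req.notify_finished(lambda result: print("done:", result))
req.submit()
assert req.wait() == 9

# Many requests: add them all to a pool and wait for the whole batch.
pool = RequestPool()
for i in range(10):
    pool.add(Request(partial(work, i)))
pool.wait()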