def track(model, weights, solver='flow'):
    """Run a tracking solver on a JSON hypotheses-graph model.

    Parameters
    ----------
    model : dict
        JSON-style hypotheses graph model.
    weights : dict
        Solver weights, e.g. ``{"weights": [...]}``.
    solver : str
        Either ``'flow'`` (flow-based solver from ``dpct``) or ``'ilp'``
        (multi-hypotheses ILP solver backed by CPLEX or Gurobi).

    Returns
    -------
    dict
        The tracking result dictionary produced by the chosen solver.

    Raises
    ------
    ValueError
        If ``solver`` is neither ``'flow'`` nor ``'ilp'``.
    ImportError
        If the requested solver backend is not installed.
    """
    if solver == 'flow':
        import dpct
        return dpct.trackFlowBased(model, weights)
    elif solver == 'ilp':
        # Prefer the CPLEX backend, fall back to Gurobi if unavailable.
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError(
                    "Could not find multi hypotheses tracking ilp solver")
        return mht.track(model, weights)
    else:
        # Previously any unknown value silently fell through to the ILP
        # branch; fail fast with a clear message instead.
        raise ValueError("solver must be 'flow' or 'ilp', got %r" % (solver,))
def runTracking(options, trackingGraph, weights=None):
    """Track the given graph with the given weights.

    If ``weights`` is None they are loaded from the JSON file named by
    ``options.weight_json_filename``; otherwise the supplied (learned)
    weights are used directly.

    **Returns** the tracking result dictionary.
    """
    getLogger().info("Run tracking...")

    if weights is not None:
        getLogger().info("Using learned weights!")
    else:
        getLogger().info("Loading weights from " + options.weight_json_filename)
        with open(options.weight_json_filename, 'r') as weight_file:
            weights = json.load(weight_file)

    if options.use_flow_solver:
        # flow-based solver
        import dpct
        result = dpct.trackFlowBased(trackingGraph.model, weights)
    else:
        # ILP solver: prefer the CPLEX build, fall back to Gurobi
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("No version of ILP solver found")
        result = mht.track(trackingGraph.model, weights)

    if options.result_json_filename is not None:
        writeToFormattedJSON(options.result_json_filename, result)

    return result
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the
    pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case
      merger resolving is supposed to be run.
    """
    # Translate the list of unparsed config entries into a lookup dict.
    params = convertToDict(unknown)

    # Step 1: obtain tracking weights, either extracted from an ilastik
    # project or loaded from a JSON file.
    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    # Step 2: build the hypotheses graph from the ilastik project data, or
    # load a previously exported model from disk.
    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:  # NOTE(review): bare except; presumably guards a KeyError for a missing config key
            ilpOptions.rawImageAxes = 'txyzc'
        ilpOptions.sizeFilter = [int(params[str('min-size')]), 100000]
        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params[str('object-count-classifier-file')]
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project
        # Divisions are enabled unless the config explicitly disables them.
        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params[str('division-classifier-file')]
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None
        probGenerator = probabilitygenerator.IlpProbabilityGenerator(ilpOptions,
                                                                     pluginPaths=[str('../hytra/plugins')],
                                                                     useMultiprocessing=False)
        # if time_range is not None:
        #     traxelstore.timeRange = time_range
        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(probGenerator.shape,
                                   probGenerator.timeRange[0],
                                   probGenerator.timeRange[1],
                                   [probGenerator.x_scale,
                                    probGenerator.y_scale,
                                    probGenerator.z_scale])
        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params[str('max-number-objects')]),
            numNearestNeighbors=int(params[str('max-nearest-neighbors')]),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1
        )
        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()
        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(model_filename=options.model_filename)

    # Step 3 (optional): make all graph energies convex.
    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    # Step 4 (optional): run the selected solver and write the result to disk.
    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            # prefer CPLEX, fall back to Gurobi
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError("Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)
        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)
        if hypotheses_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            # NOTE(review): `hypotheses_graph` is only bound when do_create_graph
            # was set; this raises NameError otherwise -- verify intended usage.
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    # Step 5 (optional): split merged detections back into individual objects.
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        # NOTE(review): `ilpOptions` and `result` are only bound when the
        # earlier steps ran -- this branch raises NameError otherwise.
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph,
            ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath,
            params[str('out-label-image-file')],
            ilpOptions.rawImageFilename,
            ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes,
            [str('../hytra/plugins')],
            True)
        # NOTE(review): these assignments re-apply config values after the
        # resolver was already constructed; they look redundant -- confirm.
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:
            ilpOptions.rawImageAxes = 'txyzc'
        merger_resolver.run(None, None)
def track(self,
          time_range,
          x_range,
          y_range,
          z_range,
          size_range=(0, 100000),
          x_scale=1.0,
          y_scale=1.0,
          z_scale=1.0,
          maxDist=30,
          maxObj=2,
          divThreshold=0.5,
          avgSize=[0],
          withTracklets=False,
          sizeDependent=True,
          detWeight=10.0,
          divWeight=10.0,
          transWeight=10.0,
          withDivisions=True,
          withOpticalCorrection=True,
          withClassifierPrior=False,
          ndim=3,
          cplex_timeout=None,
          withMergerResolution=True,
          borderAwareWidth=0.0,
          withArmaCoordinates=True,
          appearance_cost=500,
          disappearance_cost=500,
          motionModelWeight=10.0,
          force_build_hypotheses_graph=False,
          max_nearest_neighbors=1,
          numFramesPerSplit=0,
          withBatchProcessing=False,
          solverName="Flow-based",
          progressWindow=None,
          progressVisitor=CommandLineProgressVisitor()):
    # NOTE(review): `avgSize=[0]` and `progressVisitor=CommandLineProgressVisitor()`
    # are mutable/stateful defaults evaluated once at definition time and shared
    # across calls -- confirm this sharing is intended before changing.
    """
    Main conservation tracking function. Runs tracking solver, generates
    hypotheses graph, and resolves mergers.

    The sequence is: store all parameters into the `Parameters` slot,
    validate classifier-related slots, build (and optionally trackletize)
    the hypotheses graph, run the selected solver ('Flow-based' via dpct
    or 'ILP' via mht), insert the solution, optionally resolve mergers,
    compute lineages, and mark the output slots dirty.

    Returns the solver's result dictionary.
    """
    self.progressWindow = progressWindow
    self.progressVisitor = progressVisitor
    if not self.Parameters.ready():
        self.raiseException(self.progressWindow, "Parameter slot is not ready")

    # it is assumed that the self.Parameters object is changed only at this
    # place (ugly assumption). Therefore we can track any changes in the
    # parameters as done in the following lines: If the same value for the
    # key is already written in the parameters dictionary, the
    # paramters_changed dictionary will get a "False" entry for this key,
    # otherwise it is set to "True"
    parameters = self.Parameters.value
    parameters['maxDist'] = maxDist
    parameters['maxObj'] = maxObj
    parameters['divThreshold'] = divThreshold
    parameters['avgSize'] = avgSize
    parameters['withTracklets'] = withTracklets
    parameters['sizeDependent'] = sizeDependent
    parameters['detWeight'] = detWeight
    parameters['divWeight'] = divWeight
    parameters['transWeight'] = transWeight
    parameters['withDivisions'] = withDivisions
    parameters['withOpticalCorrection'] = withOpticalCorrection
    parameters['withClassifierPrior'] = withClassifierPrior
    parameters['withMergerResolution'] = withMergerResolution
    parameters['borderAwareWidth'] = borderAwareWidth
    parameters['withArmaCoordinates'] = withArmaCoordinates
    parameters['appearanceCost'] = appearance_cost
    parameters['disappearanceCost'] = disappearance_cost
    parameters['scales'] = [x_scale, y_scale, z_scale]
    parameters['time_range'] = [min(time_range), max(time_range)]
    parameters['x_range'] = x_range
    parameters['y_range'] = y_range
    parameters['z_range'] = z_range
    parameters['max_nearest_neighbors'] = max_nearest_neighbors
    parameters['numFramesPerSplit'] = numFramesPerSplit
    parameters['solver'] = str(solverName)

    # Set a size range with a minimum area equal to the max number of objects
    # (since the GMM throws an error if we try to fit more gaussians than the
    # number of pixels in the object)
    size_range = (max(maxObj, size_range[0]), size_range[1])
    parameters['size_range'] = size_range

    if cplex_timeout:
        parameters['cplex_timeout'] = cplex_timeout
    else:
        parameters['cplex_timeout'] = ''
        cplex_timeout = float(1e75)
    self.Parameters.setValue(parameters, check_changed=False)

    # Sanity checks: the object-count classifier must be trained and its
    # label count must match maxObj + 1 (background + up to maxObj objects).
    if withClassifierPrior:
        if not self.DetectionProbabilities.ready() or len(
                self.DetectionProbabilities([0]).wait()[0]) == 0:
            self.raiseDatasetConstraintError(
                self.progressWindow, 'Tracking',
                'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
            )
        if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
            self.raiseDatasetConstraintError(self.progressWindow, 'Tracking',
                'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                'one training example for each class.')
        if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
            self.raiseDatasetConstraintError(self.progressWindow, 'Tracking',
                'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                'one training example for each class.')

    # Build the hypotheses graph and convert it to the solver's model form.
    hypothesesGraph = self._createHypothesesGraph()
    hypothesesGraph.allowLengthOneTracks = True
    if withTracklets:
        hypothesesGraph = hypothesesGraph.generateTrackletGraph()
    hypothesesGraph.insertEnergies()
    trackingGraph = hypothesesGraph.toTrackingGraph()
    trackingGraph.convexifyCosts()
    model = trackingGraph.model
    model['settings']['allowLengthOneTracks'] = True

    detWeight = 10.0  # FIXME: Should we store this weight in the parameters slot?
    weights = trackingGraph.weightsListToDict([
        transWeight, detWeight, divWeight, appearance_cost, disappearance_cost
    ])

    stepStr = solverName + " tracking solver"
    self.progressVisitor.showState(stepStr)
    self.progressVisitor.showProgress(0)

    if solverName == 'Flow-based' and dpct:
        if numFramesPerSplit:
            # Run solver with frame splits (split, solve, and stitch video
            # to improve running-time)
            from hytra.core.splittracking import SplitTracking
            result = SplitTracking.trackFlowBasedWithSplits(
                model, weights, numFramesPerSplit=numFramesPerSplit)
        else:
            # casting weights to float (raised TypeError on Windows before)
            weights['weights'] = [float(w) for w in weights['weights']]
            result = dpct.trackFlowBased(model, weights)
    elif solverName == 'ILP' and mht:
        result = mht.track(model, weights)
    else:
        raise ValueError("Invalid tracking solver selected")
    self.progressVisitor.showProgress(1.0)

    # Insert the solution into the hypotheses graph and from that deduce the lineages
    if hypothesesGraph:
        hypothesesGraph.insertSolution(result)

    # Merger resolution
    resolvedMergersDict = {}
    if withMergerResolution:
        stepStr = "Merger resolution"
        self.progressVisitor.showState(stepStr)
        resolvedMergersDict = self._resolveMergers(hypothesesGraph, model)

    # Set value of resolved mergers slot (Should be empty if mergers are disabled)
    self.ResolvedMergers.setValue(resolvedMergersDict, check_changed=False)

    # Computing tracking lineage IDs from within Hytra
    hypothesesGraph.computeLineage()

    if self.progressWindow is not None:
        self.progressWindow.onTrackDone()
    self.progressVisitor.showProgress(1.0)

    # Uncomment to export a hypothese graph diagram
    #logger.info("Exporting hypotheses graph diagram")
    #from hytra.util.hypothesesgraphdiagram import HypothesesGraphDiagram
    #hgv = HypothesesGraphDiagram(hypothesesGraph._graph, timeRange=(0, 10), fileName='HypothesesGraph.png' )

    # Set value of hypotheses grap slot (use referenceTraxelGraph if using tracklets)
    hypothesesGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
    self.HypothesesGraph.setValue(hypothesesGraph, check_changed=False)

    # Set all the output slots dirty (See execute() function)
    self.Output.setDirty()
    self.MergerOutput.setDirty()
    self.RelabeledImage.setDirty()

    return result
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the scripts as subprocesses.
    Using the `do-SOMETHING` switches one can configure which parts of the
    pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case
      merger resolving is supposed to be run.
    """
    # Optional conversion/training/setup steps; each delegates to a helper
    # script that reads the same config file.
    if options.do_ctc_groundtruth_conversion:
        logging.info("Convert CTC groundtruth to our format...")
        check_call(["python", os.path.abspath("ctc/ctc_gt_to_hdf5.py"), "--config", options.config_file])

    if options.do_ctc_raw_data_conversion:
        logging.info("Convert CTC raw data to HDF5...")
        check_call(["python", os.path.abspath("ctc/stack_to_h5.py"), "--config", options.config_file])

    if options.do_ctc_segmentation_conversion:
        logging.info("Convert CTC segmentation to HDF5...")
        check_call(["python", os.path.abspath("ctc/segmentation_to_hdf5.py"), "--config", options.config_file])

    if options.do_train_transition_classifier:
        logging.info("Train transition classifier...")
        check_call(["python", os.path.abspath("train_transition_classifier.py"), "--config", options.config_file])

    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        check_call(["python", os.path.abspath("tracking_ilp_to_weights.py"), "--config", options.config_file])

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        check_call(["python", os.path.abspath("hypotheses_graph_to_json.py"), "--config", options.config_file])

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        check_call(["python", os.path.abspath("convexify_costs.py"), "--config", options.config_file])

    if options.do_tracking:
        logging.info("Run tracking...")
        if options.tracking_executable is not None:
            # Delegate to an external solver binary.
            check_call([options.tracking_executable,
                        "-m", options.model_filename,
                        "-w", options.weight_filename,
                        "-o", options.result_filename])
        else:
            # Run the solver in-process. commentjson is preferred, presumably
            # to tolerate comments in the model/weight files -- falls back to
            # the stdlib json module.
            try:
                import commentjson as json
            except ImportError:
                import json
            import hytra.core.jsongraph
            with open(options.model_filename, 'r') as f:
                model = json.load(f)
            with open(options.weight_filename, 'r') as f:
                weights = json.load(f)
            if options.solver == "flow-based":
                import dpct
                result = dpct.trackFlowBased(model, weights)
            elif options.solver == "ilp":
                # prefer CPLEX, fall back to Gurobi
                try:
                    import multiHypoTracking_with_cplex as mht
                except ImportError:
                    try:
                        import multiHypoTracking_with_gurobi as mht
                    except ImportError:
                        raise ImportError("Could not find multi hypotheses tracking ilp solver")
                result = mht.track(model, weights)
            # NOTE(review): `result` is unbound if options.solver is neither
            # "flow-based" nor "ilp" -- confirm argparse restricts the choices.
            hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)

    extra_params = []
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        check_call(["python", os.path.abspath("run_merger_resolving.py"), "--config", options.config_file])
        # Forward the merger-resolved output filenames (declared in the
        # config as --out-*) to the export scripts below as --* parameters.
        for p in ["--out-graph-json-file", "--out-label-image-file", "--out-result-json-file"]:
            index = unknown.index(p)
            extra_params.append(p.replace('--out-', '--'))
            extra_params.append(unknown[index + 1])

    if options.export_format is not None:
        logging.info("Convert result to {}...".format(options.export_format))
        if options.export_format in ['ilastikH5', 'ctc']:
            check_call(["python", os.path.abspath("json_result_to_events.py"), "--config", options.config_file] + extra_params)
            if options.export_format == 'ctc':
                check_call(["python", os.path.abspath("ctc/hdf5_to_ctc.py"), "--config", options.config_file] + extra_params)
        elif options.export_format == 'labelimage':
            check_call(["python", os.path.abspath("json_result_to_labelimage.py"), "--config", options.config_file] + extra_params)
        elif options.export_format is not None:
            logging.error("Unknown export format chosen!")
            raise ValueError("Unknown export format chosen!")
def track(self,
          time_range,
          x_range,
          y_range,
          z_range,
          size_range=(0, 100000),
          x_scale=1.0,
          y_scale=1.0,
          z_scale=1.0,
          maxDist=30,
          maxObj=2,
          divThreshold=0.5,
          avgSize=[0],
          withTracklets=False,
          sizeDependent=True,
          divWeight=10.0,
          transWeight=10.0,
          withDivisions=True,
          withOpticalCorrection=True,
          withClassifierPrior=False,
          ndim=3,
          cplex_timeout=None,
          withMergerResolution=True,
          borderAwareWidth = 0.0,
          withArmaCoordinates = True,
          appearance_cost = 500,
          disappearance_cost = 500,
          motionModelWeight=10.0,
          force_build_hypotheses_graph = False,
          max_nearest_neighbors = 2,
          withBatchProcessing = False,
          solverName="Flow-based"
          ):
    # NOTE(review): `avgSize=[0]` is a mutable default argument shared across
    # calls -- confirm this sharing is intended before changing.
    """
    Main conservation tracking function. Runs tracking solver, generates
    hypotheses graph, and resolves mergers.

    The sequence is: store all parameters into the `Parameters` slot,
    validate classifier-related slots, build (and optionally trackletize)
    the hypotheses graph, run the selected solver ('Flow-based' via dpct
    or 'ILP' via mht), insert the solution, optionally resolve mergers,
    compute lineages, mark the output slots dirty, and fill the
    `EventsVector` slot.
    """
    if not self.Parameters.ready():
        raise Exception("Parameter slot is not ready")

    # it is assumed that the self.Parameters object is changed only at this
    # place (ugly assumption). Therefore we can track any changes in the
    # parameters as done in the following lines: If the same value for the
    # key is already written in the parameters dictionary, the
    # paramters_changed dictionary will get a "False" entry for this key,
    # otherwise it is set to "True"
    parameters = self.Parameters.value
    parameters['maxDist'] = maxDist
    parameters['maxObj'] = maxObj
    parameters['divThreshold'] = divThreshold
    parameters['avgSize'] = avgSize
    parameters['withTracklets'] = withTracklets
    parameters['sizeDependent'] = sizeDependent
    parameters['divWeight'] = divWeight
    parameters['transWeight'] = transWeight
    parameters['withDivisions'] = withDivisions
    parameters['withOpticalCorrection'] = withOpticalCorrection
    parameters['withClassifierPrior'] = withClassifierPrior
    parameters['withMergerResolution'] = withMergerResolution
    parameters['borderAwareWidth'] = borderAwareWidth
    parameters['withArmaCoordinates'] = withArmaCoordinates
    parameters['appearanceCost'] = appearance_cost
    parameters['disappearanceCost'] = disappearance_cost
    parameters['scales'] = [x_scale, y_scale, z_scale]
    parameters['time_range'] = [min(time_range), max(time_range)]
    parameters['x_range'] = x_range
    parameters['y_range'] = y_range
    parameters['z_range'] = z_range
    parameters['max_nearest_neighbors'] = max_nearest_neighbors
    parameters['solver'] = str(solverName)

    # Set a size range with a minimum area equal to the max number of objects
    # (since the GMM throws an error if we try to fit more gaussians than the
    # number of pixels in the object)
    size_range = (max(maxObj, size_range[0]), size_range[1])
    parameters['size_range'] = size_range

    if cplex_timeout:
        parameters['cplex_timeout'] = cplex_timeout
    else:
        parameters['cplex_timeout'] = ''
        cplex_timeout = float(1e75)

    self.Parameters.setValue(parameters, check_changed=False)

    # Sanity checks: the object-count classifier must be trained and its
    # label count must match maxObj + 1 (background + up to maxObj objects).
    if withClassifierPrior:
        if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
            raise DatasetConstraintError('Tracking', 'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
        if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
            raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                'one training example for each class.')
        if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
            raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                'one training example for each class.')

    # Build the hypotheses graph and convert it to the solver's model form.
    hypothesesGraph = self._createHypothesesGraph()
    if withTracklets:
        hypothesesGraph = hypothesesGraph.generateTrackletGraph()
    hypothesesGraph.insertEnergies()
    trackingGraph = hypothesesGraph.toTrackingGraph()
    trackingGraph.convexifyCosts()
    model = trackingGraph.model

    detWeight = 10.0  # FIXME: Should we store this weight in the parameters slot?
    weights = trackingGraph.weightsListToDict([transWeight, detWeight, divWeight, appearance_cost, disappearance_cost])

    if solverName == 'Flow-based' and dpct:
        result = dpct.trackFlowBased(model, weights)
    elif solverName == 'ILP' and mht:
        result = mht.track(model, weights)
    else:
        raise ValueError("Invalid tracking solver selected")

    # Insert the solution into the hypotheses graph and from that deduce the lineages
    if hypothesesGraph:
        hypothesesGraph.insertSolution(result)

    # Merger resolution
    resolvedMergersDict = {}
    if withMergerResolution:
        resolvedMergersDict = self._resolveMergers(hypothesesGraph, model)

    # Set value of resolved mergers slot (Should be empty if mergers are disabled)
    self.ResolvedMergers.setValue(resolvedMergersDict, check_changed=False)

    # Computing tracking lineage IDs from within Hytra
    logger.info("Computing hypotheses graph lineages")
    hypothesesGraph.computeLineage()

    # Uncomment to export a hypothese graph diagram
    #logger.info("Exporting hypotheses graph diagram")
    #from hytra.util.hypothesesgraphdiagram import HypothesesGraphDiagram
    #hgv = HypothesesGraphDiagram(hypothesesGraph._graph, timeRange=(0, 10), fileName='HypothesesGraph.png' )

    # Set value of hypotheses grap slot (use referenceTraxelGraph if using tracklets)
    hypothesesGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
    self.HypothesesGraph.setValue(hypothesesGraph, check_changed=False)

    # Set all the output slots dirty (See execute() function)
    self.Output.setDirty()
    self.MergerOutput.setDirty()
    self.RelabeledImage.setDirty()

    # Get events vector (only used when saving old h5 events file)
    events = self._getEventsVector(result, model)
    self.EventsVector.setValue(events, check_changed=False)
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the
    pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case
      merger resolving is supposed to be run.
    """
    # Translate the list of unparsed config entries into a lookup dict.
    params = convertToDict(unknown)

    # Step 1: obtain tracking weights, either extracted from an ilastik
    # project or loaded from a JSON file.
    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(
            options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    # Step 2: build the hypotheses graph from the ilastik project data, or
    # load a previously exported model from disk.
    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:  # NOTE(review): bare except; presumably guards a KeyError for a missing config key
            ilpOptions.rawImageAxes = 'txyzc'
        ilpOptions.sizeFilter = [int(params[str('min-size')]), 100000]
        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params[str(
                'object-count-classifier-file')]
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project
        # Divisions are enabled unless the config explicitly disables them.
        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params[str(
                    'division-classifier-file')]
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None
        probGenerator = probabilitygenerator.IlpProbabilityGenerator(
            ilpOptions,
            pluginPaths=[str('../hytra/plugins')],
            useMultiprocessing=False)
        # if time_range is not None:
        #     traxelstore.timeRange = time_range
        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(
            probGenerator.shape, probGenerator.timeRange[0],
            probGenerator.timeRange[1], [
                probGenerator.x_scale, probGenerator.y_scale,
                probGenerator.z_scale
            ])
        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params[str('max-number-objects')]),
            numNearestNeighbors=int(params[str('max-nearest-neighbors')]),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1)
        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()
        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(
            model_filename=options.model_filename)

    # Step 3 (optional): make all graph energies convex.
    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    # Step 4 (optional): run the selected solver and write the result to disk.
    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            # prefer CPLEX, fall back to Gurobi
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError(
                        "Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)
        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)
        if hypotheses_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            # NOTE(review): `hypotheses_graph` is only bound when do_create_graph
            # was set; this raises NameError otherwise -- verify intended usage.
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    # Step 5 (optional): split merged detections back into individual objects.
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        # NOTE(review): `ilpOptions` and `result` are only bound when the
        # earlier steps ran -- this branch raises NameError otherwise.
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph,
            ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath,
            params[str('out-label-image-file')],
            ilpOptions.rawImageFilename,
            ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes,
            [str('../hytra/plugins')],
            True)
        # NOTE(review): these assignments re-apply config values after the
        # resolver was already constructed; they look redundant -- confirm.
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except:
            ilpOptions.rawImageAxes = 'txyzc'
        merger_resolver.run(None, None)
def test_twoSegmentations():
    """Verify ConflictingSegmentsProbabilityGenerator output and the
    exclusion constraints in the resulting hypotheses graph / solution."""
    # set up ConflictingSegmentsProbabilityGenerator
    ilpOptions = IlastikProjectOptions()
    ilpOptions.divisionClassifierPath = None
    ilpOptions.divisionClassifierFilename = None
    ilpOptions.rawImageFilename = 'tests/multiSegmentationHypothesesTestDataset/Raw.h5'
    ilpOptions.rawImagePath = 'exported_data'
    ilpOptions.rawImageAxes = 'txyzc'
    ilpOptions.labelImageFilename = 'tests/multiSegmentationHypothesesTestDataset/segmentation.h5'
    ilpOptions.objectCountClassifierFilename = 'tests/multiSegmentationHypothesesTestDataset/tracking.ilp'
    additionalLabelImageFilenames = ['tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5']
    additionalLabelImagePaths = [ilpOptions.labelImagePath]
    probabilityGenerator = ConflictingSegmentsProbabilityGenerator(
        ilpOptions,
        additionalLabelImageFilenames,
        additionalLabelImagePaths,
        useMultiprocessing=False,
        verbose=False)
    probabilityGenerator.fillTraxels(usePgmlink=False)

    # expected traxel counts per frame for the two overlaid segmentations
    assert(len(probabilityGenerator.TraxelsPerFrame[0]) == 4)
    assert(len(probabilityGenerator.TraxelsPerFrame[1]) == 3)
    assert(len(probabilityGenerator.TraxelsPerFrame[2]) == 3)
    assert(len(probabilityGenerator.TraxelsPerFrame[3]) == 4)
    filenamesPerTraxel = [t.segmentationFilename for t in probabilityGenerator.TraxelsPerFrame[3].values()]
    idsPerTraxel = [t.idInSegmentation for t in probabilityGenerator.TraxelsPerFrame[3].values()]
    assert(idsPerTraxel.count(1) == 2)
    assert(idsPerTraxel.count(2) == 2)
    assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentation.h5') == 2)
    assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5') == 2)

    # build hypotheses graph, check that conflicting traxels are properly detected
    fieldOfView = constructFov(probabilityGenerator.shape,
                               probabilityGenerator.timeRange[0],
                               probabilityGenerator.timeRange[1],
                               [probabilityGenerator.x_scale,
                                probabilityGenerator.y_scale,
                                probabilityGenerator.z_scale])

    hypotheses_graph = IlastikHypothesesGraph(
        probabilityGenerator=probabilityGenerator,
        timeRange=probabilityGenerator.timeRange,
        maxNumObjects=1,
        numNearestNeighbors=2,
        fieldOfView=fieldOfView,
        withDivisions=False,
        divisionThreshold=0.1
    )

    assert(hypotheses_graph.countNodes() == 14)
    assert(hypotheses_graph.countArcs() == 23)
    # NOTE(review): `_graph.node` is the networkx 1.x attribute-dict API
    assert(hypotheses_graph._graph.node[(0, 1)]['traxel'].conflictingTraxelIds == [3])
    assert(hypotheses_graph._graph.node[(0, 3)]['traxel'].conflictingTraxelIds == [1])
    assert(hypotheses_graph._graph.node[(0, 2)]['traxel'].conflictingTraxelIds == [4])
    assert(hypotheses_graph._graph.node[(0, 4)]['traxel'].conflictingTraxelIds == [2])
    assert(hypotheses_graph._graph.node[(1, 1)]['traxel'].conflictingTraxelIds == [2, 3])
    assert(hypotheses_graph._graph.node[(1, 2)]['traxel'].conflictingTraxelIds == [1])
    assert(hypotheses_graph._graph.node[(1, 3)]['traxel'].conflictingTraxelIds == [1])

    # track, but check that the right exclusion constraints are present
    hypotheses_graph.insertEnergies()
    trackingGraph = hypotheses_graph.toTrackingGraph()
    assert(len(trackingGraph.model['exclusions']) == 8)
    for exclusionSet in trackingGraph.model['exclusions']:
        assert(len(exclusionSet) == 2)

    # use multiHypoTracking, insert exclusion constraints!
    if mht is None:
        # standard dpct cannot handle exclusion constraints yet, so there is
        # nothing more to verify without the ILP solver
        return
    result = mht.track(trackingGraph.model, {"weights": [10, 10, 500, 500]})

    hypotheses_graph.insertSolution(result)
    # hypotheses_graph.computeLineage()

    # exactly two objects should be active in every frame
    numActivePerFrame = {}
    for node in hypotheses_graph.nodeIterator():
        timeframe = node[0]
        if 'value' in hypotheses_graph._graph.node[node]:
            value = hypotheses_graph._graph.node[node]['value']
        else:
            value = 0
        numActivePerFrame.setdefault(timeframe, []).append(value)

    # fixed: dict.iteritems() does not exist in Python 3; items() works in 2 and 3
    for _, v in numActivePerFrame.items():
        assert(sum(v) == 2)

    # total flow over all active transition arcs
    edgeFlow = 0
    for edge in hypotheses_graph.arcIterator():
        if 'value' in hypotheses_graph._graph.edge[edge[0]][edge[1]]:
            edgeFlow += hypotheses_graph._graph.edge[edge[0]][edge[1]]['value']
    assert(edgeFlow == 6)
def test_twoSegmentations():
    """Verify ConflictingSegmentsProbabilityGenerator output and the
    exclusion constraints in the resulting hypotheses graph / solution."""
    # set up ConflictingSegmentsProbabilityGenerator
    ilpOptions = IlastikProjectOptions()
    ilpOptions.divisionClassifierPath = None
    ilpOptions.divisionClassifierFilename = None
    ilpOptions.rawImageFilename = 'tests/multiSegmentationHypothesesTestDataset/Raw.h5'
    ilpOptions.rawImagePath = 'exported_data'
    ilpOptions.rawImageAxes = 'txyzc'
    ilpOptions.labelImageFilename = 'tests/multiSegmentationHypothesesTestDataset/segmentation.h5'
    ilpOptions.objectCountClassifierFilename = 'tests/multiSegmentationHypothesesTestDataset/tracking.ilp'
    additionalLabelImageFilenames = [
        'tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5'
    ]
    additionalLabelImagePaths = [ilpOptions.labelImagePath]
    probabilityGenerator = ConflictingSegmentsProbabilityGenerator(
        ilpOptions,
        additionalLabelImageFilenames,
        additionalLabelImagePaths,
        useMultiprocessing=False,
        verbose=False)
    probabilityGenerator.fillTraxels(usePgmlink=False)

    # expected traxel counts per frame for the two overlaid segmentations
    assert (len(probabilityGenerator.TraxelsPerFrame[0]) == 4)
    assert (len(probabilityGenerator.TraxelsPerFrame[1]) == 3)
    assert (len(probabilityGenerator.TraxelsPerFrame[2]) == 3)
    assert (len(probabilityGenerator.TraxelsPerFrame[3]) == 4)
    filenamesPerTraxel = [
        t.segmentationFilename
        for t in probabilityGenerator.TraxelsPerFrame[3].values()
    ]
    idsPerTraxel = [
        t.idInSegmentation
        for t in probabilityGenerator.TraxelsPerFrame[3].values()
    ]
    assert (idsPerTraxel.count(1) == 2)
    assert (idsPerTraxel.count(2) == 2)
    assert (filenamesPerTraxel.count(
        'tests/multiSegmentationHypothesesTestDataset/segmentation.h5') == 2)
    assert (filenamesPerTraxel.count(
        'tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5') == 2
            )

    # build hypotheses graph, check that conflicting traxels are properly detected
    fieldOfView = constructFov(
        probabilityGenerator.shape, probabilityGenerator.timeRange[0],
        probabilityGenerator.timeRange[1], [
            probabilityGenerator.x_scale, probabilityGenerator.y_scale,
            probabilityGenerator.z_scale
        ])

    hypotheses_graph = IlastikHypothesesGraph(
        probabilityGenerator=probabilityGenerator,
        timeRange=probabilityGenerator.timeRange,
        maxNumObjects=1,
        numNearestNeighbors=2,
        fieldOfView=fieldOfView,
        withDivisions=False,
        divisionThreshold=0.1)

    assert (hypotheses_graph.countNodes() == 14)
    assert (hypotheses_graph.countArcs() == 23)
    # NOTE(review): `_graph.node` is the networkx 1.x attribute-dict API
    assert (hypotheses_graph._graph.node[(0, 1)]['traxel'].conflictingTraxelIds == [3])
    assert (hypotheses_graph._graph.node[(0, 3)]['traxel'].conflictingTraxelIds == [1])
    assert (hypotheses_graph._graph.node[(0, 2)]['traxel'].conflictingTraxelIds == [4])
    assert (hypotheses_graph._graph.node[(0, 4)]['traxel'].conflictingTraxelIds == [2])
    assert (hypotheses_graph._graph.node[(1, 1)]['traxel'].conflictingTraxelIds == [2, 3])
    assert (hypotheses_graph._graph.node[(1, 2)]['traxel'].conflictingTraxelIds == [1])
    assert (hypotheses_graph._graph.node[(1, 3)]['traxel'].conflictingTraxelIds == [1])

    # track, but check that the right exclusion constraints are present
    hypotheses_graph.insertEnergies()
    trackingGraph = hypotheses_graph.toTrackingGraph()
    assert (len(trackingGraph.model['exclusions']) == 8)
    for exclusionSet in trackingGraph.model['exclusions']:
        assert (len(exclusionSet) == 2)

    # use multiHypoTracking, insert exclusion constraints!
    if mht is not None:
        result = mht.track(trackingGraph.model, {"weights": [10, 10, 500, 500]})
    else:
        result = dpct.trackFlowBased(trackingGraph.model, {"weights": [10, 10, 500, 500]})

    hypotheses_graph.insertSolution(result)
    # hypotheses_graph.computeLineage()

    # exactly two objects should be active in every frame
    numActivePerFrame = {}
    for node in hypotheses_graph.nodeIterator():
        timeframe = node[0]
        if 'value' in hypotheses_graph._graph.node[node]:
            value = hypotheses_graph._graph.node[node]['value']
        else:
            value = 0
        numActivePerFrame.setdefault(timeframe, []).append(value)

    # fixed: dict.iteritems() does not exist in Python 3; items() works in 2 and 3
    for _, v in numActivePerFrame.items():
        assert (sum(v) == 2)

    # total flow over all active transition arcs
    edgeFlow = 0
    for edge in hypotheses_graph.arcIterator():
        if 'value' in hypotheses_graph._graph.edge[edge[0]][edge[1]]:
            edgeFlow += hypotheses_graph._graph.edge[edge[0]][edge[1]]['value']
    assert (edgeFlow == 6)