Example #1
def track(model, weights, solver='flow'):
    ''' solver may be 'flow' or 'ilp' '''
    if solver == 'flow':
        import dpct
        return dpct.trackFlowBased(model, weights)
    else:
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError(
                    "Could not find multi hypotheses tracking ilp solver")
        return mht.track(model, weights)
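For context, a minimal usage sketch (assuming a JSON graph model of the kind used throughout these examples, with `segmentationHypotheses`, `linkingHypotheses`, and `settings` entries, and a weight dictionary of the form {"weights": [...]}; the file name and weight values are hypothetical):

import json

# Hypothetical model file; dpct and the mht solvers consume the same JSON graph format.
with open('model.json', 'r') as f:
    model = json.load(f)

# Example weight values borrowed from the commented-out block in Example #2.
weights = {"weights": [10, 10, 10, 500, 500]}

result = track(model, weights, solver='flow')  # or solver='ilp' if an ILP backend is installed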
Example #2
def runTracking(options, trackingGraph, weights=None):
    """
    Track the given graph with the given weights; if `weights` is None, they are loaded from a JSON file.
    **Returns** the tracking result dictionary
    """

    getLogger().info("Run tracking...")
    if weights is None:
        getLogger().info("Loading weights from " +
                         options.weight_json_filename)
        with open(options.weight_json_filename, 'r') as f:
            weights = json.load(f)

        # if withDivisions:
        #     weights = {"weights" : [10, 10, 10, 500, 500]}
        # else:
        #     weights = {"weights" : [10, 10, 500, 500]}
    else:
        getLogger().info("Using learned weights!")

    if options.use_flow_solver:
        import dpct
        result = dpct.trackFlowBased(trackingGraph.model, weights)
    else:
        try:
            import multiHypoTracking_with_cplex as mht
        except ImportError:
            try:
                import multiHypoTracking_with_gurobi as mht
            except ImportError:
                raise ImportError("No version of ILP solver found")
        result = mht.track(trackingGraph.model, weights)

    if options.result_json_filename is not None:
        writeToFormattedJSON(options.result_json_filename, result)

    return result
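A hedged sketch of how this function might be driven; the options object here is hypothetical and only needs the attributes that runTracking actually reads:

from types import SimpleNamespace

options = SimpleNamespace(
    weight_json_filename='weights.json',  # hypothetical path, read only when weights is None
    use_flow_solver=True,                 # True -> dpct flow solver, False -> ILP backends
    result_json_filename='result.json',   # set to None to skip writing the result file
)
result = runTracking(options, trackingGraph)  # trackingGraph must expose a .model attribute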
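Example #4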
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    params = convertToDict(unknown)
    
    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")

        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except KeyError:
            ilpOptions.rawImageAxes = 'txyzc'

        ilpOptions.sizeFilter = [int(params[str('min-size')]), 100000]

        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params[str('object-count-classifier-file')]
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project

        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params[str('division-classifier-file')]
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None

        probGenerator = probabilitygenerator.IlpProbabilityGenerator(
            ilpOptions,
            pluginPaths=[str('../hytra/plugins')],
            useMultiprocessing=False)

        # if time_range is not None:
        #     traxelstore.timeRange = time_range

        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(probGenerator.shape,
                                   probGenerator.timeRange[0],
                                   probGenerator.timeRange[1],
                                   [probGenerator.x_scale,
                                   probGenerator.y_scale,
                                   probGenerator.z_scale])

        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params[str('max-number-objects')]),
            numNearestNeighbors=int(params[str('max-nearest-neighbors')]),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1
        )

        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()

        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(model_filename=options.model_filename)
        hypotheses_graph = None

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError("Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)
            
        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)
        
        if hypotheses_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        # configure the label/raw image options before constructing the resolver
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except KeyError:
            ilpOptions.rawImageAxes = 'txyzc'
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph,
            ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath,
            params[str('out-label-image-file')],
            ilpOptions.rawImageFilename,
            ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes,
            [str('../hytra/plugins')],
            True)
        merger_resolver.run(None, None)
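A hedged sketch of the configuration keys this pipeline reads from the `unknown` parameters (via convertToDict); all values here are hypothetical placeholders:

params = {
    'label-image-path': 'TrackingFeatureExtraction/LabelImage/0',  # HDF5-internal path, hypothetical
    'label-image-file': 'segmentation.h5',
    'raw-data-path': 'exported_data',
    'raw-data-file': 'raw.h5',
    'raw-data-axes': 'txyzc',                # optional, defaults to 'txyzc'
    'min-size': '10',
    'max-number-objects': '2',
    'max-nearest-neighbors': '2',
    'out-label-image-file': 'relabeled.h5',  # only needed for merger resolving
    # 'object-count-classifier-file', 'division-classifier-file' and
    # 'without-divisions' are optional and fall back to the ilastik project
}

Example #5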
    def track(self,
              time_range,
              x_range,
              y_range,
              z_range,
              size_range=(0, 100000),
              x_scale=1.0,
              y_scale=1.0,
              z_scale=1.0,
              maxDist=30,
              maxObj=2,
              divThreshold=0.5,
              avgSize=[0],
              withTracklets=False,
              sizeDependent=True,
              detWeight=10.0,
              divWeight=10.0,
              transWeight=10.0,
              withDivisions=True,
              withOpticalCorrection=True,
              withClassifierPrior=False,
              ndim=3,
              cplex_timeout=None,
              withMergerResolution=True,
              borderAwareWidth=0.0,
              withArmaCoordinates=True,
              appearance_cost=500,
              disappearance_cost=500,
              motionModelWeight=10.0,
              force_build_hypotheses_graph=False,
              max_nearest_neighbors=1,
              numFramesPerSplit=0,
              withBatchProcessing=False,
              solverName="Flow-based",
              progressWindow=None,
              progressVisitor=CommandLineProgressVisitor()):
        """
        Main conservation tracking function. Runs tracking solver, generates hypotheses graph, and resolves mergers.
        """

        self.progressWindow = progressWindow
        self.progressVisitor = progressVisitor

        if not self.Parameters.ready():
            self.raiseException(self.progressWindow,
                                "Parameter slot is not ready")

        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: if the same value for a
        # key is already written in the parameters dictionary, the
        # parameters_changed dictionary will get a "False" entry for this key,
        # otherwise it is set to "True"
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['detWeight'] = detWeight
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost
        parameters['scales'] = [x_scale, y_scale, z_scale]
        parameters['time_range'] = [min(time_range), max(time_range)]
        parameters['x_range'] = x_range
        parameters['y_range'] = y_range
        parameters['z_range'] = z_range
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['numFramesPerSplit'] = numFramesPerSplit
        parameters['solver'] = str(solverName)

        # Set a size range with a minimum area equal to the max number of objects
        # (the GMM throws an error if we try to fit more Gaussians than there are pixels in the object)
        size_range = (max(maxObj, size_range[0]), size_range[1])
        parameters['size_range'] = size_range

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)

        self.Parameters.setValue(parameters, check_changed=False)

        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(
                    self.DetectionProbabilities([0]).wait()[0]) == 0:
                self.raiseDatasetConstraintError(
                    self.progressWindow, 'Tracking',
                    'Classifier not ready yet. Did you forget to train the Object Count Classifier?'
                )
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj +
                                                                     1):
                self.raiseDatasetConstraintError(self.progressWindow, 'Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities(
                [0]).wait()[0][0]) < (maxObj + 1):
                self.raiseDatasetConstraintError(self.progressWindow, 'Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')

        hypothesesGraph = self._createHypothesesGraph()
        hypothesesGraph.allowLengthOneTracks = True

        if withTracklets:
            hypothesesGraph = hypothesesGraph.generateTrackletGraph()

        hypothesesGraph.insertEnergies()
        trackingGraph = hypothesesGraph.toTrackingGraph()
        trackingGraph.convexifyCosts()
        model = trackingGraph.model
        model['settings']['allowLengthOneTracks'] = True

        detWeight = 10.0  # FIXME: Should we store this weight in the parameters slot?
        weights = trackingGraph.weightsListToDict([
            transWeight, detWeight, divWeight, appearance_cost,
            disappearance_cost
        ])

        stepStr = solverName + " tracking solver"
        self.progressVisitor.showState(stepStr)
        self.progressVisitor.showProgress(0)

        if solverName == 'Flow-based' and dpct:
            if numFramesPerSplit:
                # Run solver with frame splits (split, solve, and stitch video to improve running-time)
                from hytra.core.splittracking import SplitTracking
                result = SplitTracking.trackFlowBasedWithSplits(
                    model, weights, numFramesPerSplit=numFramesPerSplit)
            else:
                # casting weights to float (raised TypeError on Windows before)
                weights['weights'] = [float(w) for w in weights['weights']]
                result = dpct.trackFlowBased(model, weights)

        elif solverName == 'ILP' and mht:
            result = mht.track(model, weights)
        else:
            raise ValueError("Invalid tracking solver selected")

        self.progressVisitor.showProgress(1.0)
        # Insert the solution into the hypotheses graph and from that deduce the lineages
        if hypothesesGraph:
            hypothesesGraph.insertSolution(result)

        # Merger resolution
        resolvedMergersDict = {}
        if withMergerResolution:
            stepStr = "Merger resolution"
            self.progressVisitor.showState(stepStr)
            resolvedMergersDict = self._resolveMergers(hypothesesGraph, model)

        # Set value of resolved mergers slot (Should be empty if mergers are disabled)
        self.ResolvedMergers.setValue(resolvedMergersDict, check_changed=False)

        # Computing tracking lineage IDs from within Hytra
        hypothesesGraph.computeLineage()

        if self.progressWindow is not None:
            self.progressWindow.onTrackDone()
        self.progressVisitor.showProgress(1.0)
        # Uncomment to export a hypotheses graph diagram
        #logger.info("Exporting hypotheses graph diagram")
        #from hytra.util.hypothesesgraphdiagram import HypothesesGraphDiagram
        #hgv = HypothesesGraphDiagram(hypothesesGraph._graph, timeRange=(0, 10), fileName='HypothesesGraph.png' )

        # Set value of hypotheses graph slot (use referenceTraxelGraph if using tracklets)
        hypothesesGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
        self.HypothesesGraph.setValue(hypothesesGraph, check_changed=False)

        # Set all the output slots dirty (See execute() function)
        self.Output.setDirty()
        self.MergerOutput.setDirty()
        self.RelabeledImage.setDirty()

        return result
Example #6
    @staticmethod
    def trackFlowBasedWithSplits(model, weights, numFramesPerSplit, numThreads=None, withMergerResolver=None):
        '''
        Splits the video, runs tracking separately on each sub-section, and stitches the results back together.
        '''
        logging.basicConfig(level=logging.INFO)

        _ , uuidToTraxelMap = hytra.core.jsongraph.getMappingsBetweenUUIDsAndTraxels(model)
        
        detectionTimestepTuples = [(timestepIdTuple, entry) for entry in model['segmentationHypotheses'] for timestepIdTuple in uuidToTraxelMap[int(entry['id'])]]
        detectionsPerTimestep = {}
        for timestep_id, detection in detectionTimestepTuples:
            detectionsPerTimestep.setdefault(int(timestep_id[0]), []).append(detection)
            
        firstFrame = min(detectionsPerTimestep.keys())
        lastFrame = max(detectionsPerTimestep.keys())

        # Run tracking on the full video if there would be fewer than 2 splits
        if (lastFrame - firstFrame) <= numFramesPerSplit*2:
            _getLogger().info("WARNING: Running flow-based tracking without splits")
            if withMergerResolver:
                return dpct.trackMaxFlow(model, weights)
            else:
                return dpct.trackFlowBased(model, weights)

        numSplits = (lastFrame - firstFrame) // numFramesPerSplit 
    
        nonSingletonCostsPerFrame = []
        detectionsById = {}
        linksByIdTuple = {}
    
        for t in detectionsPerTimestep.keys():
            nonSingletonCosts = []
            for d in detectionsPerTimestep[t]:
                d['nid'] = uuidToTraxelMap[d['id']][0]
                detectionsById[d['id']] = d
                f = d['features'][:]
                del f[1]
                nonSingletonCosts.extend(f)
            nonSingletonCostsPerFrame.append(min(nonSingletonCosts)[0])
    
        for l in model['linkingHypotheses']:
            linksByIdTuple[(l['src'], l['dest'])] = l
    
        # create a list of the sum of 2 neighboring elements (has len = len(nonSingletonCostsPerFrame) - 1)
        nonSingletonCostsPerFrameGap = [i + j for i, j in zip(nonSingletonCostsPerFrame[:-1], nonSingletonCostsPerFrame[1:])]
    
        # Check that the number of frames per split is more than 2
        assert numFramesPerSplit > 2, "The number of splits is too large; each submodel would contain fewer than 3 frames"
    
        # find points where TWO consecutive frames have a low merger score together!
        # find split points in a range of 10 frames before/after the desired split location
        # TODO: also consider divisions!
        splitPoints = []
        border = 10  # TODO: adjust border according to the size of the video and the number of frames per split
        if numFramesPerSplit < border*2:
            border = 1
    
        for s in range(1, numSplits):
            desiredSplitPoint = s * numFramesPerSplit
            subrange = np.array(nonSingletonCostsPerFrameGap[desiredSplitPoint - border : desiredSplitPoint + border])
            splitPoints.append(desiredSplitPoint - border + np.argmax(subrange))
  
        _getLogger().info("Going to split hypotheses graph at frames {}".format(splitPoints))

        # split graph
        def getSubmodel(startTime, endTime):
            # for each split: take detections from detectionsPerTimestep, store a list of the uuids, then add links by filtering for the uuids
            # also make sure that appearance/disappearance costs are zero at the beginning/end of each submodel
    
            # TODO: tracklets that reach over the gap must be split into two!
            submodel = {}
            segmentationHypotheses = []
            for f in range(startTime, endTime):
                if f == startTime:
                    for d in detectionsPerTimestep[f]:
                        newD = copy.deepcopy(d)
                        newD['appearanceFeatures'] = [[0.0000001 * sum(range(i+1))] for i in range(len(d['features']))]
                        segmentationHypotheses.append(newD)
                elif f+1 == endTime:
                    for d in detectionsPerTimestep[f]:
                        newD = copy.deepcopy(d)
                        newD['disappearanceFeatures'] = [[0.0000001 * sum(range(i+1))] for i in range(len(d['features']))]
                        segmentationHypotheses.append(newD)
                else:
                    segmentationHypotheses.extend(detectionsPerTimestep[f])
    
            submodel['segmentationHypotheses'] = segmentationHypotheses
            uuidsInSubmodel = set([d['id'] for f in range(startTime, endTime) for d in detectionsPerTimestep[f]]) # TODO: This line can be optimized 
            submodel['linkingHypotheses'] = [l for l in model['linkingHypotheses'] if (l['src'] in uuidsInSubmodel) and (l['dest'] in uuidsInSubmodel)]
            submodel['divisionHypotheses'] = []
            submodel['settings'] = model['settings']
            return submodel
    
        submodels = []
        lastSplit = 0
        splitPoints.append(lastFrame) # so that we get the last split as well
        for splitPoint in splitPoints:
            _getLogger().info("Creating submodel from t={} to t={}...".format(lastSplit, splitPoint + 1))
            submodels.append(getSubmodel(lastSplit, splitPoint + 1))
            _getLogger().info("\t contains {} nodes and {} edges".format(len(submodels[-1]['segmentationHypotheses']), len(submodels[-1]['linkingHypotheses'])))
            lastSplit = splitPoint + 1
            
        # Will store submodel results
        results = []
        
        if numThreads:
            _getLogger().info("Using {} threads for solver".format(numThreads))
        
            # dummy replicates the multiprocessing API using the threading module
            # this is necessary to prevent a multiprocessing pickling error
            # see: http://stackoverflow.com/questions/8804830/python-multiprocessing-pickling-error
            from multiprocessing.dummy import Pool 
        
            # callback function
            def result_callback(result):
                results.append(result)
            
            pool = Pool(numThreads)
            for i, submodel in enumerate(submodels):
                # TODO: be robust against changes of num weights!
                # TODO: release GIL in tracking python wrappers to allow parallel solving!!
                _getLogger().info("Tracking submodel {}/{}".format(i, len(submodels)))
    
                if withMergerResolver:
                    pool.apply_async(dpct.trackMaxFlow, args=(submodel, weights), callback=result_callback)
                else:
                    pool.apply_async(dpct.trackFlowBased, args=(submodel, weights), callback=result_callback)
                
            # Close pool and run async tasks    
            pool.close()
            pool.join()
        
        else:
            for i, submodel in enumerate(submodels):
                # TODO: be robust against changes of num weights!
                # TODO: release GIL in tracking python wrappers to allow parallel solving!!
                _getLogger().info("Tracking submodel {}/{}".format(i, len(submodels)))
                
                if withMergerResolver:
                    results.append(dpct.trackMaxFlow(submodel, weights))
                else:
                    results.append(dpct.trackFlowBased(submodel, weights))
            
        # merge results
        # make detection weight higher, or accumulate energy over tracks (but what to do with mergers then?),
        # or contract everything where source-node, link and destination have the same number of objects?
        # We choose the last option.
        _getLogger().info("Setting up model for stitching")
        tracklets = []
        links = []
        stitchingModel = {'segmentationHypotheses': tracklets, 'linkingHypotheses': links, 'divisionHypotheses' : [], 'settings' : model['settings']}
        nodeIdRemapping = {}
        valuePerDetection = {}
    
        modelIdx = 0
        for submodel, result in zip(submodels, results):            
            divisionsPerDetection = {}
            
            # find connected components of graph where edges are only inserted if the value of the nodes agrees with the value along the link
            g = nx.Graph()
            for d in result['detectionResults']:
                valuePerDetection[d['id']] = d['value']
                if 'divisionValue' in d and d['divisionValue']:
                    divisionsPerDetection[d['id']] = True
                else:
                    divisionsPerDetection[d['id']] = False
                g.add_node(d['id'])
    
            for l in result['linkingResults']:
                s, d = l['src'], l['dest']
                if divisionsPerDetection[s] is False and valuePerDetection[s] == l['value'] and valuePerDetection[d] == l['value']:
                    g.add_edge(s, d)
    
            # for every connected component, insert a node into the stitching graph
            connectedComponents = nx.connected_components(g)
            _getLogger().info("Contracting tracks of submodel {}/{}".format(modelIdx, len(submodels)))
    
            for c in connectedComponents:
                # sum over features of dets + links
                linkFeatures = [link['features'] for idTuple, link in linksByIdTuple.items() if idTuple[0] in c and idTuple[1] in c]
                detFeatures = [detectionsById[i]['features'] for i in c]
                accumulatedFeatures = np.sum([hytra.core.jsongraph.delistify(f) for f in linkFeatures + detFeatures], axis=0)
    
                # Get tracklet ids from nodes at start and end times of tracklets
                minTime = None
                maxTime = None
    
                for n in c:
                    if maxTime is None or detectionsById[n]['nid'][0] > maxTime:
                        maxTime = detectionsById[n]['nid'][0]
                        maxTrackletId = n
                        
                    if minTime is None or detectionsById[n]['nid'][0] < minTime:
                        minTime = detectionsById[n]['nid'][0]
                        minTrackletId = n 
                    
                contractedNode = {
                    'id' : minTrackletId, 
                    'contains' : c,
                    'nid' : detectionsById[minTrackletId]['nid'],
                    'minUid' : minTrackletId,
                    'maxUid' : maxTrackletId,
                    'features' : hytra.core.jsongraph.listify(accumulatedFeatures)
                }
                                
                # Add appearance/disappearance features of endpoints
                # TODO: Check if this is correct for the case of mergers
                if 'appearanceFeatures' in detectionsById[minTrackletId]:
                    contractedNode['appearanceFeatures'] = detectionsById[minTrackletId]['appearanceFeatures']
                if 'disappearanceFeatures' in detectionsById[maxTrackletId]:
                    contractedNode['disappearanceFeatures'] = detectionsById[maxTrackletId]['disappearanceFeatures']
                            
                if 'divisionFeatures' in detectionsById[max(c)]:
                    contractedNode['divisionFeatures'] = detectionsById[max(c)]['divisionFeatures']
                tracklets.append(contractedNode)
    
                for n in c:
                    nodeIdRemapping[n] = minTrackletId
    
            # add the remaining links to the stitching graph with adjusted source and destination
            for l in result['linkingResults']:
                s, d = l['src'], l['dest']
                if l['value'] > 0 and (valuePerDetection[s] != l['value'] or valuePerDetection[d] != l['value'] or divisionsPerDetection[s]):
                    newL = {
                        'src' : nodeIdRemapping[s],
                        'dest' : nodeIdRemapping[d],
                        'features' : linksByIdTuple[(s, d)]['features']
                    }
    
                    links.append(newL)
            modelIdx += 1
        _getLogger().info("\tgot {} links from within the submodels".format(len(links)))
    
        # insert all edges crossing the splits that connect active detections
        detectionIdsPerTimestep = dict( [(k, [d['id'] for d in v]) for k, v in detectionsPerTimestep.items()])
        for splitPoint in splitPoints[:-1]:
            for idTuple, link in linksByIdTuple.items():
                s, d = idTuple
                if s in detectionIdsPerTimestep[splitPoint] and d in detectionIdsPerTimestep[splitPoint + 1] and valuePerDetection[s] > 0 and valuePerDetection[d] > 0:
                    newL = copy.deepcopy(link)
                    newL['src'] = nodeIdRemapping[s]
                    newL['dest'] = nodeIdRemapping[d]
                    links.append(newL)
    
        # Running solver for compressed tracklet model
        _getLogger().info("\t contains {} nodes and {} edges".format(len(tracklets), len(links)))
        if withMergerResolver:
            stitchingResult = dpct.trackMaxFlow(stitchingModel, weights)
        else:
            stitchingResult = dpct.trackFlowBased(stitchingModel, weights)
        
        # Extracting full result
        trackletsById = dict([(t['id'], t) for t in tracklets])
        fullResult = {'detectionResults' : [], 'linkingResults' : [], 'divisionResults' : []}
        
        for dr in stitchingResult['detectionResults']:
            v = dr['value'] 
            t = trackletsById[dr['id']]
            if v > 0:
                for originalUuid in t['contains']:
                    fullResult['detectionResults'].append({'id': originalUuid, 'value': v})
                for s, d in linksByIdTuple.keys():
                    if s in t['contains'] and d in t['contains']:
                        fullResult['linkingResults'].append({'src': s, 'dest' : d, 'value': v})
            else:
                _getLogger().warning("Skipped detection {} while stitching!".format(t))
    
        for lr in stitchingResult['linkingResults']:
            v = lr['value'] 
            st = trackletsById[lr['src']]
            dt = trackletsById[lr['dest']]

            if v > 0:
                fullResult['linkingResults'].append({'src': st['maxUid'], 'dest' : dt['minUid'], 'value': v})

        # Adding missing links with value set to 0 to the final result
        nodeFlowMap = dict([(int(d['id']), int(d['value'])) for d in fullResult['detectionResults']])
        arcFlowMap = dict([((int(l['src']), int(l['dest'])), int(l['value'])) for l in fullResult['linkingResults']])
        
        for detection in model['segmentationHypotheses']:
            if int(detection['id']) not in nodeFlowMap:
                fullResult['detectionResults'].append({'id': detection['id'], 'value': 0})
        
        for link in model['linkingHypotheses']:
            if (int(link['src']), int(link['dest'])) not in arcFlowMap:
                fullResult['linkingResults'].append({'src': link['src'], 'dest' : link['dest'], 'value': 0})
    
        return fullResult
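To make the split-point heuristic above concrete, a toy illustration with hypothetical per-frame costs (the heuristic prefers cutting where two consecutive frames have the highest summed non-singleton cost, i.e. where mergers are least likely):

import numpy as np

nonSingletonCostsPerFrame = [5.0, 4.0, 9.0, 1.0, 2.0, 8.0, 7.0, 3.0]  # hypothetical values
gap = [i + j for i, j in zip(nonSingletonCostsPerFrame[:-1], nonSingletonCostsPerFrame[1:])]
# gap == [9.0, 13.0, 10.0, 3.0, 10.0, 15.0, 10.0]

numFramesPerSplit, border = 4, 1
desiredSplitPoint = 1 * numFramesPerSplit   # first (and here only) desired split
subrange = np.array(gap[desiredSplitPoint - border:desiredSplitPoint + border])  # gap[3:5]
splitPoint = desiredSplitPoint - border + int(np.argmax(subrange))
print(splitPoint)  # 4 -> gap[4] == 10.0 beats gap[3] == 3.0 inside the search window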
Example #7
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the scripts as subprocesses.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    if options.do_ctc_groundtruth_conversion:
        logging.info("Convert CTC groundtruth to our format...")
        check_call([
            "python",
            os.path.abspath("ctc/ctc_gt_to_hdf5.py"), "--config",
            options.config_file
        ])

    if options.do_ctc_raw_data_conversion:
        logging.info("Convert CTC raw data to HDF5...")
        check_call([
            "python",
            os.path.abspath("ctc/stack_to_h5.py"), "--config",
            options.config_file
        ])

    if options.do_ctc_segmentation_conversion:
        logging.info("Convert CTC segmentation to HDF5...")
        check_call([
            "python",
            os.path.abspath("ctc/segmentation_to_hdf5.py"), "--config",
            options.config_file
        ])

    if options.do_train_transition_classifier:
        logging.info("Train transition classifier...")
        check_call([
            "python",
            os.path.abspath("train_transition_classifier.py"), "--config",
            options.config_file
        ])

    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        check_call([
            "python",
            os.path.abspath("tracking_ilp_to_weights.py"), "--config",
            options.config_file
        ])

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        check_call([
            "python",
            os.path.abspath("hypotheses_graph_to_json.py"), "--config",
            options.config_file
        ])

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        check_call([
            "python",
            os.path.abspath("convexify_costs.py"), "--config",
            options.config_file
        ])

    if options.do_tracking:
        logging.info("Run tracking...")

        if options.tracking_executable is not None:
            check_call([
                options.tracking_executable, "-m", options.model_filename,
                "-w", options.weight_filename, "-o", options.result_filename
            ])
        else:
            import commentjson as json
            import dpct
            import hytra.core.jsongraph

            with open(options.model_filename, 'r') as f:
                model = json.load(f)

            with open(options.weight_filename, 'r') as f:
                weights = json.load(f)

            result = dpct.trackFlowBased(model, weights)
            hytra.core.jsongraph.writeToFormattedJSON(options.result_filename,
                                                      result)

    extra_params = []
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        check_call([
            "python",
            os.path.abspath("run_merger_resolving.py"), "--config",
            options.config_file
        ])

        for p in [
                "--out-graph-json-file", "--out-label-image-file",
                "--out-result-json-file"
        ]:
            index = unknown.index(p)
            extra_params.append(p.replace('--out-', '--'))
            extra_params.append(unknown[index + 1])

    if options.export_format is not None:
        logging.info("Convert result to {}...".format(options.export_format))
        if options.export_format in ['ilastikH5', 'ctc']:
            check_call([
                "python",
                os.path.abspath("json_result_to_events.py"), "--config",
                options.config_file
            ] + extra_params)
            if options.export_format == 'ctc':
                check_call([
                    "python",
                    os.path.abspath("ctc/hdf5_to_ctc.py"), "--config",
                    options.config_file
                ] + extra_params)
        elif options.export_format == 'labelimage':
            check_call([
                "python",
                os.path.abspath("json_result_to_labelimage.py"), "--config",
                options.config_file
            ] + extra_params)
        else:
            logging.error("Unknown export format chosen!")
            raise ValueError("Unknown export format chosen!")
Example #8
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the scripts as subprocesses.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    if options.do_ctc_groundtruth_conversion:
        logging.info("Convert CTC groundtruth to our format...")
        check_call(["python", os.path.abspath("ctc/ctc_gt_to_hdf5.py"), "--config", options.config_file])

    if options.do_ctc_raw_data_conversion:
        logging.info("Convert CTC raw data to HDF5...")
        check_call(["python", os.path.abspath("ctc/stack_to_h5.py"), "--config", options.config_file])

    if options.do_ctc_segmentation_conversion:
        logging.info("Convert CTC segmentation to HDF5...")
        check_call(["python", os.path.abspath("ctc/segmentation_to_hdf5.py"), "--config", options.config_file])

    if options.do_train_transition_classifier:
        logging.info("Train transition classifier...")
        check_call(["python", os.path.abspath("train_transition_classifier.py"), "--config", options.config_file])

    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        check_call(["python", os.path.abspath("tracking_ilp_to_weights.py"), "--config", options.config_file])

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")
        check_call(["python", os.path.abspath("hypotheses_graph_to_json.py"), "--config", options.config_file])

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        check_call(["python", os.path.abspath("convexify_costs.py"), "--config", options.config_file])

    if options.do_tracking:
        logging.info("Run tracking...")

        if options.tracking_executable is not None:
            check_call([options.tracking_executable,
                        "-m", options.model_filename,
                        "-w", options.weight_filename,
                        "-o", options.result_filename])
        else:
            try:
                import commentjson as json
            except ImportError:
                import json
            
            import hytra.core.jsongraph
            with open(options.model_filename, 'r') as f:
                model = json.load(f)

            with open(options.weight_filename, 'r') as f:
                weights = json.load(f)
            
            if options.solver == "flow-based":
                import dpct
                result = dpct.trackFlowBased(model, weights)
            elif options.solver == "ilp":
                try:
                    import multiHypoTracking_with_cplex as mht
                except ImportError:
                    try:
                        import multiHypoTracking_with_gurobi as mht
                    except ImportError:
                        raise ImportError("Could not find multi hypotheses tracking ilp solver")
                result = mht.track(model, weights)

            hytra.core.jsongraph.writeToFormattedJSON(options.result_filename, result)

    extra_params = []
    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        check_call(["python", os.path.abspath("run_merger_resolving.py"), "--config", options.config_file])

        for p in ["--out-graph-json-file", "--out-label-image-file", "--out-result-json-file"]:
            index = unknown.index(p)
            extra_params.append(p.replace('--out-', '--'))
            extra_params.append(unknown[index + 1])

    if options.export_format is not None:
        logging.info("Convert result to {}...".format(options.export_format))
        if options.export_format in ['ilastikH5', 'ctc']:
            check_call(["python", os.path.abspath("json_result_to_events.py"), "--config", options.config_file] + extra_params)
            if options.export_format == 'ctc':
                check_call(["python", os.path.abspath("ctc/hdf5_to_ctc.py"), "--config", options.config_file] + extra_params)
        elif options.export_format == 'labelimage':
            check_call(["python", os.path.abspath("json_result_to_labelimage.py"), "--config", options.config_file] + extra_params)
        else:
            logging.error("Unknown export format chosen!")
            raise ValueError("Unknown export format chosen!")
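For reference, a hedged sketch of the argparse wiring such a pipeline script presumably uses; the flag names mirror the attributes read above, but the real parser lives in the surrounding script and may differ:

import argparse

parser = argparse.ArgumentParser(description="Cell tracking pipeline")
parser.add_argument('--config', dest='config_file')
parser.add_argument('--do-tracking', dest='do_tracking', action='store_true')
parser.add_argument('--do-merger-resolving', dest='do_merger_resolving', action='store_true')
parser.add_argument('--export-format', dest='export_format', default=None)
# ... one flag per do_* / *_filename attribute read in run_pipeline ...
options, unknown = parser.parse_known_args()  # unknown carries the extra config entries
run_pipeline(options, unknown)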
Example #9
    def track(self,
            time_range,
            x_range,
            y_range,
            z_range,
            size_range=(0, 100000),
            x_scale=1.0,
            y_scale=1.0,
            z_scale=1.0,
            maxDist=30,     
            maxObj=2,       
            divThreshold=0.5,
            avgSize=[0],                        
            withTracklets=False,
            sizeDependent=True,
            divWeight=10.0,
            transWeight=10.0,
            withDivisions=True,
            withOpticalCorrection=True,
            withClassifierPrior=False,
            ndim=3,
            cplex_timeout=None,
            withMergerResolution=True,
            borderAwareWidth=0.0,
            withArmaCoordinates=True,
            appearance_cost=500,
            disappearance_cost=500,
            motionModelWeight=10.0,
            force_build_hypotheses_graph=False,
            max_nearest_neighbors=2,
            withBatchProcessing=False,
            solverName="Flow-based"
            ):
        """
        Main conservation tracking function. Runs tracking solver, generates hypotheses graph, and resolves mergers.
        """
        
        if not self.Parameters.ready():
            raise Exception("Parameter slot is not ready")
        
        # it is assumed that the self.Parameters object is changed only at this
        # place (ugly assumption). Therefore we can track any changes in the
        # parameters as done in the following lines: if the same value for a
        # key is already written in the parameters dictionary, the
        # parameters_changed dictionary will get a "False" entry for this key,
        # otherwise it is set to "True"
        parameters = self.Parameters.value

        parameters['maxDist'] = maxDist
        parameters['maxObj'] = maxObj
        parameters['divThreshold'] = divThreshold
        parameters['avgSize'] = avgSize
        parameters['withTracklets'] = withTracklets
        parameters['sizeDependent'] = sizeDependent
        parameters['divWeight'] = divWeight
        parameters['transWeight'] = transWeight
        parameters['withDivisions'] = withDivisions
        parameters['withOpticalCorrection'] = withOpticalCorrection
        parameters['withClassifierPrior'] = withClassifierPrior
        parameters['withMergerResolution'] = withMergerResolution
        parameters['borderAwareWidth'] = borderAwareWidth
        parameters['withArmaCoordinates'] = withArmaCoordinates
        parameters['appearanceCost'] = appearance_cost
        parameters['disappearanceCost'] = disappearance_cost       
        parameters['scales'] = [x_scale, y_scale, z_scale]
        parameters['time_range'] = [min(time_range), max(time_range)]
        parameters['x_range'] = x_range
        parameters['y_range'] = y_range
        parameters['z_range'] = z_range
        parameters['max_nearest_neighbors'] = max_nearest_neighbors
        parameters['solver'] = str(solverName)

        # Set a size range with a minimum area equal to the max number of objects
        # (the GMM throws an error if we try to fit more Gaussians than there are pixels in the object)
        size_range = (max(maxObj, size_range[0]), size_range[1])
        parameters['size_range'] = size_range

        if cplex_timeout:
            parameters['cplex_timeout'] = cplex_timeout
        else:
            parameters['cplex_timeout'] = ''
            cplex_timeout = float(1e75)
        
        self.Parameters.setValue(parameters, check_changed=False)
        
        if withClassifierPrior:
            if not self.DetectionProbabilities.ready() or len(self.DetectionProbabilities([0]).wait()[0]) == 0:
                raise DatasetConstraintError('Tracking', 'Classifier not ready yet. Did you forget to train the Object Count Classifier?')
            if not self.NumLabels.ready() or self.NumLabels.value < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
            if len(self.DetectionProbabilities([0]).wait()[0][0]) < (maxObj + 1):
                raise DatasetConstraintError('Tracking', 'The max. number of objects must be consistent with the number of labels given in Object Count Classification.\n' +\
                    'Check whether you have (i) the correct number of label names specified in Object Count Classification, and (ii) provided at least ' +\
                    'one training example for each class.')
        
        hypothesesGraph = self._createHypothesesGraph()

        if withTracklets:
            hypothesesGraph = hypothesesGraph.generateTrackletGraph()

        hypothesesGraph.insertEnergies()
        trackingGraph = hypothesesGraph.toTrackingGraph()
        trackingGraph.convexifyCosts()
        model = trackingGraph.model

        detWeight = 10.0 # FIXME: Should we store this weight in the parameters slot?
        weights = trackingGraph.weightsListToDict([transWeight, detWeight, divWeight, appearance_cost, disappearance_cost])

        if solverName == 'Flow-based' and dpct:
            result = dpct.trackFlowBased(model, weights)
        elif solverName == 'ILP' and mht:
            result = mht.track(model, weights)
        else:
            raise ValueError("Invalid tracking solver selected")
        
        # Insert the solution into the hypotheses graph and from that deduce the lineages
        if hypothesesGraph:
            hypothesesGraph.insertSolution(result)
            
        # Merger resolution
        resolvedMergersDict = {}
        if withMergerResolution:
            resolvedMergersDict = self._resolveMergers(hypothesesGraph, model)
        
        # Set value of resolved mergers slot (Should be empty if mergers are disabled)         
        self.ResolvedMergers.setValue(resolvedMergersDict, check_changed=False)
                
        # Computing tracking lineage IDs from within Hytra
        logger.info("Computing hypotheses graph lineages")
        hypothesesGraph.computeLineage()

        # Uncomment to export a hypotheses graph diagram
        #logger.info("Exporting hypotheses graph diagram")
        #from hytra.util.hypothesesgraphdiagram import HypothesesGraphDiagram
        #hgv = HypothesesGraphDiagram(hypothesesGraph._graph, timeRange=(0, 10), fileName='HypothesesGraph.png' )
                
        # Set value of hypotheses graph slot (use referenceTraxelGraph if using tracklets)
        hypothesesGraph = hypothesesGraph.referenceTraxelGraph if withTracklets else hypothesesGraph
        self.HypothesesGraph.setValue(hypothesesGraph, check_changed=False)

        # Set all the output slots dirty (See execute() function)
        self.Output.setDirty()
        self.MergerOutput.setDirty()
        self.RelabeledImage.setDirty()

        # Get events vector (only used when saving old h5 events file)
        events = self._getEventsVector(result, model)
        self.EventsVector.setValue(events, check_changed=False)
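A note on the weight ordering above: assuming weightsListToDict simply wraps the list into the weight dictionary the solvers expect, the call should be equivalent to the literal form used elsewhere in these examples:

# Hedged sketch; the ordering is taken from the call in the example above.
weights = trackingGraph.weightsListToDict(
    [transWeight, detWeight, divWeight, appearance_cost, disappearance_cost])
# presumably equivalent to:
# weights = {"weights": [10.0, 10.0, 10.0, 500, 500]}  (with the default arguments)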
Example #10
        _getLogger().info("Creating submodel from t={} to t={}...".format(
            lastSplit, splitPoint + 1))
        submodels.append(getSubmodel(lastSplit, splitPoint + 1))
        _getLogger().info("\t contains {} nodes and {} edges".format(
            len(submodels[-1]['segmentationHypotheses']),
            len(submodels[-1]['linkingHypotheses'])))
        lastSplit = splitPoint + 1

    # run tracking (in parallel??)
    results = []
    import dpct
    for i, submodel in enumerate(submodels):
        # TODO: be robust against changes of num weights!
        # TODO: release GIL in tracking python wrappers to allow parallel solving!!
        _getLogger().info("Tracking submodel {}/{}".format(i, len(submodels)))
        results.append(dpct.trackFlowBased(submodel, weights))

    # merge results
    # make detection weight higher, or accumulate energy over tracks (but what to do with mergers then?),
    # or contract everything where source-node, link and destination have the same number of objects?
    # We choose the last option.
    _getLogger().info("Setting up model for stitching")
    tracklets = []
    links = []
    stitchingModel = {
        'segmentationHypotheses': tracklets,
        'linkingHypotheses': links,
        'divisionHypotheses': [],
        'settings': model['settings']
    }
    nodeIdRemapping = {}
Example #11
def run_pipeline(options, unknown):
    """
    Run the complete tracking pipeline by invoking the different steps.
    Using the `do-SOMETHING` switches one can configure which parts of the pipeline are run.

    **Params:**

    * `options`: the options of the tracking script as returned from argparse
    * `unknown`: unknown parameters read from the config file, needed in case merger resolving is supposed to be run.

    """

    params = convertToDict(unknown)

    if options.do_extract_weights:
        logging.info("Extracting weights from ilastik project...")
        weights = hytra.core.ilastik_project_options.extractWeightDictFromIlastikProject(
            options.ilastik_tracking_project)
    else:
        with open(options.weight_filename, 'r') as f:
            weights = json.load(f)

    if options.do_create_graph:
        logging.info("Create hypotheses graph...")

        import hytra.core.probabilitygenerator as probabilitygenerator
        from hytra.core.ilastik_project_options import IlastikProjectOptions
        ilpOptions = IlastikProjectOptions()
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except KeyError:
            ilpOptions.rawImageAxes = 'txyzc'

        ilpOptions.sizeFilter = [int(params[str('min-size')]), 100000]

        if 'object-count-classifier-file' in params:
            ilpOptions.objectCountClassifierFilename = params[str(
                'object-count-classifier-file')]
        else:
            ilpOptions.objectCountClassifierFilename = options.ilastik_tracking_project

        withDivisions = 'without-divisions' not in params
        if withDivisions:
            if 'division-classifier-file' in params:
                ilpOptions.divisionClassifierFilename = params[str(
                    'division-classifier-file')]
            else:
                ilpOptions.divisionClassifierFilename = options.ilastik_tracking_project
        else:
            ilpOptions.divisionClassifierFilename = None

        probGenerator = probabilitygenerator.IlpProbabilityGenerator(
            ilpOptions,
            pluginPaths=[str('../hytra/plugins')],
            useMultiprocessing=False)

        # if time_range is not None:
        #     traxelstore.timeRange = time_range

        probGenerator.fillTraxels(usePgmlink=False)
        fieldOfView = constructFov(
            probGenerator.shape, probGenerator.timeRange[0],
            probGenerator.timeRange[1], [
                probGenerator.x_scale, probGenerator.y_scale,
                probGenerator.z_scale
            ])

        hypotheses_graph = IlastikHypothesesGraph(
            probabilityGenerator=probGenerator,
            timeRange=probGenerator.timeRange,
            maxNumObjects=int(params[str('max-number-objects')]),
            numNearestNeighbors=int(params[str('max-nearest-neighbors')]),
            fieldOfView=fieldOfView,
            withDivisions=withDivisions,
            divisionThreshold=0.1)

        withTracklets = True
        if withTracklets:
            hypotheses_graph = hypotheses_graph.generateTrackletGraph()

        hypotheses_graph.insertEnergies()
        trackingGraph = hypotheses_graph.toTrackingGraph()
    else:
        trackingGraph = JsonTrackingGraph(
            model_filename=options.model_filename)
        hypotheses_graph = None

    if options.do_convexify:
        logging.info("Convexifying graph energies...")
        trackingGraph.convexifyCosts()

    # get model out of trackingGraph
    model = trackingGraph.model

    if options.do_tracking:
        logging.info("Run tracking...")
        if options.solver == "flow-based":
            result = dpct.trackFlowBased(model, weights)
        elif options.solver == "ilp":
            try:
                import multiHypoTracking_with_cplex as mht
            except ImportError:
                try:
                    import multiHypoTracking_with_gurobi as mht
                except ImportError:
                    raise ImportError(
                        "Could not find multi hypotheses tracking ilp solver")
            result = mht.track(model, weights)

        hytra.core.jsongraph.writeToFormattedJSON(options.result_filename,
                                                  result)

        if hypotheses_graph:
            # insert the solution into the hypotheses graph and from that deduce the lineages
            hypotheses_graph.insertSolution(result)
            hypotheses_graph.computeLineage()

    if options.do_merger_resolving:
        logging.info("Run merger resolving")
        # configure the label/raw image options before constructing the resolver
        ilpOptions.labelImagePath = params[str('label-image-path')]
        ilpOptions.labelImageFilename = params[str('label-image-file')]
        ilpOptions.rawImagePath = params[str('raw-data-path')]
        ilpOptions.rawImageFilename = params[str('raw-data-file')]
        try:
            ilpOptions.rawImageAxes = params[str('raw-data-axes')]
        except KeyError:
            ilpOptions.rawImageAxes = 'txyzc'
        trackingGraph = JsonTrackingGraph(model=model, result=result)
        merger_resolver = JsonMergerResolver(
            trackingGraph, ilpOptions.labelImageFilename,
            ilpOptions.labelImagePath, params[str('out-label-image-file')],
            ilpOptions.rawImageFilename, ilpOptions.rawImagePath,
            ilpOptions.rawImageAxes, [str('../hytra/plugins')], True)
        merger_resolver.run(None, None)
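Example #12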
def test_twoSegmentations():
    # set up ConflictingSegmentsProbabilityGenerator
    ilpOptions = IlastikProjectOptions()
    ilpOptions.divisionClassifierPath = None
    ilpOptions.divisionClassifierFilename = None
    
    ilpOptions.rawImageFilename = 'tests/multiSegmentationHypothesesTestDataset/Raw.h5'
    ilpOptions.rawImagePath = 'exported_data'
    ilpOptions.rawImageAxes = 'txyzc'

    ilpOptions.labelImageFilename = 'tests/multiSegmentationHypothesesTestDataset/segmentation.h5'

    ilpOptions.objectCountClassifierFilename = 'tests/multiSegmentationHypothesesTestDataset/tracking.ilp'

    additionalLabelImageFilenames = ['tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5']
    additionalLabelImagePaths = [ilpOptions.labelImagePath]

    probabilityGenerator = ConflictingSegmentsProbabilityGenerator(
        ilpOptions, 
        additionalLabelImageFilenames,
        additionalLabelImagePaths,
        useMultiprocessing=False,
        verbose=False)
    probabilityGenerator.fillTraxels(usePgmlink=False)

    assert(len(probabilityGenerator.TraxelsPerFrame[0]) == 4)
    assert(len(probabilityGenerator.TraxelsPerFrame[1]) == 3)
    assert(len(probabilityGenerator.TraxelsPerFrame[2]) == 3)
    assert(len(probabilityGenerator.TraxelsPerFrame[3]) == 4)
    filenamesPerTraxel = [t.segmentationFilename for t in probabilityGenerator.TraxelsPerFrame[3].values()]
    idsPerTraxel = [t.idInSegmentation for t in probabilityGenerator.TraxelsPerFrame[3].values()]
    assert(idsPerTraxel.count(1) == 2)
    assert(idsPerTraxel.count(2) == 2)
    assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentation.h5') == 2)
    assert(filenamesPerTraxel.count('tests/multiSegmentationHypothesesTestDataset/segmentationAlt.h5') == 2)

    # build hypotheses graph, check that conflicting traxels are properly detected
    fieldOfView = constructFov(probabilityGenerator.shape,
                               probabilityGenerator.timeRange[0],
                               probabilityGenerator.timeRange[1],
                               [probabilityGenerator.x_scale,
                                probabilityGenerator.y_scale,
                                probabilityGenerator.z_scale])
 
    hypotheses_graph = IlastikHypothesesGraph(
        probabilityGenerator=probabilityGenerator,
        timeRange=probabilityGenerator.timeRange,
        maxNumObjects=1,
        numNearestNeighbors=2,
        fieldOfView=fieldOfView,
        withDivisions=False,
        divisionThreshold=0.1
    )

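    # 4 + 3 + 3 + 4 traxels over the four frames yield 14 graph nodes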
    assert(hypotheses_graph.countNodes() == 14)
    assert(hypotheses_graph.countArcs() == 23)
    assert(hypotheses_graph._graph.node[(0, 1)]['traxel'].conflictingTraxelIds == [3])
    assert(hypotheses_graph._graph.node[(0, 3)]['traxel'].conflictingTraxelIds == [1])
    assert(hypotheses_graph._graph.node[(0, 2)]['traxel'].conflictingTraxelIds == [4])
    assert(hypotheses_graph._graph.node[(0, 4)]['traxel'].conflictingTraxelIds == [2])
    assert(hypotheses_graph._graph.node[(1, 1)]['traxel'].conflictingTraxelIds == [2, 3])
    assert(hypotheses_graph._graph.node[(1, 2)]['traxel'].conflictingTraxelIds == [1])
    assert(hypotheses_graph._graph.node[(1, 3)]['traxel'].conflictingTraxelIds == [1])

    # track, but check that the right exclusion constraints are present
    hypotheses_graph.insertEnergies()
    trackingGraph = hypotheses_graph.toTrackingGraph()

    assert(len(trackingGraph.model['exclusions']) == 8)
    for exclusionSet in trackingGraph.model['exclusions']:
        assert(len(exclusionSet) == 2)

    # use multiHypoTracking if available, it supports exclusion constraints
    if mht is not None:
        result = mht.track(trackingGraph.model, {"weights": [10, 10, 500, 500]})
    else:
        # standard dpct cannot handle exclusion constraints yet, so there is
        # nothing to verify without an ILP solver
        return

    hypotheses_graph.insertSolution(result)
    # hypotheses_graph.computeLineage()

    numActivePerFrame = {}

    for node in hypotheses_graph.nodeIterator():
        timeframe = node[0]
        if 'value' in hypotheses_graph._graph.node[node]:
            value = hypotheses_graph._graph.node[node]['value']
        else:
            value = 0
        numActivePerFrame.setdefault(timeframe, []).append(value)

    for _, v in numActivePerFrame.items():
        assert(sum(v) == 2)

    edgeFlow = 0
    for edge in hypotheses_graph.arcIterator():
        if 'value' in hypotheses_graph._graph.edge[edge[0]][edge[1]]:
            edgeFlow += hypotheses_graph._graph.edge[edge[0]][edge[1]]['value']
    assert(edgeFlow == 6)
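
The test above relies on module-level names `dpct` and `mht` that are not shown in this excerpt. A plausible guard, following the same cplex/gurobi fallback pattern used throughout these examples, would be the following (the `mht = None` fallback is an assumption, chosen so the `if mht is not None` check in the test works):

import dpct

# assumed module-level import guard: try both ILP backends, fall back to None
try:
    import multiHypoTracking_with_cplex as mht
except ImportError:
    try:
        import multiHypoTracking_with_gurobi as mht
    except ImportError:
        mht = None
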
Beispiel #14
0
		},
		{ "id" : 3, "timestep" : [1,1], "features" : [[1.0], [0.0]], "divisionFeatures" : [[0], [-5]], "appearanceFeatures" : [[0], [0]], "disappearanceFeatures" : [[0], [50]]},
		{ "id" : 4, "timestep" : [2,2], "features" : [[1.0], [0.0]], "appearanceFeatures" : [[0], [50]], "disappearanceFeatures" : [[0], [-2]]},
		{ "id" : 5, "timestep" : [2,2], "features" : [[1.0], [0.0]], "appearanceFeatures" : [[0], [50]], "disappearanceFeatures" : [[0], [-2]]},
		{ "id" : 6, "timestep" : [2,2], "features" : [[1.0], [0.0]], "appearanceFeatures" : [[0], [50]], "disappearanceFeatures" : [[0], [-4]]}
	],

	"linkingHypotheses" : [
		{ "src" : 2, "dest" : 4, "features" : [[0], [-4]]},
		{ "src" : 2, "dest" : 5, "features" : [[0], [-3]]},
		{ "src" : 3, "dest" : 5, "features" : [[0], [-1]]},
		{ "src" : 3, "dest" : 6, "features" : [[0], [-4]]}
	]
}

res = dpct.trackFlowBased(graph, weights)

expectedResult = {'detectionResults': [{'id': 2, 'value': 1},
  {'id': 3, 'value': 1},
  {'id': 4, 'value': 1},
  {'id': 5, 'value': 1},
  {'id': 6, 'value': 1}],
 'divisionResults': [{'id': 2, 'value': True},
  {'id': 3, 'value': False},
  {'id': 4, 'value': False},
  {'id': 5, 'value': False},
  {'id': 6, 'value': False}],
 'linkingResults': [{'dest': 4, 'src': 2, 'value': 1},
  {'dest': 5, 'src': 2, 'value': 1},
  {'dest': 5, 'src': 3, 'value': 0},
  {'dest': 6, 'src': 3, 'value': 1}]}
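
The example defines `expectedResult` but stops before comparing it to `res`. A minimal check could canonicalize both dictionaries before comparing, since the solver is not guaranteed to return the result lists in any particular order (the `canonical` helper below is a sketch, not part of the dpct API):

def canonical(trackingResult):
    # sort each result list into a stable order so the dicts compare reliably
    return {
        'detectionResults': sorted(trackingResult['detectionResults'],
                                   key=lambda d: d['id']),
        'divisionResults': sorted(trackingResult['divisionResults'],
                                  key=lambda d: d['id']),
        'linkingResults': sorted(trackingResult['linkingResults'],
                                 key=lambda d: (d['src'], d['dest'])),
    }

assert canonical(res) == canonical(expectedResult)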